prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars)
---|---|---
from sklearn.datasets import load_breast_cancer, fetch_california_housing
import pandas as pd
import numpy as np
import pickle
import os
import collections
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
def handle_categorical_feat(X_df):
'''Move the categorical (object-dtype) features to the end of the dataframe.'''
original_columns = []
one_hot_columns = []
for col_name, dtype in zip(X_df.dtypes.index, X_df.dtypes):
if dtype == object:
one_hot_columns.append(col_name)
else:
original_columns.append(col_name)
X_df = X_df[original_columns + one_hot_columns]
return X_df, one_hot_columns
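# Usage sketch (illustrative only; `df_demo` is a made-up frame, not part of any loader below):
#   df_demo = pd.DataFrame({'age': [25, 40], 'city': ['NY', 'LA'], 'income': [50., 80.]})
#   reordered, cat_cols = handle_categorical_feat(df_demo)
#   # reordered.columns -> ['age', 'income', 'city'];  cat_cols -> ['city']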
def load_breast_data():
breast = load_breast_cancer()
feature_names = list(breast.feature_names)
X, y = pd.DataFrame(breast.data, columns=feature_names), pd.Series(breast.target)
dataset = {
'problem': 'classification',
'full': {
'X': X,
'y': y,
},
'd_name': 'breast',
'search_lam': np.logspace(-1, 2.5, 15),
}
return dataset
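# Sketch of how a returned dataset dict might be consumed (assumed caller; the imported
# StratifiedShuffleSplit suggests this pattern, but the real training code is not shown here):
#   d = load_breast_data()
#   splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
#   train_idx, test_idx = next(splitter.split(d['full']['X'], d['full']['y']))
#   X_train, y_train = d['full']['X'].iloc[train_idx], d['full']['y'].iloc[train_idx]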
def load_adult_data():
# https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
df = pd.read_csv("./datasets/adult.data", header=None)
df.columns = [
"Age", "WorkClass", "fnlwgt", "Education", "EducationNum",
"MaritalStatus", "Occupation", "Relationship", "Race", "Gender",
"CapitalGain", "CapitalLoss", "HoursPerWeek", "NativeCountry", "Income"
]
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols].copy()
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy()
# Encode the label as 0 or 1
y_df.loc[y_df == ' >50K'] = 1.
y_df.loc[y_df == ' <=50K'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'adult',
'search_lam': np.logspace(-2, 2, 15),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
return dataset
def load_credit_data():
# https://www.kaggle.com/mlg-ulb/creditcardfraud
df = pd.read_csv(r'./datasets/creditcard.csv')
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'credit',
'search_lam': np.logspace(-0.5, 2.5, 8),
}
return dataset
def load_churn_data():
# https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
df = pd.read_csv(r'./datasets/WA_Fn-UseC_-Telco-Customer-Churn.csv')
train_cols = df.columns[1:-1] # First column is an ID
label = df.columns[-1]
X_df = df[train_cols].copy()
# Handle special case of TotalCharges wrongly read in as object dtype
X_df.loc[X_df['TotalCharges'] == ' ', 'TotalCharges'] = 0.
X_df.loc[:, 'TotalCharges'] = pd.to_numeric(X_df['TotalCharges'])
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy() # 'Yes, No'
# Encode the label as 0 or 1
y_df[y_df == 'Yes'] = 1.
y_df[y_df == 'No'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'churn',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_pneumonia_data(folder='/media/intdisk/medical/RaniHasPneumonia/'):
featurename_file = os.path.join(folder, 'featureNames.txt')
col_names = pd.read_csv(featurename_file, delimiter='\t', header=None, index_col=0).iloc[:, 0].values
def read_data(file_path='pneumonia/RaniHasPneumonia/medis9847c.data'):
df = pd.read_csv(file_path, delimiter='\t', header=None)
df = df.iloc[:, :-1] # Remove the trailing empty column
df.columns = col_names
return df
df_train = read_data(os.path.join(folder, 'medis9847c.data'))
df_test = read_data(os.path.join(folder, 'medis9847c.test'))
df = pd.concat([df_train, df_test], axis=0).reset_index(drop=True)
X_df = df.iloc[:, :-1]
y_df = df.iloc[:, -1]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'test_size': 4352 / 14199,
'd_name': 'pneumonia',
'search_lam': np.logspace(0, 3, 15),
}
return dataset
def load_heart_data():
# https://www.kaggle.com/sonumj/heart-disease-dataset-from-uci
df = pd.read_csv('./datasets/HeartDisease.csv')
label = df.columns[-2]
train_cols = list(df.columns[1:-2]) + [df.columns[-1]]
X_df = df[train_cols]
y_df = df[label]
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
# Impute missing values with 0
X_df = X_df.apply(lambda col: col if col.dtype == object else col.fillna(0.), axis=0)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'heart',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_mimiciii_data():
df_adult = pd.read_csv('./datasets/adult_icu.gz', compression='gzip')
train_cols = [
'age', 'first_hosp_stay', 'first_icu_stay', 'adult_icu', 'eth_asian',
'eth_black', 'eth_hispanic', 'eth_other', 'eth_white',
'admType_ELECTIVE', 'admType_EMERGENCY', 'admType_NEWBORN',
'admType_URGENT', 'heartrate_min', 'heartrate_max', 'heartrate_mean',
'sysbp_min', 'sysbp_max', 'sysbp_mean', 'diasbp_min', 'diasbp_max',
'diasbp_mean', 'meanbp_min', 'meanbp_max', 'meanbp_mean',
'resprate_min', 'resprate_max', 'resprate_mean', 'tempc_min',
'tempc_max', 'tempc_mean', 'spo2_min', 'spo2_max', 'spo2_mean',
'glucose_min', 'glucose_max', 'glucose_mean', 'aniongap', 'albumin',
'bicarbonate', 'bilirubin', 'creatinine', 'chloride', 'glucose',
'hematocrit', 'hemoglobin', 'lactate', 'magnesium', 'phosphate',
'platelet', 'potassium', 'ptt', 'inr', 'pt', 'sodium', 'bun', 'wbc']
label = 'mort_icu'
X_df = df_adult[train_cols]
y_df = df_adult[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'mimiciii',
'search_lam': np.logspace(0, 3, 15),
}
return dataset
def load_mimicii_data():
cols = ['Age', 'GCS', 'SBP', 'HR', 'Temperature',
'PFratio', 'Renal', 'Urea', 'WBC', 'CO2', 'Na', 'K',
'Bilirubin', 'AdmissionType', 'AIDS',
'MetastaticCancer', 'Lymphoma', 'HospitalMortality']
table = pd.read_csv('./datasets/mimic2.data', delimiter=' ', header=None)
table.columns = cols
X_df = table.iloc[:, :-1]
y_df = table.iloc[:, -1]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'mimicii',
'search_lam': np.logspace(-2, 3.5, 15),
}
return dataset
def load_diabetes2_data(load_cache=False):
cache_dataset_path = './datasets/diabetes_cache.pkl'
if load_cache and os.path.exists(cache_dataset_path):
print('Found cached diabetes dataset. Loading from cache.')
with open(cache_dataset_path, 'rb') as fp:
dataset = pickle.load(fp)
return dataset
df = pd.read_csv('./datasets/dataset_diabetes/diabetic_data.csv')
x_cols = df.columns[2:-1]
y_col = df.columns[-1]
X_df = df[x_cols].copy()
y_df = df[y_col].copy()
y_df.loc[(y_df == 'NO') | (y_df == '>30')] = 0
y_df.loc[y_df == '<30'] = 1
# is_false = (y_df == 'NO')
# y_df.loc[is_false] = 0
# y_df.loc[~is_false] = 1
y_df = y_df.astype(int)
# Preprocess X
X_df.loc[:, 'age'] = X_df.age.apply(lambda s: (int(s[1:s.index('-')]) + int(s[(s.index('-') + 1):-1])) / 2).astype(int)
X_df.loc[:, 'weight'] = X_df.weight.apply(lambda s: 0. if s == '?' else ((float(s[1:s.index('-')]) + float(s[(s.index('-') + 1):-1])) / 2 if '-' in s else float(s[1:])))
X_df.loc[:, 'admission_source_id'] = X_df.admission_source_id.astype('object')
X_df.loc[:, 'admission_type_id'] = X_df.admission_type_id.astype('object')
X_df.loc[:, 'discharge_disposition_id'] = X_df.discharge_disposition_id.astype('object')
X_df.loc[:, 'change'] = X_df.change.apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'diabetesMed'] = X_df.diabetesMed.apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'metformin-pioglitazone'] = X_df['metformin-pioglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'metformin-rosiglitazone'] = X_df['metformin-rosiglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'glipizide-metformin'] = X_df['glipizide-metformin'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'troglitazone'] = X_df['troglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'tolbutamide'] = X_df['tolbutamide'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'acetohexamide'] = X_df['acetohexamide'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df = X_df.drop(['citoglipton', 'examide'], axis=1) # These columns only contain 'No' in the data
# diag_combined = X_df.apply(lambda x: set(
# [x.diag_1 for i in range(1) if x.diag_1 != '?'] + [x.diag_2 for i in range(1) if x.diag_2 != '?'] + [x.diag_3 for i in range(1) if x.diag_3 != '?']
# ), axis=1)
# diag_combined = diag_combined.apply(collections.Counter)
# diag_multihot_encode = pd.DataFrame.from_records(diag_combined).fillna(value=0).astype(np.uint8)
# diag_multihot_encode.columns = ['diag_%s' % str(c) for c in diag_multihot_encode.columns]
X_df = X_df.drop(['diag_1', 'diag_2', 'diag_3'], axis=1)
# X_df = pd.concat([X_df, diag_multihot_encode], axis=1)
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'diabetes2',
'search_lam': np.logspace(-3, 2, 8),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
with open(cache_dataset_path, 'wb') as op:
pickle.dump(dataset, op)
return dataset
def load_TCGA_data(test_split=0.33, n_splits=20, cosmic=True, random_state=1377, **kwargs):
np.random.seed(random_state)
filename = 'pancancer_cosmic.npz' if cosmic else 'pancancer_parsed.npz'
x = np.load('datasets/TCGA/%s' % filename)['arr_0']
# log transform
x_df = pd.DataFrame(np.log10(x + 1))
# append the column name
transcript_names_path = 'transcript_names_cosmic' if cosmic else 'transcript_names'
x_df.columns = np.load('datasets/TCGA/%s.npy' % transcript_names_path)
# remove the columns with std as 0
x_df = x_df.loc[:, (x.std(axis=0) > 0.)]
covars = pd.read_csv('datasets/TCGA/potential_covariates.tsv', delimiter='\t')
covars['label'] = np.logical_or(covars[['sample_type']] == 'Primary Blood Derived Cancer - Peripheral Blood',
np.logical_or(covars[['sample_type']] == 'Additional Metastatic',
np.logical_or(covars[['sample_type']] == 'Recurrent Tumor',
np.logical_or(covars[['sample_type']] == 'Additional - New Primary',
np.logical_or(covars[['sample_type']] == 'Metastatic',
covars[['sample_type']] == 'Primary Tumor')))))
stratify_lookup = covars.groupby('submitter_id').label.apply(lambda x: len(x))
covars['stratify'] = covars.submitter_id.apply(lambda x: stratify_lookup[x])
covars = covars[['submitter_id', 'label', 'stratify']]
covars['patient_idxes'] = list(range(covars.shape[0]))
def group_shuffle_split():
for _ in range(n_splits):
train_lookups = []
for num_record, df2 in covars.groupby('stratify'):
train_lookup = df2.groupby('submitter_id').apply(lambda x: True)
# randomly flip a test_split fraction of them to False (these become the test set)
all_idxes = np.arange(len(train_lookup))
np.random.shuffle(all_idxes)
is_test_idxes = all_idxes[:int(len(train_lookup) * test_split)]
train_lookup[is_test_idxes] = False
train_lookups.append(train_lookup)
train_lookups = pd.concat(train_lookups)
covars['is_train'] = covars.submitter_id.apply(lambda x: train_lookups[x])
train_idxes = covars.patient_idxes[covars.is_train].values
test_idxes = covars.patient_idxes[~covars.is_train].values
yield train_idxes, test_idxes
y = covars['label'].astype(float)
stratify = covars['stratify']
dataset = {
'problem': 'classification',
'full': {
'X': x_df,
'y': y,
'ss': group_shuffle_split(),
},
'd_name': 'TCGA-cosmic' if cosmic else 'TCGA-full',
}
return dataset
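# Sketch of consuming the grouped splits (assumed usage; the actual cross-validation loop lives
# outside this file). Each yielded pair keeps all samples of a patient (submitter_id) on one side:
#   d = load_TCGA_data(n_splits=2)
#   for train_idx, test_idx in d['full']['ss']:
#       X_tr, y_tr = d['full']['X'].iloc[train_idx], d['full']['y'].iloc[train_idx]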
def load_support2cls2_data():
# http://biostat.mc.vanderbilt.edu/wiki/Main/DataSets
df = pd.read_csv('./datasets/support2/support2.csv')  # api: pandas.read_csv
#!/usr/bin/env python
# coding: utf-8
# ## Aim: Collect and combine relevant mouse expression data from the Stemformatics data portal
#
# Link: https://www.stemformatics.org/workbench/download_multiple_datasets.
#
# Stemformatics is an established gene expression data portal containing over 420 public gene expression datasets derived from microarray, RNA sequencing and single cell profiling technologies. It includes curated ‘collections’ of data relevant to cell reprogramming, as well as hematopoiesis and leukaemia.
#
# ### Samples
#
# Set the search field to 'species' and use 'Mus musculus' as the search key.
#
# ### Processing steps
#
# - Sample selection
# - Combine selected datasets based on platforms
# - Combine all selected datasets
# In[1]:
import pandas as pd
import numpy as np
import atlas
import handler
import requests
# In[2]:
# inspect the samples metadata
samples = pd.read_csv('/Users/monica/Downloads/export_metadata_samples_v7.2.4.tsv', sep='\t', index_col=2)
samples.head()
# Many of the samples are not healthy blood cells (e.g. iPSCs, AML samples etc.). We will need to select for healthy blood cells from the metadata, and download only the selected samples.
# ### Sample selection
# In[3]:
def select_samples(samples):
'''This function takes the Stemformatics samples metadata and returns samples that are annotated to be blood cells.'''
pos_selected_id = []
neg_selected_id = []
patterns_pos = ['lymp', '[Hh]aem', '[Hh]em', 'HSC','[Mm]ono', '[Bb]-*\ *cell', '[Mm]yelo', 'killer',
'NK', '[Mm]eg', '[Bb]aso', '[Nn]eut', '[Ee]os', '[Pp]las', '[Ee]ryt', '\W[Tt]-*\ *cell', 'DC', '[Dd]endri',
'phage', 'macr']
patterns_neg = ['iPS', 'MSC', 'AML', 'reprogram', 'MAPC', 'KO', 'endothelial', 'LPS', 'mutant', 'Dusp', 'LCMV', 'LSK', 'Chaudhury', 'BLSP',
'Bruttger']
for col in samples.columns:
l = samples[samples[col].notna()]
for p in patterns_pos:
pos_selected_id += l[(l[col].astype(str).str.contains(p) == True)].index.tolist()
for n in patterns_neg:
neg_selected_id += l[(l[col].astype(str).str.contains(n) == True)].index.tolist()
selected = samples.loc[samples.index.isin(set(pos_selected_id))]
return selected.loc[~selected.index.isin(set(neg_selected_id))]
# In[4]:
selected_samples = select_samples(samples)
print(selected_samples.shape)
selected_samples.head() # 324 samples are selected
# ### Combine datasets based on platforms
# In[5]:
# add platform information to the samples metadata
datasets = pd.read_csv('/Users/monica/Downloads/export_metadata_datasets_v7.2.4.tsv', sep='\t', index_col=0)
selected_ds = datasets[datasets.index.isin(set(selected_samples.ds_id))]
selected_samples = pd.merge(selected_samples, selected_ds[['description', 'platform']], left_on='ds_id', right_index=True)
selected_samples.columns
# In[6]:
# Inspect the distribution of platforms from which the samples data were generated
selected_samples.groupby(['platform', 'ds_id']).size()
# In[7]:
# Group selected ds_ids based on platforms
# Note that platforms with insufficient dataset representation and small sample sizes, including Illumina Ref-6, GPL81, MoGene2,
# and MoEx1, are excluded.
RNAseq_id = [7224, 7267, 6655, 6767]
Illu_MouseWG6_id = [6637, 7291]
Affy_Mouse430_id = [6498, 6658, 6659, 6756, 6988, 6087, 6108, 6300]
Affy_MoGene1_id = [6264, 6310, 6313, 6455, 6831, 7131]
# In[8]:
def replace_probes(df, probe_mapping):
'''
Input: A microarray expression dataframe and a probe mapping table.
Output: The expression dataframe with index changed from probe ids to ensembl gene ids according to the supplied mapping table.
'''
probe_mapping.columns = ['ensembl']
return pd.merge(df, probe_mapping, how='inner', left_index=True, right_index=True).set_index('ensembl')
def find_common_genes(dfs):
'''
Input: A list of expression dataframes.
Output: A list of common genes that appear in all dataframes.
'''
common_genes = dfs[0].index
for df in dfs:
common_genes = common_genes.intersection(df.index)
return common_genes
def merge_columns(dfs, common_genes):
'''
Input: A list of expression dataframes and a list of common genes.
Output: A combined dataframe of all supplied datasets, keeping only the common genes.
'''
matrix = []
for df in dfs:
df = df.loc[df.index.isin(common_genes)] # Filter by common genes probed in all datasets
m = df.groupby(df.index).first() # Resolve rows with duplicate indexes by keeping the first occurrence
matrix.append(m)
return pd.concat(matrix, axis=1)
def merge_by_platform(id_list, probe_mapping, platform):
'''
This function takes a list of dataset ids and an optional probe mapping table, downloads the corresponding expression
datasets from the Stemformatics API, and, if a probe mapping is supplied, replaces each dataframe's index from probe ids
to gene ids according to the probe mapping table. The loaded datasets are combined column-wise and returned.
'''
serverURL = 'api.stemformatics.org'
headers = {'Content-type': 'application/json'}
load_failed = []
dfs = []
for ds_id in id_list:
try:
result = requests.get('https://%s/expression/%s/raw' % (serverURL, ds_id), headers=headers, verify=False).json()
df = pd.DataFrame(result['data'], index=result['index'], columns=result['columns'])
if probe_mapping is not None:
df = replace_probes(df, probe_mapping)
dfs.append(df)
except ValueError:
load_failed.append(ds_id)
continue
print('Successfully loaded {} datasets: {}'.format(platform, [i for i in id_list if i not in load_failed]))
return pd.concat(dfs, axis=1)  # api: pandas.concat
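# Sketch of combining each platform group with merge_by_platform (assumed calls; the probe-mapping
# tables for the microarray platforms, e.g. `illu_probe_map`, are hypothetical names loaded elsewhere):
#   rnaseq_df = merge_by_platform(RNAseq_id, None, 'RNA-seq')  # RNA-seq data is already gene-indexed
#   illu_df = merge_by_platform(Illu_MouseWG6_id, illu_probe_map, 'Illumina MouseWG-6')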
import warnings
from collections import Counter
from typing import Dict
from unittest.mock import patch
import numpy as np
import pandas as pd
import pyarrow
import pytest
from pandas import DataFrame
import ray
from ray.data import Dataset
from ray.data.aggregate import Max
from ray.data.preprocessor import Preprocessor, PreprocessorNotFittedException
from ray.data.preprocessors import (
BatchMapper,
Chain,
CustomStatefulPreprocessor,
LabelEncoder,
MinMaxScaler,
OneHotEncoder,
OrdinalEncoder,
SimpleImputer,
StandardScaler,
)
from ray.data.preprocessors.encoder import Categorizer, MultiHotEncoder
from ray.data.preprocessors.hasher import FeatureHasher
from ray.data.preprocessors.normalizer import Normalizer
from ray.data.preprocessors.scaler import MaxAbsScaler, RobustScaler
from ray.data.preprocessors.tokenizer import Tokenizer
from ray.data.preprocessors.transformer import PowerTransformer
from ray.data.preprocessors.utils import simple_hash, simple_split_tokenizer
from ray.data.preprocessors.vectorizer import CountVectorizer, HashingVectorizer
@pytest.fixture
def create_dummy_preprocessors():
class DummyPreprocessorWithNothing(Preprocessor):
_is_fittable = False
class DummyPreprocessorWithPandas(DummyPreprocessorWithNothing):
def _transform_pandas(self, df: "pd.DataFrame") -> "pd.DataFrame":
return df
class DummyPreprocessorWithArrow(DummyPreprocessorWithNothing):
def _transform_arrow(self, table: "pyarrow.Table") -> "pyarrow.Table":
return table
class DummyPreprocessorWithPandasAndArrow(DummyPreprocessorWithNothing):
def _transform_pandas(self, df: "pd.DataFrame") -> "pd.DataFrame":
return df
def _transform_arrow(self, table: "pyarrow.Table") -> "pyarrow.Table":
return table
yield (
DummyPreprocessorWithNothing(),
DummyPreprocessorWithPandas(),
DummyPreprocessorWithArrow(),
DummyPreprocessorWithPandasAndArrow(),
)
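# Assumed usage of the fixture above (not part of this excerpt): pytest injects it by name, and the
# four dummy preprocessors can be unpacked to exercise each transform path (no-op, pandas, arrow, both).
#   def test_transform_paths(create_dummy_preprocessors):
#       with_nothing, with_pandas, with_arrow, with_both = create_dummy_preprocessors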
def test_standard_scaler():
"""Tests basic StandardScaler functionality."""
col_a = [-1, 0, 1, 2]
col_b = [1, 1, 5, 5]
col_c = [1, 1, 1, None]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = StandardScaler(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
scaler.transform(ds)
# Fit data.
scaler.fit(ds)
assert scaler.stats_ == {
"mean(B)": 3.0,
"mean(C)": 1.0,
"std(B)": 2.0,
"std(C)": 0.0,
}
# Transform data.
transformed = scaler.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [-1.0, -1.0, 1.0, 1.0]
processed_col_c = [0.0, 0.0, 0.0, None]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = [1, 2, 3]
pred_col_b = [3, 5, 7]
pred_col_c = [0, 1, 2]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = scaler.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0.0, 1.0, 2.0]
pred_processed_col_c = [-1.0, 0.0, 1.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
@patch.object(warnings, "warn")
def test_fit_twice(mocked_warn):
"""Tests that a warning msg should be printed."""
col_a = [-1, 0, 1]
col_b = [1, 3, 5]
col_c = [1, 1, None]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = MinMaxScaler(["B", "C"])
# Fit data.
scaler.fit(ds)
assert scaler.stats_ == {"min(B)": 1, "max(B)": 5, "min(C)": 1, "max(C)": 1}
ds = ds.map_batches(lambda x: x * 2)
# Fit again
scaler.fit(ds)
# Assert that the fitted state corresponds to the second ds.
assert scaler.stats_ == {"min(B)": 2, "max(B)": 10, "min(C)": 2, "max(C)": 2}
msg = (
"`fit` has already been called on the preprocessor (or at least one "
"contained preprocessors if this is a chain). "
"All previously fitted state will be overwritten!"
)
mocked_warn.assert_called_once_with(msg)
def test_min_max_scaler():
"""Tests basic MinMaxScaler functionality."""
col_a = [-1, 0, 1]
col_b = [1, 3, 5]
col_c = [1, 1, None]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = MinMaxScaler(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
scaler.transform(ds)
# Fit data.
scaler.fit(ds)
assert scaler.stats_ == {"min(B)": 1, "max(B)": 5, "min(C)": 1, "max(C)": 1}
transformed = scaler.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [0.0, 0.5, 1.0]
processed_col_c = [0.0, 0.0, None]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = [1, 2, 3]
pred_col_b = [3, 5, 7]
pred_col_c = [0, 1, 2]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = scaler.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0.5, 1.0, 1.5]
pred_processed_col_c = [-1.0, 0.0, 1.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
def test_max_abs_scaler():
"""Tests basic MaxAbsScaler functionality."""
col_a = [-1, 0, 1]
col_b = [1, 3, -5]
col_c = [1, 1, None]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = MaxAbsScaler(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
scaler.transform(ds)
# Fit data.
scaler.fit(ds)
assert scaler.stats_ == {"abs_max(B)": 5, "abs_max(C)": 1}
transformed = scaler.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [0.2, 0.6, -1.0]
processed_col_c = [1.0, 1.0, None]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = [1, 2, 3]
pred_col_b = [3, 5, 7]
pred_col_c = [0, 1, -2]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = scaler.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0.6, 1.0, 1.4]
pred_processed_col_c = [0.0, 1.0, -2.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
def test_robust_scaler():
"""Tests basic RobustScaler functionality."""
col_a = [-2, -1, 0, 1, 2]
col_b = [-2, -1, 0, 1, 2]
col_c = [-10, 1, 2, 3, 10]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = RobustScaler(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
scaler.transform(ds)
# Fit data.
scaler.fit(ds)
assert scaler.stats_ == {
"low_quantile(B)": -1,
"median(B)": 0,
"high_quantile(B)": 1,
"low_quantile(C)": 1,
"median(C)": 2,
"high_quantile(C)": 3,
}
transformed = scaler.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [-1.0, -0.5, 0, 0.5, 1.0]
processed_col_c = [-6, -0.5, 0, 0.5, 4]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = [1, 2, 3]
pred_col_b = [3, 5, 7]
pred_col_c = [0, 1, 2]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = scaler.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [1.5, 2.5, 3.5]
pred_processed_col_c = [-1.0, -0.5, 0.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
def test_ordinal_encoder():
"""Tests basic OrdinalEncoder functionality."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
col_d = [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c, "D": col_d})
ds = ray.data.from_pandas(in_df)
encoder = OrdinalEncoder(["B", "C", "D"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
encoder.transform(ds)
# Fit data.
encoder.fit(ds)
assert encoder.stats_ == {
"unique_values(B)": {"cold": 0, "hot": 1, "warm": 2},
"unique_values(C)": {1: 0, 5: 1, 10: 2},
"unique_values(D)": {"cold": 0, "hot": 1, "warm": 2},
}
# Transform data.
transformed = encoder.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [2, 0, 1, 0]
processed_col_c = [0, 2, 1, 2]
processed_col_d = [[2], [], [1, 2, 0], [0, 0]]
expected_df = pd.DataFrame.from_dict(
{
"A": processed_col_a,
"B": processed_col_b,
"C": processed_col_c,
"D": processed_col_d,
}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = ["blue", "yellow", None]
pred_col_b = ["cold", "warm", "other"]
pred_col_c = [10, 1, 20]
pred_col_d = [["cold", "warm"], [], ["other", "cold"]]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c, "D": pred_col_d}
)
pred_out_df = encoder.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0, 2, None]
pred_processed_col_c = [2, 0, None]
pred_processed_col_d = [[0, 2], [], [None, 0]]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
"D": pred_processed_col_d,
}
)
assert pred_out_df.equals(pred_expected_df)
# Test null behavior.
null_col = [1, None]
nonnull_col = [1, 1]
null_df = pd.DataFrame.from_dict({"A": null_col})
null_ds = ray.data.from_pandas(null_df)
nonnull_df = pd.DataFrame.from_dict({"A": nonnull_col})
nonnull_ds = ray.data.from_pandas(nonnull_df)
null_encoder = OrdinalEncoder(["A"])
# Verify fit fails for null values.
with pytest.raises(ValueError):
null_encoder.fit(null_ds)
null_encoder.fit(nonnull_ds)
# Verify transform fails for null values.
with pytest.raises(ValueError):
null_encoder.transform(null_ds)
null_encoder.transform(nonnull_ds)
# Verify transform_batch fails for null values.
with pytest.raises(ValueError):
null_encoder.transform_batch(null_df)
null_encoder.transform_batch(nonnull_df)
def test_ordinal_encoder_no_encode_list():
"""Tests OrdinalEncoder with encode_lists=False."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
col_d = [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c, "D": col_d})
ds = ray.data.from_pandas(in_df)
encoder = OrdinalEncoder(["B", "C", "D"], encode_lists=False)
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
encoder.transform(ds)
# Fit data.
encoder.fit(ds)
assert encoder.stats_ == {
"unique_values(B)": {"cold": 0, "hot": 1, "warm": 2},
"unique_values(C)": {1: 0, 5: 1, 10: 2},
"unique_values(D)": {
tuple(): 0,
("cold", "cold"): 1,
("hot", "warm", "cold"): 2,
("warm",): 3,
},
}
# Transform data.
print("transform")
transformed = encoder.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [2, 0, 1, 0]
processed_col_c = [0, 2, 1, 2]
processed_col_d = [3, 0, 2, 1]
expected_df = pd.DataFrame.from_dict(
{
"A": processed_col_a,
"B": processed_col_b,
"C": processed_col_c,
"D": processed_col_d,
}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = ["blue", "yellow", None]
pred_col_b = ["cold", "warm", "other"]
pred_col_c = [10, 1, 20]
pred_col_d = [["cold", "cold"], [], ["other", "cold"]]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c, "D": pred_col_d}
)
pred_out_df = encoder.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0, 2, None]
pred_processed_col_c = [2, 0, None]
pred_processed_col_d = [1, 0, None]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
"D": pred_processed_col_d,
}
)
assert pred_out_df.equals(pred_expected_df)
def test_one_hot_encoder():
"""Tests basic OneHotEncoder functionality."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
col_d = [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c, "D": col_d})
ds = ray.data.from_pandas(in_df)
encoder = OneHotEncoder(["B", "C", "D"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
encoder.transform(ds)
# Fit data.
encoder.fit(ds)
assert encoder.stats_ == {
"unique_values(B)": {"cold": 0, "hot": 1, "warm": 2},
"unique_values(C)": {1: 0, 5: 1, 10: 2},
"unique_values(D)": {
tuple(): 0,
("cold", "cold"): 1,
("hot", "warm", "cold"): 2,
("warm",): 3,
},
}
# Transform data.
transformed = encoder.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b_cold = [0, 1, 0, 1]
processed_col_b_hot = [0, 0, 1, 0]
processed_col_b_warm = [1, 0, 0, 0]
processed_col_c_1 = [1, 0, 0, 0]
processed_col_c_5 = [0, 0, 1, 0]
processed_col_c_10 = [0, 1, 0, 1]
processed_col_d_empty = [0, 1, 0, 0]
processed_col_d_cold_cold = [0, 0, 0, 1]
processed_col_d_hot_warm_cold = [0, 0, 1, 0]
processed_col_d_warm = [1, 0, 0, 0]
expected_df = pd.DataFrame.from_dict(
{
"A": processed_col_a,
"B_cold": processed_col_b_cold,
"B_hot": processed_col_b_hot,
"B_warm": processed_col_b_warm,
"C_1": processed_col_c_1,
"C_5": processed_col_c_5,
"C_10": processed_col_c_10,
"D_()": processed_col_d_empty,
"D_('cold', 'cold')": processed_col_d_cold_cold,
"D_('hot', 'warm', 'cold')": processed_col_d_hot_warm_cold,
"D_('warm',)": processed_col_d_warm,
}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = ["blue", "yellow", None]
pred_col_b = ["cold", "warm", "other"]
pred_col_c = [10, 1, 20]
pred_col_d = [["cold", "cold"], [], ["other", "cold"]]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c, "D": pred_col_d}
)
pred_out_df = encoder.transform_batch(pred_in_df)
pred_processed_col_a = ["blue", "yellow", None]
pred_processed_col_b_cold = [1, 0, 0]
pred_processed_col_b_hot = [0, 0, 0]
pred_processed_col_b_warm = [0, 1, 0]
pred_processed_col_c_1 = [0, 1, 0]
pred_processed_col_c_5 = [0, 0, 0]
pred_processed_col_c_10 = [1, 0, 0]
processed_col_d_empty = [0, 1, 0]
processed_col_d_cold_cold = [1, 0, 0]
processed_col_d_hot_warm_cold = [0, 0, 0]
processed_col_d_warm = [0, 0, 0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B_cold": pred_processed_col_b_cold,
"B_hot": pred_processed_col_b_hot,
"B_warm": pred_processed_col_b_warm,
"C_1": pred_processed_col_c_1,
"C_5": pred_processed_col_c_5,
"C_10": pred_processed_col_c_10,
"D_()": processed_col_d_empty,
"D_('cold', 'cold')": processed_col_d_cold_cold,
"D_('hot', 'warm', 'cold')": processed_col_d_hot_warm_cold,
"D_('warm',)": processed_col_d_warm,
}
)
assert pred_out_df.equals(pred_expected_df)
# Test null behavior.
null_col = [1, None]
nonnull_col = [1, 1]
null_df = pd.DataFrame.from_dict({"A": null_col})
null_ds = ray.data.from_pandas(null_df)
nonnull_df = pd.DataFrame.from_dict({"A": nonnull_col})
nonnull_ds = ray.data.from_pandas(nonnull_df)
null_encoder = OneHotEncoder(["A"])
# Verify fit fails for null values.
with pytest.raises(ValueError):
null_encoder.fit(null_ds)
null_encoder.fit(nonnull_ds)
# Verify transform fails for null values.
with pytest.raises(ValueError):
null_encoder.transform(null_ds)
null_encoder.transform(nonnull_ds)
# Verify transform_batch fails for null values.
with pytest.raises(ValueError):
null_encoder.transform_batch(null_df)
null_encoder.transform_batch(nonnull_df)
def test_one_hot_encoder_with_limit():
"""Tests basic OneHotEncoder functionality with limit."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
encoder = OneHotEncoder(["B", "C"], limit={"B": 2})
ds_out = encoder.fit_transform(ds)
assert len(ds_out.to_pandas().columns) == 1 + 2 + 3
def test_multi_hot_encoder():
"""Tests basic MultiHotEncoder functionality."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
col_d = [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c, "D": col_d})
ds = ray.data.from_pandas(in_df)
encoder = MultiHotEncoder(["B", "C", "D"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
encoder.transform(ds)
# Fit data.
encoder.fit(ds)
assert encoder.stats_ == {
"unique_values(B)": {"cold": 0, "hot": 1, "warm": 2},
"unique_values(C)": {1: 0, 5: 1, 10: 2},
"unique_values(D)": {"cold": 0, "hot": 1, "warm": 2},
}
# Transform data.
transformed = encoder.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]]
processed_col_c = [[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]
processed_col_d = [[0, 0, 1], [0, 0, 0], [1, 1, 1], [2, 0, 0]]
expected_df = pd.DataFrame.from_dict(
{
"A": processed_col_a,
"B": processed_col_b,
"C": processed_col_c,
"D": processed_col_d,
}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = ["blue", "yellow", None]
pred_col_b = ["cold", "warm", "other"]
pred_col_c = [10, 1, 20]
pred_col_d = [["cold", "warm"], [], ["other", "cold"]]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c, "D": pred_col_d}
)
pred_out_df = encoder.transform_batch(pred_in_df)
print(pred_out_df.to_string())
pred_processed_col_a = ["blue", "yellow", None]
pred_processed_col_b = [[1, 0, 0], [0, 0, 1], [0, 0, 0]]
pred_processed_col_c = [[0, 0, 1], [1, 0, 0], [0, 0, 0]]
pred_processed_col_d = [[1, 0, 1], [0, 0, 0], [1, 0, 0]]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
"D": pred_processed_col_d,
}
)
assert pred_out_df.equals(pred_expected_df)
# Test null behavior.
null_col = [1, None]
nonnull_col = [1, 1]
null_df = pd.DataFrame.from_dict({"A": null_col})
null_ds = ray.data.from_pandas(null_df)
nonnull_df = pd.DataFrame.from_dict({"A": nonnull_col})
nonnull_ds = ray.data.from_pandas(nonnull_df)
null_encoder = OneHotEncoder(["A"])
# Verify fit fails for null values.
with pytest.raises(ValueError):
null_encoder.fit(null_ds)
null_encoder.fit(nonnull_ds)
# Verify transform fails for null values.
with pytest.raises(ValueError):
null_encoder.transform(null_ds)
null_encoder.transform(nonnull_ds)
# Verify transform_batch fails for null values.
with pytest.raises(ValueError):
null_encoder.transform_batch(null_df)
null_encoder.transform_batch(nonnull_df)
def test_multi_hot_encoder_with_limit():
"""Tests basic MultiHotEncoder functionality with limit."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
col_d = [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c, "D": col_d})  # api: pandas.DataFrame.from_dict
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright [2020] [Indian Institute of Science, Bangalore]
SPDX-License-Identifier: Apache-2.0
"""
__name__ = "Instantiate a city and dump instantiations as json"
import os, sys
import json
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import time
#Data-processing Functions
from modules.processDemographics import *
from modules.processGeoData import *
# Functions to instantiate individuals to houses, schools, workplaces and community centres
from modules.assignHouses import *
from modules.assignSchools import *
from modules.assignWorkplaces import *
# get the city and target population as inputs
def instantiate(city, targetPopulation, averageStudents, averageWorkforce):
#create directory to store parsed data
path = "data/%s_population%s_students%s"%(city, targetPopulation, averageStudents)
if not os.path.exists(path):
os.mkdir(path)
targetPopulation = int(targetPopulation)
averageStudents = int(averageStudents)
averageWorkforce = float(averageWorkforce)
print("processing data ready ...")
start = time.time()
cityGeojson = "data/base/"+city+"/city.geojson"
cityGeoDF = parse_geospatial_data(cityGeojson)
if "cityProfile.json" in os.listdir("data/base/"+city):
cityProfile = "data/base/"+city+"/cityProfile.json"
ageDistribution, householdDistribution, schoolDistribution, householdSizes, maxWorkplaceDistance = process_city_profile(cityProfile)
demographicsData = pd.read_csv("data/base/"+city+"/demographics.csv")
housesData = pd.read_csv("data/base/"+city+"/households.csv")
employmentData = pd.read_csv("data/base/"+city+"/employment.csv")
print("processing data completed completed in ", time.time() - start)
print("getting parameters ready ...")
start = time.time()
demographicsData = process_data(demographicsData, housesData, employmentData, targetPopulation, ageDistribution)
totalPopulation = demographicsData['totalPopulation'].values.sum()
unemployed_fraction = demographicsData['unemployed'].values.sum() / (demographicsData['employed'].values.sum() + demographicsData['unemployed'].values.sum())
totalNumberOfWards = len(demographicsData['wardNo'].values)
averageHouseholds = totalPopulation / demographicsData['totalHouseholds'].values.sum()
commonArea = commonAreaLocation(cityGeoDF)
print("getting parameters ready completed in ", time.time() - start)
#assignment of individuals to households
print("instantiating individuals to households...")
start = time.time()
print("computed unemployment fraction = ", unemployed_fraction)
households, individuals = assign_individuals_to_houses(targetPopulation, totalNumberOfWards, ageDistribution, householdSizes, householdDistribution, unemployed_fraction)
print("instantiating individuals to households completed in ", time.time() - start)
print("instantiating individual location by house location...")
start = time.time()
households, individuals = houseLocation(cityGeoDF, individuals, households)
print("instantiating individual location by house location completed in ", time.time() - start)
individuals = individuals.sort_values("id")
individuals = individuals.drop_duplicates()
households = households.sort_values("id")
individuals.to_json(path+"/individuals.json", orient='records')
households[['id', 'wardNo' ,'lat', 'lon']].to_json(path+"/houses.json", orient='records')
individuals = individuals.drop_duplicates()
#split the individuals by workplace type
individuals = {name: individuals.loc[individuals['workplaceType'] == name, :] for name in individuals['workplaceType'].unique()}
print("instantiating individuals to workplaces...")
start = time.time()
workplaces, individuals[1] = assign_workplaces(cityGeoDF, individuals[1], maxWorkplaceDistance)
print("instantiating individuals to workplaces completed in ", time.time() - start)
print("instantiating individuals to schools...")
start = time.time()
individuals[2], schools = assign_schools(individuals[2], cityGeoDF, schoolDistribution,averageStudents)
print("instantiating individuals to schools completed in ", time.time() - start)
#join the individuals
individuals = pd.concat(individuals.values(), ignore_index=True)
print("additonal data processing...")
start = time.time()
#associate individuals to common areas (by distance)
def getDistances(row, cc):
houseNo = row['household']
lat1 = row['lat']
lon1 = row['lon']
# lat1 = households.loc[households['id']==houseNo, 'lat'].values[0]
# lon1 = households.loc[households['id']==houseNo, 'lon'].values[0]
assignedWard = row["wardNo"]
lat2 = cc.loc[cc['wardNo']==assignedWard, 'lat'].values[0]
lon2 = cc.loc[cc['wardNo']==assignedWard, 'lon'].values[0]
return distance(lat1, lon1, lat2, lon2)
#Combining the IDs for schools and workplaces
schoolID = schools['ID'].values[-1]
workplaceID = [schoolID+1 + index for index in workplaces['ID'].values]
workplaces['ID'] = workplaceID
individuals = individuals.sort_values("id")
households = households.sort_values("wardIndex")
workplaces = workplaces.sort_values("ID")
schools = schools.sort_values("ID")
commonArea = commonArea.sort_values("wardIndex")
individuals['CommunityCentreDistance'] = individuals.apply(getDistances, axis=1, args=(commonArea,))
demographicsData['fracPopulation'] = demographicsData.apply(lambda row: row['totalPopulation']/demographicsData['totalPopulation'].values.sum(), axis=1)
print("additonal data processing completed in ", time.time() - start)
flag = (len(np.where(pd.isnull(individuals.loc[individuals['workplaceType']<0.5,'school'])==False)[0])==0 and \
len(np.where(pd.isnull(individuals.loc[individuals['workplaceType']<0.5,'workplace'])==False)[0])==0 and \
len(np.where(pd.isnull(individuals.loc[individuals['workplaceType']>1.5,'workplace'])==False)[0])==0 and \
len(np.where(pd.isnull(individuals.loc[individuals['workplaceType']<1.5,'school'])==False)[0])==0 and \
len(np.where(pd.isnull(individuals.loc[individuals['workplaceType']>1.5,'school'])==True)[0])==0 and \
len(np.where(pd.isnull(individuals.loc[individuals['workplaceType']==1,'workplace'])  # api: pandas.isnull
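# Sketch of a driver call for the function above (assumed values; the real entry point that reads the
# city name and target population from the command line is not part of this excerpt):
#   instantiate('bangalore', 100000, 300, 0.5)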
"""
Methods to edit existing protocols to add:
- new traces
- reversal ramp (activating prepulse + deactivating ramp)
- interleaved pulse trains
"""
import pyabf
import numpy as np
import pandas as pd
import glob
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
plt.style.use("dark_background")
cmap = plt.cm.get_cmap("coolwarm")
# where output will be saved, if saving is enabled
out_dir = "./data/protocols/"
# whether to save output
save_output = False
def find_abf_epochs(fname):
data_dir = r"C:/Users/delbe/Downloads/wut/wut/Post_grad/UBC/Research/lab/data_files/delbert/"
fname += ".abf"
for root, dirs, files in os.walk(data_dir):
for name in files:
if fname in name:
path = os.path.join(root, name)
break
# open abf file
abf = pyabf.ABF(path)
# return epochs
p1s = []
levels = []
for i in abf.sweepList:
abf.setSweep(i)
p1s.append(abf.sweepEpochs.p1s)
levels.append(abf.sweepEpochs.levels)
return p1s, levels
class edit_existing_protocol():
def __init__(self, fname, out_name, csv_path=None):
if csv_path is None:
csv_path = r"C:/Users/delbe/Downloads/wut/wut/Post_grad/UBC/Research/lab/Github_repos/hcn-gating-kinetics/data/current_time_course/Pooled_2020/"
try:
df = pd.read_csv(csv_path + fname + ".csv", header=None, index_col=0)
# extract voltage command
N = int(df.shape[1]/2)
# check that there are equal number of current and voltage sweeps
if df.shape[1] != 2*N:
print(" Uneven number of current/voltage sweeps. Deleting last voltage sweep.")
df = df.iloc[:,:-1]
N = int(df.shape[1]/2)
# voltage protocol
self.df = df.iloc[:,N:]
except:
print(" %s not found in csv_path." % fname)
try:
# CellML csv export
df = pd.read_csv(fname, header=0, index_col=None)
df.index *= 1/2
self.df = df
except:
print(" Could not open `fname` as file.")
exit()
# sampling frequency
self.khz = int( 1/(df.index[1] - df.index[0]))
self.fname = fname
self.out_name = out_name
def create_reversal_ramp(self, Vact=-120, Tact=3000, Vramp=[-50, 10], Tramp=150, Tcap=10):
"""
Create steps for reversal ramp: maximally activating prepulse, followed by deactivating ramp.
`Vact` = prepulse voltage \\
`Tact` = duration of activation prepulse \\
`Vramp` = start and end voltages of deactivating ramp \\
`Tramp` = duration of reversal ramp
`Tcap` = short pulse of same voltage as `Vramp[0]` to cancel capacitive currents
Returns `ramp`, array containing prepulse and ramp command
"""
# duration of capacitive step
Tcap = 20
# slope of reversal ramp
dvdt = (Vramp[1] - Vramp[0])/Tramp
# times of reversal ramp
Tramp = np.arange(0, Tramp+Tcap, 1/self.khz)
# convert Tcap to samples
Tcap = int(Tcap * self.khz)
ramp = Tramp.copy()
ramp[:Tcap] = Vramp[0]
ramp[Tcap:] = [(dvdt*(t-Tramp[Tcap]) + Vramp[0]) for t in Tramp[Tcap:]]
return ramp
def create_leak_ramp(self, volts=[-35, 35], thalf=500, khz=2, add_MT_step=True):
"""
Create array of voltages for leak ramp
"""
if add_MT_step:
out = np.zeros((thalf*2*khz + 2000*khz,))
else:
out = np.zeros((thalf*2*khz,))
out[:1000*khz] = -35
ts = np.arange(0, thalf, 1/khz)
out[1000*khz:(thalf+1000)*khz] = (ts*(volts[1]-volts[0])/(thalf)) + volts[0]
if add_MT_step:
out[(thalf+1000)*khz:-1000*khz] = (ts*(volts[0]-volts[1])/thalf) + volts[1]
t = 1000
while t > 400:
out[-t*khz:-(t-200)*khz] = -35
out[-(t-200)*khz:-(t-400)*khz] = 20
t -= 400
out[-t*khz:] = -35
else:
out[(thalf+1000)*khz:] = (ts*(volts[0]-volts[1])/thalf) + volts[1]
return out
def add_leak_ramp(self):
leak_ramp = self.create_leak_ramp(khz=int(1/self.df.index[1]))
out = pd.DataFrame(np.array([leak_ramp,]*self.df.shape[1])).T
try:
out = pd.concat([out, self.df], axis=0, ignore_index=True).reset_index(drop=True)
except:
exit()
out.index *= 1/self.khz
plt.plot(out.index * 1e-3, out)
plt.show()
return out
def add_traces(self, N=2, addto=-1):
"""
Returns start and stop points for varying-level pulse, assuming two-step protocol.
`N` = number of traces to add \\
`addto` = adds to the end if `addto = -1` or to the beginning if `addto = 0`. New traces take duration of nth pulse, where `n = addto` if `addto` is 0 or -1.
"""
if addto in [0, -1]:
pass
else:
raise Exception(" `addto` must be one of 0 (add new traces to the beginning) or -1 (add new traces to the end).")
exit()
# get pulse durations and levels from the abf file
p1s, levels = find_abf_epochs(self.fname)
# find indices of varying-level pulses in each sweep
dL = [[j for j in range(len(L)) if (L[j] - levels[0][j]) != 0][0] for L in levels[1:]]
# print(" Indices of varying-level pulses in protocol. \n", dL)
if any(x != dL[0] for x in dL[1:]):
raise Exception(" Indices of varying-level pulses are not the same.")
exit()
# index of varying-level pulse
idx = dL[0]
# difference in level between varying-level pulses
dv = abs(levels[0][idx] - levels[1][idx])
# copy the last N columns of self.df
new_sweeps = self.df.iloc[:,-N:].copy()
# add new traces to the end of the protocol
if addto < 0:
# start and stop points of varying-level pulse
t0, t1, t2 = p1s[-1][idx:idx+3]
# initial voltage of new traces
# v0 = levels[-1][idx] + dv
v0 = levels[-1][idx]
else:
t0, t1, t2 = p1s[0][idx:idx+3]
v0 = levels[0][idx]
# add N sweeps of increasing voltage to the end of the recording
for i in range(N):
new_sweeps.iloc[t0:t1+1,i] = v0
v0 += dv
# halve duration of deactivating pulse
# dt = int((t2 - t1)/2) + t1 + 1
# new_sweeps.iloc[dt:,i] = -35
# remove leak ramp
new_sweeps.iloc[t2:, i] = -35
# plt.plot(new_sweeps.iloc[:,i])
# plt.show()
# exit()
# concatenate new sweeps into self.df
if addto < 0:
self.df = pd.concat([self.df, new_sweeps], axis=1)
else:
self.df = pd.concat([new_sweeps, self.df], axis=1)
# remove post-deactivation ramps in all sweeps
for i in range(self.df.shape[1]-N):
t2 = p1s[i][idx+2]
self.df.iloc[t2+1:,i] = -35
def add_interleaved_train(self, numlist=[], period=4000, volt=0, dt=500, spacing=2):
"""
Interleave sweeps of `self.df` with sweeps of the same total duration containing trains of fixed-duration, fixed-voltage steps
`numlist` = list of indices after which train will be inserted; if empty, uses `spacing` to interleave trains instead \\
`period` = time between steps \\
`volt` = level of steps \\
`dt` = duration of steps \\
`spacing` = how to add sweeps, e.g. between every nth
"""
period = int(period * self.khz)
dt = int(dt * self.khz)
# number of sweeps
N = int(self.df.shape[1]/spacing)
# don't add trains after final test sweep
if N*spacing > self.df.shape[1]:
N -= 1
# create trains
train = np.ones((self.df.shape[0], 1)) * -35
# padding between start and end of sweep
pad = int(5000 * self.khz)
def create_train(v):
# onset of first step
t0 = pad
while t0 < (len(train) - pad):
train[t0:t0+dt+1] = v
t0 += period + 1
if not isinstance(volt, list):
create_train(volt)
# count number of added trains
t = 0
if len(numlist) > 0:
for (i, n) in enumerate(numlist):
if isinstance(volt, list):
create_train(volt[i])
self.df.insert(loc=n+t, column="Train%d" % t, value=train)
t += 1
else:
for i in range(1, self.df.shape[1], spacing):
if isinstance(volt, list):
create_train(volt[i-1])
self.df.insert(loc=i+int(), column="Train%d" % t, value=train)
t += 1
def add_reversal_ramp(self, traces=[], Vact=-115, Tact=2000,
Vramp=[-50, 20], Tramp=140, SS=15000,
ramp_spacing = -1,
save_output=save_output, add_new=False):
"""
Add reversal ramp to voltage protocol given by `df`
`traces` = indices of sweeps to add reversal ramp to. If empty, added to all sweeps, if possible, i.e. if enough time for both original protocol and satisfying sweep-to-sweep interval `SS`. \\
`Vact` = activation potential \\
`Tact` = duration of activation step \\
`Vramp` = start and end voltages of reversal ramp \\
`Tramp` = duration of reversal ramp. \\
`SS` = sweep-to-sweep interval \\
`ramp_spacing` = non-zero; every `nth` sweep to add reversal ramp to.
If 1, adds to the first only.
If -1, adds to the first and last.
If -2, adds to the last only.
Else, adds every nth.
NOTE: A capacitive pulse of 10ms at Vramp[0] is added before the reversal ramp to account for capacitive currents.
"""
# minimum number of samples to add reversal ramp to the end of the protocol
total = int((Tact + Tramp*1.1 + SS)*self.khz)
# create reversal ramp
ramp = self.create_reversal_ramp(Vact=Vact, Tact=Tact, Vramp=Vramp, Tramp=Tramp)
Tact = int(Tact * self.khz)
Tramp = ramp.shape[0]
# indices of sweeps to add reversal ramp to
if ramp_spacing == 0:
raise Exception(" `ramp_spacing` must be non-zero.")
exit()
else:
if ramp_spacing > 1:
to_add = list(range(0, self.df.shape[1], ramp_spacing))
elif ramp_spacing == 1:
to_add = [0]
elif ramp_spacing == -1:
to_add = [0, self.df.shape[1]-1]
elif ramp_spacing == -2:
to_add = [self.df.shape[1]-1]
if self.df.shape[1]-1 in to_add:
pass
else:
to_add.append(self.df.shape[1]-1)
if add_new:
out = np.zeros((Tact+Tramp+2000*self.khz, ))
out[:Tact] = Vact
out[Tact:-2000*self.khz] = ramp
out[-2000*self.khz:] = -35
out = pd.DataFrame(out)
print(out.shape)
self.df = pd.concat([self.df, out], axis=0)  # api: pandas.concat
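# Sketch of typical use of the class above (assumed file names; 'my_recording' is hypothetical, and
# saving/exporting self.df happens in code outside this excerpt):
#   ep = edit_existing_protocol('my_recording', 'my_recording_revramp')
#   ep.add_traces(N=2, addto=-1)
#   ep.add_reversal_ramp(ramp_spacing=-1, add_new=True)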
#%%
import pytest
import json
from pathlib import Path
import numpy as np
import pandas as pd
from PIL import Image
from PIL import ImageChops
from raymon import types as rt
"""Basic tests"""
def test_load_abc():
with pytest.raises(TypeError):
rdf = rt.RaymonDataType()
"""Image tests"""
def test_image_invalid():
with pytest.raises(ValueError):
rdf = rt.DataFrame(data=None)
def test_image_lossless():
img = Image.open(Path(__file__).parents[0] / "Lenna.png")
rimg = rt.Image(img, lossless=True)
jcr = rimg.to_jcr()
rimg_rest = rt.load_jcr(jcr)
diff = ImageChops.difference(rimg.data, rimg_rest.data)
assert not diff.getbbox()
def test_image_lossy():
img = Image.open(Path(__file__).parents[0] / "Lenna.png")
rimg = rt.Image(img)
jcr = rimg.to_jcr()
rimg_rest = rt.load_jcr(jcr)
diff = ImageChops.difference(rimg.data, rimg_rest.data)
# This may have differences
assert diff.getbbox()
"""Numpy tests"""
def test_np_invalid():
# tmp_file = tmp_path / 'df.json'
arr = [1, 2, 3, 4]
with pytest.raises(ValueError):
rdf = rt.Numpy(data=arr)
def test_np_save_load():
# tmp_file = tmp_path / 'df.json'
arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [10, 20, 30, 40]])
wrapped = rt.Numpy(data=arr)
wrapped_jcr = wrapped.to_jcr()
wrapped_restored = rt.load_jcr(wrapped_jcr)
assert (wrapped.data == wrapped_restored.data).all().all()
"""Pandas tests"""
def test_series_invalid():
# tmp_file = tmp_path / 'df.json'
df = np.array([1, 2, 3, 4])
with pytest.raises(ValueError):
rdf = rt.Series(data=df)
def test_series_save_load():
# tmp_file = tmp_path / 'df.json'
series = pd.Series([1, 2, 3, 4])
rseries = rt.Series(data=series)
series_jcr = rseries.to_jcr()
series_restored = rt.load_jcr(series_jcr)
assert (rseries.data.values == series_restored.data.values).all()
def test_df_invalid():
# tmp_file = tmp_path / 'df.json'
df = pd.Series([1, 2, 3, 4])  # api: pandas.Series
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/maggot_models/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
except:
pass
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import networkx as nx
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
#rm = pymaid.CatmaidInstance(url, token, name, password)
# load previously generated paths
all_edges_combined = pd.read_csv('interhemisphere/csv/all_paired_edges.csv', index_col=0)  # api: pandas.read_csv
# Author: <NAME>
# Module: Siamese LSTM with Fully Connected Layers
# Competition : Quora question pairs
#packages required
import os
import re
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,f1_score,confusion_matrix
import gensim
import nltk
import tensorflow as tf
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.layers import Input, Embedding, LSTM, Merge,Bidirectional
import keras.backend as K
from keras.optimizers import Adadelta
from keras.callbacks import ModelCheckpoint
from keras.layers import LeakyReLU,Dense,Dropout,Lambda
from keras import metrics
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate,Concatenate
# Download these if nltk doesn't have them already
nltk.download('punkt')
nltk.download('stopwords')
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stopword = stopwords.words('english')
#Train data
train_data = pd.read_csv('train.csv')
print(len(train_data))
train_data.drop(["qid1","qid2","id"],inplace=True,axis=1)
train_labels = train_data["is_duplicate"].astype(int)
train_labels = train_labels.as_matrix()
print(train_labels[0:2])
# Load Google pre trained vectors:
# Mention the correct path to your bin/txt file
google_w2v_model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True)
# This is the Stanford GloVe model: download 'glove.42B.300d.zip', unzip it, and keep it in your directory. Each vector is 300-dimensional.
def load_glove_model():
glove_model = {}
with open('glove.42B.300d.txt','r') as f: # Path to your Glove File
for line in f.readlines():
splitline = line.split()
word = splitline[0]
embedding = np.array([float(val) for val in splitline[1:]])
glove_model[word] = embedding
return glove_model
#This function loads the glove model
glove_w2v_model = load_glove_model()
# Preprocess the data using this function.
# It returns a list of tokens after preprocessing
def preprocess(text):
text = re.sub(r"it\'s","it is",str(text))
text = re.sub(r"i\'d","i would",str(text))
text = re.sub(r"don\'t","do not",str(text))
text = re.sub(r"he\'s","he is",str(text))
text = re.sub(r"there\'s","there is",str(text))
text = re.sub(r"that\'s","that is",str(text))
text = re.sub(r"can\'t", "can not", text)
text = re.sub(r"cannot", "can not ", text)
text = re.sub(r"what\'s", "what is", text)
text = re.sub(r"What\'s", "what is", text)
text = re.sub(r"\'ve ", " have ", text)
text = re.sub(r"n\'t", " not ", text)
text = re.sub(r"i\'m", "i am ", text)
text = re.sub(r"I\'m", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r"\'s"," is",text)
text = re.sub(r"[0-9]"," ",str(text))
sents = word_tokenize(text)
return sents
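# A rough usage sketch (hypothetical input; exact tokens depend on your nltk tokenizer version):
#   preprocess("What's this? I'm sure it's fine 123")
#   -> ['what', 'is', 'this', '?', 'i', 'am', 'sure', 'it', 'is', 'fine']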
# Divide the training data into Train set and Validation set
# Train data --> 98%
# Validation data --> 2%
X_train,X_val,y_train,y_val = train_test_split(train_data,train_labels,test_size=0.02,random_state=0)
# Since the split is done on pandas dataframe the indices need to be reset to begin from 0
# This function resets the indices
def resetindex(data):
data.reset_index(drop=True,inplace=True)
return data
X_train = resetindex(X_train) # Reset the train set indices
X_val = resetindex(X_val) # Reset the validation set indices
train_length = len(X_train)
val_length = len(X_val)
max_sentence_length = 20 # Maximum number of words per sentence to be considered
embedding_dim = 300 # Each word is converted to 300 dimensional vector
train_ques1 = np.zeros((train_length,max_sentence_length,embedding_dim)) # Vectors of question1 in train set
train_ques2 = np.zeros((train_length,max_sentence_length,embedding_dim)) # Vectors of question2 in train set
val_ques1 = np.zeros((val_length,max_sentence_length,embedding_dim)) # Vectors of question1 in validation set
val_ques2 = np.zeros((val_length,max_sentence_length,embedding_dim)) # Vectors of question2 in validation set
# This function pads sentences when the sentence length is less than max_sentence_length
# There are 2 types of initializations for positions that are not filled with a word vector:
# 1. Initialize with random numbers
# 2. Initialize with zeros
# I have initialized with zeros because it gave better results than random initialization
# (the vectors array is created with np.zeros, so the padding positions are already zero).
# Uncomment the loop below if you want to initialize randomly instead.
# You can also initialize using a normal distribution -----> np.random.normal(0, 1, embedding_dim)
def pad_sentences(start, vectors):
    # Padding positions are already zero; switch to the commented loop for random initialization.
    # for i in range(start, max_sentence_length):
    #     vectors[i, :] = np.random.rand(embedding_dim)
    return vectors
# This function checks whether each word is in either pretrained model
# If the word exists, its vector is added and the count is increased
# If the word doesn't exist, its position stays zero, as long as the count is less than max_sentence_length
# You can also use the Embedding layer of Keras
# For that you have to create a dictionary of words that don't exist in the word2vec models
# Then you have to create indices for out-of-vocabulary words
# Also, the question number has to be stored along with the words to map the corresponding vectors to that question
# This method is easier.
def convert_to_vectors(sentence):
sents = preprocess(sentence)
vectors = np.zeros((max_sentence_length,embedding_dim))
count = 0
for sent in sents:
if sent not in stopword: # Check if word is not a stopword
            if sent in glove_w2v_model: # Check if word is in glove model
                vectors[count,:] = glove_w2v_model[sent]
count+=1
elif sent in google_w2v_model: # Check if word is in google word2vec pretrained model
vectors[count,:] = google_w2v_model[sent]
count+=1
if(count==max_sentence_length): # If count of words equals max_sentence_length return vectors
return vectors
if(count<max_sentence_length):
pad_sentences(count,vectors)
return vectors
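# A minimal sketch of what convert_to_vectors returns (shapes follow the constants above):
#   vecs = convert_to_vectors("How do magnets work?")   # hypothetical question
#   vecs.shape  -> (max_sentence_length, embedding_dim) == (20, 300)
#   Positions that are not filled (stopwords, out-of-vocabulary words, padding) remain all-zero.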
def generate_train_vectors():
for i in range(train_length):
train_ques1[i,:,:] = convert_to_vectors(X_train["question1"][i])
train_ques2[i,:,:] = convert_to_vectors(X_train["question2"][i])
# Generate vectors for Train set
generate_train_vectors()
def generate_validation_vectors():
for i in range(val_length):
val_ques1[i,:,:] = convert_to_vectors(X_val["question1"][i])
val_ques2[i,:,:] = convert_to_vectors(X_val["question2"][i])
# Generate vectors for validation set
generate_validation_vectors()
# Siamese LSTM Model with 3 Fully Connected Layers
def generate_model(n_hidden1,n_hidden2):
left_input = Input(shape=(max_sentence_length,embedding_dim))
right_input = Input(shape=(max_sentence_length,embedding_dim))
lstm1 = LSTM(n_hidden1,return_sequences=True)
lstm2 = LSTM(n_hidden2,return_sequences=False)
lstm1_ques1 = lstm1(left_input)
lstm1_ques2 = lstm1(right_input)
lstm2_ques1 = lstm2(lstm1_ques1)
lstm2_ques2 = lstm2(lstm1_ques2)
# Concatenates the outputs from 2 identical networks
    combined_output = Concatenate()([lstm2_ques1,lstm2_ques2])
# Fully connected Dense Layer
dense1 = Dense(200,activation='relu')(combined_output)
dense1_batch = BatchNormalization()(dense1)
dense1_out = Dropout(0.2)(dense1_batch)
dense2 = Dense(64,activation='relu')(dense1_out)
dense2_batch = BatchNormalization()(dense2)
dense2_out = Dropout(0.2)(dense2_batch)
dense3 = Dense(8,activation='relu')(dense2_out)
pred = Dense(1,activation='sigmoid')(dense3)
model = Model(inputs=[left_input,right_input],outputs=[pred])
    # Mean squared error is used as the loss function
model.compile(optimizer='rmsprop',loss='mean_squared_error',metrics=['accuracy'])
return model
# hidden sizes of LSTM
n_hidden1 = 64
n_hidden2 = 16
model = generate_model(n_hidden1,n_hidden2)
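# ModelCheckpoint is imported above but never wired in; a minimal sketch of using it
# (hypothetical filepath; the monitored metric name depends on your Keras version):
#   checkpoint = ModelCheckpoint('siamese-dense-lstm-best.h5', monitor='val_acc', save_best_only=True)
#   model.fit(..., callbacks=[checkpoint])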
model.fit([train_ques1,train_ques2],y_train,validation_data=([val_ques1,val_ques2],y_val),batch_size=256,epochs=25)
model.save('siamese-dense-lstm.h5')
test_data = pd.read_csv('test.csv')
test_length = len(test_data)
print(test_length)
print(test_data.head(3))
test_data.drop(["test_id",],axis=1,inplace=True)
test_ques1 = np.zeros((test_length,max_sentence_length,embedding_dim))
test_ques2 = np.zeros((test_length,max_sentence_length,embedding_dim))
def generate_test_vectors():
for i in range(test_length):
test_ques1[i,:,:] = convert_to_vectors(test_data["question1"][i])
test_ques2[i,:,:] = convert_to_vectors(test_data["question2"][i])
generate_test_vectors()
pred = model.predict([test_ques1,test_ques2],batch_size=4096)
predictions = np.zeros(test_length,dtype='int32')
for i in range(test_length):
if(pred[i]>=0.5):
predictions[i] = int(1)
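# An equivalent vectorized form of the loop above (a sketch, same 0.5 threshold):
#   predictions = (pred.ravel() >= 0.5).astype('int32')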
print(len(predictions))
test =
|
pd.DataFrame({'is_duplicate':predictions})
|
pandas.DataFrame
|
import math
import pytest
import pandas as pd
import numpy as np
from primrose.transformers.impute import ColumnSpecificImpute
def test_fit():
d = {
"col1": [1, 2, 3],
"col2": [4, 6, 11.2],
"col3": [1, 1, 1],
"col4": [3, 2, 3],
"col5": [1, 2, 3],
"col6": [1, 2, 3],
"col7": [None, None, None],
"col8": np.nan,
}
df = pd.DataFrame(data=d)
imputer = ColumnSpecificImpute(
columns_to_zero=["col1"],
columns_to_mean=["col2"],
columns_to_median=["col3"],
columns_to_mode=["col4", "col7", "col8"],
columns_to_infinity=["col5"],
columns_to_neg_infinity=["col6"],
)
imputer.fit(df)
encoder = imputer.encoder
assert encoder["col1"] == 0
assert math.isclose(encoder["col2"], 21.2 / 3, abs_tol=0.001)
assert encoder["col3"] == 1
assert encoder["col4"] == 3
assert math.isclose(encoder["col5"], 999999999, abs_tol=0.001)
assert math.isclose(encoder["col6"], -999999999, abs_tol=0.001)
assert encoder["col7"] == 0
assert encoder["col8"] == 0
def test_fit2():
d = {
"col1": [1, 2, 3],
"col2": [4, 6, 11.2],
"col3": [1, 1, 1],
"col4": [3, 2, 3],
"col5": [1, 2, 3],
"col6": [1, 2, 3],
}
df = pd.DataFrame(data=d)
imputer = ColumnSpecificImpute(
columns_to_zero=["col1"],
columns_to_mean=["col2", "col1"],
columns_to_median=["col3"],
columns_to_mode=["col4"],
columns_to_infinity=["col5"],
columns_to_neg_infinity=["col6"],
)
with pytest.raises(Exception) as e:
imputer.fit(df)
assert "There are columns in multiple lists {'col1'}" in str(e)
def test_fit3():
d = {
"col1": [1, 2, 3],
"col2": [4, 6, 11.2],
"col3": [1, 1, 1],
"col4": [3, 2, 3],
"col5": [1, 2, 3],
"col6": [1, 2, 3],
}
df =
|
pd.DataFrame(data=d)
|
pandas.DataFrame
|
# Import libraries
import json
import matplotlib.pyplot as plt, numpy as np, pandas as pd
from sklearn.neighbors import radius_neighbors_graph
from scipy.sparse.csgraph import connected_components
# Contact spacing
dist1 = 0.2
dist2 = 0.5
# Get connected components and distances
data = pd.read_csv("output/dispcont.csv", names=["x", "y", "w", "one"], usecols=["x", "y"])
data["cc"] = connected_components(radius_neighbors_graph(data.values, 0.06))[1]
ccs = data.groupby("cc").count().reset_index().sort_values("x")[-4:]["cc"].values
data["dist"] = np.sqrt(data["x"]**2 + data["y"]**2)
# Get points
refpts = []
pts = []
for cc in ccs:
# Filter to connected component
ccdata = data.loc[data["cc"] == cc]
# Reference point that is closest to center
refpt = ccdata.loc[ccdata["dist"].idxmin()]
ccdata["refdist"] = np.sqrt((ccdata["x"]-refpt["x"])**2 + (ccdata["y"]-refpt["y"])**2)
# Get first two points
closest_to_target_dist = ccdata.iloc[(ccdata["refdist"]-dist1).abs().argsort()]
pt1 = closest_to_target_dist.iloc[0]
for i, pt2 in closest_to_target_dist.iterrows():
if np.abs((pt2["x"]-pt1["x"])**2 + (pt2["y"]-pt1["y"])**2) > dist1/3:
break
# Get second two points
closest_to_target_dist = ccdata.iloc[(ccdata["refdist"]-dist2).abs().argsort()]
pt3 = closest_to_target_dist.iloc[0]
for i, pt4 in closest_to_target_dist.iterrows():
if np.abs((pt4["x"]-pt3["x"])**2 + (pt4["y"]-pt3["y"])**2) > dist2/3:
break
# Add points to list
refpts.append(refpt)
pts += [pt1, pt2, pt3, pt4]
# Convert to DataFrame
pts =
|
pd.DataFrame(pts)
|
pandas.DataFrame
|
'''💹💰💷💶💴💵💸🤖👩💻🧑💻👨💻📉📈📊📰'''
import os
import time
import pytz
import json
import logging
import requests
import datetime
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import streamlit as st
from streamlit import caching
import matplotlib.pyplot as plt
from configparser import ConfigParser
from requests.exceptions import ConnectionError
from src.db import DataBase
from src.utils_stocks import get_curr_price
from src.utils_general import get_yahoo_link
from src.utils_general import get_google_link
from src.utils_general import suppress_stdout
logging.getLogger().setLevel(logging.CRITICAL)
# directories
DIR_DB = os.path.join(os.getcwd(), 'data', 'db')
DIR_DEMO = os.path.join(os.getcwd(), 'data', 'demo')
F_CFG = os.path.join(os.getcwd(), 'config.ini')
# constants and objects
cfg = ConfigParser()
cfg.read(F_CFG)
CFG_SECTION = 'deploy_webapp'
IS_DEMO = cfg.getint(CFG_SECTION, 'IS_DEMO')
F_DEMO_DF_C = os.path.join(DIR_DEMO, 'df_c.parquet')
F_DEMO_DF_PROBA_SM = os.path.join(DIR_DEMO, 'df_proba_sm.parquet')
DATI_OLD = '19930417_0000'
if IS_DEMO:
db = DataBase([], dir_db=DIR_DEMO)
else:
db = DataBase([], dir_db=DIR_DB)
# system strings
ERROR_EXCEPTION = '{} - {}'
ERROR_CONNECTION = 'Connection error! Try again in a few seconds.'
TEXT_PAGE_TITLE = 'Five Minute Midas'
TEXT_TITLE = '''# Five Minute Midas 📈
### Predicting profitable day trading positions for *{}*.
---'''
TEXT_ADVICE = '\n ### Try changing the **Profit Probability.**'
TEXT_SYMBOLS_FOUND = '### {} of {} symbols selected.{}\n---'
TEXT_FIG = '''## {} - {} {}
#### {} - {}
{}
'''
TEXT_FIG_MULTI = '## All Symbols Summary'
TEXT_LINKS = '''[G-News]({}), [Y-Finance]({})'''
TEXT_BUTTON1 = 'Refresh Cache'
TEXT_BUTTON3 = 'or Show All'
TEXT_EXPLAIN = 'Explain'
TEXT_STR_EXPLAIN_1 = 'Latest price: ${}, {} from day before'
TEXT_STR_EXPLAIN_2 = '- At {}, there was {}% chance of profit. Actual profit: {}%'
TEXT_STR_EXPLAIN_3 = '''Price Chart
- Red Line - Volume Weighted Average Price (VWAP)
- Red Point - Bullish RSI Div, current profit *negative*
- Green Point - Bullish RSI Div, current profit *positive*'''
TEXT_STR_EXPLAIN_4 = '''RSI Chart (14 Periods)
- Orange Line - *Overbought* Indicator
- Green Line - *Oversold* Indicator'''
TEXT_DESCRIPTION = 'Company Description'
TEXT_SELECTBOX = '' #'Symbol - Industry - Profit Probability (Latest)'
TEXT_SELECT_DEFAULT = 'Choose a Symbol...'
TEXT_SLIDER1 = 'Profit Probability (Latest)'
TEXT_SLIDER2 = 'Historical Prediction Range'
TEXT_SIDEBAR_HEADER = '### Advanced Settings'
TEXT_SIDEBAR_INPUT1 = 'Add Symbols (e.g. BYND IBM)'
TEXT_SIDEBAR_INPUT2 = 'Remove Symbols (e.g. SPOT BA)'
TEXT_SIDEBAR_INPUT3 = 'Current Positions (e.g. TSLA 630)'
TEXT_SIDEBAR_INPUT4 = 'Simulate Time Cutoff (e.g. 0945)'
TEXT_SIDEBAR_RADIO = 'Sort By'
TEXT_SIDEBAR_BUTTON = 'Show Current Profits'
TEXT_SIDEBAR_WARN_DEMO = 'Feature disabled for demo.'
TEXT_SIDEBAR_ERROR = 'Empty or invalid input.'
TEXT_SIDEBAR_INFO = '''### Information
- 💻 See code: [GitHub](https://github.com/MichaelOw/five-minute-midas)
- 🤖 Developer: [Michael](https://www.linkedin.com/in/michael-ow/)
- 📰 Read article: [Medium](https://michael-ow.medium.com/how-i-used-a-random-forest-classifier-to-day-trade-for-2-months-part-i-9c00d96d254c)
'''
dt_sort_params = {
'Profit Probability (Latest)':'proba_last',
'Profit Probability (Max)':'proba_max',
'Prediction Time (Latest)':'datetime_last',
'Symbol':'sym',
}
@st.cache()
def get_predictions_summary():
'''Makes API call to get and return dataframe containing predictions summary
Returns:
df (pandas.Dataframe)
'''
if IS_DEMO:
df_proba_sm_demo = pd.read_parquet(F_DEMO_DF_PROBA_SM)
return df_proba_sm_demo
# api call to get df_proba
url = 'http://localhost:5000/df_proba_sm'
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
r = requests.post(url, data='', headers=headers)
data = json.loads(r.text)
df = pd.DataFrame(**data)
for col in ['datetime_last', 'datetime_update']:
df[col] = pd.to_datetime(df[col]).dt.round('min')
return df
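# Note on pd.DataFrame(**data) above: this assumes the API returns the frame in pandas'
# "split" orientation, e.g. (hypothetical payload):
#   data = {'columns': ['sym', 'proba_last'], 'index': [0], 'data': [['TSLA', 0.61]]}
#   pd.DataFrame(**data)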
@st.cache()
def get_predictions(ls_sym, time_str):
'''Makes API call to get and return dataframe containing all prices, indicators
and predictions; Based on input list of symbol and time cutoff
Args:
ls_sym (List of str)
time_str (str)
Returns:
df_c (pandas.Dataframe)
'''
if IS_DEMO:
df_c = pd.read_parquet(F_DEMO_DF_C)
index = (df_c['sym'].isin(ls_sym))&(df_c['datetime'].dt.strftime('%H%M')<=time_str)
df_c = df_c[index]
return df_c
dt_sym = {'ls_sym':ls_sym, 'time_str':time_str}
url = 'http://localhost:5000/df_c'
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
r = requests.post(url, json=json.dumps(dt_sym), headers=headers)
data = json.loads(r.text)
df_c = pd.DataFrame(**data)
df_c['datetime'] = pd.to_datetime(df_c['datetime'], unit='ms')
return df_c
def get_df_sym(ls_sym, db):
'''Returns dataframe of symbols with additional
information, based on input list of symbols
Args:
ls_sym (List of str)
db (Database object)
Returns:
df_sym (pandas.Dataframe)
'''
    if len(ls_sym)==1: ls_sym = ls_sym + ls_sym  # a one-element tuple renders as ('SYM',); the trailing comma breaks the SQL IN clause
q='''
SELECT sym, long_name, sec, ind, summary
FROM stocks
WHERE sym IN {}
'''.format(tuple(ls_sym))
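    # Why the one-element case is duplicated above: tuple(['SPY']) renders as ('SPY',)
    # and that trailing comma is not valid SQL inside an IN clause (hypothetical symbol):
    #   'WHERE sym IN {}'.format(tuple(['SPY', 'SPY']))  ->  "WHERE sym IN ('SPY', 'SPY')"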
df_sym =
|
pd.read_sql(q, db.conn)
|
pandas.read_sql
|
# Author: <NAME>
# License: MIT
from typing import Any
import pandas as pd
import numpy as np
def xor():
pass
def _xor_process(data, key) -> Any:
'''
This method is a 2-way processor
'''
data_len = len(data)
for c in range(data_len):
data = (data[:c] +chr(ord(data[c]) ^ ord(key)) + data[c+1:])
return data
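# A quick sanity check of the 2-way property (single-character key, as ord(key) assumes):
assert _xor_process(_xor_process("hello", "k"), "k") == "hello"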
def encrypt_data(key, path: str=None, dataframe: pd.DataFrame=None) -> pd.DataFrame:
'''
This function encrypts the provided dataset.
Example
-------
    >>> from encrypy import xor
    >>> encoded_data = xor.encrypt_data(key, path=path)
    Parameters
    ----------
    path: str, default = None
        Path to the dataset on disk.
    dataframe: pd.DataFrame, default = None
        Alternatively, an already-initialized pandas DataFrame can be passed.
        If both are given, `path` takes precedence.

    Returns
    -------
    DataFrame object
'''
encrypt_lst = []
encrypt_arr = []
    if path is not None:
bin = pd.read_csv(path)
bin = bin.astype(str)
for r in range(len(bin)):
row = bin.loc[r]
for k, v in enumerate(row.T):
code = _xor_process(v, key)
encrypt_lst.append(code)
if len(encrypt_lst) == len(bin.columns):
store = {
(int(n_row) for n_row in range(k)): np.array([a for a in encrypt_lst])
}
encrypt_arr.append(pd.DataFrame(store).T)
encrypt_lst = []
encoded_arr_np = np.concatenate(encrypt_arr)
return pd.DataFrame(encoded_arr_np, columns=bin.columns)
    elif dataframe is not None and not dataframe.empty:
bin =
|
pd.DataFrame(dataframe)
|
pandas.DataFrame
|
import glob
import json
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy.stats import f_oneway
from scipy.stats import ttest_ind
from utils.args_parser import mkdir
from utils.constants import Cte
class ResultsManager:
def __init__(self, root_dir, objective_mmd=False):
self.root_dir = root_dir
self.save_dir = f"images/{root_dir.replace(os.sep, '_')}"
mkdir(f"images/{root_dir.replace(os.sep, '_')}")
df_all = create_df_results(root_dir, add_time=True)
df_all = df_all.rename(columns={c: c.replace('/', '_') for c in df_all.columns})
df_all['model_params_num_hidden_dec'] = df_all['model_params_h_dim_list_dec'].apply(get_number_hidden_layers)
df_all['model_params_num_hidden_enc'] = df_all['model_params_h_dim_list_enc'].apply(get_number_hidden_layers)
df_all['model_params_h_dim_list_enc'] = df_all['model_params_h_dim_list_enc'].apply(process_list_params)
df_all['model_params_h_dim_list_dec'] = df_all['model_params_h_dim_list_dec'].apply(process_list_params)
df_all['model_params_h_dim_list'] = 'dec_' + df_all['model_params_h_dim_list_dec'] + '_enc_' + df_all[
'model_params_h_dim_list_enc']
df_all.rename(columns={"model_name": "Model",
"dataset_name": "Dataset",
"dataset_params_equations_type": "SEM"}, inplace=True)
print(f"Number of experiments: {len(df_all)}")
print(f"Datasets: {df_all['Dataset'].unique()}")
print(f"Models: {df_all['Model'].unique()}")
print(f"Architectures: {df_all['model_params_architecture'].unique()}")
columns_list = list(df_all.columns)
self.columns_list = columns_list
df_all['Model'] = df_all['Model'].replace({'mcvae': 'MultiCVAE',
'vcause_piwae': 'VACA',
'vcause': 'VACA',
'carefl': 'CAREFL'}
)
# metrics_dict = {'IWAE 100': ['test_iwae_100'],
# 'MMD Obs.': ['test_observation_mmd1'],
# 'MMD Inter.': get_elements(columns_list, ['test', 'mmd', 'inter', 'children'], ['mmd1_lb']),
# 'MeanE.': get_elements(columns_list, ['test', 'mse_mean', '_inter_', 'children']),
# 'StdE.': get_elements(columns_list, ['test', 'mse_std', 'inter', 'children']),
# 'MSE CF': get_elements(columns_list, ['test', '_cf_', 'x_mse', 'children', 'noise'], ['std']),
# 'SSE CF': get_elements(columns_list, ['test', '_cf_', 'x_mse_std', 'children', 'noise']),
# 'MRE CF N': get_elements(columns_list, ['test', '_cf_', 'x_mse', 'children'], ['std', 'noise']),
# 'SDRE CF N': get_elements(columns_list, ['test', '_cf_', 'x_mse_std', 'children'], ['noise'])
# }
metrics_dict = {'IWAE 100': ['test_iwae_100'],
'MMD Obs.': ['test_observation_mmd1'],
'MMD Inter.': get_elements(columns_list, ['test', 'mmd', 'inter', 'children'], ['mmd1_lb']),
'MeanE.': get_elements(columns_list, ['test', 'mse_mean', '_inter_', 'children']),
'StdE.': get_elements(columns_list, ['test', 'mse_std', 'inter', 'children']),
'MSE CF': get_elements(columns_list, ['test', '_cf_', 'x_mse', 'children', 'noise'],
['std', 'x1', 'age']),
'SSE CF': get_elements(columns_list,
['test', '_cf_', 'x_mse_std', 'children', 'noise', 'x1', 'age']),
'MRE CF N': get_elements(columns_list, ['test', '_cf_', 'x_mse', 'children'],
['std', 'noise', 'x1', 'age']),
'SDRE CF N': get_elements(columns_list, ['test', '_cf_', 'x_mse_std', 'children'],
['noise', 'x1', 'age'])
}
self.metrics_dict = metrics_dict
for key, values in metrics_dict.items():
            if values[0] in ['test_iwae_100', 'test_observation_mmd1']:
assert len(values) == 1
df_all[key] = df_all[values[0]]
continue
print(key)
print_cols(values)
df_all[key] = df_all[values].mean(1)
self.df = df_all
self.df_best = None
# Hyperparameters cross-validated
self.cv_dict = {'CAREFL': ['model_params_n_layers',
'model_params_n_hidden'],
'VACA': ['dataset_params_likelihood_names',
'model_params_h_dim_list',
'model_params_dropout_adj_pa_rate',
'model_params_dropout_adj_pa_prob_keep_self',
'model_params_residual'
],
'MultiCVAE': ['model_params_z_dim',
'dataset_params_lambda_',
'model_params_h_dim_list',
'model_params_drop_rate',
]}
# Objective metrics for each model
if not objective_mmd:
self.objective_metric = {'CAREFL': 'valid_log_px',
'VACA': 'valid_iwae_100',
'MultiCVAE': 'valid_iwae_100'}
else:
self.objective_metric = {'CAREFL': 'MMD Obs.',
'VACA': 'MMD Obs.',
'MultiCVAE': 'MMD Obs.'}
        # Minimum number of hidden layers in the decoder (model_params_num_hidden_dec) per dataset
self.min_h_layers = {Cte.TRIANGLE: 1,
Cte.CHAIN: 1,
Cte.LOAN: 2,
Cte.COLLIDER: 0,
Cte.MGRAPH: 0,
Cte.ADULT: 2
}
self.dataset_name_order = ['collider', 'mgraph', 'triangle', 'chain', 'loan', 'adult']
self.sem_name_order = ['linear', 'non-linear', 'non-additive']
self.model_name_order = ['MultiCVAE', 'CAREFL', 'VACA']
def filter_valid_configurations(self, df):
cond = df['Model'] != 'VACA'
for dataset, min_h_layers in self.min_h_layers.items():
cond_i = (df.model_params_num_hidden_dec >= min_h_layers) & (df.Dataset == dataset)
cond = cond | cond_i
return df[cond]
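    # The mask above keeps every non-VACA row, plus VACA rows whose decoder depth meets the
    # per-dataset minimum; expanded for a hypothetical two-dataset case it reads roughly as:
    #   cond = (df['Model'] != 'VACA') \
    #          | ((df.model_params_num_hidden_dec >= 1) & (df.Dataset == 'triangle')) \
    #          | ((df.model_params_num_hidden_dec >= 2) & (df.Dataset == 'loan'))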
def load_df_best(self, safe=0, dim_z=4):
'''
        we need dim_z to exclude the experiments that were used for cross-validating dim(z)
'''
print('\n\nComputing best configurations for each model and SEM:')
cols = ['Model', 'Dataset', 'SEM', 'json_filename', 'num_parameters']
cols.extend(get_elements(self.columns_list, ['dataset_params']))
cols.extend(get_elements(self.columns_list, ['model_params']))
metrics_cols = list(set(list(self.objective_metric.values())))
cols.extend(metrics_cols)
cols.extend(list(self.metrics_dict.keys()))
cols = list(set(cols))
df = self.df.copy()[cols]
df = self.filter_valid_configurations(df)
best_models_file = os.path.join(self.save_dir, 'best_models.txt')
best_models_list = []
for dataset_name, df_dataset in df.groupby('Dataset'):
for m_name, df_m in df_dataset.groupby('Model'):
print('--------')
if m_name == 'VACA':
df_m = df_m[df_m.model_params_z_dim == dim_z]
for d_name, df_md in df_m.groupby('SEM'):
print(f'{dataset_name} : {m_name} : {d_name}')
with open(best_models_file, 'a') as f:
f.write(f'{dataset_name} : {m_name} : {d_name}\n')
df_md_g = df_md.groupby(self.cv_dict[m_name], dropna=False).agg(['mean', 'std', 'count'])[
self.objective_metric[m_name]]
if safe > 0:
for best_config, df_best_config in df_md_g.sort_values(
by='mean').iterrows():
print(f"len: {df_best_config['count']}")
if df_best_config['count'] >= (safe - 1):
break
else:
best_config = df_md_g['mean'].idxmax()
df_best_md = df_md.copy()
for k, v in zip(self.cv_dict[m_name], best_config):
with open(best_models_file, 'a') as f:
f.write(f'\t{k}: {v}\n')
print(f'\t{k}: {v}')
df_best_md = df_best_md[df_best_md[k] == v]
print(f"Num of entries: {len(df_best_md)}")
with open(best_models_file, 'a') as f:
best = df_best_md.loc[df_best_md[self.objective_metric[m_name]].idxmax()]
f.write(f"\t{best['json_filename']}\n")
f.write(f"\tnum_parameters: {best['num_parameters']}\n")
print(df_best_md.loc[df_best_md[self.objective_metric[m_name]].idxmax()]['json_filename'])
get_unique_parameteres(self.columns_list,
df_i=df_best_md,
type_list=['model'])
my_mean, my_std, _ = df_md_g.loc[best_config]
print(f"{self.objective_metric[m_name]}: {my_mean:.3f} +- {my_std:.3f}\n")
if safe > 0: assert len(df_best_md) >= (
safe - 1), f'Number of elements different from number of seeds {len(df_best_md)}'
best_models_list.append(df_best_md)
df_best = pd.concat(best_models_list)
print('\n\nModels we are comparing:')
for m in df_best['Model'].unique():
print(f"\t{m}")
self.df_best = df_best
# def load_df_best(self, safe=0, dim_z=4):
# '''
# we need dimension z to remove those experiments that we use for the experiments on cross validating dim(z)
# '''
# print('\n\nComputing best configurations for each model and SEM:')
# cols = ['Model', 'Dataset', 'SEM', 'json_filename', 'num_parameters']
# cols.extend(get_elements(self.columns_list, ['dataset_params']))
# cols.extend(get_elements(self.columns_list, ['model_params']))
# metrics_cols = list(set(list(self.objective_metric.values())))
# cols.extend(metrics_cols)
# cols.extend(list(self.metrics_dict.keys()))
# df = self.df.copy()[cols]
#
# df = self.filter_valid_configurations(df)
#
# best_models_file = os.path.join(self.save_dir, 'best_models.txt')
#
# best_models_list = []
# for dataset_name, df_dataset in df.groupby('Dataset'):
# for m_name, df_m in df_dataset.groupby('Model'):
# print('--------')
# if m_name == 'VACA':
# df_m = df_m[df_m.model_params_z_dim == dim_z]
# for d_name, df_md in df_m.groupby('SEM'):
# print(f'{dataset_name} : {m_name} : {d_name}')
#
# with open(best_models_file, 'a') as f:
# f.write(f'{dataset_name} : {m_name} : {d_name}\n')
# df_md_g = df_md.groupby(self.cv_dict[m_name], dropna=False).agg(['mean', 'std'])[
# self.objective_metric[m_name]]
#
# best_config = df_md_g['mean'].idxmax()
# df_best_md = df_md.copy()
#
# for k, v in zip(self.cv_dict[m_name], best_config):
# with open(best_models_file, 'a') as f:
# f.write(f'\t{k}: {v}\n')
# print(f'\t{k}: {v}')
# df_best_md = df_best_md[df_best_md[k] == v]
#
# print(f"Num of entries: {len(df_best_md)}")
# with open(best_models_file, 'a') as f:
# best = df_best_md.loc[df_best_md[self.objective_metric[m_name]].idxmax()]
# f.write(f"\t{best['json_filename']}\n")
# f.write(f"\tnum_parameters: {best['num_parameters']}\n")
# print(df_best_md.loc[df_best_md[self.objective_metric[m_name]].idxmax()]['json_filename'])
# get_unique_parameteres(self.columns_list,
# df_i=df_best_md,
# type_list=['model'])
#
# my_mean, my_std = df_md_g.loc[best_config]
# print(f"{self.objective_metric[m_name]}: {my_mean:.3f} +- {my_std:.3f}\n")
# if safe > 0: assert len(df_best_md) >= (safe-1), f'Number of elements different from number of seeds { len(df_best_md)}'
# best_models_list.append(df_best_md)
#
# df_best = pd.concat(best_models_list)
#
# print('\n\nModels we are comparing:')
#
# for m in df_best['Model'].unique():
# print(f"\t{m}")
#
# self.df_best = df_best
def generate_latex_table_comparison(self, metrics_to_plot=None,
include_num_params=True):
# Table 2 in the paper
if not isinstance(metrics_to_plot, list):
metrics_to_plot = [1, 2, 3, 4, 7, 8]
cols_metrics = list(self.metrics_dict.keys())
if include_num_params:
cols_metrics.append('Num. parameters')
metrics_to_plot.append(9)
for i, c in enumerate(cols_metrics):
add = 'True' if i in metrics_to_plot else 'False'
print(f"({i}) [{add}] {c}")
df_latex = self.df_best.copy()
group_by_columns = ['Dataset', 'SEM', 'Model']
dataset_dict = {'collider': 0,
'triangle': 1,
'loan': 2,
'm_graph': 3,
'chain': 4,
Cte.ADULT: 5}
sem_dict = {'linear': 0,
'non-linear': 1,
'non-additive': 2
}
model_dict = {'MultiCVAE': 0,
'CAREFL': 1,
'VACA': 2
}
df_latex['Dataset'] = df_latex['Dataset'].replace(dataset_dict)
df_latex['Model'] = df_latex['Model'].replace(model_dict)
df_latex['SEM'] = df_latex['SEM'].replace(sem_dict)
if include_num_params:
df_latex['Num. parameters'] = df_latex['num_parameters']
print(f"Number of elements to create the table: {len(df_latex)}")
df_mean = df_latex.groupby(group_by_columns).mean()[cols_metrics] * 100
if include_num_params:
df_mean['Num. parameters'] = df_mean['Num. parameters'] / 100
df_mean = df_mean.rename(index={v: k for k, v in dataset_dict.items()},
level=0).rename(index={v: k for k, v in sem_dict.items()},
level=1).rename(index={v: k for k, v in model_dict.items()},
level=2).applymap(lambda x: '{0:.2f}'.format(x))
df_std = df_latex.groupby(group_by_columns).std()[cols_metrics] * 100
if include_num_params:
df_std['Num. parameters'] = df_std['Num. parameters'] / 100
df_std = df_std.rename(index={v: k for k, v in dataset_dict.items()},
level=0).rename(index={v: k for k, v in sem_dict.items()},
level=1).rename(index={v: k for k, v in model_dict.items()},
level=2).applymap(lambda x: '{0:.2f}'.format(x))
df_comparison = df_mean + '$\pm$' + df_std
table_file = os.path.join(self.save_dir, f'my_table_all.tex')
with open(table_file, 'w') as tf:
tf.write(df_comparison.iloc[:, metrics_to_plot].to_latex(escape=False))
return df_comparison
def generate_latex_table_propositions(self):
raise NotImplementedError
def budget(self, only_valid=True, filter_and=None):
print('\nComputing budget')
df = self.df.copy()
if only_valid:
df = self.filter_valid_configurations(df)
if isinstance(filter_and, dict):
cond = df['Model'] == 'VACA'
for col, values in filter_and.items():
cond_i = df[col].isin(values)
cond = cond & cond_i
cond = cond | (df['Model'] != 'VACA')
df = df[cond]
groupby = ['Dataset', 'SEM', 'Model']
print(df.groupby(groupby).count()['json_filename'])
def time_complexity(self, n=None,
replace=False,
time_list=None,
max_num_parameters=None,
ylim=None,
font_scale=1):
df = self.df.copy()
if time_list is None:
train_time_str = 'Total training time (min)'
train_col = 'train_time_total'
else:
train_time_str = time_list[0]
train_col = time_list[1]
num_params_str = 'Num. parameters'
groupby_cols = ['Model', 'Dataset']
metrics_cols = [train_time_str, num_params_str, 'model_params_num_hidden_dec']
cols_time = [*groupby_cols, *metrics_cols, 'train_epochs']
# cond_1 = (df['model_params_z_dim'] == 4) & (df['Model'] == 'VACA')
# cond_2 = df['Model'] == 'MultiCVAE'
# cond_3 = df['Model'] == 'CAREFL'
# cond = cond_1 | cond_2 | cond_3
# df = df[cond]
cond = (df.model_params_num_hidden_dec > 0) | (df.model_params_num_hidden_dec == -1)
df_time = df[cond]
if isinstance(max_num_parameters, int):
df_time = df_time[df_time.num_parameters < max_num_parameters]
df_time = self.order_by_model(df_time)
df_time = df_time.rename(columns={train_col: train_time_str})
df_time = df_time.rename(columns={"num_parameters": num_params_str})[cols_time]
df_time[train_time_str] = df_time[train_time_str] / 60
print(df_time.groupby(groupby_cols).agg(['mean', 'std', 'max'])[metrics_cols])
print(f'\nHow many experiments have we run for each model and dataset?')
for (m_name, d_name), df_g in df_time.groupby(groupby_cols):
print(f"{m_name} {d_name}: {len(df_g)}")
print('\nPlotting training time for the three different models')
plt.close('all')
ax = sns.boxplot(x="Model", y=train_time_str, data=df_time)
ax.set(ylim=ylim)
plt.show()
ax.get_figure().savefig(os.path.join(self.save_dir, 'time_complexity_all.png'))
g = sns.catplot(x="Model", y=train_time_str, data=df_time, showfliers=False,
kind="box", legend=True,
hue='Dataset'
)
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
g.set_xlabels('')
plt.show()
g.savefig(os.path.join(self.save_dir, f'time_complexity_all_per_dataset.png'))
print(df_time['Dataset'].unique())
df_time = df_time.rename(columns={'train_epochs': 'Num. Epochs'})
df_time = self.order_by_dataset(df_time)
g = sns.catplot(x="Model", y='Num. Epochs', data=df_time, showfliers=True,
kind="box", legend=False,
hue='Dataset'
)
plt.legend(loc='best')
g.set_xlabels('')
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
g.savefig(os.path.join(self.save_dir, f'time_complexity_all_epochs_per_dataset.png'))
print(df_time.groupby(['Model']).agg(['mean', 'std'])[train_time_str])
print(f'\nAre the training times significantly different?')
if n is not None:
time_carefl = df_time[df_time.Model == 'CAREFL'][train_time_str].sample(n, replace=replace)
time_vcause = df_time[df_time.Model == 'VACA'][train_time_str].sample(n, replace=replace)
time_multicvae = df_time[df_time.Model == 'MultiCVAE'][train_time_str].sample(n, replace=replace)
else:
time_carefl = df_time[df_time.Model == 'CAREFL'][train_time_str]
time_vcause = df_time[df_time.Model == 'VACA'][train_time_str]
time_multicvae = df_time[df_time.Model == 'MultiCVAE'][train_time_str]
statistic, pvalue = ttest_ind(time_vcause, time_carefl)
print(f'p-value of the T-test for VACA and CAREFL: {pvalue:.4f}')
statistic, pvalue = ttest_ind(time_multicvae, time_carefl)
print(f'p-value of the T-test for CAREFL and MultiCVAE: {pvalue:.4f}')
statistic, pvalue = ttest_ind(time_multicvae, time_vcause)
print(f'p-value of the T-test for VACA and MultiCVAE: {pvalue:.4f}')
statistic, pvalue = f_oneway(list(time_carefl.values),
list(time_multicvae.values),
list(time_vcause.values))
print(f'p-value of the f_oneway for : {pvalue:.4f}')
print(f'\nAre the training times significantly different PER DATASET?')
if font_scale != 1:
sns.set(font_scale=font_scale)
sns.set_style("white")
for d_name, df_data in df_time.groupby(['Dataset']):
print(f'\nDataset: {d_name}')
time_carefl = df_data[df_data.Model == 'CAREFL'][train_time_str]
time_vcause = df_data[df_data.Model == 'VACA'][train_time_str]
time_multicvae = df_data[df_data.Model == 'MultiCVAE'][train_time_str]
statistic, pvalue = f_oneway(list(time_carefl.values.flatten()),
list(time_multicvae.values.flatten()),
list(time_vcause.values.flatten()))
print(f'p-value of the f_oneway for : {pvalue:.4f}')
statistic, pvalue = ttest_ind(list(time_carefl.values.flatten()), list(time_vcause.values.flatten()))
print(f'p-value of the T-test for VACA and CAREFL: {pvalue:.4f}')
df_data = self.order_by_model(df_data)
g = sns.catplot(x="Model", y=train_time_str, data=df_data, showfliers=False,
kind="box", legend=False,
)
g.set_xlabels('')
g.fig.suptitle(f'{d_name}')
plt.show()
g.savefig(os.path.join(self.save_dir, f'time_complexity_all_{d_name}.png'))
# Number of parameters
for d_name, df_data in df_time.groupby(['Dataset']):
print(f'\nDataset: {d_name}')
df_data = self.order_by_model(df_data)
g = sns.catplot(x="Model", y=num_params_str, data=df_data, showfliers=False,
kind="box", legend=False,
)
g.set_xlabels('')
g.fig.suptitle(f'{d_name}')
plt.show()
g.savefig(os.path.join(self.save_dir, f'num_params_per_model_{d_name}.png'))
print('\nPlotting training time versus number of parameters of the three models')
ax = sns.scatterplot(data=df_time, x=num_params_str, y=train_time_str, hue="Model")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
ax.get_figure().savefig(os.path.join(self.save_dir, 'time_complexity_num_params.png'))
# Compare time mean and std across datatasets and model
print(df_time.groupby(['Dataset', 'Model']).agg(['mean', 'std'])[train_time_str])
def time_complexity_VACA(self):
df = self.df.copy()
train_time_str = 'Total training time (min)'
groupby_cols = ['Model', 'Dataset']
metrics_cols = [train_time_str, 'model_params_num_hidden_dec']
cols = [*groupby_cols, *metrics_cols]
df = df[df['Model'] == 'VACA']
df = df.rename(columns={"train_time_total": train_time_str})[cols]
print(df.groupby(groupby_cols).agg(['mean', 'std', 'median'])[train_time_str])
def VACA_decoder_layers(self, sem,
filter_=None):
df = self.df.copy()
df = df[df['Model'] == 'VACA']
df = df[df['SEM'] == sem]
if filter_ is not None:
for key, values in filter_.items():
df = df[df[key].isin(values)]
df.rename(columns={"model_params_num_hidden_dec": r'$N_h$'}, inplace=True)
groupby_cols = ['Dataset', r'$N_h$']
metrics_cols = ['MMD Obs.', 'MMD Inter.']
cols = [*groupby_cols, *metrics_cols]
df = self.order_by_dataset(df)
df = df[cols]
print(df.groupby(groupby_cols).agg(['mean', 'std', 'count'])[metrics_cols] * 100)
for nlayers in [0, 1, 2]:
print(f'nlayers: {nlayers}')
df_n = df[df[r'$N_h$'] == nlayers]
my_str = ''
data_str = ''
for d_name in self.dataset_name_order:
df_data = df_n[df_n.Dataset == d_name]
if len(df_data) == 0: continue
for met in metrics_cols:
mean = df_data[met].mean() * 100
std = df_data[met].std() * 100
my_str += ' & ' + f"{mean:.2f}" + ' $\pm$ ' + f"{std:.2f}"
data_str += f" {d_name}"
print(f'nlayers: {nlayers} dataset: {data_str}')
print(f"{my_str} \\\\")
def order_by_dataset(self, df):
return self._order_df(df,
col_name='Dataset',
col_values_list=self.dataset_name_order)
def order_by_model(self, df):
return self._order_df(df,
col_name='Model',
col_values_list=self.model_name_order)
def _order_df(self, df, col_name, col_values_list):
df_out = df.copy()
col_dict = {name: i for i, name in enumerate(col_values_list)}
df_out[col_name] = df_out[col_name].replace(col_dict)
df_out = df_out.sort_values(by=[col_name])
col_dict = {i: name for i, name in enumerate(col_values_list)}
df_out[col_name] = df_out[col_name].replace(col_dict)
return df_out
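    # A sketch of an equivalent ordering using an ordered Categorical instead of the
    # replace/sort/replace round trip above (assumes every value appears in col_values_list):
    #   df_out[col_name] = pd.Categorical(df_out[col_name], categories=col_values_list, ordered=True)
    #   df_out = df_out.sort_values(by=[col_name])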
def VACA_dimension_z(self, limit_dim_z=None,
filter_=None):
df_z = self.df[self.df.Model == 'VACA'].copy()
if filter_ is not None:
for key, value in filter_.items():
df_z = df_z[df_z[key] == value]
df_z.rename(columns={"model_params_z_dim": "dim(z)"}, inplace=True)
df_z.rename(columns={"num_parameters": "Num. parameters"}, inplace=True)
df_z = self.order_by_dataset(df_z)
for dim_z, df_dim in df_z.groupby('dim(z)'):
print(f'dim_z: {dim_z}')
my_str = ''
data_str = ''
for d_name in self.dataset_name_order:
df_data = df_dim[df_dim.Dataset == d_name]
if len(df_data) == 0: continue
data_str += f" {d_name}"
for s_name in self.sem_name_order:
df_sem = df_data[df_data.SEM == s_name]
if len(df_sem) == 0: continue
data_str += f" {s_name}"
my_str += ' & ' + f"{df_sem['Num. parameters'].mean():.0f}"
print(f'dim_z: {dim_z} dataset: {data_str}')
print(f"{my_str} \\\\")
if limit_dim_z: df_z = df_z[df_z['dim(z)'] <= limit_dim_z]
print(f"Number of experiments: {len(df_z)}")
metrics = ['MMD Obs.', 'MMD Inter.', 'MSE CF']
df_g = df_z.groupby(['Dataset', 'SEM', 'dim(z)']).agg(['mean', 'std', 'count'])[metrics]
print(df_g)
return df_g
def VACA_dimension_z_sem(self, limit_dim_z=None,
sem='non-linear',
filter_=None,
y_lim=None,
font_scale=1):
cols_metrics = list(self.metrics_dict.keys())
groupby_z = ['model_params_z_dim', 'Dataset', 'SEM']
metrics_z = cols_metrics
cols_z = [*groupby_z, *metrics_z]
df_z = self.df[self.df.Model == 'VACA'].copy()
df_z = df_z[df_z.SEM == sem]
if filter_ is not None:
for key, value in filter_.items():
df_z = df_z[df_z[key] == value]
df_z.rename(columns={"model_params_z_dim": "dim(z)"}, inplace=True)
if limit_dim_z: df_z = df_z[df_z['dim(z)'] <= limit_dim_z]
df_z = self.order_by_dataset(df_z)
df_z.rename(columns={"num_parameters": "Num. parameters"}, inplace=True)
print(f"Number of experiments: {len(df_z)}")
metrics = ['MMD Obs.', 'MMD Inter.', 'MSE CF']
# df_g = df_z.groupby(['dim(z)']).agg(['mean', 'std'])[metrics]
print(df_z.groupby(['dim(z)']).agg(['mean', 'std', 'count'])[metrics])
# x = 'dim(z)'
# hue = 'Dataset'
hue = 'dim(z)'
x = 'Dataset'
if font_scale != 1:
sns.set(font_scale=font_scale)
sns.set_style("white")
for i, met in enumerate(metrics):
g = sns.catplot(x=x, y=met, data=df_z, showfliers=False,
kind="box", legend=False, hue=hue
)
# plt.legend(loc='best')
if isinstance(y_lim, list):
g.set(ylim=y_lim[i])
g.fig.subplots_adjust(top=0.9) # adjust the Figure in rp
g.fig.suptitle(f'SEM: {sem}')
plt.show()
my_str = ''.join(filter(str.isalnum, met)).lower()
g.savefig(os.path.join(self.save_dir, f'dimension_z_{my_str}_{sem}.png'))
# Plot number of parameters
fig, ax = plt.subplots()
_ = sns.lineplot(x='dim(z)',
y='Num. parameters',
data=df_z,
legend=True,
hue='Dataset',
ax=ax)
fig.subplots_adjust(top=0.9) # adjust the Figure in rp
fig.suptitle(f'SEM: {sem}')
plt.show()
my_str = ''.join(filter(str.isalnum, met)).lower()
fig.savefig(os.path.join(self.save_dir, f'dimension_z_{sem}_num_params.png'))
# print(df_z.groupby(['Dataset', 'dim(z)']).mean()[['Num. parameters']])
return
def cross_validate_nn(self, only_valid=True, model_name='VACA', metrics_to_use=[1, 2, 3, 4, 7, 8], debug=True):
print('\nCross validating nn')
cols_metrics = list(self.metrics_dict.keys())
metrics_list = []
for i, c in enumerate(cols_metrics):
if i in metrics_to_use:
metrics_list.append(c)
add = 'True'
else:
add = 'False'
print(f"({i}) [{add}] {c}")
df = self.df.copy()
if only_valid:
df = self.filter_valid_configurations(df).copy()
groupby = ['Dataset', 'SEM']
if model_name == 'VACA':
df = df[df.Model == 'VACA']
df['model_params_h_dim_list_enc'] = df['model_params_h_dim_list_enc'].apply(lambda x: x.split('_')[0])
df['model_params_h_dim_list_dec'] = df['model_params_h_dim_list_dec'].apply(lambda x: x.split('_')[0])
df = df[df.model_params_h_dim_list_dec == df.model_params_h_dim_list_enc]
df['model_params_h_dim_list'] = df['model_params_h_dim_list'].apply(lambda x: x.split('_')[-1])
groupby.append('model_params_h_dim_list')
elif model_name == 'CAREFL':
df = df[df.Model == 'CAREFL']
groupby.append('model_params_n_hidden')
all_cols = [*groupby, *metrics_list]
if debug:
return all_cols, df
df = df[all_cols]
df[metrics_list] = df[metrics_list] * 100
df_mean = df.groupby(groupby).mean()[metrics_list].applymap(lambda x: '{0:.2f}'.format(x))
print(df_mean)
df_std = df.groupby(groupby).std()[metrics_list].applymap(lambda x: '{0:.2f}'.format(x))
print(df_std)
df_comparison = df_mean + '$\pm$' + df_std
table_file = os.path.join(self.save_dir, f'my_table_nn_{model_name}.tex')
with open(table_file, 'w') as tf:
tf.write(df_comparison.to_latex(escape=False))
df_count = df.groupby(groupby).count()[metrics_list]
print(df_count)
table_file = os.path.join(self.save_dir, f'my_table_nn_count_{model_name}.tex')
with open(table_file, 'w') as tf:
tf.write(df_count.to_latex(escape=False))
return
def print_cols(my_cols):
for c in my_cols:
print(c)
print('')
def create_df_results(root_dir, add_time=False):
experiment_results = []
for json_file_name in glob.glob(os.path.join(root_dir, '**', 'output.json'), recursive=True):
with open(json_file_name) as json_file:
json_exper = json.load(json_file)
json_exper['json_filename'] = json_file_name
if add_time:
json_file_name_time = os.path.join(os.path.dirname(json_file_name), 'time.json')
if os.path.exists(json_file_name_time):
with open(json_file_name_time) as json_file:
json_exper_time = json.load(json_file)
json_exper['train_time_total'] = json_exper_time['train_time_total']
json_exper['train_time_avg_per_epoch'] = json_exper_time['train_time_avg_per_epoch']
json_exper['train_epochs'] = json_exper['train_time_total'] / json_exper['train_time_avg_per_epoch']
experiment_results.append(json_exper)
return
|
pd.DataFrame.from_dict(experiment_results)
|
pandas.DataFrame.from_dict
|
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from nilearn.datasets.utils import _fetch_files
from scipy import sparse
class StudyID(str):
pass
class TfIDf(float):
pass
NS_DATA_URL = "https://github.com/neurosynth/neurosynth-data/raw/master/"
def fetch_study_metadata(
data_dir: Path, version: int = 7, verbose: int = 1
) -> pd.DataFrame:
"""
Download if needed the `metadata.tsv.gz` file from Neurosynth and load
it into a pandas DataFrame.
The metadata table contains the metadata for each study. Each study (ID)
is stored on its own line. These IDs are in the same order as the id
column of the associated `coordinates.tsv.gz` file, but the rows will
differ because the coordinates file will contain multiple rows per
study. They are also in the same order as the rows in the
`features.npz` files for the same version.
The metadata will therefore have N rows, N being the number of studies
in the Neurosynth dataset. The columns (for version 7) are:
- id
- doi
- space
- title
- authors
- year
- journal
Parameters
----------
data_dir : Path
the path for the directory where downloaded data should be saved.
version : int, optional
the neurosynth data version, by default 7
verbose : int, optional
verbose param for nilearn's `_fetch_files`, by default 1
Returns
-------
pd.DataFrame
the study metadata dataframe
"""
metadata_filename = f"data-neurosynth_version-{version}_metadata.tsv.gz"
metadata_file = _fetch_files(
data_dir,
[
(
metadata_filename,
NS_DATA_URL + metadata_filename,
{},
),
],
verbose=verbose,
)[0]
metadata =
|
pd.read_table(metadata_file)
|
pandas.read_table
|
import os
import glob
import re
import pandas as pd
import numpy as np
from datetime import date
from .build_primary_pset_tables import build_primary_pset_tables, build_cell_df, build_compound_df, build_tissue_df
from .build_experiment_tables import build_experiment_tables, build_experiment_df
from .build_gene_compound_tissue_dataset_tables import build_gene_compound_tissue_dataset_df
from .write_pset_table import write_pset_table
from .build_dataset_join_tables import build_dataset_join_dfs, build_dataset_cell_df
# -- Enable logging
from loguru import logger
import sys
logger_config = {
"handlers": [
{"sink": sys.stdout, "colorize": True, "format":
"<green>{time}</green> <level>{message}</level>"},
{"sink": f"logs/build_all_pset_tables.log",
"serialize": True, # Write logs as JSONs
"enqueue": True}, # Makes logging queue based and thread safe
]
}
logger.configure(**logger_config)
@logger.catch
def build_all_pset_tables(
pset_dict: dict,
pset_name: str,
procdata_dir: str,
gene_sig_dir: str
) -> None:
"""
Build all tables for a dataset and write them to a directory of all processed data.
@param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
@param pset_name: [`str`] The name of the PSet
@param procdata_dir: [`str`] The file path to the directory containing processed data
@param gene_sig_dir: [`str`] The file path to the directory containing gene_compounds data
@return: [`None`]
"""
pset_dfs = {}
# Build primary tables (relating to cells, compounds, tissues, genes)
logger.info('Building primary tables...')
pset_dfs = build_primary_pset_tables(pset_dict, pset_name)
logger.info('Building dataset join tables...')
pset_dfs = {**pset_dfs, **build_dataset_join_dfs(
pset_dict, pset_name, pset_dfs)}
# Build experiment tables
logger.info('Building experiment tables...')
# FIX: Modified to use pre-3.9 syntax to ensure backwards compatibility
pset_dfs = {**pset_dfs, **build_experiment_tables(
pset_dict, pset_name, pset_dfs['cell'])}
# Build summary/stats tables
logger.info('Building mol_cell table...')
pset_dfs['mol_cell'] = build_mol_cell_df(
pset_dict, pset_name, pset_dfs['dataset_cell'])
logger.info('Building dataset_statistics table...')
pset_dfs['dataset_statistics'] = build_dataset_stats_df(
pset_dict, pset_name, pset_dfs)
# Write all tables to CSV files
for df_name in pset_dfs.keys():
write_pset_table(pset_dfs[df_name], df_name, pset_name, procdata_dir)
log_file = open(os.path.join(procdata_dir, pset_name,
f'{pset_name}_log.txt'), "w")
log_file.write('Wrote the following PSet tables: \n')
log_file.writelines(f'{table}\n' for table in pset_dfs.keys())
log_file.write(f'on {date.today()}')
log_file.close()
@logger.catch
def build_mol_cell_df(
pset_dict: dict,
pset_name: str,
dataset_cell_df: pd.DataFrame=None,
molecularTypes: list=['rna', 'rnaseq', 'cnv', 'mutation']
) -> pd.DataFrame:
"""
Builds a table that summarizes the number of profiles, per cell line, per molecular data
type, in this dataset. (Only considers molecular data types for which there are sens stats?)
@param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
@param gene_compound_df: [`pd.DataFrame`] The gene_compound table for this PSet
@param dataset_cell_df: [`pd.DataFrame`] A table containing all the cells in this
PSet and the PSet name
@return: [`pd.DataFrame`] The table with the number of profiles for each cell line,
for each molecular data type
"""
mol_cell_df = pd.DataFrame(
columns=['cell_id', 'dataset_id', 'mDataType', 'num_prof'])
if 'molecularProfiles' in pset_dict:
profiles_dict = pset_dict['molecularProfiles']
molecularTypes = list(profiles_dict.keys())
else:
profiles_dict = None
if dataset_cell_df is None:
dataset_cell_df = build_dataset_cell_df(
pset_dict, pset_name, cell_df=None)
for mDataType in molecularTypes:
if isinstance(profiles_dict, dict):
# Get the number of times each cellid appears in colData for that mDataType
num_profiles = profiles_dict[mDataType]['colData']['cellid'] \
.value_counts()
# Join with datasets cells on cellid
df = pd.merge(dataset_cell_df, num_profiles,
left_on='cell_id', right_on=num_profiles.index, how='left')
# Rename num_profiles column
df.rename(columns={'cellid': 'num_prof'}, inplace=True)
# Set mDataType column to the current molecular type
df['mDataType'] = mDataType
else:
# If PSet contains no molecular profiles, set num_prof to 0
            # for all cell lines and all molecular data types
df = dataset_cell_df.copy()
df['mDataType'] = mDataType
df['num_prof'] = 0
# Append to mol_cell_df
        mol_cell_df = pd.concat([mol_cell_df, df])
# Replace any NaN in the num_profiles column with 0
mask = mol_cell_df.query('num_prof.isna()').index
mol_cell_df.loc[mask, 'num_prof'] = 0
mol_cell_df['num_prof'] = mol_cell_df['num_prof'].astype('int32')
return mol_cell_df
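# A toy sketch of the value_counts + merge pattern used in build_mol_cell_df above
# (hypothetical cell ids; merging on the counts' index is what right_on=num_profiles.index does):
#   counts = pd.Series(['c1', 'c1', 'c2'], name='cellid').value_counts()   # c1 -> 2, c2 -> 1
#   cells = pd.DataFrame({'cell_id': ['c1', 'c2', 'c3']})
#   pd.merge(cells, counts, left_on='cell_id', right_index=True, how='left')
#   # 'c3' gets NaN in the counts column, which the code above later fills with 0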
@logger.catch
def build_dataset_stats_df(pset_dict, pset_name, pset_dfs=None):
"""
Summarizes how many cell lines, tissues, compounds, and experiments
are contained within the dataset.
@param pset_dict: [`dict`] A nested dictionary containing all tables in
the PSet
@param pset_name: [`string`] The name of the PSet
@param pset_dfs: [`dict`] A dictionary of tables from the PSet,
with table names as the keys
@return: [`pd.DataFrame`] A one-row table with the summary stats for
this PSet
"""
if pset_dfs is None:
pset_dfs = {}
if 'tissue' not in pset_dfs:
pset_dfs['tissue'] = build_tissue_df(pset_dict)
if 'cell' not in pset_dfs:
pset_dfs['cell'] = build_cell_df(pset_dict)
if 'compound' not in pset_dfs:
pset_dfs['compound'] = build_compound_df(pset_dict)
if 'experiment' not in pset_dfs:
pset_dfs['experiment'] = build_experiment_df(
pset_dict, pset_name, pset_dfs['cell'])
if 'gene' not in pset_dfs:
pset_dfs['gene'] =
|
pd.DataFrame()
|
pandas.DataFrame
|
import unittest
import pandas as pd
from data_profiler.profilers import OrderColumn
from . import test_utils
from unittest.mock import patch, MagicMock
from collections import defaultdict
# This is taken from: https://github.com/rlworkgroup/dowel/pull/36/files
# undo when cpython#4800 is merged.
unittest.case._AssertWarnsContext.__enter__ = test_utils.patched_assert_warns
class TestOrderColumn(unittest.TestCase):
@staticmethod
def _update_order(data):
df = pd.Series(data).apply(str)
profiler = OrderColumn(df.name)
profiler.update(df)
return profiler.order
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = OrderColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.sample_size, 0)
self.assertIsNone(profiler.order)
def test_descending(self):
data = ['za', 'z', 'c', 'a']
order = self._update_order(data)
self.assertEqual(order, 'descending')
data = [5, 3, 2]
order = self._update_order(data)
self.assertEqual(order, 'descending')
def test_ascending(self):
data = ['a', 'b', 'z', 'za']
order = self._update_order(data)
self.assertEqual(order, 'ascending')
data = [2, 3, 11]
order = self._update_order(data)
self.assertEqual(order, 'ascending')
def test_constant_value(self):
data = ['a']
order = self._update_order(data)
self.assertEqual(order, 'constant value')
data = ['a', 'a', 'a', 'a', 'a']
order = self._update_order(data)
self.assertEqual(order, 'constant value')
def test_random(self):
data = ['a', 'b', 'ab']
order = self._update_order(data)
self.assertEqual(order, 'random')
data = [1, 11, 4]
order = self._update_order(data)
self.assertEqual(order, 'random')
def test_batch_updates(self):
data = ['a', 'a', 'a']
df = pd.Series(data)
profiler = OrderColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.order, 'constant value')
data = ['a', 'b', 'c']
df = pd.Series(data)
profiler.update(df)
self.assertEqual(profiler.order, 'ascending')
# previous was ascending, should stay ascending bc now receiving const
data = ['c', 'c', 'c']
df = pd.Series(data)
profiler.update(df)
self.assertEqual(profiler.order, 'ascending')
# previous was ascending, should be random now receiving descending
data = ['c', 'b', 'a']
df = pd.Series(data)
profiler.update(df)
self.assertEqual(profiler.order, 'random')
def test_profile(self):
data = [1]
df = pd.Series(data).apply(str)
profiler = OrderColumn(df.name)
expected_profile = dict(
order='constant value',
times={'order' : 2.0}
)
time_array = [float(x) for x in range(4, 0, -1)]
with patch('time.time', side_effect = lambda: time_array.pop()):
profiler.update(df)
profile = profiler.profile
# key and value populated correctly
self.assertDictEqual(expected_profile, profile)
def test_profile_merge(self):
data = [1, 2, 3, 4, 5, 6]
df = pd.Series(data).apply(str)
profiler = OrderColumn("placeholder_name")
profiler.update(df)
data2 = [7, 8, 9, 10]
df2 = pd.Series(data2).apply(str)
profiler2 = OrderColumn("placeholder_name")
profiler2.update(df2)
data3 = [2, 3, 4]
df3 = pd.Series(data3).apply(str)
profiler3 = OrderColumn("placeholder_name")
profiler3.update(df3)
data4 = [3, 3, 3, 3]
df4 =
|
pd.Series(data4)
|
pandas.Series
|
# -*- coding: utf-8 -*-
import subprocess
import json
import os
import io
from multiprocessing import Pool
import multiprocessing
import multiprocessing.pool
from operator import itemgetter
import random
import string
import pickle
import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import pysam
import mip_classes as mod
import pandas as pd
from pandas.errors import MergeError
import gzip
from primer3 import calcHeterodimerTm
import primer3
import traceback
from msa_to_vcf import msa_to_vcf as msa_to_vcf
import itertools
import sys
import allel
from Bio import SeqIO
print("functions reloading")
# backbone dictionary
mip_backbones = {
"hybrid_bb": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNNNN",
"hybrid_split": "NNNNAGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
"hybrid_split_hp": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
"gc_bb": "GCAGATCGGAAGAGCACACCTCGCCAAGCTTTCGGCNNNNNNNNNNNN",
"slx_bb": "CTTCAGCTTCCCGATCCGACGGTAGTGTNNNNNNNNNNNN"
}
"""
# Below class allows processors from a pool from multiprocessing module to
create processor pools of their own.
# http://mindcache.io/2015/08/09/python-multiprocessing-module-daemonic-processes-are-not-allowed-to-have-children.html
class NoDaemonProcess(multiprocessing.Process):
# make 'daemon' attribute always return False
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
Process = NoDaemonProcess
"""
# above code was broken when switching to python 3. Below is taken from:
# https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic/8963618#8963618
class NoDaemonProcess(multiprocessing.Process):
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
class NoDaemonContext(type(multiprocessing.get_context())):
Process = NoDaemonProcess
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
def __init__(self, *args, **kwargs):
kwargs['context'] = NoDaemonContext()
super(NoDaemonProcessPool, self).__init__(*args, **kwargs)
# Exception wrapper for multiprocessing taken from
# https://stackoverflow.com/questions/6126007/python-getting-a-traceback-from-a-multiprocessing-process/26096355#26096355
class ExceptionWrapper(object):
def __init__(self, ee, exc):
self.ee = ee
self.exc = exc
__, __, self.tb = sys.exc_info()
def re_raise(self):
print(self.exc)
raise self.ee.with_traceback(self.tb)
###############################################################
# Region prep related functions
###############################################################
def coordinate_to_target(coordinates, snp_locations, capture_size):
""" Create MIP targets starting from a snp file that is produced offline,
    usually from Annovar. This is a tab separated file with the following
    content: chr1 2595307 2595307 A G rs3748816.
This can be generalized to any target with coordinates.
"""
# create target regions to cover all snps
# start by getting snps on same chromosome together
snp_chroms = {}
reference_snp_locations = rsl = coordinates
for r in rsl:
chrom = rsl[r]["chrom"]
try:
snp_chroms[chrom].append([rsl[r]["begin"],
rsl[r]["end"]])
except KeyError:
snp_chroms[chrom] = [[rsl[r]["begin"],
rsl[r]["end"]]]
# merge snps that are too close to get separate regions
# the length should be twice the capture size
merged_snp_chroms = {}
for c in snp_chroms:
merged_snp_chroms[c] = merge_overlap(snp_chroms[c], 2 * capture_size)
# create regions for alignment
for c in merged_snp_chroms:
regions = merged_snp_chroms[c]
for r in regions:
snps_in_region = []
for s in reference_snp_locations:
if ((reference_snp_locations[s]["chrom"] == c)
and (r[0] <= reference_snp_locations[s]["begin"]
<= reference_snp_locations[s]["end"] <= r[1])):
snps_in_region.append(s)
r.append(snps_in_region)
for reg in regions:
snps = reg[2]
reg_begin = reg[0]
reg_end = reg[1]
reg_locations = []
for s in snps:
s_locations = []
locations = snp_locations[s]
ref_location = reference_snp_locations[s]
ref_begin = ref_location["begin"]
ref_end = ref_location["end"]
left_flank_buffer = ref_begin - reg_begin + capture_size
right_flank_buffer = reg_end - ref_end + capture_size
for l in locations:
snp_chrom = l["chrom"]
snp_begin = l["begin"]
snp_end = l["end"]
tar_begin = snp_begin - left_flank_buffer
tar_end = snp_end + right_flank_buffer
s_locations.append([snp_chrom, tar_begin, tar_end])
reg_locations.append(s_locations)
reg.append(reg_locations)
# create target coordinate for each region
target_coordinates = {}
for c in merged_snp_chroms:
regions = merged_snp_chroms[c]
for reg in regions:
region_name = "-".join(reg[2])
region_targets = reg[3][0]
for i in range(len(region_targets)):
reg_name = region_name + "-" + str(i)
if reg_name in target_coordinates:
print((reg_name, " is already in targets!"))
else:
target_coordinates[reg_name] = region_targets[i]
return target_coordinates
def rsid_to_target(resource_dir, snp_file):
""" Create MIP targets starting from a snp file that is produced offline,
usually from Annovar. This is a tab separated file with the following
content: chr1 2595307 2595307 A G rs3748816.
This can be generalized to any target with coordinates.
"""
# one snp can have multiple locations on the reference genome,
# this can happen with snps in regions where there are multiple different
# assemblies (HLA locus, for example). So first step is to get each of
# these locations in the genome.
snp_locations = {}
capture_types = {}
with io.open(os.path.join(resource_dir, snp_file),
encoding="utf-8") as infile:
for line in infile:
newline = line.strip().split("\t")
rsid = newline[5]
try:
# update the location dictionary if the rsid is already present
temp_dic = {"chrom": newline[0],
"begin": int(newline[1]),
"end": int(newline[2]),
"ref_base": newline[3],
"alt_bases": [newline[4]]}
# check if this location is already in the dict
# append the new alternative base to the dict
for snp in snp_locations[rsid]:
if ((snp["begin"] == temp_dic["begin"])
and (snp["end"] == temp_dic["end"])
and (snp["chrom"] == temp_dic["chrom"])
and (snp["ref_base"] == temp_dic["ref_base"])):
snp["alt_bases"].append(temp_dic["alt_bases"][0])
break
else:
# add the snp dict if the location is different than what
# is present in the location dict.
snp_locations[rsid].append(temp_dic)
except KeyError:
# add the new rsid to location dict if it is not already there
snp_locations[rsid] = [temp_dic]
capture_types[rsid] = newline[6]
# one reference location for each snp is required
    # alternative assembly chromosomes have an underscore in their names,
    # so that will be utilized to get the location in the original assembly,
# i.e. the chromosome that does not have the underscore
# (chr7 and not chr7_alt08)
reference_snp_locations = {}
problem_snps = []
for s in snp_locations:
if len(snp_locations[s]) == 1:
reference_snp_locations[s] = snp_locations[s][0]
else:
for i in range(len(snp_locations[s])):
if len(snp_locations[s][i]["chrom"].split("_")) == 1:
reference_snp_locations[s] = snp_locations[s][i]
break
else:
print(("Short chromosome name not found! "
"Please check the output list."))
problem_snps.append(s)
reference_snp_locations[s]["capture_type"] = capture_types[s]
return reference_snp_locations, snp_locations
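# A minimal usage sketch (not part of the pipeline; the function name, paths
# and file name below are hypothetical). It shows how the two functions above
# are meant to chain: rsid_to_target parses a tab separated snp file whose
# columns are chrom, begin, end, ref base, alt base, rsid and capture type,
# and its two return values feed coordinate_to_target.
def _snp_target_sketch(resource_dir="/path/to/resources",
                       snp_file="target_snps.txt", capture_size=170):
    reference_locations, all_locations = rsid_to_target(resource_dir, snp_file)
    return coordinate_to_target(reference_locations, all_locations,
                                capture_size)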
def gene_to_target(gene_list, species):
target_coordinates = {}
for gene in gene_list:
e = get_exons(get_gene(gene,
get_file_locations()[species]["refgene"],
alternative_chr=1))
try:
target_coordinates[gene] = {"chrom": e["chrom"],
"begin": e["begin"],
"end": e["end"]}
except KeyError:
target_coordinates[gene] = {"chrom": np.nan,
"begin": np.nan,
"end": np.nan}
return target_coordinates
def gene_to_target_exons(gene_list, species, exon_list):
target_coordinates = {}
for i in range(len(gene_list)):
gene = gene_list[i]
exons_wanted = exon_list[i]
gene_exons = get_exons(get_gene(gene,
get_file_locations()[species]["refgene"],
alternative_chr=1))
exons = gene_exons["exons"]
if gene_exons["orientation"] == "-":
exons.reverse()
if exons_wanted == "all":
for j in range(len(exons)):
e = exons[j]
tar_name = "-".join([gene, "exon", str(j)])
target_coordinates[tar_name] = {"chrom": gene_exons["chrom"],
"begin": e[0],
"end": e[1]}
else:
for j in exons_wanted:
try:
e = exons[j]
tar_name = "-".join(gene, "exon", str(j))
target_coordinates[tar_name] = {
"chrom": gene_exons["chrom"],
"begin": e[0],
"end": e[1]}
except IndexError:
print(("Exon ", j, " does not exist for gene ", gene))
return target_coordinates
def parse_alignment(reg_file):
""" Create a rinfo dictionary from a rinfo file."""
reg_dic = {}
with open(reg_file, "r") as infile:
for line in infile:
if line.startswith("REGION"):
newline = line.strip().split("\t")
key1 = newline[1].split(":")[0]
key2 = newline[1].split(":")[1]
if key1 not in reg_dic:
reg_dic[key1] = {key2: {"copyname": newline[2],
"chr": int(newline[3][3:]),
"begin": int(newline[4]),
"end": int(newline[5]),
"ori": (newline[6] == "F")}}
else:
reg_dic[key1][key2] = {"copyname": newline[2],
"chr": int(newline[3][3:]),
"begin": int(newline[4]),
"end": int(newline[5]),
"ori": (newline[6] == "F")}
return reg_dic
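# A minimal sketch of parse_alignment (hypothetical file path and content,
# not used by the pipeline). A line such as
# "REGION\tEXON1:C0\tcopyA\tchr7\t1000\t2000\tF" parses into
# {"EXON1": {"C0": {"copyname": "copyA", "chr": 7, "begin": 1000,
#                   "end": 2000, "ori": True}}}.
def _parse_alignment_sketch(rinfo_file="/tmp/example.rinfo"):
    with open(rinfo_file, "w") as out:
        out.write("REGION\tEXON1:C0\tcopyA\tchr7\t1000\t2000\tF\n")
    return parse_alignment(rinfo_file)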
def update_rinfo_file(rinfo_file, update_file, output_file):
"""Update a rinfo file with the lines provided in the update_file.
This function will read all lines from a rinfo file and an update file.
First two columns of rinfo files describe the parameters while the
rest assign values. All the lines in the update file which share the
first column with a line in the original file will replace that line
in the original file. All other lines in the original file will remain.
"""
# read the update file
update_dict = {}
with open(update_file) as infile:
for line in infile:
if not line.startswith("#"):
newline = line.strip().split("\t")
update_dict[(newline[0], newline[1])] = line
# read the rinfo file and update as appropriate
with open(rinfo_file) as infile, open(output_file, "w") as outfile:
for line in infile:
if not line.startswith("#"):
newline = line.strip().split("\t")
line_key = (newline[0], newline[1])
try:
outfile.write(update_dict[line_key])
except KeyError:
outfile.write(line)
else:
outfile.write(line)
def get_target_coordinates(res_dir, species, capture_size,
coordinates_file=None, snps_file=None,
genes_file=None):
"""Extract MIP target coordinates from provided files."""
capture_types = {}
# Get target coordinates specified as genomic coordinates
if coordinates_file is None:
region_coordinates = {}
coord_names = []
else:
coordinates_file = os.path.join(res_dir, coordinates_file)
try:
coord_df = pd.read_table(coordinates_file, index_col=False)
coord_names = coord_df["Name"].tolist()
coord_df.rename(columns={"Name": "name", "Chrom": "chrom",
"Start": "begin", "End": "end"}, inplace=True)
region_coordinates = coord_df.set_index("name").to_dict(
orient="index")
# update capture types of targets
for g in region_coordinates:
if g not in capture_types:
capture_types[g] = region_coordinates[g]["CaptureType"]
except IOError:
print(("Target coordinates file {} could not be found.").format(
(coordinates_file)))
region_coordinates = {}
coord_names = []
# Get Gene target coordinates
if genes_file is None:
gene_coordinates = {}
gene_names = []
else:
# get the alias file (gene name to gene id mapping) if available
try:
with open(get_file_locations()[species]["alias"]) as infile:
alias = json.load(infile)
except (KeyError, IOError):
pass
try:
genes_file = os.path.join(res_dir, genes_file)
genes_df = pd.read_table(genes_file, index_col=False)
gene_names = genes_df["Gene"].tolist()
genes = genes_df.set_index("Gene").to_dict(orient="index")
gene_id_to_gene = {}
gene_ids = []
gene_coordinates = {}
for g in genes:
try:
if np.isnan(genes[g]["GeneID"]):
try:
gene_id = alias[g]
genes[g]["GeneID"] = gene_id
except KeyError:
print("""Alias for gene %s is not found.
Either provide a gene ID or use an alias
which is present in refgene file.""" % g)
continue
except NameError:
print(""" Gene ID is not provided for %s.
If gene name will be used to extract gene
ID an alias dictionary must be specified.
""" % g)
continue
except TypeError:
pass
gene_ids.append(genes[g]["GeneID"])
gene_id_to_gene[genes[g]["GeneID"]] = g
capture_types[g] = genes[g]["CaptureType"]
gene_id_coordinates = gene_to_target(gene_ids, species)
for gid in gene_id_coordinates:
gene_coordinates[gene_id_to_gene[gid]] = gene_id_coordinates[
gid]
except IOError:
print(("Target genes file {} could not be found.").format(
(genes_file)))
gene_coordinates = {}
gene_names = []
if snps_file is None:
snp_coordinates = {}
else:
# Get SNP target coordinates
try:
snps_file = os.path.join(res_dir, snps_file)
snp_df = pd.read_table(snps_file, index_col=False,
dtype={"Start": int, "End": int})
snp_df.rename(columns={"Name": "name", "Chrom": "chrom",
"Start": "begin", "End": "end"},
inplace=True)
snp_coordinates = snp_df.set_index("name").to_dict(orient="index")
for g in snp_coordinates:
if g not in capture_types:
capture_types[g] = "targets"
except IOError:
print(("Target SNPs file {} could not be found.").format(
(snps_file)))
snp_coordinates = {}
# merge coordinates dictionaries
all_coordinates = {}
all_coordinates.update(snp_coordinates)
all_coordinates.update(gene_coordinates)
all_coordinates.update(region_coordinates)
# Fix names that has unwanted characters
for c in list(all_coordinates.keys()):
clist = []
for ch in c:
if ch.isalnum():
clist.append(ch)
else:
clist.append("-")
newc = "".join(clist)
if newc != c:
print("%s is replaced with %s" % (c, newc))
all_coordinates[newc] = all_coordinates.pop(c)
capture_types[newc] = capture_types.pop(c)
target_regions, target_names = merge_coordinates(all_coordinates,
capture_size)
    # prioritize gene names and coordinate names over snp or other names
for t in list(target_names.keys()):
for n in target_names[t]:
if n in gene_names:
target_names[n] = target_names.pop(t)
target_regions[n] = target_regions.pop(t)
break
elif n in coord_names:
target_names[n] = target_names.pop(t)
target_regions[n] = target_regions.pop(t)
break
out_dict = {"target_regions": target_regions,
"target_names": target_names,
"capture_types": capture_types,
"gene_names": gene_names,
"snp_coordinates": snp_coordinates,
"gene_coordinates": gene_coordinates,
"region_coordinates": region_coordinates}
return out_dict
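# Note on the expected input tables for get_target_coordinates (column names
# are taken from the parsing code above; file contents are hypothetical):
#   coordinates_file: tab separated with columns Name, Chrom, Start, End and
#       CaptureType, e.g. "regionA  chr1  100000  100500  whole"
#   genes_file: tab separated with columns Gene, GeneID and CaptureType
#   snps_file: tab separated with columns Name, Chrom, Start, End (these
#       targets get the capture type "targets")
# The three sources are merged, target names are sanitized, and nearby
# targets are grouped via merge_coordinates.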
def merge_coordinates(coordinates, capture_size):
"""Merge overlapping coordinates for MIP targets.
Parameters
----------
coordinates: python dictionary
Coordinates to be merged in the form {target-name: {chrom: chrx,
begin: start-coordinate, end: end-coordinate}, ..}
capture_size: int
Anticipated MIP capture size. If two regions are as close as 2 times
this value, they will be merged.
Returns
-------
target_coordinates: python dictionary
merged coordinates dictionary
target_names: python dictionary
names of included targets in each merged region.
"""
# create target regions to cover all snps
# start by getting snps on same chromosome together
chroms = {}
for c in coordinates:
chrom = coordinates[c]["chrom"]
try:
chroms[chrom].append([coordinates[c]["begin"],
coordinates[c]["end"]])
except KeyError:
chroms[chrom] = [[coordinates[c]["begin"],
coordinates[c]["end"]]]
# merge snps that are too close to get separate regions
# the length should be twice the capture size
merged_chroms = {}
for c in chroms:
merged_chroms[c] = merge_overlap(chroms[c], 2 * capture_size)
# create regions for alignment
# create target coordinate for each region
target_coordinates = {}
target_names = {}
for c in merged_chroms:
regions = merged_chroms[c]
for reg in regions:
targets_in_region = []
for co in coordinates:
if (coordinates[co]["chrom"] == c
and reg[0] <= coordinates[co]["begin"]
<= coordinates[co]["end"] <= reg[1]):
targets_in_region.append(co)
region_name = targets_in_region[0]
target_names[region_name] = targets_in_region
r_start = reg[0]
r_end = reg[1]
target_coordinates[region_name] = [c, r_start, r_end]
return target_coordinates, target_names
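# A minimal, self-contained sketch of merge_coordinates (toy coordinates and
# a hypothetical function name; not part of the pipeline). With
# capture_size=170, the two chr1 targets below are closer than 2 * 170 bp and
# should end up in a single merged region (named after one of the contained
# targets); the chr2 target stays on its own.
def _merge_coordinates_sketch():
    toy_coordinates = {
        "snp-1": {"chrom": "chr1", "begin": 1000, "end": 1000},
        "snp-2": {"chrom": "chr1", "begin": 1200, "end": 1200},
        "gene-A": {"chrom": "chr2", "begin": 5000, "end": 8000},
    }
    target_coordinates, target_names = merge_coordinates(toy_coordinates, 170)
    return target_coordinates, target_names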
def create_target_fastas(res_dir, targets, species, flank):
""" Create fasta files for a list of region coordinates provided as a dict
in the form {target1: [chrx, start, end], target2: [chrx, start, end], ..},
flank on both sides with the specified length. If beginning coordinate is
less than zero, reset the beginning coordinate to zero..
"""
for t in list(targets.keys()):
chrom = targets[t][0]
begin = targets[t][1] - flank + 1
if begin < 0:
begin = 0
end = targets[t][2] + flank
rk = chrom + ":" + str(begin) + "-" + str(end)
try:
with open(os.path.join(res_dir, t + ".fa"), "w") as outfile:
outfile.write(get_fasta(rk, species, header=t))
except Exception as e:
print(("Fasta file for {} could not be created, "
"due to error {}. It will be removed"
" from the target list.").format(t, e))
targets.pop(t)
return
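# A minimal usage sketch of create_target_fastas (hypothetical paths,
# coordinates and function name). Actually running it requires the genome
# resources used by get_fasta for the given species.
def _target_fasta_sketch(res_dir="/path/to/design_dir", species="hs"):
    toy_targets = {"regionA": ["chr1", 100000, 100500]}
    create_target_fastas(res_dir, toy_targets, species, flank=150)
    return toy_targets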
def add_fasta_targets(res_dir, fasta_files, fasta_capture_type):
fasta_sequences = {}
capture_types = {}
for f in fasta_files:
f_file = os.path.join(res_dir, f)
try:
fasta_sequences.update(fasta_parser(f_file))
except IOError:
print(("Fasta file {} could not be found.").format(f_file))
for f in list(fasta_sequences.keys()):
flist = []
for fch in f:
if fch.isalnum():
flist.append(fch)
else:
flist.append("-")
newf = "".join(flist)
if f != newf:
print("%s is changed to %s." % (f, newf))
fasta_sequences[newf] = fasta_sequences.pop(f)
if newf not in capture_types:
capture_types[newf] = fasta_capture_type
with open(os.path.join(res_dir, newf + ".fa"), "w") as outfile:
outfile.write(">" + newf + "\n" + fasta_sequences[newf] + "\n")
return {"fasta_sequences": fasta_sequences, "capture_types": capture_types}
def set_genomic_target_alignment_options(target_regions, fasta_sequences,
identity, coverage, flank):
alignment_list = []
fasta_list = list(fasta_sequences.keys()) + list(target_regions.keys())
for t in fasta_list:
temp_dict = {"gene_name": t, "identity": identity}
try:
target_size = target_regions[t][2] - target_regions[t][1]
fasta_size = target_size + 2 * flank
except KeyError:
fasta_size = len(fasta_sequences[t])
cover = round(coverage * 100 / fasta_size, 1)
temp_dict["options"] = []
if cover > 100:
cover = 100
temp_dict["coverage"] = cover
if fasta_size < 100:
temp_dict["options"].extend(["--notransition", "--step=10",
"--ambiguous=iupac"])
elif fasta_size < 1000:
temp_dict["options"].extend(["--notransition", "--step=10",
"--ambiguous=iupac"])
elif fasta_size < 5000:
temp_dict["options"].extend(["--notransition",
"--step=" + str(int(fasta_size/10)),
"--ambiguous=iupac"])
else:
temp_dict["options"].extend(["--notransition",
"--step=" + str(int(fasta_size/10)),
"--ambiguous=iupac"])
alignment_list.append(temp_dict)
return alignment_list
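# Worked example of the coverage value computed above: for a 1000 bp genomic
# target with flank=150, fasta_size = 1000 + 2 * 150 = 1300; with coverage=500
# the reported lastz coverage cutoff becomes round(500 * 100 / 1300, 1) = 38.5
# percent (capped at 100).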
def align_region_multi(alignment_list, pro):
"""Parallelize a list of lastz alignments."""
p = Pool(pro)
p.map_async(align_region_worker, alignment_list)
p.close()
p.join()
return
def align_region_worker(l):
"""Worker function for align_region_multi.
Aligns a single fasta query file to a target fasta file. Both query
and target fasta files can be multi sequence files.
"""
# get parameters from the input list
# first item is the fasta file name, including file extension
region_key = l[0]
# second item holds the run directory for lastz
resource_dir = l[1]
# output file is the target name + ".al" where the alignment output
# will be saved.
output_file = l[2]
# target fasta file is usually the reference genome
target_fasta = l[3]
# each action item will be appended to the target or query argument
# within brackets. [unmask] and [multiple] are important target actions
# unmask: allows starting alignments in masked(lowercase) parts of the
# target multiple: indicates there are multiple sequences in the target
# file (e.g. chromosomes, contigs)
target_actions = l[4]
# query file is always treated as a multiple sequence file
# so there is no need for the multiple action
query_actions = l[5]
# percent cutoff value for identity/coverage of query to target. This only
# affects reporting and not the alignment process itself.
identity_cutoff = l[6]
coverage_cutoff = l[7]
# format of the output, follows --format: argument in lastz
# if format is general, it should be followed by a comma separated list of
    # fields to output, e.g. general:name1,text1,name2,text2,diff,score would
    # output the name and aligned sequence of the target, the name and aligned
    # sequence of the query, a string showing the differences and the
    # alignment score
output_format = l[8]
# additional options to pass to lastz
options = l[9]
query_fasta = os.path.join(resource_dir, region_key)
# create target actions text
if len(target_actions) > 0:
target_act = "[" + ",".join(target_actions) + "]"
else:
target_act = ""
# create query actions text
if len(query_actions) > 0:
query_act = "[" + ",".join(query_actions) + "]"
else:
query_act = ""
# create the command list to pass to the processor
comm = ["lastz_32",
target_fasta + target_act,
query_fasta + query_act,
"--output=" + os.path.join(resource_dir, output_file),
"--format=" + output_format,
"--filter=identity:" + str(identity_cutoff),
"--filter=coverage:" + str(coverage_cutoff)]
# add any extra options to the end of the command
comm.extend(options)
# run the command using subprocess module
subprocess.check_output(comm)
return
def align_genes_for_design(fasta_list, res_dir,
alignment_types=["differences", "general"],
species="hs", num_processor=30):
"""Perform specified alignments given in an alignment dict.
    This function is called from the align_targets function for the initial
    target alignment to the reference genome.
    It aligns sequences given in an alignment dict which contains alignment
    specifics. Each entry in this dict must have a corresponding fasta file in
    the res_dir specified. The alignment is performed against the reference
    genome. This function merely prepares a list of commands to pass to the
    align_region_worker function (via align_region_multi) to carry out
    alignments in parallel where multiple processors are available. Two types
    of alignment outputs will be generated; a "general" output with
    information about the alignment, such as where the alignment starts and
    ends, the percent identity, coverage etc. The second output lists the
    differences between the aligned sequences, showing at which positions
    there are nucleotide changes and what the changes are.
Parameters
----------
fasta_list: list
A list of dictionaries each of which contains specifics
for a single alignment, such as the name of the fasta file, coverage
and identity cut offs and any additional alignment parameters that are
passed to LastZ.
res_dir: str
Path to working directory where input and output files are located.
alignment_types: list
List of alignment types to be performed. Only "general" and/or
"differences" options are allowed.
species: str
Species whose reference genome will be used for alignment.
num_processor: int
Number of processors available for parallel processing.
"""
region_list = []
for gene_dict in fasta_list:
gene_name = gene_dict["gene_name"]
# percent cutoff value for identity/coverage of query to target.
# This only affects reporting and not the alignment process itself.
identity = gene_dict["identity"]
coverage = gene_dict["coverage"]
options = gene_dict["options"]
# alignment target is the reference genome of the specified species.
target = get_file_locations()[species]["fasta_genome"]
# alignment output should have the following fields.
# These are the bare minimum to be able to parse the alignment later.
out_fields = ["name1", "strand1", "zstart1", "end1", "length1",
"name2", "strand2", "zstart2", "end2", "zstart2+",
"end2+", "length2", "identity", "coverage"]
out_fields = ",".join(out_fields)
gen_out = "general:" + out_fields
# output fields for "differences" is fixed; it outputs the differences
# between the aligned sequence and the target.
dif_out = "differences"
if not os.path.exists(res_dir):
os.makedirs(res_dir)
# prepare a list of commands to feed to lastz for both alignment types
# i.e. "general" and "differences". Some of the additional parameters
# we are supplying here are the target and query actions.
# each action item will be appended to the target or query argument
# within brackets. [unmask] and [multiple] are important target actions
# unmask: allows starting alignments in masked(lowercase) parts of the
# target multiple: indicates there are multiple sequences in the target
# file (e.g. chromosomes, contigs)
if "general" in alignment_types:
al = [gene_name + ".fa", res_dir, gene_name + ".al", target,
["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage, gen_out, options]
region_list.append(al)
if "differences" in alignment_types:
al = [gene_name + ".fa", res_dir, gene_name + ".differences",
target, ["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage, dif_out, options]
region_list.append(al)
align_region_multi(region_list, num_processor)
return
def merge_alignments(resource_dir, fasta_list, output_prefix="merged"):
""" Merge the results of "general" type lastZ alignments into a
single file. This is used to process the alignment results from the
align_genes_for_design function where target sequences are aligned
against the reference genome.
Parameters
----------
resource_dir: str
Path to working directory where the alignment outputs are.
fasta_list: list
A list of dictionaries each of which has the specifics for a single
sequence alignment. It is used only to get alignment file names here.
output_prefix: str
Name for the output file. This will be appended by ".al" extension.
"""
# create a list for each alignment type (general and differences)
als_out = []
with open(os.path.join(
resource_dir, output_prefix + ".al"), "w") as alignment_file:
        fnum = 0
        for f in fasta_list:
            with open(os.path.join(resource_dir, f + ".al")) as alignment:
                linenum = 0
                for line in alignment:
                    if linenum > 0:
                        als_out.append(line.strip())
                    elif fnum == 0:
                        # keep the header line from the first file only
                        als_out.append(line.strip())
                        linenum += 1
                    else:
                        linenum += 1
            fnum += 1
alignment_file.write("\n".join(als_out))
return
def merge_alignment_diffs(resource_dir, fasta_list, output_prefix="merged"):
""" Merge the results of "differences" type lastZ alignments into a
single file. This is used to process the alignment results from the
align_genes_for_design function where target sequences are aligned
against the reference genome.
Parameters
----------
resource_dir: str
Path to working directory where the alignment outputs are.
fasta_list: list
A list of dictionaries each of which has the specifics for a single
sequence alignment. It is used only to get alignment file names here.
output_prefix: str
        Name for the output file. This will be appended by the ".differences"
        extension.
"""
# create a list for each alignment type (general and differences)
diffs_out = []
with open(os.path.join(
resource_dir, output_prefix + ".differences"), "w") as diff_file:
        for f in fasta_list:
            with open(os.path.join(resource_dir, f + ".differences")) as diffs:
                for d in diffs:
                    diffs_out.append(d.strip())
diff_file.write("\n".join(diffs_out))
return
def alignment_parser(wdir, name, spacer=0, gene_names=[]):
""" Parse merged genome alignment results file which is generated by
align_genes_for_design function to align design targets to reference
genomes. One query (target region) may have multiple alignments to the
genome.
Parameters
----------
wdir: str
Path to working directory
name: str
File name for the merged alignment file
spacer: int
Spacer length to use when merging overlapping regions. If two regions
are not overlapping but the distance between them is smaller than the
        spacer, they will be merged.
    gene_names: list
        Gene names to prioritize when assigning names to merged target groups.
Returns
-------
A list of dictionaries:
target_regions: merged genomic coordinates for grouped targets.
This dictionary is used as the final target regions.
For example: {r1: [[chr1, 100, 200], [chr3, 30, 300]],
                      r3: [[chr4, 0, 300]]}
region_names: names for each region.
For example: {r1: [r1, r2], r3: [r3]}
        imperfect_aligners: names of the target regions for which a perfect
        alignment to the reference genome has not been found.
        aligned_regions: all genomic locations each target aligned to.
        overlaps: groupings of targets whose alignments overlap.
    """
alignment_dict = {}
# open alignment files
with open(os.path.join(wdir, name + ".al")) as infile:
# each line in the file is a separate alignment for which we'll
# prepare a dictionary.
for line in infile:
newline = line.strip().split("\t")
# first line has column names
if line.startswith("#"):
colnames = [newline[0][1:]]
colnames.extend(newline[1:])
else:
temp_dict = {}
for i in range(len(colnames)):
col = colnames[i]
value = newline[i]
temp_dict[col] = value
query_name = temp_dict["name2"]
try:
alignment_dict[query_name].append(temp_dict)
except KeyError:
alignment_dict[query_name] = [temp_dict]
# go through each target sequence and each alignment for that
# target to where in the genome it was aligned to.
aligned_regions = {}
for query in alignment_dict:
aligned_regions[query] = []
for a in alignment_dict[query]:
chrom = a["name1"]
begin = int(a["zstart1"])
end = int(a["end1"])
aligned_regions[query].append([chrom, begin, end])
# check for overlapping alignments. These can be the same target aligning
# to overlapping regions in the genome (internal duplications) or
# different targets aligning to the same (or overlapping) regions in the
# genome (paralogus sequences).
# overlapping regions will be grouped together to form the final target
# regions for probe design.
overlaps = {}
for q1 in aligned_regions:
# each target will have itself as overlapping
overlaps[q1] = [q1]
# get the genomic regions q1 was aligned to
reg1 = aligned_regions[q1]
# go through each region
for r1 in reg1:
# check overlap with other target regions
for q2 in aligned_regions:
if q1 == q2:
continue
reg2 = aligned_regions[q2]
for r2 in reg2:
if check_overlap(r1, r2, spacer):
overlaps[q1].append(q2)
break
# go through the overlaps and remove the overlapping overlaps
# e.g. if a overlaps b, b overlaps a also. We'll have {a: [a,b], b: [b, a]}
# in the overlaps dict. We want only one of these, so reduce to {a:[a, b]}
overlap_found = True
    # place a failsafe counter to avoid unforeseen infinite loops
exit_counter = 0
while (overlap_found and (exit_counter < 10000)):
overlap_found = False
for o in list(overlaps.keys()):
# check if o is still in the overlaps and has not been removed
if o in overlaps:
val = overlaps[o]
# get the overlapping regions for "val" and add them
# to overlapping regions for "o", then remove "val"
for v in val:
if (v in overlaps) and (o in overlaps) and (o != v):
overlaps[o].extend(overlaps[v])
overlaps.pop(v)
overlap_found = True
if exit_counter > 9999:
print("Overlap removal while loop limit is reached.")
# clean up overlapping region lists by removing duplicates.
for o in list(overlaps.keys()):
overlaps[o] = sorted(list(set(overlaps[o])))
##########################################################################
# create a new dictionary for target regions.
# for each target group in overlaps, we'll have genomic coordinates
# that will be used as final targets.
##########################################################################
# group regions according to their chromosomes
separated_regions = {}
for o in overlaps:
sep = separated_regions[o] = {}
for g in overlaps[o]:
regs = aligned_regions[g]
for r in regs:
try:
sep[r[0]].append(r[1:])
except KeyError:
sep[r[0]] = [r[1:]]
# merge each overlapping region
separated_merged_regions = {}
for s in separated_regions:
merged_sep = separated_merged_regions[s] = {}
for chrom in separated_regions[s]:
merged_region = merge_overlap(separated_regions[s][chrom])
merged_sep[chrom] = merged_region
###########################################
# organize target regions, assign region names based on the original
# target names. Assign a reference target.
###########################################
    # sort the aligned regions of each target by region size, using the
    # length of the chromosome name as a tie-breaker. Preferring the shorter
    # chromosome name avoids picking alternate contigs (e.g. chr6_altXYZ
    # instead of chr6) as the reference. This is not strictly necessary and
    # will not behave as expected when chromosome names do not follow that
    # convention.
for ar in list(aligned_regions.keys()):
regs = aligned_regions[ar]
for r in regs:
r.append(0 - len(r[0]))
r.append(r[2] - r[1] + 1)
aligned_regions[ar] = sorted(regs, key=itemgetter(4, 3),
reverse=True)
target_regions = {}
region_names = {}
regions = separated_merged_regions
for r in regions:
target_regions[r] = []
for chrom in regions[r]:
for l in regions[r][chrom]:
temp_region = [chrom]
temp_region.extend(l)
temp_region.append(-len(chrom))
temp_region.append(l[1] - l[0])
target_regions[r].append(temp_region)
# sort target regions per target group based on the length of
# chromosome name and the length of region. Chromosome name is used
# to distinguish alternate contigs and not use them as reference, but
# it is not absolutely necessary and it would not behave as expected
        # when chromosome names do not follow that convention, i.e., chr6 and
# chr6_altXYZ
target_regions[r] = sorted(target_regions[r], key=itemgetter(4, 3),
reverse=True)
# assign names to grouped targets
reg_names = []
# for each region we go back to individual region alignments and see
# if the individual alignment overlaps with this region. If it does
# we use the individual regions name for this region within the group.
for i in range(len(target_regions[r])):
reg = target_regions[r][i]
reg_chrom = reg[0]
reg_begin = reg[1]
reg_end = reg[2]
for c in aligned_regions:
main_region = aligned_regions[c][0]
if (reg_chrom == main_region[0]
and reg_begin <= main_region[1]
and reg_end >= main_region[2]):
reg_names.append(c)
break
else:
reg_names.append("na")
# assign a reference region for each group based on gene names provided
# this is mainly to used to have better names for regions. For example,
# if a gene is a target as well as a snp, we would like the gene name
# to be the name of the group as opposed to the SNP's name.
ref_found = False
for g in gene_names:
if g in reg_names:
ref_found = True
ref_index = reg_names.index(g)
ref_name = g
break
if not ref_found:
ref_name = r
ref_index = 0
ref_region = target_regions[r].pop(ref_index)
reg_names.pop(ref_index)
target_regions[r] = [ref_region] + target_regions[r]
reg_names = [ref_name] + reg_names
region_names[ref_name] = reg_names
target_regions[reg_names[0]] = target_regions.pop(r)
overlaps[reg_names[0]] = overlaps.pop(r)
# after the alignments are done, some regions will not have proper names
# and some will have "na". We'll change those to avoid repeating
# names.
for r in list(region_names.keys()):
rnames = region_names[r]
nnames = []
rn_counts = {}
for rn in rnames:
rnc = rnames.count(rn)
rn_counts[rn] = {"total_count": rnc,
"used_count": 0}
for rn in rnames:
if rn_counts[rn]["total_count"] > 1:
nnames.append(rn + "-" + str(rn_counts[rn]["used_count"]))
rn_counts[rn]["used_count"] += 1
else:
nnames.append(rn)
region_names[r] = nnames
# find target regions that could not be perfectly aligned to the genome
    # these are usually extragenomic sequences supplied in fasta files, such as
# certain TCR haplotypes.
imperfect_aligners = []
for r in alignment_dict:
best_score = 0
alignments = alignment_dict[r]
for a in alignments:
cov = int(a["covPct"].split(".")[0])
idt = int(a["idPct"].split(".")[0])
score = cov * idt
if score > best_score:
best_score = score
if best_score != 10000:
imperfect_aligners.append(r)
return [target_regions, region_names, imperfect_aligners, aligned_regions,
overlaps]
def set_intra_alignment_options(target_regions, identity, coverage,
max_allowed_indel_size):
"""Set lastZ alignment options for intraparalog_aligner function."""
alignment_options_dict = {}
for t in target_regions:
temp_dict = {"gene_name": t, "identity": identity}
reference_len = target_regions[t][0][-1]
        small_target = 0
        smallest_target = float("inf")
        for r in target_regions[t]:
            if r[-1] < coverage:
                small_target += 1
                smallest_target = min(smallest_target, r[-1])
if small_target > 0:
print(("{} targets within {} are smaller than intra_coverage"
" value. This means that those targets will not be aligned."
" Smallest target's length was {}. Set intra_coverage"
" to a value smaller than this value to align all regions."
).format(small_target, t, smallest_target))
cover = round(coverage * 100 / reference_len, 1)
gap_open_penalty = 400
gap_extend_penalty = 30
ydrop = max_allowed_indel_size * gap_extend_penalty + gap_open_penalty
alignment_opts = ["--ydrop=" + str(ydrop), "--notransition",
"--ambiguous=iupac", "--noytrim"]
temp_dict["options"] = alignment_opts
if cover > 100:
cover = 100
temp_dict["coverage"] = cover
alignment_options_dict[t] = temp_dict
return alignment_options_dict
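# Worked example of the ydrop setting above: with max_allowed_indel_size=50,
# ydrop = 50 * 30 + 400 = 1900. The intent appears to be that the lastz
# y-drop threshold is large enough not to terminate an alignment across an
# indel of up to max_allowed_indel_size bases, given the gap open/extend
# penalties of 400/30.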
def intraparalog_aligner(resource_dir,
target_regions,
region_names,
imperfect_aligners,
fasta_sequences,
species,
num_process,
alignment_options_dict={}):
"""Align all regions within a target group.
Align all regions within a target group to the region selected
as the reference region.
Returns
-------
    Returns nothing. It creates .query.fa, .targets.fa and .aligned files for
    each target region group. These alignments have no genomic coordinates, so
    all coordinates are relative to the given sequence. Also, the region names
    are indicated as the reference gene name + copy name, as this is originally
    intended for use with paralog genes.
"""
alignment_commands = []
out_fields = "name1,strand1,zstart1,end1,length1,name2,strand2,zstart2,"
out_fields = out_fields + "end2,zstart2+,end2+,length2,identity,coverage"
gen_out = "general:" + out_fields
diff_out = "differences"
for t in target_regions:
alignment_options = alignment_options_dict[t]["options"]
identity = alignment_options_dict[t]["identity"]
coverage = alignment_options_dict[t]["coverage"]
tar_regs = target_regions[t]
# create a fasta file for the reference copy (or reference region)
target_keys = [tr[0] + ":" + str(tr[1] + 1)
+ "-" + str(tr[2]) for tr in tar_regs]
query_key = target_keys[0]
with open(os.path.join(resource_dir, t + ".query.fa"), "w") as outfile:
outfile.write(">" + t + "_ref\n")
outfile.write(get_sequence(query_key, species))
# create a fasta file that includes all target regions within a group.
with open(os.path.join(
resource_dir, t + ".targets.fa"), "w") as outfile:
outfile_list = []
for i in range(len(target_keys)):
k = target_keys[i]
cname = "_C" + str(i)
outfile_list.append(">" + t + cname)
outfile_list.append(get_sequence(k, species))
# add extragenomic (i.e. imperfect_aligners)
ols = region_names[t]
o_count = 0
for o in ols:
if o in imperfect_aligners:
outfile_list.append(">" + t + "_X" + str(o_count))
outfile_list.append(fasta_sequences[o])
o_count += 1
outfile.write("\n".join(outfile_list))
comm = [t + ".query.fa", resource_dir, t + ".aligned",
os.path.join(resource_dir, t + ".targets.fa"),
["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage, gen_out,
alignment_options, species]
alignment_commands.append(comm)
comm = [t + ".query.fa", resource_dir,
t + ".differences",
os.path.join(resource_dir, t + ".targets.fa"),
["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage,
diff_out, alignment_options, species]
alignment_commands.append(comm)
return align_region_multi(alignment_commands, num_process)
def intra_alignment_checker(family_name, res_dir, target_regions,
region_names):
"""
Parse intraparalog_aligner results.
Following a within group alignment, check if any individual region
within the group has multiple aligned parts. If found, split that region
into multiple regions to be re-aligned by intraparalog_aligner.
"""
alignment_file = family_name + ".aligned"
new_regions = {}
with open(os.path.join(res_dir, alignment_file), "r") as alignment:
for line in alignment:
# extract the column names from the first line
if line.startswith("#"):
newline = line.strip().split("\t")
newline[0] = newline[0][1:]
colnames = list(newline)
# assign values of each column for each alignment
else:
newline = line.strip().split("\t")
temp_dict = {}
for i in range(len(colnames)):
temp_dict[colnames[i]] = newline[i]
alignment_id = temp_dict["name1"]
ci = alignment_id.split("_")[-1]
ct = ci[0]
if ct == "C":
cn = int(ci[1:])
tr = target_regions[cn]
start = tr[1] + int(temp_dict["zstart1"])
end = tr[1] + int(temp_dict["end1"])
size = end - start + 1
try:
new_regions[cn].append([tr[0], start, end,
0 - len(tr[0]), size])
except KeyError:
new_regions[cn] = [[tr[0], start, end,
0 - len(tr[0]), size]]
# check if any paralog is missing after aligning to the reference copy
targeted_copies = list(range(len(target_regions)))
missing_copies = set(targeted_copies).difference(new_regions.keys())
if len(missing_copies) > 0:
print(("Paralog copies {} were not successfully aligned to "
"the reference copy for the target {}. You may consider "
"relaxing the alignment filters '--local-coverage' "
"and '--local-identity'").format(
", ".join(map(str, sorted(missing_copies))), family_name))
ret_regions = []
rnames = []
for ci in sorted(new_regions):
ret_regions.extend(sorted(new_regions[ci]))
if len(new_regions[ci]) > 1:
print(("Paralog copy {} for target region {} was aligned "
"to the reference copy multiple times. This copy will "
"be treated as multiple independent paralog copies and "
"realigned to the reference copy as separate "
"targets.").format(ci, family_name))
for i in range(len(new_regions[ci])):
rnames.append(region_names[ci] + "-" + str(i))
else:
rnames.append(region_names[ci])
return [ret_regions, rnames]
def align_paralogs(res_dir, target_regions, region_names, imperfect_aligners,
fasta_sequences, species, identity, coverage,
max_allowed_indel_size, num_process):
alignment_options = set_intra_alignment_options(
target_regions, identity, coverage, max_allowed_indel_size)
intraparalog_aligner(res_dir, target_regions, region_names,
imperfect_aligners, fasta_sequences, species,
num_process, alignment_options)
for r in target_regions.keys():
ntr = intra_alignment_checker(r, res_dir, target_regions[r],
region_names[r])
target_regions[r] = ntr[0]
region_names[r] = ntr[1]
alignment_options = set_intra_alignment_options(
target_regions, identity, coverage, max_allowed_indel_size)
intraparalog_aligner(res_dir, target_regions, region_names,
imperfect_aligners, fasta_sequences, species,
num_process, alignment_options)
def get_missed_targets(original_target_regions, target_regions,
aligned_regions, min_target_size, flank, capture_types):
org_chroms = {}
new_chroms = {}
for o in original_target_regions:
org_regs = original_target_regions[o]
for org in org_regs:
try:
org_chroms[org[0]].append(org[1:3])
except KeyError:
org_chroms[org[0]] = [org[1:3]]
new_regs = target_regions[o]
for nrg in new_regs:
try:
new_chroms[nrg[0]].append(nrg[1:3])
except KeyError:
new_chroms[nrg[0]] = [nrg[1:3]]
uncovered_chroms = {}
for chrom in org_chroms:
try:
uncov = subtract_overlap(org_chroms[chrom], new_chroms[chrom])
if len(uncov) > 0:
uncovered_chroms[chrom] = uncov
except KeyError:
uncovered_chroms[chrom] = org_chroms[chrom]
not_aligned_coordinates = {}
for ar in aligned_regions:
main_region = aligned_regions[ar][0]
extra_count = 0
for uc in uncovered_chroms:
unc_regs = uncovered_chroms[uc]
for ur in unc_regs:
if len(overlap(main_region[1:3], ur)) > 0:
not_aligned_coordinates[
ar + "-extra-" + str(extra_count)
] = {"chrom": uc,
"begin": ur[0],
"end": ur[1]}
missed_target_regions, missed_target_names = merge_coordinates(
not_aligned_coordinates, flank)
for t in list(missed_target_regions.keys()):
target_size = (missed_target_regions[t][-1]
- missed_target_regions[t][-2] + 1)
if target_size < min_target_size:
missed_target_regions.pop(t)
missed_target_names.pop(t)
missed_capt_types = {}
for t in missed_target_names:
try:
missed_capt_types[t] = capture_types[t.split("extra")[0][:-1]]
except KeyError:
print(("Capture type not found for {}."
" Setting capture type to 'whole'").format(t))
missed_capt_types[t] = "whole"
return [missed_target_regions, missed_target_names, missed_capt_types]
def align_targets(res_dir, target_regions, species, flank, fasta_files,
fasta_capture_type, genome_identity, genome_coverage,
num_process, gene_names, max_allowed_indel_size,
intra_identity, intra_coverage, capture_types,
min_target_size, merge_distance, savefile):
# create fasta files for each target coordinate
create_target_fastas(res_dir, target_regions, species, flank)
if fasta_files is None:
fasta_sequences = fasta_capture_types = {}
else:
# add target sequences provided by fasta files
fasta_targets = add_fasta_targets(
res_dir, fasta_files, fasta_capture_type=fasta_capture_type)
fasta_sequences = fasta_targets["fasta_sequences"]
fasta_capture_types = fasta_targets["capture_types"]
capture_types.update(fasta_capture_types)
# create a list of target names from all sources
targets_list = (list(target_regions.keys())
+ list(fasta_sequences.keys()))
# align target sequences to reference genome
# create alignment options
genomic_alignment_list = set_genomic_target_alignment_options(
target_regions, fasta_sequences, genome_identity, genome_coverage,
flank)
# perform genome alignment
align_genes_for_design(genomic_alignment_list, res_dir,
alignment_types="general", species=species,
num_processor=num_process)
# merge all alignment files
merge_alignments(res_dir, targets_list, output_prefix="merged")
# parse genome alignment file
# negative merge_distance values keep the target regions separate
# even if they overlap. Positive values lead to merging targets.
# However, the alignments are already carried out with flanking
# sequence so increasing that merge distance is avoided by setting the
# merge_distance 0 here for positive values.
if merge_distance > 0:
merge_distance = 0
genome_alignment = alignment_parser(res_dir, "merged",
spacer=merge_distance,
gene_names=gene_names)
target_regions = copy.deepcopy(genome_alignment[0])
region_names = copy.deepcopy(genome_alignment[1])
imperfect_aligners = copy.deepcopy(genome_alignment[2])
aligned_regions = copy.deepcopy(genome_alignment[3])
overlaps = copy.deepcopy(genome_alignment[4])
# align sequences within target groups (paralog sequences)
align_paralogs(res_dir, target_regions, region_names, imperfect_aligners,
fasta_sequences, species, intra_identity, intra_coverage,
max_allowed_indel_size, num_process)
# compare original target_regions to the final target regions
# to determine if any region is missing due to alignments performed
original_target_regions = genome_alignment[0]
missed_target_regions, missed_target_names, missed_capture_types = (
get_missed_targets(original_target_regions, target_regions,
aligned_regions, min_target_size, flank,
capture_types))
out_dict = {"original_target_regions": genome_alignment[0],
"original_region_names": genome_alignment[1],
"original_imperfect_aligners": genome_alignment[2],
"original_aligned_regions": genome_alignment[3],
"original_overlaps": genome_alignment[4],
"target_regions": target_regions,
"region_names": region_names,
"aligned_regions": aligned_regions,
"capture_types": capture_types,
"imperfect_aligners": imperfect_aligners,
"overlaps": overlaps,
"missed_target_regions": missed_target_regions,
"missed_target_names": missed_target_names,
"missed_capture_types": missed_capture_types}
with open(os.path.join(res_dir, savefile), "w") as outfile:
json.dump(out_dict, outfile, indent=1)
return out_dict
def alignment_mapper(family_name, res_dir):
"""Create a coordinate map of within group alignments."""
alignment_file = family_name + ".aligned"
difference_file = family_name + ".differences"
with open(os.path.join(res_dir, alignment_file), "r") as alignment, open(
os.path.join(res_dir, difference_file), "r") as difference:
# create an alignment dictionary for each region that a query
# aligns to these correspond to each line in the alignment file
# and thus, are relative coordinates.
alignment_dic = {}
for line in alignment:
# extract the column names from the first line
if line.startswith("#"):
newline = line.strip().split("\t")
newline[0] = newline[0][1:]
colnames = list(newline)
# assign values of each column for each alignment
else:
newline = line.strip().split("\t")
temp_dict = {"differences": []}
for i in range(len(colnames)):
temp_dict[colnames[i]] = newline[i]
alignment_id = temp_dict["name1"]
if alignment_id in alignment_dic:
print(("{} aligned to the reference copy multiple times. "
"Only the first alignment will be used for "
"coordinate mapping.").format(alignment_id))
continue
alignment_dic[alignment_id] = temp_dict
cov = float(alignment_dic[alignment_id]["covPct"][:-1])
idt = float(alignment_dic[alignment_id]["idPct"][:-1])
alignment_dic[alignment_id]["score"] = np.mean([idt, cov])
# differences file is a continuous file for all alignments
# extract differences for each alignment
for line in difference:
newline = line.strip().split("\t")
dname = newline[0]
alignment_dic[dname]["differences"].append(newline[:-2])
# map each position in each alignment to the query
for a in alignment_dic:
snps = alignment_dic[a]["snps"] = {}
co = alignment_dic[a]["coordinates"] = {}
rev_co = alignment_dic[a]["reverse_coordinates"] = {}
# if alignment on reverse strand
if alignment_dic[a]["strand2"] == "-":
# genomic coordinate of target start
# this position is zstart2+ away from query end
# (when it is a - alignment)
al_start = int(alignment_dic[a]["zstart1"])
query_plus_end = int(alignment_dic[a]["end2+"])
# assign start to the first key of the coord dictionary
first_key = query_plus_end - 1
co[first_key] = al_start
rev_co[al_start] = first_key
last_key = first_key
inserted = 0
for d in alignment_dic[a]["differences"]:
# start/end coordinates of diff relative to the query
diff_start = int(d[6])
diff_end = int(d[7])
query_length = int(d[9])
# for each diff, fill in the coordinates
# between the last_key in the coord dic and
# start_key - diff start
for j in range(last_key - 1, query_length
- diff_start - 1, -1):
# j decreases by one, starting from the last
# available key the value will be 1 more than the
# previous key (j+1)
if j == last_key - 1:
co[j] = round(co[j + 1] - 0.1) + 1 + inserted
else:
co[j] = round(co[j + 1] - 0.1) + 1
rev_co[co[j]] = j
# current last key is now first_key - diff_start
last_key = query_length - diff_start - 1
query_diff_end = last_key + 1
# genomic coordinate of target at diff start
tar_start = int(d[1])
# genomic coordinate of target at diff end
tar_end = int(d[2])
# if end and start are the same, there is a deletion
# in target compared to query
# all nucleotides from diff start to diff end will have
# the same coordinate
if tar_start == tar_end:
inserted = 0
for i in range(diff_end - diff_start):
co[last_key - i] = tar_start - 0.5
last_key -= diff_end - diff_start - 1
# in cases of deletion in query, only rev_co will be
# updated
elif diff_start == diff_end:
inserted = 0
for i in range(tar_end - tar_start):
rev_co[co[last_key + 1] + i + 1] = (
last_key + 0.5)
inserted += 1
last_key += 1
# last_key will be mapped to target start
# if there is only a SNP and no indel
else:
inserted = 0
co[last_key] = tar_start
rev_co[tar_start] = last_key
query_diff_start = last_key
diff_key = str(query_diff_start) + "-" + str(
query_diff_end)
snps[diff_key] = {"chrom": d[0],
"target_begin": int(d[1]),
"target_end": int(d[2]),
"target_orientation": d[3],
"query_start": diff_start,
"query_end": diff_end,
"query_orientation": d[8],
"target_base": d[10],
"query_base": d[11]}
# fill in the coordinates between last diff
# and the alignment end
query_plus_start = int(alignment_dic[a]["zstart2+"])
for k in range(last_key - 1, query_plus_start - 1, -1):
co[k] = round(co[k+1] - 0.1) + 1
rev_co[co[k]] = k
# when the alignment is on the forward strand
else:
# where on target sequence the alignment starts
tar_start = int(alignment_dic[a]["zstart1"])
# where in the query sequence the alinment starts
q_start = int(alignment_dic[a]["zstart2"])
co[q_start] = tar_start
rev_co[tar_start] = q_start
# last key used is q_start, last key is updated each time
# something is added to the coordinate dict.
last_key = first_key = q_start
inserted = 0
for d in alignment_dic[a]["differences"]:
# where on query sequence the difference starts and
# ends
diff_start = int(d[6])
diff_end = int(d[7])
diff_key = d[6] + "-" + d[7]
query_length = d[9]
snps[diff_key] = {"chrom": d[0],
"target_begin": int(d[1]),
"target_end": int(d[2]),
"target_orientation": d[3],
"query_start": diff_start,
"query_end": diff_end,
"query_orientation": d[8],
"target_base": d[10],
"query_base": d[11]}
# from the last key to the diff start the query and
# target sequences are the same in length and co dict
# is filled so
for i in range(last_key + 1, diff_start):
if i == last_key + 1:
co[i] = round(co[i-1] - 0.1) + 1 + inserted
inserted = 0
else:
co[i] = round(co[i-1] - 0.1) + 1
rev_co[co[i]] = i
# update last used key in co dict
last_key = diff_start
# genomic coordinate of target at diff start
tar_start = int(d[1])
# genomic coordinate of target at diff end
tar_end = int(d[2])
# if end and start are the same, there is a deletion
# in target compared to query
# all nucleotides from diff start to diff end will have
# the same coordinate
if tar_start == tar_end:
inserted = 0
for i in range(diff_end - diff_start):
co[last_key + i] = tar_start - 0.5
last_key += diff_end - diff_start - 1
# in cases of deletion in query (insertion in target)
# position will be mapped to the target end coordinate
elif diff_start == diff_end:
inserted = 0
for i in range(tar_end - tar_start):
rev_co[co[last_key - 1] + 1 + i] = (
last_key - 0.5)
inserted += 1
last_key -= 1
# if there is no indel
# last_key will be mapped to target start
else:
inserted = 0
co[last_key] = tar_start
rev_co[tar_start] = last_key
# fill in the coordinates between last diff
# and the alignment end
q_end = int(alignment_dic[a]["end2"])
for k in range(last_key + 1, q_end):
co[k] = round(co[k-1] - 0.1) + 1
rev_co[co[k]] = k
return alignment_dic
###############################################################
# Design related functions
###############################################################
def order_mips(mip_info, design_name, res_dir):
mip_sequences = []
for g in sorted(mip_info):
for m in sorted(mip_info[g]["mips"]):
minfo = mip_info[g]["mips"][m]["mip_dic"]["mip_information"]
for c in minfo:
s = minfo[c]["SEQUENCE"]
n = m + "_" + c
num = int(m.split("_")[-1][3:])
mip_sequences.append([n, s, g, num, m, c])
if len(mip_info[g]["mips"]) == 0:
mip_info.pop(g)
mip_sequences = sorted(mip_sequences, key=itemgetter(2, 3))
print("%d probes will be ordered." % len(mip_sequences))
# Check for probes that have the same sequence
sequence_only = [i[1].upper() for i in mip_sequences]
for s in sequence_only:
if sequence_only.count(s) > 1:
print("At least two probes share the sequence %s" % s)
rows = ["A", "B", "C", "D", "E", "F", "G", "H"]
columns = list(range(1, 13))
for i in range(len(mip_sequences)):
m = mip_sequences[i]
        # integer division: 96-well plate index and position within the plate
        plate = i // 96
        pl_pos = i % 96
        col = columns[pl_pos % 12]
        row = rows[pl_pos // 12]
        m.extend([row, col, plate])
    # annotate N bases in the probe sequence for ordering: the first N gets an
    # explicit 25:25:25:25 base mix, subsequent Ns are marked as plain (N)
    for i in range(len(mip_sequences)):
        m = mip_sequences[i]
        s = list(m[1])
        N_found = False
        for j in range(len(s)):
            if s[j] == "N":
                if N_found:
                    s[j] = "(N)"
                else:
                    N_found = True
                    s[j] = "(N:25252525)"
m.append("".join(s))
order_dict = {}
for i in range(len(mip_sequences)):
m = mip_sequences[i]
pl = m[-2]
pl_name = design_name + "_" + str(pl)
try:
order_dict[pl_name].append(m)
except KeyError:
order_dict[pl_name] = [m]
for o in order_dict:
with open(os.path.join(res_dir, o), "w") as outfile:
outfile_list = ["\t".join(["WellPosition", "Name", "Sequence"])]
plate_mips = order_dict[o]
for m in plate_mips:
wp = m[-4] + str(m[-3])
outfile_list.append("\t".join([wp, m[0], m[-1]]))
outfile.write("\n".join(outfile_list))
return
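# Worked example of the plate layout above: the 100th probe (i == 99) gives
# plate = 99 // 96 = 1, pl_pos = 99 % 96 = 3, column = columns[3 % 12] = 4 and
# row = rows[3 // 12] = "A", i.e. well "A4" on the second plate (plate index 1).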
def create_dirs(dir_name):
""" create subdirectory names for a given dir,
to be used by os.makedirs, Return a list of
subdirectory names."""
primer3_input_DIR = dir_name + "/primer3_input_files/"
primer3_output_DIR = dir_name + "/primer3_output_files/"
bowtie2_input_DIR = dir_name + "/bowtie2_input/"
bowtie2_output_DIR = dir_name + "/bowtie2_output/"
mfold_input_DIR = dir_name + "/mfold_input/"
mfold_output_DIR = dir_name + "/mfold_output/"
return [primer3_input_DIR, primer3_output_DIR, bowtie2_input_DIR,
bowtie2_output_DIR, mfold_input_DIR, mfold_output_DIR]
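# A minimal usage sketch (hypothetical directory and function name): create
# the subdirectories returned by create_dirs before running the design steps.
def _create_dirs_sketch(design_dir="/path/to/design_dir"):
    for sub_dir in create_dirs(design_dir):
        if not os.path.exists(sub_dir):
            os.makedirs(sub_dir)
    return design_dir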
def get_snps(region, snp_file):
""" Take a region string and a tabix'ed snp file,
return a list of snps which are lists of
tab delimited information from the snp file. """
# extract snps using tabix, in tab separated lines
snp_temp = subprocess.check_output(["tabix", snp_file, region]).decode(
"UTF-8"
)
# split the lines (each SNP)
snps_split = snp_temp.split("\n")
    # add each snp in the region to the list,
    # each snp being a list of tab delimited fields
snps = []
for line in snps_split:
snp = line.split('\t')
snps.append(snp)
# remove last item which is coming from the new line at the end
del snps[-1]
return snps
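# A minimal usage sketch (hypothetical file path and function name): assumes
# the snp file is bgzipped and tabix-indexed and that tabix is on the PATH.
# Regions are given as strings such as "chr1:100000-200000".
def _get_snps_sketch(snp_file="/path/to/snps.bed.gz"):
    return get_snps("chr1:100000-200000", snp_file)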
def get_vcf_snps(region, snp_file):
""" Take a region string and a tabix'ed snp file,
return a list of snps which are lists of
tab delimited information from the snp file. """
# extract snps using tabix, in tab separated lines
snp_temp = subprocess.check_output(["bcftools", "view", "-H", "-G", "-r",
region, snp_file]).decode("UTF-8")
# split the lines (each SNP)
snps_split = snp_temp.split("\n")[:-1]
    # add each snp in the region to the list,
    # each snp being a list of the first 8 tab delimited fields
snps = []
for line in snps_split:
snp = line.split('\t')[:8]
snps.append(snp)
return snps
def get_exons(gene_list):
""" Take a list of transcript information in refgene format and return a
    list of exons in the region as [[e1_start, e1_end], [e2_start, e2_end],
    ..]. The transcripts must belong to the same gene (i.e. have the same gene
    name). Merge overlapping exons.
"""
# get start and end coordinates of exons in gene list
starts = []
ends = []
gene_names = []
gene_ids = []
chrom_list = []
for gene in gene_list:
chrom_list.append(gene[2])
chrom_set = list(set(chrom_list))
if len(chrom_set) == 0:
return {}
chrom_set = [c for c in chrom_set if len(c) < 6]
if len(chrom_set) > 1:
print(("More than one chromosomes, ",
chrom_set,
", has specified gene ",
gene[12]))
return {}
chrom = chrom_set[0]
for gene in gene_list:
if gene[2] == chrom:
starts.extend(list(map(int, gene[9].split(",")[:-1])))
ends.extend(list(map(int, gene[10].split(",")[:-1])))
gene_names.append(gene[12])
gene_ids.append(gene[1])
ori = gene[3]
# pair exon starts and ends
exons = []
for i in range(len(starts)):
exons.append([starts[i], ends[i]])
# check for overlapping exons and merge if any
overlapping = 1
while overlapping:
overlapping = 0
for i in range(len(exons)):
e = exons[i]
for j in range(len(exons)):
x = exons[j]
if (i != j) and ((e[0] <= x[0] <= e[1])
or (e[0] <= x[1] <= e[1])
or (x[0] <= e[0] <= x[1])):
# merge exons and add to the exon list
exons.append([min(e[0], x[0]), max(e[1], x[1])])
# remove the exons e and x
exons.remove(e)
exons.remove(x)
# change overlapping to 1 so we can stop the outer for loop
overlapping = 1
# once an overlapping exon is found, break the for loop
break
if overlapping:
# if an overlapping exon is found, stop this for loop and
# continue with the while loop with the updated exon list
break
# get the gene start and end coordinates
if (len(starts) >= 1) and (len(ends) >= 1):
start = min(starts)
end = max(ends)
else:
print(("No exons found for ", gene_list[0][1]))
return {}
# create an output dict
out = {}
out["chrom"] = chrom
out["begin"] = start + 1
out["end"] = end
out["exons"] = [[e[0] + 1, e[1]] for e in sorted(exons, key=itemgetter(0))]
out["names"] = gene_names
out["ids"] = gene_ids
out["orientation"] = ori
return out
def get_gene_name(region, species):
""" Return the gene(s) in a region. """
gene_names = []
try:
genes = get_snps(region, get_file_locations()[species][
"refgene_tabix"])
for g in genes:
gene_names.append(g[12])
except KeyError:
pass
return gene_names
def get_gene(gene_name, refgene_file, chrom=None, alternative_chr=1):
""" Return genomic coordinates of a gene extracted from the refseq genes file.
Refgene fields are as follows:
0:bin, 1:name, 2:chrom, 3:strand, 4:txStart, 5:txEnd, 6:cdsStart, 7:cdsEnd,
8:exonCount, 9:exonStarts, 10:exonEnds, 11:score, 12:name2,
13:cdsStartStat, 14:cdsEndStat, 15:exonFrames.
Field 12 will be used for name search."""
# all chromosomes must be included if chromosome of the gene is not
# provided therefore, chrom cannot be None when alternative_chr is set to 0
if not (chrom or alternative_chr):
print(("Chromosome of the gene %s must be specified "
"or all chromosomes must be searched."))
print(("Specify a chromosome or set alternative chromosome to 1."
% gene_name))
return 1
with open(refgene_file, 'r') as infile:
coord = []
for line in infile:
if not line.startswith('#'):
newline = line.strip().split('\t')
if newline[12] == gene_name:
coord.append(newline)
if len(coord) < 1:
print(("No gene found with the name ", gene_name))
return []
alter = []
if chrom:
# add each gene to alter dict, in the corresponding chromosome key
for c in coord:
if c[2] == chrom:
alter.append(c)
# find genes on alternate chromosomes if requested
elif alternative_chr:
for c in coord:
alter.append(c)
return alter
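# A minimal sketch mirroring how gene_to_target uses these functions: look up
# a gene in the refgene file and merge its exons. The gene name is a
# hypothetical placeholder and the function name is not part of the pipeline.
def _gene_exons_sketch(gene_name="example_gene", species="hs"):
    refgene = get_file_locations()[species]["refgene"]
    return get_exons(get_gene(gene_name, refgene, alternative_chr=1))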
def create_gene_fasta(gene_name_list, wdir, species="hs", flank=150,
multi_file=False):
""" Get a list of genes, extract exonic sequence + flanking sequence.
Create fasta files in corresponding directory for each gene if multi_file
is True, create a single fasta file if False.
"""
region_list = []
for gene_name in gene_name_list:
if gene_name.startswith("chr"):
coord = get_coordinates(gene_name)
query = make_region(coord[0], coord[1] - flank, coord[2] + flank)
else:
e = get_exons(
get_gene(gene_name, get_file_locations()[species]["refgene"],
alternative_chr=1)
)
query = e["chrom"] + ":" + str(e["begin"] - flank) + "-" + str(
e["end"] + flank)
region_list.append(query)
regions = get_fasta_list(region_list, species)
fasta_dict = {}
for i in range(len(region_list)):
r = region_list[i]
gene_name = gene_name_list[i]
fasta_dict[gene_name] = regions[r]
if multi_file:
for gene_name in fasta_dict:
save_dict = {gene_name: fasta_dict[gene_name]}
filename = os.path.join(wdir, gene_name + ".fa")
save_fasta_dict(save_dict, filename)
else:
save_fasta_dict(fasta_dict, os.path.join(wdir, "multi.fa"))
def get_region_exons(region, species):
try:
genes = get_snps(region, get_file_locations()[species][
"refgene_tabix"])
except KeyError:
genes = []
return get_exons(genes)
def get_cds(gene_name, species):
gene_list = get_gene(gene_name,
get_file_locations()[species]["refgene"],
alternative_chr=1)
if len(gene_list) > 1:
print(("More than one refgene entry was found for the gene ",
gene_name))
print(("Exons from alternative transcripts will be merged "
"and CDS will be generated from that."))
print("This may lead to unreliable CDS sequence information.")
if len(gene_list) == 0:
return {}
g = gene_list[0]
cds = {"chrom": g[2],
"orientation": g[3],
"begin": int(g[6]) + 1,
"end": int(g[7])}
exons = get_exons(gene_list)["exons"]
exons_nuc = []
for i in range(len(exons)):
e = exons[i]
if not e[0] <= cds["begin"] <= e[1]:
exons[i] == "remove"
else:
e[0] = cds["begin"]
break
exons = [i for i in exons if i != "remove"]
for i in range(-1, -1 * len(exons), -1):
e = exons[i]
if not e[0] <= cds["end"] <= e[1]:
exons[i] = "remove"
else:
e[1] = cds["end"]
break
exons = [i for i in exons if i != "remove"]
sequences = []
for e in exons:
exons_nuc.extend(list(range(e[0], e[1] + 1)))
sequences.append(fasta_to_sequence(
get_fasta(cds["chrom"]
+ ":" + str(e[0]) + "-"
+ str(e[1]), species)))
coord = {}
if cds["orientation"] == "+":
cds["sequence"] = "".join(sequences)
for i in range(len(exons_nuc)):
coord[i] = exons_nuc[i]
else:
cds["sequence"] = reverse_complement("".join(sequences))
rev_exons = list(reversed(exons_nuc))
for i in range(len(exons_nuc)):
coord[i] = rev_exons[i]
cds["coordinates"] = coord
return cds
def make_boulder(fasta, primer3_input_DIR, exclude_list=[],
output_file_name="", sequence_targets=[]):
""" Create a boulder record file in primer3_input_DIR from a given fasta
STRING. SEQUENCE_ID is the fasta header, usually the genomic region
(chrX:m-n) exclude_list is [coordinate,length] of any regions primers
cannot overlap.
"""
    # parse fasta string, get the header and remove remaining newlines.
fasta_list = fasta.split("\n")
fasta_head = fasta_list[0][1:]
seq_template = "".join(fasta_list[1:])
# convert exclude list to strings
exclude_string_list = []
exclude_region = ""
for i in exclude_list:
exclude_string_list.append(str(i[0])+","+str(i[1]))
exclude_region = " ".join(exclude_string_list)
# create the boulder record
if len(sequence_targets) == 0:
sequence_target_string = ""
else:
sequence_target_string = " ".join([",".join(map(str, s))
for s in sequence_targets])
boulder = ("SEQUENCE_ID=" + fasta_head + "\n" +
"SEQUENCE_TEMPLATE=" + seq_template + "\n" +
"SEQUENCE_TARGET=" + sequence_target_string + "\n" +
"SEQUENCE_EXCLUDED_REGION=" + exclude_region + "\n" + "=")
if output_file_name == "":
outname = fasta_head
else:
outname = output_file_name
with open(os.path.join(primer3_input_DIR, outname), 'w') as outfile:
outfile.write(boulder)
return boulder
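# Example boulder record written by make_boulder for a hypothetical region
# (sequence and coordinates are illustrative only):
#     SEQUENCE_ID=chr7:1000-2000
#     SEQUENCE_TEMPLATE=ACGT...
#     SEQUENCE_TARGET=100,25 400,25
#     SEQUENCE_EXCLUDED_REGION=250,10
#     =
# exclude_list items are given as [coordinate, length] relative to the
# template and are written as the "coordinate,length" strings shown above.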
def make_primers_worker(l):
"""
Worker function to make_primers_multi.
    A worker function to make primers for multiple regions using separate
    processes. It reads the boulder record in the given input directory and
    creates primer output files in the output directory.
"""
    # function arguments should be given as a list due to the single
    # iterable limitation of the map_async function of multiprocessing.Pool
# input boulder record name
input_file = l[0]
# primer settings used
settings = l[1]
# output file name
output_file = l[2]
# locations of input/output dirs
primer3_input_DIR = l[3]
primer3_output_DIR = l[4]
primer3_settings_DIR = l[5]
subregion_name = l[6]
paralog_name = l[7]
primer_type = l[8]
input_file = os.path.join(primer3_input_DIR, input_file)
output_file = os.path.join(primer3_output_DIR, output_file)
settings = os.path.join(primer3_settings_DIR, settings)
# call primer3 program using the input and settings file
res = subprocess.run(["primer3_core",
"-p3_settings_file=" + settings, input_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.returncode != 0:
print(("Primer design for the gene {} subregion {} {} arm failed "
"with error {}").format(paralog_name, subregion_name,
primer_type, res.stderr))
return
else:
primer3_output = res.stdout
# write boulder record to file.
with open(output_file, 'w') as outfile:
outfile.write(primer3_output.decode("UTF-8"))
return
def make_primers_multi(ext_list, lig_list, pro):
"""Design primers in parallel using the make_primers_worker function."""
# create a pool of twice the number of targets (for extension and ligation)
# p = Pool(2*pro)
p = Pool(pro)
# make extension primers using extension arm primer settings
p.map_async(make_primers_worker, ext_list)
# make ligation primers using ligation arm primer settings
p.map_async(make_primers_worker, lig_list)
# close pool
p.close()
# wait for processes to finish
p.join()
return
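# Sketch of one worker argument list consumed by make_primers_worker; file,
# region and gene names are hypothetical, but the order must match the
# worker's indexing (l[0] through l[8]):
#     ext_list_item = [
#         "chr7:1000-2000_ext",      # boulder record in primer3_input_DIR
#         "extension_settings.txt",  # primer3 settings file name
#         "chr7:1000-2000_ext_out",  # output file in primer3_output_DIR
#         primer3_input_DIR, primer3_output_DIR, primer3_settings_DIR,
#         "subregion_0", "Gene1", "extension"]
#     make_primers_multi([ext_list_item], [lig_list_item], pro=2)
# where lig_list_item is built the same way with ligation settings.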
def primer_parser3(input_file, primer3_output_DIR, bowtie2_input_DIR,
parse_out, fasta=1, outp=1):
"""
Parse a primer3 output file and generate a primer fasta file.
The fasta file for the primers that only contains primer names and
sequences will be placed in the bowtie input directory to be
used as bowtie2 input.
Return a dictionary {sequence_information:{}, primer_information{}}
first dict has tag:value pairs for input sequence while second dict
has as many dicts as the primer number returned with primer name keys
and dicts as values {"SEQUENCE": "AGC..", "TM":"58"...}. Also write
this dictionary to a json file in primer3_output_DIR.
"""
primer_dic = {}
# all target sequence related information will be placed in
# sequence_information dictionary.
primer_dic["sequence_information"] = {}
# primer information will be kept in primer_information dicts.
primer_dic["primer_information"] = {}
# load the whole input file into a list.
infile = open(primer3_output_DIR + input_file, 'r')
lines = []
for line in infile:
# if a line starts with "=" that line is a record separator
if not line.startswith("="):
# boulder record tag-value pairs separated by "="
inline = line.strip('\n').split('=')
lines.append(inline)
infile.close()
# find sequence related information and add it to appropriate dic.
for pair in lines:
tag = pair[0]
value = pair[1]
if tag.startswith("SEQUENCE"):
if tag == "SEQUENCE_ID":
new_value = value.split(",")[-1].replace("CHR", "chr")
primer_dic["sequence_information"][tag] = new_value
else:
primer_dic["sequence_information"][tag] = value
# find how many left primers returned and create empty dictionary
# for each primer in primer_information dict.
for pair in lines:
tag = pair[0]
value = pair[1]
if tag == "PRIMER_LEFT_NUM_RETURNED":
# Add this to sequence information dic because it is sequence
# specific information
primer_dic["sequence_information"][
"SEQUENCE_LEFT_NUM_RETURNED"] = value
# create empty dictionaries with primer name keys
for i in range(int(value)):
primer_key = "PRIMER_LEFT_" + str(i)
primer_dic["primer_information"][primer_key] = {}
# do the same for right primers found
for pair in lines:
tag = pair[0]
value = pair[1]
if tag == "PRIMER_RIGHT_NUM_RETURNED":
primer_dic["sequence_information"][
"SEQUENCE_RIGHT_NUM_RETURNED"] = value
for i in range(int(value)):
primer_key = "PRIMER_RIGHT_" + str(i)
primer_dic["primer_information"][primer_key] = {}
# get sequence coordinate information to determine genomic coordinates of
# primers because primer information is relative to template sequence
sequence_coordinates = get_coordinates(primer_dic[
"sequence_information"]["SEQUENCE_ID"])
seq_chr = sequence_coordinates[0]
seq_start = int(sequence_coordinates[1])
# get primer information from input file and add to primer dictionary
for pair in lines:
tag = pair[0]
value = pair[1]
if ((tag.startswith("PRIMER_LEFT_")
or tag.startswith("PRIMER_RIGHT_"))
and (tag != "PRIMER_LEFT_NUM_RETURNED")
and (tag != "PRIMER_RIGHT_NUM_RETURNED")):
attributes = tag.split('_')
# primer coordinates tag does not include an attribute value
# it is only primer name = coordinates, so:
if len(attributes) > 3:
# then this attribute is not coordinates and should have an
# attribute value such as TM or HAIRPIN etc.
primer_name = '_'.join(attributes[0:3])
attribute_value = '_'.join(attributes[3:])
primer_dic["primer_information"][primer_name][
attribute_value] = value
else:
# then this attribute is coordinates and has no attribute value
# give it an attribute valute "COORDINATES"
primer_name = '_'.join(attributes[0:3])
primer_dic["primer_information"][primer_name][
'COORDINATES'] = value
# the coordinates are relative to sequence template
# find the genomic coordinates
coordinate_values = value.split(",")
if tag.startswith("PRIMER_LEFT"):
# sequence start is added to primer start to get genomic
# primer start
genomic_start = seq_start + int(coordinate_values[0])
# primer len is added "to genomic start because it is a
# left primer
genomic_end = genomic_start + int(coordinate_values[1]) - 1
primer_dic["primer_information"][primer_name][
'GENOMIC_START'] = genomic_start
primer_dic["primer_information"][primer_name][
'GENOMIC_END'] = genomic_end
primer_dic["primer_information"][primer_name][
'CHR'] = seq_chr
primer_dic["primer_information"][primer_name][
'ORI'] = "forward"
else:
# sequence start is added to primer start to get genomic
# primer start
genomic_start = seq_start + int(coordinate_values[0])
# primer len is subtracted from genomic start because it is
# a right primer
genomic_end = genomic_start - int(coordinate_values[1]) + 1
primer_dic["primer_information"][primer_name][
'GENOMIC_START'] = genomic_start
primer_dic["primer_information"][primer_name][
'GENOMIC_END'] = genomic_end
primer_dic["primer_information"][primer_name][
'CHR'] = seq_chr
primer_dic["primer_information"][primer_name][
'ORI'] = "reverse"
# add NAME as a key to primer information dictionary
primer_dic["primer_information"][primer_name]['NAME'] = primer_name
# if some primers were eliminated from initial primer3 output, remove from
# dictionary
for primer in list(primer_dic["primer_information"].keys()):
if primer_dic["primer_information"][primer] == {}:
primer_dic["primer_information"].pop(primer)
# dump the dictionary to json file in primer3_output_DIR if outp parameter
# is true
if outp:
dict_file = open(os.path.join(primer3_output_DIR, parse_out), 'w')
json.dump(primer_dic, dict_file, indent=1)
dict_file.close()
# generate a simple fasta file with primer names
if fasta:
outfile = open(bowtie2_input_DIR+parse_out, 'w')
for primer in primer_dic["primer_information"]:
# primer name is fasta header and sequence is fasta sequence
fasta_head = primer
fasta_line = primer_dic["primer_information"][primer]["SEQUENCE"]
outfile.write(">" + fasta_head + "\n" + fasta_line + "\n")
outfile.close()
return primer_dic
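# Illustrative shape of the dictionary returned by primer_parser3 (all values
# are made up for this sketch):
#     {"sequence_information": {"SEQUENCE_ID": "chr7:1000-2000",
#                               "SEQUENCE_LEFT_NUM_RETURNED": "5", ...},
#      "primer_information": {"PRIMER_LEFT_0": {"SEQUENCE": "ACGT...",
#                                               "TM": "60.1",
#                                               "COORDINATES": "15,25",
#                                               "GENOMIC_START": 1015,
#                                               "GENOMIC_END": 1039,
#                                               "CHR": "chr7",
#                                               "ORI": "forward",
#                                               "NAME": "PRIMER_LEFT_0"},
#                              ...}}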
def paralog_primers(primer_dict, copies, coordinate_converter, settings,
primer3_output_DIR, outname, species, outp=0):
"""
Process primers generated for paralogs.
Take a primer dictionary file and add genomic start and end coordinates
of all its paralogs.
"""
# uncomment for using json object instead of dic
# load the primers dictionary from file
# with open(primer_file, "r") as infile:
# primer_dic = json.load(infile)
# primer dict consists of 2 parts, sequence_information dict
    # and primer information dict. We won't change the sequence_info part
primers = primer_dict["primer_information"]
primer_keys = set()
for primer in list(primers.keys()):
p_name = primer
p_dic = primers[primer]
p_coord = coordinate_converter
p_copies = copies
chroms = p_coord["C0"]["chromosomes"]
start = p_dic["GENOMIC_START"]
end = p_dic["GENOMIC_END"]
ref_coord = p_dic["COORDINATES"]
primer_ori = p_dic["ORI"]
p_dic["PARALOG_COORDINATES"] = {}
primer_seq = p_dic["SEQUENCE"]
# add reference copy as paralog
p_dic["PARALOG_COORDINATES"]["C0"] = {"SEQUENCE": primer_seq,
"ORI": primer_ori,
"CHR": chroms["C0"],
"NAME": p_name,
"GENOMIC_START": start,
"GENOMIC_END": end,
"COORDINATES": ref_coord}
for c in p_copies:
if c != "C0":
                # check if both ends of the primer have aligned with the reference
try:
para_start = p_coord["C0"][c][start]
para_end = p_coord["C0"][c][end]
except KeyError:
# do not add that copy if it is not aligned
continue
para_primer_ori = para_start < para_end
if para_primer_ori:
para_primer_key = (chroms[c] + ":" + str(para_start) + "-"
+ str(para_end))
p_dic["PARALOG_COORDINATES"][c] = {
"ORI": "forward", "CHR": chroms[c], "NAME": p_name,
"GENOMIC_START": para_start, "GENOMIC_END": para_end,
"COORDINATES": ref_coord, "KEY": para_primer_key}
primer_keys.add(para_primer_key)
else:
para_primer_key = chroms[c] + ":" + str(
para_end) + "-" + str(para_start)
p_dic["PARALOG_COORDINATES"][c] = {
"ORI": "reverse", "CHR": chroms[c], "NAME": p_name,
"GENOMIC_START": para_start, "GENOMIC_END": para_end,
"COORDINATES": ref_coord, "KEY": para_primer_key}
primer_keys.add(para_primer_key)
if len(primer_keys) > 0:
primer_sequences = get_fasta_list(primer_keys, species)
for p in primers:
para = primers[p]["PARALOG_COORDINATES"]
for c in para:
if c != "C0":
copy_dict = para[c]
p_ori = copy_dict["ORI"]
p_key = copy_dict["KEY"]
p_seq = primer_sequences[p_key]
if p_ori == "reverse":
p_seq = reverse_complement(p_seq)
copy_dict["SEQUENCE"] = primer_sequences[p_key]
if outp:
with open(os.path.join(primer3_output_DIR, outname), "w") as outf:
json.dump(primer_dict, outf, indent=1)
return primer_dict
def bowtie2_run(fasta_file, output_file, bowtie2_input_DIR,
bowtie2_output_DIR, species, process_num=4,
seed_MM=1, mode="-a", seed_len=18, gbar=1, local=0):
"""Align primers from a fasta file to specified species genome."""
file_locations = get_file_locations()
# check if entered species is supported
genome = file_locations[species]["bowtie2_genome"]
# determine what type of alignment is wanted
# local or end-to-end
if local:
check_local = "--local"
else:
check_local = "--end-to-end"
res = subprocess.Popen(["bowtie2", "-p", str(process_num), "-D", "20",
"-R", "3", "-N", str(seed_MM), "-L",
str(seed_len), "-i", "S,1,0.5", "--gbar",
str(gbar), mode, check_local, "-x", genome, "-f",
os.path.join(bowtie2_input_DIR, fasta_file), "-S",
os.path.join(bowtie2_output_DIR, output_file)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
log_file = os.path.join(
bowtie2_output_DIR, "log_" + species + "_" + id_generator(6))
with open(log_file, "wb") as outfile:
outfile.write(res.communicate()[1])
return 0
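# With the default arguments, the subprocess call above is roughly equivalent
# to the following shell command (genome and file paths are placeholders):
#     bowtie2 -p 4 -D 20 -R 3 -N 1 -L 18 -i S,1,0.5 --gbar 1 -a --end-to-end \
#         -x <bowtie2_genome> -f <bowtie2_input_DIR>/primers.fa \
#         -S <bowtie2_output_DIR>/primers.sam
# The alignment summary printed to stderr is saved to a
# log_<species>_<random id> file in the bowtie2 output directory.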
def bowtie(fasta_file, output_file, bowtie2_input_DIR, bowtie2_output_DIR,
options, species, process_num=4, mode="-a", local=0, fastq=0):
"""Align a fasta or fastq file to a genome using bowtie2."""
file_locations = get_file_locations()
# check if entered species is supported
genome = file_locations[species]["bowtie2_genome"]
# determine what type of alignment is wanted
# local or end-to-end
if local:
check_local = "--local"
else:
check_local = "--end-to-end"
com = ["bowtie2", "-p " + str(process_num)]
com.extend(options)
com.append(mode)
com.append(check_local)
com.append("-x " + genome)
if fastq:
com.append("-q " + os.path.join(bowtie2_input_DIR, fasta_file))
else:
com.append("-f " + os.path.join(bowtie2_input_DIR, fasta_file))
com.append("-S " + os.path.join(bowtie2_output_DIR, output_file))
subprocess.check_output(com)
return 0
def bwa(fastq_file, output_file, output_type, input_dir,
output_dir, options, species, base_name="None"):
"""
Align a fastq file to species genome using bwa.
Options should be a list that starts with the command (e.g. mem, aln etc).
Additional options should be appended as strings of "option value",
for example, "-t 30" to use 30 threads. Output type can be sam or bam.
Recommended options ["-t30", "-L500", "-T100"]. Here L500 penalizes
clipping severely so the alignment becomes end-to-end and T100 stops
reporting secondary alignments, assuming their score is below 100.
"""
genome_file = get_file_locations()[species]["bwa_genome"]
read_group = ("@RG\\tID:" + base_name + "\\tSM:" + base_name + "\\tLB:"
+ base_name + "\\tPL:ILLUMINA")
options = copy.deepcopy(options)
options.append("-R" + read_group)
if output_type == "sam":
com = ["bwa"]
com.extend(options)
com.append(genome_file)
com.append(os.path.join(input_dir, fastq_file))
with open(os.path.join(output_dir, output_file), "w") as outfile:
subprocess.check_call(com, stdout=outfile)
else:
com = ["bwa"]
com.extend(options)
com.append(genome_file)
com.append(os.path.join(input_dir, fastq_file))
sam = subprocess.Popen(com, stdout=subprocess.PIPE)
bam_com = ["samtools", "view", "-b"]
bam = subprocess.Popen(bam_com, stdin=sam.stdout,
stdout=subprocess.PIPE)
bam_file = os.path.join(output_dir, output_file)
sort_com = ["samtools", "sort", "-T", "/tmp/", "-o", bam_file]
subprocess.run(sort_com, stdin=bam.stdout)
subprocess.run(["samtools", "index", bam_file], check=True,
stderr=subprocess.PIPE)
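# For output_type == "bam" and the recommended options, the calls above
# approximate this shell pipeline (sample name and paths are placeholders):
#     bwa mem -t30 -L500 -T100 -R "@RG\tID:S1\tSM:S1\tLB:S1\tPL:ILLUMINA" \
#         <bwa_genome> <input_dir>/S1.fastq.gz \
#         | samtools view -b \
#         | samtools sort -T /tmp/ -o <output_dir>/S1.srt.bam
#     samtools index <output_dir>/S1.srt.bam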
def bwa_multi(fastq_files, output_type, fastq_dir, bam_dir, options, species,
processor_number, parallel_processes):
"""Align fastq files to species genome using bwa in parallel."""
if len(fastq_files) == 0:
fastq_files = [f.name for f in os.scandir(fastq_dir)]
if output_type == "sam":
extension = ".sam"
elif output_type == "bam":
extension = ".srt.bam"
else:
print(("Output type must be bam or sam, {} was given").format(
output_type))
return
if not os.path.exists(bam_dir):
os.makedirs(bam_dir)
    if parallel_processes == 1:
        # add the thread option once; calling extend() with a string would
        # append its individual characters to the options list
        options = options + ["-t" + str(processor_number)]
        for f in fastq_files:
            # get base file name
            base_name = f.split(".")[0]
            bam_name = base_name + extension
            bwa(f, bam_name, output_type, fastq_dir, bam_dir, options, species,
                base_name)
else:
processor_per_process = processor_number // parallel_processes
p = NoDaemonProcessPool(parallel_processes)
options = options + ["-t " + str(processor_per_process)]
results = []
errors = []
for f in fastq_files:
base_name = f.split(".")[0]
bam_name = base_name + extension
p.apply_async(bwa, (f, bam_name, output_type, fastq_dir, bam_dir,
options, species, base_name),
callback=results.append,
error_callback=errors.append)
p.close()
p.join()
if len(errors) > 0:
for e in errors:
print("Error in bwa_multi function", e.stderr)
def parse_cigar(cigar):
"""
Parse a CIGAR string.
CIGAR string is made up of numbers followed
by key letters that represent a sequence alignment; return a dictionary
with alignment keys and number of bases with that alignment key as values.
Below is some more information about cigar strings.
2S20M1I2M5D,for, example would mean that the 2 bases are "S"oft clipped
from 5' end of the sequence(read) aligned and it is not part of the
alignment; following that 2 bases, 20 bases of the read aligns or "M"atches
to the reference sequence, match here does not mean the bases are
identical, just that there is 1 base of reference for each base of the read
and there are enough similarity between the two sequences that they
aligned. 1 base following the 20M is an insertion, that is, it exists in
the read but not in the reference; 5 bases at the end are "D"eletions,
they are in the reference but not in the read.
"""
cig = {}
values = []
for c in cigar:
try:
values.append(str(int(c)))
except ValueError:
if c in list(cig.keys()):
cig[c] += int("".join(values))
else:
cig[c] = int("".join(values))
values = []
return cig
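# Example (kept as a comment so it is not executed on import): numbers are
# accumulated until a letter is reached and repeated keys are summed, so
#     parse_cigar("2S20M1I2M5D")  ->  {"S": 2, "M": 22, "I": 1, "D": 5}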
def get_cigar_length(cigar):
"""Get the length of the reference sequence from CIGAR string."""
try:
# parse cigar string and find out how many insertions are in the
# alignment
insertions = parse_cigar(cigar)["I"]
except KeyError:
# the key "I" will not be present in the cigar string if there is no
# insertion
insertions = 0
# all the values in the cigar dictionary represent a base in the reference
# seq,
# except the insertions, so they should be subtracted
return sum(parse_cigar(cigar).values()) - insertions
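# Example: for "20M1I2M5D" the parsed values sum to 28 and there is 1
# insertion, so get_cigar_length returns 27, the number of reference bases
# spanned by the alignment (20 + 2 matched bases plus 5 deleted bases).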
def parse_bowtie(primer_dict, bt_file, primer_out, primer3_output_DIR,
bowtie2_output_DIR, species, settings, outp=1):
"""
Take a bowtie output (sam) file and filter top N hits per primer.
When a primer has more than "upper_hit_limit" bowtie hits,
remove that primer.
Add the bowtie hit information, including hit sequence to
the primers dictionary.
"""
# extract how many bowtie hits should be added
# to the primer information for further TM analysis
N = int(settings["hit_limit"])
    # how many total bowtie hits get a primer discarded altogether
M = int(settings["upper_hit_limit"])
# read in bowtie file
infile = open(os.path.join(bowtie2_output_DIR, bt_file), 'r')
primers = copy.deepcopy(primer_dict)
# create a temp dic to count hits/primer
counter_dic = {}
# create a bowtie key that will be used when adding
# bowtie information to primers
bowtie_key = "bowtie_information_" + species
# all bowtie hits that will be used further for TM analysis
# will need to have sequence information with them
# region keys for hits (in chrx:begin-end format) will be
# kept in a list for mass fasta extraction later.
keys = set()
#
# read bowtie hits
for line in infile:
try:
if not line.startswith("@"):
record = line.strip('\n').split('\t')
primer_name = record[0]
# increment hit counter for primer
try:
counter_dic[primer_name] += 1
except KeyError:
counter_dic[primer_name] = 1
# check how many hits have been analyzed for this primer
# if upper hit limit has been reached, mark primer for removal
if counter_dic[primer_name] >= M:
primers['primer_information'][primer_name]["remove"] = True
continue
# move on to the next hit if primer hit limit has been reached.
# no further hits will be added for those primers
if counter_dic[primer_name] >= N:
continue
flag = record[1]
# a flag value of 4 means there was no hit, so pass those lines
if flag == "4":
continue
# chromosome of the bowtie hit
chrom = record[2]
# genomic position of bowtie hit
pos = int(record[3])
# get cigar string of alignment
cigar = record[5]
# extract which strand is the bowtie hit on
# true if forward
strand = ((int(record[1]) % 256) == 0)
# get hit coordinates
hit_start = pos
# bowtie gives us the start position of the hit
# end position is calculated using the cigar string
# of the hit
hit_end = pos + get_cigar_length(cigar) - 1
# create region keys required for sequence retrieval
# we want 3 nt extra on the 5' of the primer
# because when alternative primers for paralogs
# are considered we check +/- 3 nt from 5' end
# to balance TM.
if strand:
# Primer's 5' is the hit start when the hit is on forward
# strand so the nucleotides are added at start position
bt_start = hit_start
bt_end = hit_end
hit_str = "forward"
hit_region_key = (chrom + ":" + str(hit_start)
+ "-" + str(hit_end))
else:
bt_start = hit_end
bt_end = hit_start
hit_str = "reverse"
hit_region_key = (chrom + ":" + str(hit_start)
+ "-" + str(hit_end))
# add region key to keys list for fasta retrieval later
keys.add(hit_region_key)
# add all hit information to primer dictionary
try:
primers["primer_information"][primer_name][bowtie_key][
str(counter_dic[primer_name])
] = {"chrom": chrom, "begin": bt_start, "end": bt_end,
"key": hit_region_key, "strand": hit_str}
except KeyError:
primers["primer_information"][primer_name][bowtie_key] = {
str(counter_dic[primer_name]): {"chrom": chrom,
"begin": bt_start,
"end": bt_end,
"key": hit_region_key,
"strand": hit_str}
}
except KeyError:
# in earlier versions of this function the primers with
# excessive hits were removed during iteration and that lead
# to keyerrors. Now there should be no key error.
continue
# get the fasta sequences of all hits
sequence_dic = get_fasta_list(keys, species)
# remove primers with too many hits and add bowtie information for others.
for p in list(primers["primer_information"].keys()):
try:
if primers["primer_information"][p]["remove"]:
primers["primer_information"].pop(p)
continue
except KeyError:
pass
# add hit sequences to primer dictionary
# forward strand hits are added directly
# reverse strand hits are reversed-complemented
# so the hit is always in the primer orientation and
# and similar in sequence"
try:
for h in primers["primer_information"][p][bowtie_key]:
if (primers["primer_information"][p]
[bowtie_key][h]["strand"] == "forward"):
primers["primer_information"][p][bowtie_key][h][
"sequence"
] = sequence_dic[primers["primer_information"][p][
bowtie_key][h]["key"]
]
else:
primers["primer_information"][p][bowtie_key][h][
"sequence"
] = reverse_complement(
sequence_dic[primers["primer_information"]
[p][bowtie_key][h]["key"]]
)
except KeyError:
# if there is no bowtie hit for this primer (happens for host
# species):
primers["primer_information"][p][bowtie_key] = {}
# save the updated primers file
if outp:
with open(os.path.join(
primer3_output_DIR, primer_out), 'w') as outfile:
json.dump(primers, outfile, indent=1)
return primers
def process_bowtie(primers, primer_out, primer3_output_DIR,
bowtie2_output_DIR, species, settings, host=False, outp=1):
"""
Process a primer dict with bowtie information added.
Look at bowtie hits for each primer, determine if they
    are on intended targets or nonspecific. In cases of paralogous
regions, check all paralogs and determine if the primer
will bind to any paralog. Create alternative primers if necessary
and allowed. Get melting temperatures of all hits and add
all these information to the primer dictionary.
"""
    # get Na, Mg and oligo concentrations; these are specified in M, but
    # primer3 uses mM for ions and nM for oligos, so they will be adjusted.
Na = float(settings["Na"]) * 1000
Mg = float(settings["Mg"]) * 1000
conc = float(settings["oligo_conc"]) * pow(10, 9)
# are alternative mip arms allowed/desired
alt_arm = int(settings["alternative_arms"])
bowtie_key = "bowtie_information_" + species
alt_keys = set([])
# get reference chromosome lengths
genome_file = get_file_locations()[species]["fasta_genome"]
reference_lengths = {}
genome_sam = pysam.FastaFile(genome_file)
for r in genome_sam.references:
reference_lengths[r] = genome_sam.get_reference_length(r)
# read bowtie hits
for primer_name in primers['primer_information']:
try:
primer_seq = primers['primer_information'][primer_name]["SEQUENCE"]
if not host:
para = (primers['primer_information'][primer_name]
["PARALOG_COORDINATES"])
if ("BOWTIE_BINDS" not in
primers['primer_information'][primer_name]):
primers[
'primer_information'][primer_name]["BOWTIE_BINDS"] = []
if ("ALT_BINDS" not in
primers['primer_information'][primer_name]):
primers[
'primer_information'][primer_name]["ALT_BINDS"] = []
for bt_hit_name in list(primers['primer_information']
[primer_name][bowtie_key].keys()):
bt_hit = (primers['primer_information'][primer_name]
[bowtie_key][bt_hit_name])
bt_chrom = bt_hit["chrom"]
bt_begin = bt_hit["begin"]
bt_end = bt_hit["end"]
bt_ori = bt_hit["strand"]
bt_seq = bt_hit["sequence"]
if host:
bt_hit["TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(bt_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
continue
intended = 0
# para is a dict like:
# {C0:{"CHR": "chr4", "GENOMIC_START" ..}, C1:{..
# for non-CNV regions, bowtie mapping should be exactly the
# same as genomic coordinates, so even if there is 1 bp
# difference, we'll count this as off target. For CNV regions,
# a more generous 20 bp padding will be allowed to account for
# differences in our mapping and bowtie mapping. Bowtie mapping
# will be accepted as the accurate mapping and paralog
# coordinates will be changed accordingly.
map_padding = 1
if len(para) > 1:
map_padding = 20
for k in para:
para_ori = para[k]["ORI"]
para_chr = para[k]["CHR"]
para_begin = para[k]["GENOMIC_START"]
para_end = para[k]["GENOMIC_END"]
if ((para_ori == bt_ori) and (para_chr == bt_chrom)
and (abs(para_begin - bt_begin) < map_padding)
and (abs(para_end - bt_end) < map_padding)):
intended = 1
# Get bowtie determined coordinates and sequences
# for the paralog copy. These will have priority
# over GENOMIC_ values calculated internally.
para[k]["BOWTIE_END"] = bt_end
para[k]["BOWTIE_START"] = bt_begin
para[k]["BOWTIE_SEQUENCE"] = bt_seq
if intended:
# if the paralog sequence is the same as the reference
# this primer should bind to the paralog copy as well.
if bt_seq.upper() == primer_seq.upper():
para[k]["BOWTIE_BOUND"] = True
primers['primer_information'][
primer_name]["BOWTIE_BINDS"].append(k)
else:
# if the sequences are not exactly the same
# we'll assume the primer does not bind to the
# paralog and attempt to generate an alternative
# primer for this paralog.
para[k]["BOWTIE_BOUND"] = False
# Do this only if alternative MIP arms are allowed
# specified by alt_arm setting.
if alt_arm:
# get chromosome length to avoid setting
                                    # alt arms beyond chromosome ends
para_chr_length = reference_lengths[para_chr]
al = {}
al["ref"] = {"ALT_SEQUENCE": primer_seq}
al["ref"]["ALT_TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(primer_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
for j in range(-3, 4):
if j == 0:
continue
alt_start = bt_begin + j
alt_end = bt_end
if ((alt_start < 0) or (alt_end < 0)
or (alt_start > para_chr_length)
or (alt_end > para_chr_length)):
continue
if para_ori == "forward":
alt_primer_key = create_region(
bt_chrom,
alt_start,
alt_end
)
else:
alt_primer_key = create_region(
bt_chrom,
alt_end,
alt_start
)
al[j] = {}
al[j]["ALT_START"] = alt_start
al[j]["ALT_END"] = alt_end
al[j]["ALT_ORI"] = para_ori
al[j]["ALT_KEY"] = alt_primer_key
alt_keys.add(alt_primer_key)
para[k]["ALTERNATIVES"] = al
else:
para[k]["ALTERNATIVES"] = {}
para[k]["ALT_TM"] = 0
para[k]["ALT_TM_DIFF"] = 100
para[k]["ALT_BOUND"] = False
# remove bowtie hit for intended target
primers['primer_information'][
primer_name][bowtie_key].pop(bt_hit_name)
break
# add TM value for unindended target
if not intended:
bt_hit["TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(bt_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
# Design alternative primers (if allowed) for paralogs
# when there is no bowtie hit for that paralog.
if not host:
for k in para:
try:
para[k]["BOWTIE_END"]
except KeyError:
para_ori = para[k]["ORI"]
para_chr = para[k]["CHR"]
para_begin = para[k]["GENOMIC_START"]
para_end = para[k]["GENOMIC_END"]
para[k]["BOWTIE_BOUND"] = False
if alt_arm:
# get chromosome length to avoid setting
                            # alt arms beyond chromosome ends
para_chr_length = reference_lengths[para_chr]
al = {}
al["ref"] = {"ALT_SEQUENCE": primer_seq}
al["ref"]["ALT_TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(primer_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
for j in range(-3, 4):
if j == 0:
continue
alt_start = para_begin + j
alt_end = para_end
if ((alt_start < 0) or (alt_end < 0)
or (alt_start > para_chr_length)
or (alt_end > para_chr_length)):
continue
if para_ori == "forward":
alt_primer_key = create_region(
para_chr,
alt_start,
alt_end
)
else:
alt_primer_key = create_region(
para_chr,
alt_end,
alt_start
)
al[j] = {}
al[j]["ALT_START"] = alt_start
al[j]["ALT_END"] = alt_end
al[j]["ALT_ORI"] = para_ori
al[j]["ALT_KEY"] = alt_primer_key
alt_keys.add(alt_primer_key)
para[k]["ALTERNATIVES"] = al
else:
para[k]["ALTERNATIVES"] = {}
para[k]["ALT_TM"] = 0
para[k]["ALT_TM_DIFF"] = 100
para[k]["ALT_BOUND"] = False
except KeyError:
continue
if len(alt_keys) > 0:
alt_sequences = get_fasta_list(alt_keys, species)
for primer_name in primers['primer_information']:
para = (primers['primer_information'][primer_name]
["PARALOG_COORDINATES"])
for k in para:
try:
alt_candidates = para[k]["ALTERNATIVES"]
except KeyError:
continue
for c in list(alt_candidates.keys()):
try:
alt_candidates[c]["ALT_TM"]
except KeyError:
alt_ori = alt_candidates[c]["ALT_ORI"]
alt_key = alt_candidates[c]["ALT_KEY"]
alt_seq = alt_sequences[alt_key]
if alt_ori == "reverse":
alt_seq = reverse_complement(alt_seq)
if alt_seq != "":
alt_tm = calcHeterodimerTm(
alt_seq,
reverse_complement(alt_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
alt_candidates[c]["ALT_TM"] = alt_tm
alt_candidates[c]["ALT_SEQUENCE"] = alt_seq
else:
alt_candidates.pop(c)
if outp:
with open(os.path.join(
primer3_output_DIR, primer_out), 'w') as outfile:
json.dump(primers, outfile, indent=1)
return primers
def filter_bowtie(primers, output_file, primer3_output_DIR, species, TM=46,
hit_threshold=0, lower_tm=46, lower_hit_threshold=3, outp=1):
"""
Check TMs of bowtie hits of given primers, on a given genome.
Filter the primers with too many nonspecific hits.
"""
for primer in list(primers["primer_information"].keys()):
# create a hit count parameter for hits with significant tm
# there are two parameters specified in the rinfo file
# high temp limit and low temp limit. The idea is to allow
# a very small (if any) number of nonspecific targets with high TM
# values but allow some low TM off targets.
hc = 0
lhc = 0
# check if bowtie information exists in dic
try:
bt_key = "bowtie_information_" + species
bowtie = primers["primer_information"][primer][bt_key]
for h in bowtie:
hit = bowtie[h]
try:
# if TM information is included in bowtie, compare with
# high and low TM, increment hc, lc if necessary and
                    # discard primers passing specified off-target thresholds.
if float(hit["TM"]) >= TM:
hc += 1
if hc > hit_threshold:
primers["primer_information"].pop(primer)
break
elif float(hit["TM"]) >= lower_tm:
lhc += 1
if lhc > lower_hit_threshold:
primers["primer_information"].pop(primer)
break
except KeyError:
continue
# remove bowtie information once we use it.
primers["primer_information"][primer].pop(bt_key)
except KeyError:
continue
if outp:
# write dictionary to file in primer3_output_DIR
outfile = open(os.path.join(primer3_output_DIR, output_file), 'w')
json.dump(primers, outfile, indent=1)
outfile.close()
return primers
def alternative(primer_dic, output_file,
primer3_output_DIR, tm_diff, outp=1):
"""
Pick the best alternative arm for primers that do not bind all paralogs.
This is done by picking the alternative primer with melting temperature
that is closest to the original primer.
"""
primers = primer_dic["primer_information"]
try:
for primer_name in primers:
primer = primers[primer_name]
para = primer["PARALOG_COORDINATES"]
for c in para:
try:
alts = para[c]["ALTERNATIVES"]
# get the original primer TM
ref_tm = alts["ref"].pop("ALT_TM")
alts.pop("ref")
# sort alt primers by their TM difference from the ref
sorted_alts = sorted(
alts, key=lambda a: abs(alts[a]["ALT_TM"] - ref_tm)
)
# use the primer only if the TM difference is within
# specified limit.
if abs(alts[sorted_alts[0]]["ALT_TM"] - ref_tm) <= tm_diff:
primer["ALT_BINDS"].append(c)
para[c].update(alts[sorted_alts[0]])
para[c].pop("ALTERNATIVES")
except KeyError:
try:
para[c].pop("ALTERNATIVES")
except KeyError:
pass
except IndexError:
try:
para[c].pop("ALTERNATIVES")
except KeyError:
pass
except KeyError:
pass
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
json.dump(primer_dic, outfile, indent=1)
return primer_dic
def score_paralog_primers(primer_dict, output_file, primer3_output_DIR,
ext, mask_penalty, species, backbone, outp=1):
"""
Score primers in a dictionary according to a scoring matrix.
Scoring matrices are somewhat crude at this time.
    Arm GC content weighs the most, followed by the arm's GC clamp and arm
    length; next_base values weigh the least.
"""
primers = primer_dict["primer_information"]
extension = (ext == "extension")
# primer scoring coefficients were calculated based on
# linear models of various parameters and provided as a dict
with open("/opt/resources/mip_scores.dict", "rb") as infile:
linear_coefs = pickle.load(infile)
# the model was developed using specific reaction conditions as below.
# actual conditions may be different from these but we'll use these
# for the model.
na = 25 # Sodium concentration
mg = 10 # magnesium concentration
conc = 0.04 # oligo concentration
# get extension arm sequence
if extension:
for p in primers:
extension_arm = primers[p]["SEQUENCE"]
# calculate gc content of extension arm
extension_gc = calculate_gc(extension_arm)
# count lowercase masked nucleotides. These would likely be masked
# for variation underneath.
extension_lowercase = sum([c.islower() for c in extension_arm])
# calculate TM with the model parameters for TM
ext_TM = primer3.calcTm(extension_arm, mv_conc=na, dv_conc=mg,
dna_conc=conc, dntp_conc=0)
# create a mip parameter dict
score_features = {"extension_gc": extension_gc,
"extension_lowercase": extension_lowercase,
"ext_TM": ext_TM}
# calculate primer score using the linear model provided
tech_score = 0
for feature in score_features:
degree = linear_coefs[feature]["degree"]
primer_feature = score_features[feature]
poly_feat = [pow(primer_feature, i) for i in range(degree + 1)]
tech_score += sum(linear_coefs[feature]["coef"] * poly_feat)
tech_score += linear_coefs[feature]["intercept"]
primers[p]["SCORE"] = tech_score
# get ligation arm parameters
else:
for p in primers:
ligation_arm = primers[p]["SEQUENCE"]
            # calculate gc content of ligation arm
ligation_gc = calculate_gc(ligation_arm)
# only the 3' end of the ligation arm was important in terms of
# lowercase masking.
ligation_lowercase_end = sum([c.islower()
for c in ligation_arm[-5:]])
            # calculate TM of ligation sequence (actual ligation probe arm)
            # against the probe backbone.
ligation_bb_TM = primer3.calcHeterodimerTm(
reverse_complement(ligation_arm), backbone,
mv_conc=na, dv_conc=mg, dna_conc=conc, dntp_conc=0)
# create a mip parameter dict
score_features = {"ligation_gc": ligation_gc,
"ligation_lowercase_end": ligation_lowercase_end,
"ligation_bb_TM": ligation_bb_TM}
# calculate primer score using the linear model provided
tech_score = 0
for feature in score_features:
degree = linear_coefs[feature]["degree"]
primer_feature = score_features[feature]
poly_feat = [pow(primer_feature, i) for i in range(degree + 1)]
tech_score += sum(linear_coefs[feature]["coef"] * poly_feat)
tech_score += linear_coefs[feature]["intercept"]
primers[p]["SCORE"] = tech_score
if outp:
# write dictionary to json file
outfile = open(os.path.join(primer3_output_DIR, output_file), "w")
json.dump(primer_dict, outfile, indent=1)
outfile.close()
return primer_dict
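# Scoring sketch: each feature contributes a polynomial term plus an
# intercept. With a hypothetical coefficient entry such as
#     linear_coefs["extension_gc"] = {"degree": 2,
#                                     "coef": [0.0, 0.8, -0.01],
#                                     "intercept": 1.5}
# an extension arm with 50% GC would add 0.8*50 - 0.01*50**2 + 1.5 = 16.5 to
# its SCORE. The real coefficients are loaded from the pickled mip_scores.dict
# file and will differ from this illustration (and "coef" is expected to
# support elementwise multiplication, e.g. a numpy array).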
def filter_primers(primer_dict, output_file,
primer3_output_DIR, n, bin_size, outp=1):
"""
Filter primers so that only top n scoring primers remain for each bin.
Primers are divided into bins of the given size based on the 3' end of
the primer. Only top performing n primers ending in the same bin will
remain after filtering.
    For example, bin_size=3 and n=1 would choose the best scoring primer
among primers that end within 3 bps of each other.
"""
# load extension and ligation primers from file
template_seq = primer_dict["sequence_information"]["SEQUENCE_TEMPLATE"]
template_len = len(template_seq)
forward_bins = {}
reverse_bins = {}
for i in range(template_len//bin_size + 1):
forward_bins[i] = []
reverse_bins[i] = []
for primer in list(primer_dict["primer_information"].keys()):
# get primer orientation
ori = primer_dict["primer_information"][primer]["ORI"]
# get primer start coordinate
start = int(primer_dict["primer_information"][primer]
["COORDINATES"].split(",")[0])
primer_len = int(primer_dict["primer_information"][primer]
["COORDINATES"].split(",")[1])
if ori == "forward":
end = start + primer_len - 1
elif ori == "reverse":
end = start - primer_len + 1
        # which bin the primer's 3' end coordinate falls into
end_bin = end//bin_size
# get primer score
score = primer_dict["primer_information"][primer]["SCORE"]
# append the primer name/score to appropriate bin dic
if ori == "forward":
forward_bins[end_bin].append([primer, score])
elif ori == "reverse":
reverse_bins[end_bin].append([primer, score])
best_primer_dict = {}
best_primer_dict["sequence_information"] = primer_dict[
"sequence_information"]
best_primer_dict["primer_information"] = {}
# find best scoring mips in each forward bin
for key in forward_bins:
# sort primers for score
primer_set = sorted(forward_bins[key], key=itemgetter(1))
# get best scoring primers (all primers if there are less than n)
if len(primer_set) < n:
best_primers = primer_set
else:
best_primers = primer_set[-n:]
        # add best primers to dictionary
for primers in best_primers:
primer_name = primers[0]
best_primer_dict["primer_information"][primer_name] = primer_dict[
"primer_information"][primer_name]
# find best scoring mips in each reverse bin
for key in reverse_bins:
# sort primers for score
primer_set = sorted(reverse_bins[key], key=itemgetter(1))
# get best scoring primers (all primers if there are less than n)
if len(primer_set) < n:
best_primers = primer_set
else:
best_primers = primer_set[-n:]
        # add best primers to dictionary
for primers in best_primers:
primer_name = primers[0]
best_primer_dict["primer_information"][primer_name] = primer_dict[
"primer_information"][primer_name]
# write new dic to file
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
json.dump(best_primer_dict, outfile, indent=1)
return best_primer_dict
def pick_paralog_primer_pairs(extension, ligation, output_file,
primer3_output_DIR, min_size, max_size,
alternative_arms, region_insertions,
subregion_name, outp=1):
"""Pick primer pairs satisfying a given size range."""
# assign primer information dictionaries to a shorter name
ext = extension["primer_information"]
lig = ligation["primer_information"]
# check if extension and ligation dictionaries have primers
if len(ext) == 0:
return 1
if len(lig) == 0:
return 1
# create a primer pairs dic. This dictionary is similar to primer dic
primer_pairs = {}
# has the same sequence_information key:value pairs
primer_pairs["sequence_information"] = {}
# has pair information key instead of primer_information
primer_pairs["pair_information"] = {}
# populate sequence information (same as extension or ligation)
primer_pairs["sequence_information"]['SEQUENCE_TEMPLATE'] = extension[
"sequence_information"]['SEQUENCE_TEMPLATE']
primer_pairs["sequence_information"]['SEQUENCE_EXCLUDED_REGION'] = (
extension["sequence_information"]['SEQUENCE_EXCLUDED_REGION']
)
primer_pairs["sequence_information"]['SEQUENCE_TARGET'] = extension[
"sequence_information"]['SEQUENCE_TARGET']
primer_pairs["sequence_information"]['SEQUENCE_ID'] = extension[
"sequence_information"]['SEQUENCE_ID']
# pick primer pairs
for e in ext.keys():
# extension primer information for this mip will be e_info
e_info = ext[e]
# get primer coordinates
ext_start = e_info["GENOMIC_START"]
ext_end = e_info["GENOMIC_END"]
# get primer orientation
ext_ori = ext_end > ext_start
# if end is greater than start then it is a left(fw) primer,
# and ext_ori is True.
# get coordinates of this primer in paralog copies.
ep_info = e_info["PARALOG_COORDINATES"]
# the paralogs bound by primer according to bowtie mapping
e_binds = e_info["BOWTIE_BINDS"]
        # paralogs that were not bound by the primer and for which alt
        # primers were designed.
e_alt_binds = e_info["ALT_BINDS"]
# find a ligation primer
for l in list(lig.keys()):
l_info = lig[l]
# get primer coordinates
lig_start = l_info["GENOMIC_START"]
lig_end = l_info["GENOMIC_END"]
# get orientation of primer
lig_ori = lig_end < lig_start
# if end is less than start, it is a right primer
# create a list for start and end coordinates
coord = []
# continue only if the two orientations have the same value
if lig_ori == ext_ori:
# check if relative positions of primers are correct
if ext_ori:
# ligation end should be greater than extension end
# for forward pairs
position = lig_end > ext_end
else:
# extension end should be greater than ligation end
# for reverse pairs
position = ext_end > lig_end
# get pair information if relative positions of primers are
# correct
if position:
coord = [ext_start, ext_end, lig_start, lig_end]
coord.sort()
prod_size = coord[-1] - coord[0] + 1
pairs = {}
                    # get paralogous coordinates
lp_info = l_info["PARALOG_COORDINATES"]
l_binds = l_info["BOWTIE_BINDS"]
l_alt_binds = l_info["ALT_BINDS"]
# find the paralogs that are hybridized by both primers
# start with paralog copies that are bound by the
# original primers (not alts).
paralogs = list(set(l_binds).intersection(e_binds))
for p in paralogs:
try:
p_coord = []
ep_start = ep_info[p]["BOWTIE_START"]
ep_end = ep_info[p]["BOWTIE_END"]
ep_ori = ep_end > ep_start
lp_start = lp_info[p]["BOWTIE_START"]
lp_end = lp_info[p]["BOWTIE_END"]
lp_ori = lp_end < lp_start
lp_chrom = lp_info[p]["CHR"]
if lp_ori == ep_ori:
if lp_ori:
p_position = lp_end > ep_end
pair_ori = "forward"
else:
p_position = lp_end < ep_end
pair_ori = "reverse"
if p_position:
p_coord = [ep_start, ep_end,
lp_start, lp_end]
p_coord.sort()
prod_size = p_coord[-1] - p_coord[0] + 1
pairs[p] = {
"capture_size": prod_size,
"extension_start": ep_start,
"extension_end": ep_end,
"ligation_start": lp_start,
"ligation_end": lp_end,
"mip_start": p_coord[0],
"mip_end": p_coord[3],
"capture_start": p_coord[1] + 1,
"capture_end": p_coord[2] - 1,
"chrom": lp_chrom,
"orientation": pair_ori
}
except KeyError:
continue
# check if any pairs' product is within size limits
# taking into account reported insertions within
# the target region. If there are insertions, we reduce
                    # the max size to accommodate those insertions.
# Deletions are handled differently because their impact
# on the captures will be different. Any deletion that
# is small enough to be captured will still be captured
# without any alterations. However the capture size will
# become smaller, which is not detrimental.
pair_found = 0
captured_copies = []
for p in list(pairs.keys()):
if not region_insertions.empty:
max_insertion_size = region_insertions.loc[
(region_insertions["copy_chrom"]
== pairs[p]["chrom"])
& (region_insertions["copy_begin"]
> pairs[p]["capture_start"])
& (region_insertions["copy_end"]
< pairs[p]["capture_end"]),
"max_size"].sum()
else:
max_insertion_size = 0
adjusted_max_size = max_size - max_insertion_size
if adjusted_max_size < (min_size/2):
continue
                        # we do not have to adjust min_size unless the max
                        # size gets too close to min_size, in which case
                        # we leave a 30 bp distance between min and max so
                        # that we're not very limited in primer pair choices.
adjusted_min_size = min(adjusted_max_size - 30,
min_size)
if (adjusted_max_size
>= pairs[p]["capture_size"]
>= adjusted_min_size):
captured_copies.append(p)
pair_found = 1
if pair_found:
# if a pair is found for any copy
# remove minimum size restriction for other copies
for p in list(pairs.keys()):
if p in captured_copies:
continue
if not region_insertions.empty:
max_insertion_size = region_insertions.loc[
(region_insertions["copy_chrom"]
== pairs[p]["chrom"])
& (region_insertions["copy_begin"]
> pairs[p]["capture_start"])
& (region_insertions["copy_end"]
< pairs[p]["capture_end"]),
"max_size"].sum()
else:
max_insertion_size = 0
adjusted_max_size = max_size - max_insertion_size
if adjusted_max_size < (min_size/2):
continue
if (adjusted_max_size
>= pairs[p]["capture_size"] >= 0):
captured_copies.append(p)
# C0 must be in the captured copies because the
# reference copy is used for picking mip sets
if "C0" not in captured_copies:
continue
# create a pair name as
# PAIR_extension primer number_ligation primer number
ext_name = e.split('_')[2]
lig_name = l.split('_')[2]
pair_name = ("PAIR_" + subregion_name + "_" + ext_name
+ "_" + lig_name)
if ext_ori:
orientation = "forward"
pair_name = pair_name + "_F"
else:
orientation = "reverse"
pair_name = pair_name + "_R"
primer_pairs["pair_information"][pair_name] = {
"pairs": pairs,
"extension_primer_information": ext[e],
"ligation_primer_information": lig[l],
"orientation": orientation,
"captured_copies": captured_copies
}
# Check if there are any paralog copies that require
# alt primers to be used. If so, create those pairs.
alt_paralogs = list((set(l_alt_binds).union(
e_alt_binds)).difference(paralogs))
alts = {}
for a in alt_paralogs:
try:
alt_arms = []
p_coord = []
# check if the extension primer is the
# original or alt.
if ep_info[a]["BOWTIE_BOUND"]:
ep_start = ep_info[a]["BOWTIE_START"]
ep_end = ep_info[a]["BOWTIE_END"]
else:
try:
ep_start = ep_info[a]["ALT_START"]
ep_end = ep_info[a]["ALT_END"]
alt_arms.append("extension")
except KeyError:
continue
ep_ori = ep_end > ep_start
# check if ligation primer is the original
# or alternative designed.
if lp_info[a]["BOWTIE_BOUND"]:
lp_start = lp_info[a]["BOWTIE_START"]
lp_end = lp_info[a]["BOWTIE_END"]
else:
try:
lp_start = lp_info[a]["ALT_START"]
lp_end = lp_info[a]["ALT_END"]
alt_arms.append("ligation")
except KeyError:
continue
lp_ori = lp_end < lp_start
lp_chrom = lp_info[a]["CHR"]
if lp_ori == ep_ori:
if lp_ori:
p_position = lp_end > ep_end
pair_ori = "forward"
else:
p_position = lp_end < ep_end
pair_ori = "reverse"
if p_position:
p_coord = [ep_start, ep_end,
lp_start, lp_end]
p_coord.sort()
prod_size = (p_coord[-1]
- p_coord[0] + 1)
alts[a] = {
"capture_size": prod_size,
"extension_start": ep_start,
"extension_end": ep_end,
"ligation_start": lp_start,
"ligation_end": lp_end,
"mip_start": p_coord[0],
"mip_end": p_coord[3],
"capture_start": p_coord[1] + 1,
"capture_end": p_coord[2] - 1,
"chrom": lp_chrom,
"orientation": pair_ori,
"alternative_arms": alt_arms
}
except KeyError:
# if extension or ligation primer coordinates
# are not available for the paralog copy
# for any reason, e.g. the copy does not align
# to the ref for this primer, there will be
# a key error and it should be caught in this
# block.
continue
# check if any pairs' product is within size limits
captured_copies = []
for a in list(alts.keys()):
# does it satisfy arm setting?
good_alt = 0
# "any" means both ligation and extension arms
# are allowed to have alt sequences.
if alternative_arms == "any":
good_alt = 1
# if only one arm is allowed to have alt sequence,
# it could be specified as "one" or the specific
# arm (extension or ligation).
elif ((len(alts[a]["alternative_arms"]) == 1)
and ((alternative_arms
== alts[a]["alternative_arms"][0])
or (alternative_arms == "one"))):
good_alt = 1
# if the alt capture is valid, check the capture
# size and determined if it is likely to be
# captured.
if good_alt:
if not region_insertions.empty:
max_insertion_size = region_insertions.loc[
(region_insertions["copy_chrom"]
== alts[a]["chrom"])
& (region_insertions["copy_begin"]
> alts[a]["capture_start"])
& (region_insertions["copy_end"]
< alts[a]["capture_end"]),
"max_size"].sum()
else:
max_insertion_size = 0
adjusted_max_size = (max_size
- max_insertion_size)
if adjusted_max_size < (min_size/2):
continue
if (adjusted_max_size
>= alts[a]["capture_size"] >= 0):
captured_copies.append(a)
primer_pairs["pair_information"][
pair_name]["pairs"][a] = alts[a]
primer_pairs["pair_information"][pair_name][
"alt_copies"] = captured_copies
# return if no pairs found
if len(primer_pairs["pair_information"]) == 0:
# No primer pairs found.
return 1
# write dict to file in primer_output_DIR
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), 'w') as outfile:
json.dump(primer_pairs, outfile, indent=1)
return primer_pairs
def add_capture_sequence(primer_pairs, output_file, primer3_output_DIR,
species, outp=1):
"""
Extract the sequence between primers.
Get captured sequence using the primer coordinates.
"""
capture_keys = set()
for p_pair in primer_pairs["pair_information"]:
pairs = primer_pairs["pair_information"][p_pair]["pairs"]
for p in pairs:
paralog_key = pairs[p]["chrom"] + ":" + str(pairs[p][
"capture_start"]) + "-" + str(pairs[p]["capture_end"])
pairs[p]["capture_key"] = paralog_key
capture_keys.add(paralog_key)
capture_sequence_dic = get_fasta_list(capture_keys, species)
for p_pair in primer_pairs["pair_information"]:
pairs = primer_pairs["pair_information"][p_pair]["pairs"]
for p in pairs:
if pairs[p]["orientation"] == "forward":
pairs[p]["capture_sequence"] = capture_sequence_dic[pairs[p][
"capture_key"]]
else:
pairs[p]["capture_sequence"] = reverse_complement(
capture_sequence_dic[pairs[p]["capture_key"]]
)
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
json.dump(primer_pairs, outfile, indent=1)
return primer_pairs
def make_mips(pairs, output_file, primer3_output_DIR, mfold_input_DIR,
backbone, outp=1):
"""
Make mips from primer pairs.
Take the reverse complement of ligation primer sequence, add the backbone
sequence and the extension primer. Standard backbone is used if none
specified.
Add a new key to each primer pair:
"mip_information" with a dictionary that has SEQUENCE key
and mip sequence as value.
"""
# check if the primer dictionary is empty
if len(pairs["pair_information"]) == 0:
return 1
# get primer sequences for each primer pair
for primers in pairs["pair_information"]:
extension_sequence = pairs["pair_information"][primers][
"extension_primer_information"]["SEQUENCE"]
ligation_sequence = pairs["pair_information"][primers][
"ligation_primer_information"]["SEQUENCE"]
# reverse complement ligation primer
ligation_rc = reverse_complement(ligation_sequence)
# add sequences to make the mip
mip_sequence = ligation_rc + backbone + extension_sequence
# create a dictionary to hold mip information
mip_dic = {"ref": {"SEQUENCE": mip_sequence,
"captures": copy.deepcopy(
pairs["pair_information"][primers]
["captured_copies"]
)}}
# create alternative mips where necessary
if "alt_copies" in list(pairs["pair_information"][primers].keys()):
alt_sequences = {}
alt_counter = 0
alt = pairs["pair_information"][primers]["alt_copies"]
p_para = pairs["pair_information"][primers]["pairs"]
e_para = pairs["pair_information"][primers][
"extension_primer_information"]["PARALOG_COORDINATES"]
l_para = pairs["pair_information"][primers][
"ligation_primer_information"]["PARALOG_COORDINATES"]
# since alt primers are created for each copy, it is possible
# that some copies have the same primer pair. Pick just one
# such pair and remove the others.
for a in alt:
if "extension" in p_para[a]["alternative_arms"]:
extension_sequence = e_para[a]["ALT_SEQUENCE"].upper()
if "ligation" in p_para[a]["alternative_arms"]:
ligation_sequence = l_para[a]["ALT_SEQUENCE"].upper()
value_found = 0
# search through already created alt pairs to see if this one
# is already there.
for key, value in list(alt_sequences.items()):
if ([extension_sequence, ligation_sequence]
== value["sequences"]):
value_found = 1
# add the copy name to the dict and not create
# a new key for this copy.
value["copies"].append(a)
break
# create new entry if this alt pair is new
if not value_found:
alt_sequences[alt_counter] = {
"sequences": [extension_sequence, ligation_sequence],
"copies": [a]
}
alt_counter += 1
# create mip sequence and dict for the alt pairs
for alt_pair in alt_sequences:
seq_dic = alt_sequences[alt_pair]["sequences"]
alt_copies = alt_sequences[alt_pair]["copies"]
# reverse complement ligation primer
ligation_rc = reverse_complement(seq_dic[1])
# add sequences to make the mip
mip = ligation_rc + backbone + seq_dic[0]
mip_dic["alt" + str(alt_pair)] = {"SEQUENCE": mip,
"captures": alt_copies}
pairs["pair_information"][primers]["mip_information"] = mip_dic
# write mip sequences to a fasta file in mfold_input_DIR
# to check hairpin formation
with open(os.path.join(mfold_input_DIR, output_file), "w") as outfile:
for primers in pairs["pair_information"]:
outline = (">" + primers + "\n" + pairs["pair_information"]
[primers]["mip_information"]["ref"]['SEQUENCE'] + "\n")
outfile.write(outline)
# write mip dictionary to file in primer3_output_DIR
if outp:
outfile = open(os.path.join(primer3_output_DIR, output_file), 'w')
json.dump(pairs, outfile, indent=1)
outfile.close()
return pairs
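# Assembly sketch with hypothetical short arm sequences: given
#     extension_sequence = "ACGTAC" and ligation_sequence = "TTGGCC",
# the probe stored under mip_information["ref"]["SEQUENCE"] is
#     reverse_complement("TTGGCC") + backbone + "ACGTAC"
#     = "GGCCAA" + backbone + "ACGTAC"
# i.e. the reverse-complemented ligation arm sits 5' of the backbone and the
# extension arm 3' of it. The same reference MIP sequences are also written as
# a fasta file to mfold_input_DIR for hairpin checking.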
def check_hairpin(pairs, output_file, settings, output_dir, outp=1):
"""Check possible hairpin formation in MIP probe.
    Calculate possible hybridization between the MIP arms or between the MIP
arms and the probe backbone. Remove MIPs with likely hairpins.
"""
pairs = copy.deepcopy(pairs)
    # get Na, Mg and oligo concentrations; these are specified in M, but
    # primer3 uses mM for ions and nM for oligos, so they will be adjusted.
Na = float(settings["mip"]["Na"]) * 1000
Mg = float(settings["mip"]["Mg"]) * 1000
conc = float(settings["mip"]["oligo_conc"]) * pow(10, 9)
    # number of mips will be used to determine the backbone concentration
mip_count = int(settings["mip"]["mipset_size"])
    # get TM thresholds for hairpins; arm TMs should be the same,
    # otherwise we'll use the lower of the two
ext_arm_tm = float(settings["extension"]["hairpin_tm"])
lig_arm_tm = float(settings["ligation"]["hairpin_tm"])
arm_tm = min([ext_arm_tm, lig_arm_tm])
    # backbone tm will be used for interactions between arms and
    # all the backbones (from other mips as well). This will lead to a higher
    # tm since the backbones are present at a higher concentration, so it
    # could make sense to keep this threshold high. On the other hand,
    # eliminating even low likelihood interactions could be useful.
backbone_tm = float(settings["mip"]["hairpin_tm"])
backbone_name = settings["mip"]["backbone"]
backbone = mip_backbones[backbone_name]
# go through mips and calculate hairpins
# we will calculate hairpins by looking at TMs between arm sequences
# and backbone sequences since the whole MIP sequence is too long
# for nearest neighbor calculations (at least for primer3 implementation).
for p in list(pairs["pair_information"].keys()):
pair_dict = pairs["pair_information"][p]
mip_dict = pair_dict["mip_information"]
# for each primer pair we can have a number of mips due to paralog
# copies having alternative mips. We'll go through each mip.
for m in list(mip_dict.keys()):
mip_seq = mip_dict[m]["SEQUENCE"]
# extract arm and backbone sequences from the mip sequence
lig = mip_seq[:mip_seq.index(backbone)]
ext = mip_seq[mip_seq.index(backbone) + len(backbone):]
bb = backbone.replace("N", "")
# calculate dimer TMs between sequence combinations
ext_lig = calcHeterodimerTm(ext, lig, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc)
bb_ext_arm = calcHeterodimerTm(ext, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc)
bb_lig_arm = calcHeterodimerTm(lig, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc)
# take the maximum TM for hairpin threshold comparison
arms = max([ext_lig, bb_ext_arm, bb_lig_arm])
# calculate TM between arms and the whole reaction backbones
# backbone concentration will be more for this calculation.
bb_ext = calcHeterodimerTm(ext, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc * mip_count)
bb_lig = calcHeterodimerTm(lig, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc * mip_count)
bb_temp = max([bb_ext, bb_lig])
            # if either hairpin tm is higher than the limit, remove the mip
# and remove the paralog copy that is supposed to be captured
# by this specific mip from the pair dictionary.
if (arms > arm_tm) or (bb_temp > backbone_tm):
lost_captures = mip_dict[m]["captures"]
mip_copies = pair_dict["captured_copies"]
mip_copies = list(set(mip_copies).difference(lost_captures))
pair_dict["captured_copies"] = mip_copies
alt_copies = pair_dict["alt_copies"]
alt_copies = list(set(alt_copies).difference(lost_captures))
pair_dict["alt_copies"] = alt_copies
mip_dict.pop(m)
else:
mip_dict[m]["Melting Temps"] = {"arms_hp": ext_lig,
"ext_hp": bb_ext_arm,
"lig_hp": bb_lig_arm,
"ext_backbone": bb_ext,
"lig_backbone": bb_lig}
if len(mip_dict) == 0:
pairs["pair_information"].pop(p)
for p in pairs["pair_information"].keys():
pair_dict = pairs["pair_information"][p]
hp_dict = pair_dict["hairpin"] = {}
mip_dict = pair_dict["mip_information"]
for m in mip_dict:
hp_dict[m] = mip_dict[m]["Melting Temps"]
if outp:
output_file = os.path.join(output_dir, output_file)
with open(output_file, "w") as outfile:
json.dump(pairs, outfile)
return pairs
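# A minimal, hypothetical call to check_hairpin. The settings values below
# are illustrative assumptions only (concentrations are given in M, as the
# function converts them to mM/nM for primer3), and the backbone name must
# be a key in the mip_backbones dictionary ("hybrid_bb" is assumed here):
#
#     hairpin_settings = {
#         "mip": {"Na": "0.025", "Mg": "0.01", "oligo_conc": "5e-9",
#                 "mipset_size": "1000", "hairpin_tm": "60",
#                 "backbone": "hybrid_bb"},
#         "extension": {"hairpin_tm": "45"},
#         "ligation": {"hairpin_tm": "45"},
#     }
#     pairs = check_hairpin(pairs, "hairpin_checked.json", hairpin_settings,
#                           "/opt/analysis", outp=1)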
def filter_mips(mip_dic, bin_size, mip_limit):
"""
Filter MIPs covering similar regions.
Filter MIPs so that only top scoring mip ending within the "bin_size"
nucleotides on the same strand remain.
"""
# load extension and ligation primers from file
shuffled = list(mip_dic.keys())
random.shuffle(shuffled)
for m in shuffled:
if len(mip_dic) <= mip_limit:
return
try:
m_start = mip_dic[m].mip["C0"]["capture_start"]
m_end = mip_dic[m].mip["C0"]["capture_end"]
m_func = mip_dic[m].func_score
m_tech = mip_dic[m].tech_score
m_ori = mip_dic[m].mip["C0"]["orientation"]
for n in shuffled:
if len(mip_dic) <= mip_limit:
return
try:
if mip_dic[m].name != mip_dic[n].name:
n_start = mip_dic[n].mip["C0"]["capture_start"]
n_end = mip_dic[n].mip["C0"]["capture_end"]
n_func = mip_dic[n].func_score
n_tech = mip_dic[n].tech_score
n_ori = mip_dic[n].mip["C0"]["orientation"]
if (((abs(n_start - m_start) <= bin_size)
and (abs(n_end - m_end) <= bin_size))
and (m_ori == n_ori)):
if (m_tech + m_func) >= (n_tech + n_func):
mip_dic.pop(n)
else:
mip_dic.pop(m)
break
except KeyError:
continue
except KeyError:
continue
return
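# filter_mips modifies the passed dictionary in place and returns None.
# A hypothetical call that keeps at most 30 MIPs, removing the lower scoring
# member (by the sum of tech and func scores) of any MIP pair whose captures
# start and end within 50 bases of each other on the same strand:
#
#     filter_mips(mip_dic, bin_size=50, mip_limit=30)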
def compatible_mip_check(m1, m2, overlap_same, overlap_opposite):
d = m1.mip_dic
# get m1 coordinates
ext_start = d["extension_primer_information"]["GENOMIC_START"]
ext_end = d["extension_primer_information"]["GENOMIC_END"]
lig_start = d["ligation_primer_information"]["GENOMIC_START"]
lig_end = d["ligation_primer_information"]["GENOMIC_END"]
# get mip1 orientation
ori = d["orientation"]
# get m2 coordinates
m = m2.mip_dic
next_ext_start = m["extension_primer_information"]["GENOMIC_START"]
next_ext_end = m["extension_primer_information"]["GENOMIC_END"]
next_lig_start = m["ligation_primer_information"]["GENOMIC_START"]
next_lig_end = m["ligation_primer_information"]["GENOMIC_END"]
# get mip2 orientation
next_ori = m["orientation"]
if ori == next_ori:
m1_start = min([ext_start, ext_end, lig_start, lig_end])
m1_end = max([ext_start, ext_end, lig_start, lig_end])
m2_start = min([next_ext_start, next_ext_end, next_lig_start,
next_lig_end])
m2_end = max([next_ext_start, next_ext_end, next_lig_start,
next_lig_end])
ol = overlap([m1_start, m1_end], [m2_start, m2_end])
if len(ol) == 0:
return True
else:
return (ol[1] - ol[0] + 1) <= overlap_same
else:
m1_set = set(list(range(min([ext_start, ext_end]),
max([ext_start, ext_end]) + 1))
+ list(range(min([lig_start, lig_end]),
max([lig_start, lig_end]) + 1)))
m2_set = set(list(range(min([next_ext_start, next_ext_end]),
max([next_ext_start, next_ext_end]) + 1))
+ list(range(min([next_lig_start, next_lig_end]),
max([next_lig_start, next_lig_end]) + 1)))
ol = len(m1_set.intersection(m2_set))
return ol <= overlap_opposite
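# compatible_mip_check returns True when two MIPs can be used together.
# For MIPs on the same strand, the overlap of their overall footprints
# (extension arm through ligation arm) must be at most "overlap_same" bases;
# for MIPs on opposite strands, only the arm regions are compared and their
# overlap must be at most "overlap_opposite" bases. A hypothetical call,
# where mip1 and mip2 are objects exposing a mip_dic attribute as used above:
#
#     is_ok = compatible_mip_check(mip1, mip2, overlap_same=0,
#                                  overlap_opposite=5)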
def compatible_chains(primer_file, mip_dict, primer3_output_DIR,
primer_out, output_file, must_bonus, set_copy_bonus,
overlap_same, overlap_opposite, outp, bin_size,
trim_increment, trim_limit, set_size, chain_mips,
intervals):
try:
with open(os.path.join(
primer3_output_DIR, primer_file), "r") as infile:
scored_mips = json.load(infile)
except IOError:
print("Primer file does not exist.")
return 1
else:
# make a copy of the original mip dict to use in filtering
temp_dict = copy.deepcopy(mip_dict)
# create small subregions for binning MIPs and creating compatible
# mip sets for smaller regions
begin = intervals[0]
end = intervals[1]
bins = list(range(begin, end, bin_size))
# if a single nucleotide is the target, the interval will be the
# position of that nucleotide as [pos, pos] and the range will return
        # an empty list. In this case we'll create a [pos, pos] list instead.
if begin == end:
bins = [begin, end]
if bins[-1] != end:
bins.append(end)
num_bins = len(bins) - 1
# group MIPs into bins. Bins can share MIPs.
binned = {}
for i in range(num_bins):
binned[i] = {}
bin_start = bins[i]
bin_end = bins[i + 1]
for k in temp_dict:
cp = temp_dict[k].mip["C0"]
cs = cp["capture_start"]
ce = cp["capture_end"]
if len(overlap([cs, ce], [bin_start, bin_end])) > 0:
binned[i][k] = temp_dict[k]
# remove MIPs covering similar regions until we have only
# "set_size" number of MIPs per bin.
for i in binned:
trim_size = 1
while (trim_size <= trim_limit) and (len(binned[i]) > set_size):
filter_mips(binned[i], trim_size, set_size)
trim_size += trim_increment
# create (in)compatibility lists for each MIP
for k in list(scored_mips["pair_information"].keys()):
# get coordinates of mip arms
d = scored_mips["pair_information"][k]
# extension arm start position
es = d["extension_primer_information"]["GENOMIC_START"]
# extension arm end position
ee = d["extension_primer_information"]["GENOMIC_END"]
# ligation arm start position
ls = d["ligation_primer_information"]["GENOMIC_START"]
# ligation arm end position
le = d["ligation_primer_information"]["GENOMIC_END"]
# get mip orientation
ori = d["orientation"]
# create an in/compatibility list
incompatible = set()
compatible = set()
# loop through all mips to populate compatibility lists
for mk in list(scored_mips["pair_information"].keys()):
m = scored_mips["pair_information"][mk]
# next MIP's extension arm start position
nes = m["extension_primer_information"]["GENOMIC_START"]
# next MIP's extension arm end position
nee = m["extension_primer_information"]["GENOMIC_END"]
# next MIP's ligation arm start position
nls = m["ligation_primer_information"]["GENOMIC_START"]
# next MIP's ligation arm end position
nle = m["ligation_primer_information"]["GENOMIC_END"]
# get mip orientation
next_ori = m["orientation"]
compat = 0
next_compat = 0
# check if the two mips are compatible in terms of
# orientation and coordinates
if ori == next_ori == "forward":
if (((ls < nls) and (ls < nes + overlap_same))
or ((ls > nls) and (es + overlap_same > nls))):
compat = 1
elif ori == next_ori == "reverse":
if (((ls < nls) and (es < nls + overlap_same))
or ((ls > nls) and (ls + overlap_same > nes))):
compat = 1
elif (ori == "forward") and (next_ori == "reverse"):
if ((ls < nls + overlap_opposite)
or (es + overlap_opposite > nes)):
compat = 1
elif ((es < nls) and (ee < nls + overlap_opposite)
and (le + overlap_opposite > nle)
and (ls < nee + overlap_opposite)):
compat = 1
next_compat = 1
elif ((es > nls) and (es + overlap_opposite > nle)
and (ee < nee + overlap_opposite)
and (le + overlap_opposite > nes)):
compat = 1
elif (ori == "reverse") and (next_ori == "forward"):
if ((ls + overlap_opposite > nls)
or (es < nes + overlap_opposite)):
compat = 1
elif ((ls > nes) and (ls + overlap_opposite > nee)
and (le < nle + overlap_opposite)
and (ee + overlap_opposite > nls)):
compat = 1
elif ((ls < nes) and (le < nes + overlap_opposite)
and (ee + overlap_opposite > nee)
and (es < nle + overlap_opposite)):
compat = 1
next_compat = 1
if not compat:
incompatible.add(mk)
if next_compat:
compatible.add(mk)
d["incompatible"] = incompatible
d["compatible"] = compatible
def compatible_recurse(l):
"""
Take a list, l, of numbers that represent a mip set with
their corresponding "place" in the mip dictionary, and index
number, i. Find the subset of mips in the rest of the list
that are compatible with the mip at index i, using compatibility
dictionary d. For each mip in the subset, find compatible mips
in the rest of the list. Recurse until the subset does not have
any mips. Append each compatible subset to a final result list, f.
"""
# create a set of mips that are incompatible with any mip in
# the starting list.
incomp = set(l)
for il in l:
incomp.update(scored_mips["pair_information"][il][
"incompatible"])
# create a set of mips that can be the "next" mip that can be
# added to the mip list
comp = scored_mips["pair_information"][l[-1]][
"compatible"].difference(incomp).intersection(subset)
# if there are mips that can be added, call compatible_recurse
# function for each of those mips
if len(comp) > 0:
for n in comp:
compatible_recurse(l + [n])
# stop recursing when the mip chain cannot be elongated
else:
mip_sets.append((l))
keys = sorted(scored_mips["pair_information"],
key=lambda a: scored_mips["pair_information"][a]
["pairs"]["C0"]["capture_start"])
ms_dict = {}
for i in binned:
subset = binned[i]
mip_sets = []
for k in keys:
if k in subset:
comp_list = scored_mips["pair_information"][k][
"compatible"].intersection(subset)
if len(comp_list) > 0:
# for each of the mips in the compatibility list,
for m in comp_list:
# check if these two mips are present in other sets
# if they are, then no need to pursue this branch
# anymore as the same branch will be in the other
# mip set as well
test_set = frozenset([k, m])
for p_set in mip_sets:
if test_set.issubset(set(p_set)):
break
else:
# create an initial result list to be used by
# the compatible_recurse function
compatible_recurse([k, m])
else:
mip_sets.append(([k]))
ms_dict[i] = mip_sets
        # define a function for getting the mipset score and coverage
def score_mipset(mip_set):
# create a dic for diffs captured cumulatively by all
# mips in the set
merged_caps = []
# create a list for mip scores based on mip sequence and
# not the captured diffs
mip_scores = []
# create a list for what is captured by the set (only must
# captures)
must_captured = []
# create a list for other targets captured
targets_captured = []
# a list for mip coordinates
capture_coordinates = []
for mip_key in mip_set:
# extract the mip name
# extract the captured diffs from the mip_dic and
# append to capture list
mip_obj = mip_dict[mip_key]
uniq = mip_obj.capture_info["unique_captures"]
merged_caps.extend(uniq)
must_captured.extend(mip_obj.captures)
targets_captured.extend(mip_obj.captured_targets)
if ((mip_obj.tech_score > 0)
and (mip_obj.func_score > 0)):
mip_scores.append(
float(mip_obj.tech_score * mip_obj.func_score)
/ 1000
)
else:
mip_scores.append(
float(mip_obj.tech_score + mip_obj.func_score)
/ 1000)
mcoord = sorted(
[mip_obj.extension["C0"]["GENOMIC_START"],
mip_obj.ligation["C0"]["GENOMIC_START"],
mip_obj.extension["C0"]["GENOMIC_END"],
mip_obj.ligation["C0"]["GENOMIC_END"]]
)
capture_coordinates.append([mcoord[1] + 1,
mcoord[2] - 1])
merged_capture_coordinates = merge_overlap(
capture_coordinates, 50)
scp = len(set(merged_caps)) * set_copy_bonus
must_set = list(set(must_captured))
mb = len(must_set) * must_bonus
total_score = mb + scp + sum(mip_scores)
return total_score, merged_capture_coordinates
# create a dictionary to hold mip sets and their scores
mip_set_dict = {}
for i in ms_dict:
mip_set_dict[i] = {}
bin_co = bins[i: i + 2]
bin_size = bin_co[1] - bin_co[0] + 1
for j in range(len(ms_dict[i])):
ms = ms_dict[i][j]
sc = score_mipset(ms)
coverage = overlap(sc[1][0], bin_co)
coverage = (coverage[1] - coverage[0] + 1) / bin_size
mip_set_dict[i][j] = {"mip_list": ms, "score": sc[0],
"coordinates": sc[1][0],
"coverage": coverage}
for i in mip_set_dict:
iter_keys = list(mip_set_dict[i].keys())
for j in iter_keys:
try:
s1 = mip_set_dict[i][j]["mip_list"]
sc1 = mip_set_dict[i][j]["score"]
crd1 = mip_set_dict[i][j]["coordinates"]
cov1 = mip_set_dict[i][j]["coverage"]
for k in iter_keys:
if k == j:
continue
try:
s2 = mip_set_dict[i][k]["mip_list"]
sc2 = mip_set_dict[i][k]["score"]
crd2 = mip_set_dict[i][k]["coordinates"]
cov2 = mip_set_dict[i][k]["coverage"]
if check_redundant_region(crd1, crd2, spacer=0):
# if one set is to be removed pick the one
# with full coverage of the target region
# in case there is one
if chain_mips:
if (cov1 == 1) and (cov2 < 1):
mip_set_dict[i].pop(k)
elif (cov2 == 1) and (cov1 < 1):
mip_set_dict[i].pop(j)
break
# if both are covering the target
# or if both are failing to cover
# then pick the set with better score
elif sc2 > sc1:
mip_set_dict[i].pop(j)
break
else:
mip_set_dict[i].pop(k)
# if chaining mip is not required
# pick the better scoring set
elif sc2 > sc1:
mip_set_dict[i].pop(j)
break
else:
mip_set_dict[i].pop(k)
except KeyError:
continue
except KeyError:
continue
# merge compatible chains within each bin (to some extent)
merged_sets = {}
for i in mip_set_dict:
mip_sets = set()
for j in mip_set_dict[i]:
mip_sets.add(frozenset(mip_set_dict[i][j]["mip_list"]))
# these mip sets only contain mip chains. We can expand each
# such set by merging with other sets after removing incompatible
# mips from the second set.
counter = 0
for counter in range(5):
new_mip_sets = set()
for s1 in mip_sets:
inc = set()
for m in s1:
inc.update(scored_mips["pair_information"][m][
"incompatible"])
new_set = set(s1)
for s2 in mip_sets:
counter += 1
s3 = s2.difference(inc).difference(new_set)
if len(s3) > 0:
new_set.update(s3)
for m in new_set:
inc.update(scored_mips["pair_information"][m][
"incompatible"])
new_mip_sets.add(frozenset(new_set))
mip_sets = new_mip_sets
if len(mip_sets) > 0:
merged_sets[i] = mip_sets
# combine mip sets in different bins
# first, calculate how many combinations there will be
combo_length = 1
for i in merged_sets:
combo_length *= len(merged_sets[i])
# if too many combinations, reduce by picking the top 5 scoring
# sets for each bin
if combo_length > pow(10, 7):
for i in list(merged_sets.keys()):
top_sets = set(sorted(merged_sets[i],
key=lambda a: score_mipset(a)[0],
reverse=True)[:5])
merged_sets[i] = top_sets
combo_length = 1
for i in merged_sets:
combo_length *= len(merged_sets[i])
# if still too many combinations, take the top set for each bin
if combo_length > pow(10, 7):
for i in list(merged_sets.keys()):
top_sets = set(sorted(merged_sets[i],
key=lambda a: score_mipset(a)[0],
reverse=True)[:1])
merged_sets[i] = top_sets
# combine mip sets in different bins
combined_sets = set()
combo_list = list(itertools.product(
*[merged_sets[i] for i in sorted(merged_sets)]))
for l in combo_list:
if len(l) == 1:
m_set = set(l[0])
else:
m_set = set()
for i in range(len(l) - 1):
s1 = l[i]
s2 = l[i + 1]
inc = set()
for m in s1:
inc.update(scored_mips["pair_information"][m][
"incompatible"])
s3 = s2.difference(inc)
m_set.update(s1.union(s3))
combined_sets.add(frozenset(m_set))
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
outfile.write("\n".join([",".join(s) for s in combined_sets])
+ "\n")
with open(os.path.join(
primer3_output_DIR, primer_out), "wb") as outfile:
pickle.dump(scored_mips, outfile)
return combined_sets
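# compatible_chains returns a set of frozensets, each frozenset being one
# candidate set of mutually compatible MIP (primer pair) names. The names
# below are made up for illustration:
#
#     combined_sets = {frozenset({"gene1_mip1", "gene1_mip3"}),
#                      frozenset({"gene1_mip2", "gene1_mip4"})}
#
# When outp is truthy, the same sets are also written to "output_file" with
# one comma-separated set per line, and the scored MIP dictionary (including
# the added compatibility information) is pickled to "primer_out".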
def design_mips(design_dir, g):
print(("Designing MIPs for ", g))
try:
Par = mod.Paralog(os.path.join(design_dir, g, "resources",
g + ".rinfo"))
Par.run_paralog()
if Par.copies_captured:
print(("All copies were captured for paralog ", Par.paralog_name))
else:
print(("Some copies were NOT captured for paralog ",
Par.paralog_name))
if Par.chain_mips:
if Par.chained_mips:
print(("All MIPs are chained for paralog ", Par.paralog_name))
else:
print(("MIPs are NOT chained for paralog ", Par.paralog_name))
except Exception as e:
print((g, str(e), " FAILED!!!"))
return
def design_mips_worker(design_list):
design_dir, g = design_list
print(("Designing MIPs for ", g))
try:
rinfo_file = os.path.join(design_dir, g, "resources", g + ".rinfo")
Par = mod.Paralog(rinfo_file)
Par.run_paralog()
if len(Par.mips) == 0:
return
if Par.copies_captured:
print(("All copies were captured for paralog ", Par.paralog_name))
else:
print(("Some copies were NOT captured for paralog ",
Par.paralog_name))
if Par.chain_mips:
if Par.chained_mips:
print(("All MIPs are chained for paralog ", Par.paralog_name))
else:
print(("MIPs are NOT chained for paralog ", Par.paralog_name))
except Exception as e:
print((g, str(e), " FAILED!!!"))
traceback.print_exc()
return 0
def design_mips_multi(design_dir, g_list, num_processor):
chore_list = [[design_dir, g] for g in g_list]
res = []
try:
p = NoDaemonProcessPool(num_processor)
p.map_async(design_mips_worker, chore_list, callback=res.append)
p.close()
p.join()
except Exception as e:
res.append(str(e))
return res
def parasight(resource_dir,
design_info_file,
designed_gene_list=None,
extra_extension=".extra",
use_json=False):
if not use_json:
with open(design_info_file, "rb") as infile:
design_info = pickle.load(infile)
else:
with open(design_info_file) as infile:
design_info = json.load(infile)
output_list = ["#!/usr/bin/env bash"]
pdf_dir = os.path.join(resource_dir, "pdfs")
backup_list = ["#!/usr/bin/env bash"]
gs_list = ["#!/usr/bin/env bash"]
pdf_list = ["#!/usr/bin/env bash"]
pdf_merge_list = ["#!/usr/bin/env bash", "cd " + pdf_dir]
pdf_convert_list = ["gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite "
+ "-dPDFSETTINGS=/prepress -dAutoRotatePages=/All "
"-sOutputFile=merged.pdf"]
if not os.path.exists(pdf_dir):
os.makedirs(pdf_dir)
for t in design_info:
basename = os.path.join(design_info[t]["design_dir"], t, t)
backup_name = basename + ".extra"
filtered_name = basename + "_filtered.pse"
backup_list.append("scp " + backup_name + " " + backup_name + ".bak")
backup_list.append("mv " + filtered_name + " " + backup_name)
psname = basename + ".01.01.ps"
pdfname = basename + ".pdf"
gs_command = ("gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite "
+ "-dPDFSETTINGS=/prepress -dAutoRotatePages=/All "
"-sOutputFile=" + pdfname + " " + psname)
if designed_gene_list is not None:
if t in designed_gene_list:
pdf_convert_list.append(t + ".pdf")
else:
pdf_convert_list.append(t + ".pdf")
gs_list.append(gs_command)
pdf_list.append("cp " + basename + ".pdf "
+ os.path.join(pdf_dir, t + ".pdf"))
outlist = ["parasight76.pl",
"-showseq", basename + ".show",
"-extra", basename + extra_extension,
"-template", "/opt/resources/nolabel.pst",
"-precode file:" + basename + ".precode",
"-die"]
output_list.append(" ".join(outlist))
with open(basename + ".precode", "w") as outfile:
outfile.write("$opt{'filename'}='" + t
+ "';&fitlongestline; &print_all (0,'"
+ basename + "')")
with open(os.path.join(resource_dir, "backup_commands"), "w") as outfile:
outfile.write("\n".join(backup_list))
with open(
os.path.join(resource_dir, "parasight_commands"), "w") as outfile:
outfile.write("\n".join(output_list))
with open(os.path.join(resource_dir, "gs_commands"), "w") as outfile:
outfile.write("\n".join(gs_list))
with open(os.path.join(resource_dir, "copy_commands"), "w") as outfile:
outfile.write("\n".join(pdf_list))
pdf_merge_list.append(" ".join(pdf_convert_list))
with open(os.path.join(resource_dir, "convert_commands"), "w") as outfile:
outfile.write("\n".join(pdf_merge_list))
visualization_list = ["#!/usr/bin/env bash"]
visualization_list.append("chmod +x backup_commands")
visualization_list.append("./backup_commands")
visualization_list.append("chmod +x parasight_commands")
visualization_list.append("./parasight_commands")
visualization_list.append("chmod +x gs_commands")
visualization_list.append("./gs_commands")
visualization_list.append("chmod +x copy_commands")
visualization_list.append("./copy_commands")
visualization_list.append("chmod +x convert_commands")
visualization_list.append("./convert_commands")
with open(os.path.join(resource_dir, "visualize.sh"), "w") as outfile:
outfile.write("\n".join(visualization_list))
return
def parasight_print(resource_dir, design_dir, design_info_file,
designed_gene_list=None, extra_extension=".extra",
use_json=False, print_out=False):
if not use_json:
with open(design_info_file, "rb") as infile:
design_info = pickle.load(infile)
else:
with open(design_info_file) as infile:
design_info = json.load(infile)
output_file = os.path.join(resource_dir, "parasight_print.txt")
with open(output_file, "w") as outfile:
for g in design_info:
if (designed_gene_list is None) or (g in designed_gene_list):
show_file = os.path.join(design_dir, g, g + ".show")
extras_file = os.path.join(design_dir, g, g + extra_extension)
line = ["parasight76.pl", "-showseq", show_file,
"-extra ", extras_file]
if print_out:
print(" ".join(line))
outfile.write(" ".join(line) + "\n")
###############################################################
# Data analysis related functions
###############################################################
def get_analysis_settings(settings_file):
"""Convert analysis settings file to dictionary."""
settings = {}
with open(settings_file) as infile:
for line in infile:
try:
if not line.startswith("#"):
newline = line.strip().split("\t")
value = newline[1].split(",")
if len(value) == 1:
settings[newline[0]] = value[0]
else:
settings[newline[0]] = [v for v in value if v != ""]
except Exception as e:
print(("Formatting error in settings file, line {}"
"causing error '{}''").format(line, e))
print(newline)
return
return settings
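# The settings file parsed above is tab separated with one "key<TAB>value"
# pair per line; comma separated values become lists and lines starting
# with "#" are ignored. An illustrative (made up) file:
#
#     # Setting Name    Setting Value
#     workingDir        /opt/analysis/
#     mipSetKey         set1,set2,
#
# would yield:
#
#     {"workingDir": "/opt/analysis/", "mipSetKey": ["set1", "set2"]}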
def write_analysis_settings(settings, settings_file):
"""Create a settings file from a settings dictionary."""
outfile_list = [["# Setting Name", "Setting Value"]]
for k, v in settings.items():
if isinstance(v, list):
val = ",".join(map(str, v))
else:
val = str(v)
outfile_list.append([k, val])
with open(settings_file, "w") as outfile:
outfile.write("\n".join(["\t".join(o) for o in outfile_list]) + "\n")
return
###############################################################################
# New contig based analysis for vcf generation
###############################################################################
def map_haplotypes(settings):
"""Bwa-map haplotypes from MIPWrangler output to the reference genome.
Extract each unique haplotype sequence from the MIPWrangler output and
map to reference genome. MIPWrangler maps the sequencing data to the MIPs
used for an experiment based on the probe arms. We compare here whether
    the best genomic locus for a given haplotype matches the MIPWrangler
    assignment. If not, we consider those haplotypes off target and remove
    them.
"""
wdir = settings["workingDir"]
haplotypes_fq_file = os.path.join(wdir, settings["haplotypesFastqFile"])
haplotypes_sam_file = os.path.join(wdir, settings["haplotypesSamFile"])
bwa_options = settings["bwaOptions"]
call_info_file = settings["callInfoDictionary"]
species = settings["species"]
try:
tol = int(settings["alignmentTolerance"])
except KeyError:
tol = 200
# DATA EXTRACTION ###
raw_results = pd.read_table(os.path.join(wdir,
settings["mipsterFile"]))
##########################################################
# Add the statistics for each haplotype to the data
# such as how many samples had a given haplotype
# and how many barcodes supported a given haplotype
# Filter the haplotypes for those criteria to
# remove possible noise and infrequent haplotypes
##########################################################
# Haplotype Filters from the settings file
haplotype_min_barcode_filter = int(settings["minHaplotypeBarcodes"])
haplotype_min_sample_filter = int(settings["minHaplotypeSamples"])
haplotype_min_sample_fraction_filter = float(
settings["minHaplotypeSampleFraction"]
)
# Gather per haplotype data across samples
hap_counts = raw_results.groupby(
"haplotype_ID"
)["barcode_count"].sum().reset_index().rename(
columns={"barcode_count": "Haplotype Barcodes"})
hap_sample_counts = raw_results.groupby("haplotype_ID")[
"sample_name"].apply(lambda a: len(set(a))).reset_index().rename(
columns={"sample_name": "Haplotype Samples"})
num_samples = float(raw_results["sample_name"].unique().size)
hap_sample_counts["Haplotype Sample Fraction"] = (
hap_sample_counts["Haplotype Samples"] / num_samples
)
hap_counts = hap_counts.merge(hap_sample_counts)
initial_hap_count = len(hap_counts)
hap_counts = hap_counts.loc[(hap_counts["Haplotype Samples"]
>= haplotype_min_sample_filter)
& (hap_counts["Haplotype Sample Fraction"]
>= haplotype_min_sample_fraction_filter)
& (hap_counts["Haplotype Barcodes"]
>= haplotype_min_barcode_filter)]
print(("Out of {} initial haplotypes, {} were filtered using {}, {}, and "
"{} as minimum total UMI count; number and fraction of samples "
" the haplotype was observed in, respectively.").format(
initial_hap_count, initial_hap_count - len(hap_counts),
haplotype_min_barcode_filter, haplotype_min_sample_filter,
haplotype_min_sample_fraction_filter))
hap_df = raw_results.loc[raw_results["haplotype_ID"].isin(
hap_counts["haplotype_ID"])].groupby(
["gene_name", "mip_name", "haplotype_ID"])[
"haplotype_sequence"].first().reset_index()
# fill in fake sequence quality scores for each haplotype. These scores
# will be used for mapping only and the real scores for each haplotype
    # for each sample will be added later. This step is probably unnecessary
# as the bwa mem algorithm does not seem to use the quality scores.
hap_df["quality"] = hap_df["haplotype_sequence"].apply(
lambda a: "H" * len(a))
haps = hap_df.set_index("haplotype_ID").to_dict(orient="index")
# BWA alignment
# create a fastq file for bwa input
with open(haplotypes_fq_file, "w") as outfile:
for h in haps:
outfile.write("@" + h + "\n")
outfile.write(haps[h]["haplotype_sequence"] + "\n" + "+" + "\n")
outfile.write(haps[h]["quality"] + "\n")
# run bwa
bwa(haplotypes_fq_file, haplotypes_sam_file, "sam", "", "", bwa_options,
species)
# process alignment output sam file
header = ["haplotype_ID", "FLAG", "CHROM", "POS", "MAPQ", "CIGAR", "RNEXT",
"PNEXT", "TLEN", "SEQ", "QUAL"]
sam_list = []
with open(haplotypes_sam_file) as infile:
for line in infile:
if not line.startswith("@"):
newline = line.strip().split()
samline = newline[:11]
for item in newline[11:]:
value = item.split(":")
if value[0] == "AS":
samline.append(int(value[-1]))
break
else:
samline.append(-5000)
sam_list.append(samline)
sam = pd.DataFrame(sam_list, columns=header + ["alignment_score"])
# find alignment with the highest alignment score. We will consider these
# the primary alignments and the source of the sequence.
sam["best_alignment"] = (sam["alignment_score"] == sam.groupby(
"haplotype_ID")["alignment_score"].transform("max"))
# add MIP column to alignment results
sam["MIP"] = sam["haplotype_ID"].apply(lambda a: a.split(".")[0])
# create call_info data frame for all used probes in the experiment
probe_sets_file = settings["mipSetsDictionary"]
probe_set_keys = settings["mipSetKey"]
used_probes = set()
for psk in probe_set_keys:
with open(probe_sets_file) as infile:
used_probes.update(json.load(infile)[psk])
with open(call_info_file) as infile:
call_info = json.load(infile)
call_df_list = []
for g in call_info:
for m in call_info[g]:
if m in used_probes:
mip_number = int(m.split("_")[-1][3:])
sub_number = int(m.split("_")[-2][3:])
for c in call_info[g][m]["copies"]:
call_dict = call_info[g][m]["copies"][c]
try:
call_dict.pop("genes")
except KeyError:
pass
try:
call_dict.pop("variants")
except KeyError:
pass
call_dict["gene"] = g
call_dict["MIP"] = m
call_dict["copy"] = c
call_dict["mip_number"] = mip_number
call_dict["sub_number"] = sub_number
call_df_list.append(pd.DataFrame(call_dict, index=[0]))
call_df = pd.concat(call_df_list, ignore_index=True, sort=True)
# combine alignment information with design information (call_info)
haplotype_maps = call_df.merge(
sam[["MIP", "haplotype_ID", "CHROM", "POS", "best_alignment",
"alignment_score"]])
haplotype_maps["POS"] = haplotype_maps["POS"].astype(int)
haplotype_maps = haplotype_maps.merge(
hap_df[["haplotype_ID", "haplotype_sequence"]])
# determine which haplotype/mapping combinations are for intended targets
# first, compare mapping coordinate to the MIP coordinate to see if
# a MIP copy matches with the alignment.
haplotype_maps["aligned_copy"] = (
(haplotype_maps["CHROM"] == haplotype_maps["chrom"])
& (abs(haplotype_maps["POS"] - haplotype_maps["capture_start"]) <= tol)
)
# aligned_copy means the alignment is on the intended MIP target
# this is not necessarily the best target, though. For a haplotype sequence
# to be matched to a MIP target, it also needs to be the best alignment.
haplotype_maps["mapped_copy"] = (haplotype_maps["aligned_copy"]
& haplotype_maps["best_alignment"])
# rename some fields to be compatible with previous code
haplotype_maps.rename(columns={"gene": "Gene", "copy": "Copy",
"chrom": "Chrom"}, inplace=True)
    # any haplotype that was not best mapped to at least one target
# will be considered an off target haplotype.
haplotype_maps["off_target"] = ~haplotype_maps.groupby(
"haplotype_ID")["mapped_copy"].transform("any")
off_target_haplotypes = haplotype_maps.loc[haplotype_maps["off_target"]]
# filter off targets and targets that do not align to haplotypes
haplotypes = haplotype_maps.loc[(~haplotype_maps["off_target"])
& haplotype_maps["aligned_copy"]]
# each MIP copy/haplotype_ID combination must have a single alignment
# if there are multiple, the best one will be chosen
def get_best_alignment(group):
return group.sort_values("alignment_score", ascending=False).iloc[0]
haplotypes = haplotypes.groupby(["MIP", "Copy", "haplotype_ID"],
as_index=False).apply(get_best_alignment)
haplotypes.index = (range(len(haplotypes)))
# filter to best mapping copy/haplotype pairs
mapped_haplotypes = haplotypes.loc[haplotypes["mapped_copy"]]
mapped_haplotypes["mapped_copy_number"] = mapped_haplotypes.groupby(
["haplotype_ID"])["haplotype_ID"].transform(len)
mapped_haplotypes.to_csv(os.path.join(
wdir, "mapped_haplotypes.csv"), index=False)
off_target_haplotypes.to_csv(os.path.join(
wdir, "offtarget_haplotypes.csv"), index=False)
haplotypes.to_csv(os.path.join(
wdir, "aligned_haplotypes.csv"), index=False)
haplotype_maps.to_csv(os.path.join(
wdir, "all_haplotypes.csv"), index=False)
num_hap = len(set(haplotype_maps["haplotype_ID"]))
num_off = len(set(off_target_haplotypes["haplotype_ID"]))
print(("{} of {} haplotypes were off-target, either not mapping to "
"the reference genome, or best mapping to a region which was "
"not targeted.").format(num_off, num_hap))
return
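# A hypothetical invocation of map_haplotypes; the settings file path is a
# placeholder and the settings keys used are the ones read above:
#
#     settings = get_analysis_settings("/opt/analysis/settings.txt")
#     map_haplotypes(settings)
#
# This writes mapped_haplotypes.csv, offtarget_haplotypes.csv,
# aligned_haplotypes.csv and all_haplotypes.csv into settings["workingDir"].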
def get_vcf_haplotypes(settings):
"""
    Backwards compatible alias for the map_haplotypes function.
    This is the old name for the map_haplotypes function. Some notebooks
    might still use the old name, so this simply runs map_haplotypes when
    called by the old name.
"""
map_haplotypes(settings)
def get_haplotype_counts(settings):
"""Get UMI and read counts for each on target haplotype for each sample.
MIPWrangler output has the UMI and read counts per haplotype but some of
those are off target and some are mapping to multiple loci by design.
The decision on whether a haplotype sequence is on or off target and where
it maps best or if it maps to multiple loci are made by the map_haplotypes
function. This function distributes the UMI and read counts in the
MIPWrangler output using the mapped haplotypes data for each sample.
If a haplotype sequence is uniquely mapping to a targeted locus, we
allocate all reads for that sample and haplotype sequence to that locus.
If it is mapping to multiple places, we determine the ratios of those
'paralogous copies' for that sample based on the average mapping around
each locus and allocate the reads for that sample and that haplotype
sequence proportionally to the mapped loci. If a haplotype sequence is
mapping best to an unintended locus, we remove those.
"""
wdir = settings["workingDir"]
##########################################################
##########################################################
# Process 1: use sample sheet to determine which data points from the
# mipster file should be used, print relevant statistics.
##########################################################
##########################################################
# process sample sheets
run_meta = pd.read_table(os.path.join(wdir, "samples.tsv"))
# create a unique sample ID for each sample using sample name,
# sample set and replicate fields from the sample list file.
run_meta["sample_name"] = (
run_meta["sample_name"].astype(str)
)
run_meta["Sample Name"] = run_meta["sample_name"]
run_meta["Sample ID"] = run_meta[
["sample_name", "sample_set", "replicate"]
].apply(lambda a: "-".join(map(str, a)), axis=1)
# Sample Set key is reserved for meta data
# but sometimes erroneously included in the
# sample sheet. It should be removed.
try:
run_meta.drop("Sample Set", inplace=True, axis=1)
except (ValueError, KeyError):
pass
# a change to the formatting of sample sheets uses library_prep
# instead of Library Prep, so the below line is for backwards compatibility
run_meta.rename(columns={"library_prep": "Library Prep"}, inplace=True)
# drop duplicate values originating from
# multiple sequencing runs of the same libraries
run_meta = run_meta.drop_duplicates()
run_meta = run_meta.groupby(
["Sample ID", "Library Prep"]
).first().reset_index()
run_meta.to_csv(os.path.join(wdir, "run_meta.csv"))
# get used sample ids
sample_ids = run_meta["Sample ID"].unique().tolist()
##########################################################
##########################################################
# Process 2: extract all observed variants from observed
# haplotypes and create a variation data frame that will
# be able to map haplotype IDs to variation.
##########################################################
##########################################################
# get the haplotype dataframe for all mapped haplotypes
mapped_haplotype_df = pd.read_csv(
os.path.join(wdir, "mapped_haplotypes.csv"))
##########################################################
##########################################################
# Process 3: load the MIPWrangler output which has
# per sample per haplotype information, such as
# haplotype sequence quality, barcode counts etc.
# Create a suitable dataframe that can be merged
# with variant data to get the same information for each
# variant (variant barcode count, variant quality, etc.)
##########################################################
##########################################################
# get the MIPWrangler Output
raw_results = pd.read_table(os.path.join(wdir, settings["mipsterFile"]))
# limit the results to the samples intended for this analysis
raw_results = raw_results.loc[
raw_results["sample_name"].isin(sample_ids)
]
# rename some columns for better visualization in tables
raw_results.rename(
columns={"sample_name": "Sample ID",
"mip_name": "MIP",
"gene_name": "Gene",
"barcode_count": "Barcode Count",
"read_count": "Read Count"},
inplace=True
)
# use only the data corresponding to mapped haplotypes
# filtering the off target haplotypes.
mapped_results = raw_results.merge(mapped_haplotype_df, how="inner")
# Try to estimate the distribution of data that is mapping
# to multiple places in the genome.
# This is done in 4 steps.
# 1) Get uniquely mapping haplotypes and barcode counts
unique_df = mapped_results.loc[mapped_results["mapped_copy_number"] == 1]
unique_table = pd.pivot_table(unique_df,
index="Sample ID",
columns=["Gene", "MIP", "Copy", "Chrom"],
values=["Barcode Count"],
aggfunc=np.sum)
# 2) Estimate the copy number of each paralog gene
# for each sample from the uniquely mapping data
# Two values from the settings are used to determine the copy number
# in a given gene. Average copy count is the ploidy of the organism
# and the normalization percentile is what percentile is used for
# normalizing data. For example, for human genes ACC is 2 and
    # if the percentiles are given as 0.4, 0.6: we would take the 40th and
    # 60th percentiles of the barcode counts for each probe across the
    # samples and assume that the average of the 40th and 60th percentile
    # values represents the average copy count of 2. This value is then
    # calculated for each probe and each sample.
try:
average_copy_count = float(settings["averageCopyCount"])
norm_percentiles = list(map(float,
settings["normalizationPercentiles"]))
except KeyError:
average_copy_count = 2
norm_percentiles = [0.4, 0.6]
unique_df.loc[:, "Copy Average"] = average_copy_count
# Adjusted barcode count will represent the estimated barcode count
# for multimapping haplotypes. For example, if hap1 is mapping to 2
# places in the genome and its barcode count for a sample containing this
# haplotype is 100. If we determined the copy numbers of the two mapping
# regions to be 1 and 1, the adjusted barcode count for each region
# would be 50. We'll set this value for uniquely mapping haplotypes
# to the Barcode Count, as they are not multi mapping.
unique_df.loc[:, "Adjusted Barcode Count"] = unique_df["Barcode Count"]
unique_df.loc[:, "Adjusted Read Count"] = unique_df["Read Count"]
unique_table.fillna(0, inplace=True)
# calculate the copy counts using the get_copy_counts function.
# this function normalizes data for each probe across samples
# and estimates copy counts using the percentile values as mentioned.
copy_counts = get_copy_counts(unique_table,
average_copy_count,
norm_percentiles)
# 3) Estimate the copy number of each "Gene"
# from the average copy count of uniquely mapping
# data for all MIPs within the gene.
cc = copy_counts.groupby(level=["Gene", "Copy"], axis=1).sum()
gc = copy_counts.groupby(level=["Gene"], axis=1).sum()
ac = cc.div(gc, level="Gene")
# 4) Distribute multi mapping data proportional to
# Paralog's copy number determined from the
# uniquely mapping data
multi_df = mapped_results.loc[mapped_results["mapped_copy_number"] > 1]
if not multi_df.empty:
# get the average copy count for the gene the haplotype belongs to
mca = multi_df.apply(lambda r: get_copy_average(r, ac), axis=1)
multi_df.loc[mca.index, "Copy Average"] = mca
multi_df["copy_sum"] = multi_df.groupby(
["Sample ID", "haplotype_ID"])["Copy Average"].transform("sum")
multi_df["copy_len"] = multi_df.groupby(
["Sample ID", "haplotype_ID"])["Copy Average"].transform("size")
null_index = multi_df["copy_sum"] == 0
multi_df.loc[null_index, "Copy Average"] = (
average_copy_count / multi_df.loc[null_index, "copy_len"])
multi_df.loc[null_index, "copy_sum"] = average_copy_count
multi_df["Copy Average"].fillna(0, inplace=True)
multi_df["Adjusted Barcode Count"] = (multi_df["Barcode Count"]
* multi_df["Copy Average"]
/ multi_df["copy_sum"])
multi_df["Adjusted Read Count"] = (multi_df["Read Count"]
* multi_df["Copy Average"]
/ multi_df["copy_sum"])
# Combine unique and multimapping data
combined_df = pd.concat([unique_df, multi_df], ignore_index=True,
sort=True)
combined_df.rename(
columns={
"Barcode Count": "Raw Barcode Count",
"Adjusted Barcode Count": "Barcode Count",
"Read Count": "Raw Read Count",
"Adjusted Read Count": "Read Count"
},
inplace=True
)
# print total read and barcode counts
print(
(
"Total number of reads and barcodes were {0[0]} and {0[1]}."
" On target number of reads and barcodes were {1[0]} and {1[1]}."
).format(
raw_results[["Read Count", "Barcode Count"]].sum(),
combined_df[["Read Count", "Barcode Count"]].sum().astype(int)
)
)
combined_df.to_csv(os.path.join(wdir, "haplotype_counts.csv"), index=False)
    # So far the count data only includes MIPs that have at least one read
# in at least one sample. We would like to include MIPs with no reads
# as well. So we'll create a dataframe that has all the intended MIPs
# and merge with the count data.
# create call_info data frame for all used probes in the experiment
call_info_file = settings["callInfoDictionary"]
probe_sets_file = settings["mipSetsDictionary"]
probe_set_keys = settings["mipSetKey"]
used_probes = set()
for psk in probe_set_keys:
with open(probe_sets_file) as infile:
used_probes.update(json.load(infile)[psk])
with open(call_info_file) as infile:
call_info = json.load(infile)
call_df_list = []
for g in call_info:
for m in call_info[g]:
if m in used_probes:
for c in call_info[g][m]["copies"]:
call_dict = {"MIP": m, "Copy": c}
call_df_list.append(pd.DataFrame(call_dict, index=[0]))
call_df = pd.concat(call_df_list, ignore_index=True, sort=True)
# merge the count data with probe data. Fill missing values with 0.
combined_df = call_df.merge(combined_df, how="left").fillna(0)
# Create pivot table of combined barcode counts
# This is a per MIP per sample barcode count table
# of the samples with sequencing data
barcode_counts = pd.pivot_table(combined_df,
index="Sample ID",
columns=["MIP",
"Copy"],
values=["Barcode Count"],
aggfunc=np.sum)
    # The sample name for probes without data would be NA, which was
    # replaced with 0 above; remove that row if it exists
try:
barcode_counts.drop(0, inplace=True)
except KeyError:
pass
print("There are {} samples with sequence data".format(
barcode_counts.shape[0]
))
    # After the pivot table is created, the column names have an extra
    # level named "Barcode Count". Remove that from the column names.
bc_cols = barcode_counts.columns
bc_cols = [bc[1:] for bc in bc_cols]
    # barcode count data is only available for samples with data,
    # so if a sample has not produced any data it will be missing.
    # These samples should be added with 0 values for each probe.
all_barcode_counts = pd.merge(
run_meta[["Sample ID", "replicate"]].set_index("Sample ID"),
barcode_counts, left_index=True, right_index=True, how="left")
all_barcode_counts.drop("replicate", axis=1, inplace=True)
# fix column names
all_barcode_counts.columns = pd.MultiIndex.from_tuples(
bc_cols, names=["MIP", "Copy"]
)
all_barcode_counts.fillna(0, inplace=True)
print("There are {} total samples.".format(all_barcode_counts.shape[0]))
all_barcode_counts.to_csv(os.path.join(wdir, "barcode_counts.csv"))
# Create an overview statistics file for samples including
# total read count, barcode count, and how well they cover each MIP.
sample_counts = combined_df.groupby("Sample ID")[["Read Count",
"Barcode Count"]].sum()
# Find samples without any data and print the number
no_data = run_meta.loc[
~run_meta["Sample ID"].isin(sample_counts.index)
]
print(("{} out of {} samples had no data and they will be excluded from "
"the variant calls.").format(no_data.shape[0], run_meta.shape[0]))
# add samples with no data
sample_counts = pd.merge(
run_meta[["Sample ID", "replicate"]].set_index("Sample ID"),
sample_counts, left_index=True, right_index=True, how="left")
sample_counts.drop("replicate", axis=1, inplace=True)
target_cov = pd.concat(
[(all_barcode_counts >= 1).sum(axis=1),
(all_barcode_counts >= 5).sum(axis=1),
(all_barcode_counts >= 10).sum(axis=1)],
axis=1,
).rename(
columns={
0: "targets_with_1_barcodes",
1: "targets_with_5_barcodes",
2: "targets_with_10_barcodes"
}
)
sample_counts = sample_counts.merge(target_cov,
how="outer",
left_index=True,
right_index=True).fillna(0)
target_cov_file = os.path.join(wdir, "sample_summary.csv")
sample_counts.to_csv(target_cov_file)
return
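# Worked example of the multi-mapping adjustment above (numbers are made
# up): a haplotype with a Barcode Count of 100 in a sample maps to two
# copies whose estimated Copy Averages are 1.5 and 0.5 (copy_sum = 2.0).
# The Adjusted Barcode Counts become 100 * 1.5 / 2.0 = 75 and
# 100 * 0.5 / 2.0 = 25. The function writes run_meta.csv,
# haplotype_counts.csv, barcode_counts.csv and sample_summary.csv into the
# working directory.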
def freebayes_call(bam_dir="/opt/analysis/padded_bams",
fastq_dir="/opt/analysis/padded_fastqs",
options=[],
vcf_file="/opt/analysis/variants.vcf.gz",
targets_file=None, make_fastq=True,
align=True, settings=None, settings_file=None,
bam_files=None, bam_list=None, verbose=True,
fastq_padding=20, min_base_quality=1,
errors_file="/opt/analysis/freebayes_errors.txt",
warnings_file="/opt/analysis/freebayes_warnings.txt",
merge_distance=1000, contig_padding=500):
"""Call variants for MIP data using freebayes.
A mapped haplotype file must be present in the working directory. This
is generated during haplotype processing. Per sample fastqs and bams
will be created if align=True. Fastqs are generated with a default 20 bp
padding on each side of the haplotype. This assumes that there were no
errors where the MIP arms bind to the DNA. It may cause some false negative
calls where there was imperfect binding, but it is crucial for determining
variants close to the MIP arms.
Parameters
----------
bam_dir: str/path, /opt/analysis/padded_bams
path to the directory where per sample bam files are or where they
will be created if align=True.
fastq_dir: str/path, /opt/analysis/padded_fastqs
path to the directory where per sample fastq files are or where they
will be created if align=True.
vcf_file: str/path, /opt/analysis/variants.vcf.gz
Output vcf file path.
options: list, []
options to pass to freebayes directly, such as --min-coverage
the list must have each parameter and value as separate items.
For example, ["--min-alternate-count", "2"] and not
["--min-alternate-count 2"]
align: bool, True
Set to false if fastq and bam files have already been created.
settings: dict, None
Analysis settings dictionary. Either this or settings_file must
be provided.
settings_file: str/path, None
Path to the analysis settings file. Either this or the settings dict
must be provided.
targets_file: str/path, None
Path to targets file to force calls on certain locations even if
those variants do not satisfy filter criteria. It must be a tab
separated text file with minimum columns CHROM, POS, REF, ALT.
bam_files: list, None
list of bam files within the bam_dir to pass to freebayes. If None (
default), all bam files in the bam_dir will be used.
verbose: bool, True
if set to True, print errors and warnings in addition to saving to
errors and warnings files.
errors_file: str/path, /opt/analysis/freebayes_errors.txt
file to save freebayes errors.
    warnings_file: str/path, /opt/analysis/freebayes_warnings.txt
        file to save freebayes warnings.
    merge_distance: int, 1000
        When creating contigs from MIP target regions, merge targets closer
        to each other than this distance.
    contig_padding: int, 500
        Add this much padding to the contigs when calling freebayes.
"""
# get the analysis settings
# check if both settings and the settings file are None:
if (settings is None) and (settings_file is None):
print("settings or settings file must be provided for freebayes_call.")
return
else:
if settings is None:
settings = get_analysis_settings(settings_file)
else:
settings = copy.deepcopy(settings)
# get the working directory from settings
wdir = settings["workingDir"]
# load mapped haplotypes file. This file has the genomic locations
# of the haplotypes in mip data
mapped_haplotypes_file = os.path.join(wdir, "mapped_haplotypes.csv")
# get the mip data file location. This file has per sample haplotype
# information including counts.
mipster_file = os.path.join(wdir, settings["mipsterFile"])
if make_fastq:
# create fastq files from MIP data. One read per UMI will be created.
generate_mapped_fastqs(fastq_dir, mipster_file,
mapped_haplotypes_file, settings["species"],
pro=int(settings["processorNumber"]),
pad_size=fastq_padding)
if align:
# map per sample fastqs to the reference genome, creating bam files.
# bam files will have sample groups added, which is required for
# calling variants across the samples.
bwa_multi([], "bam", fastq_dir, bam_dir,
settings["bwaOptions"], settings["species"],
int(settings["processorNumber"]),
int(settings["processorNumber"]))
# divide data into contigs to make parallelization more efficient
# we'll create contigs from overlapping MIPs.
# load the call info dictionary which contains per MIP information
call_file = settings["callInfoDictionary"]
with open(call_file) as infile:
call_dict = json.load(infile)
# create a dataframe that has the genomic coordinates of each MIP
call_df = []
for g in call_dict:
for m in call_dict[g]:
for c in call_dict[g][m]["copies"]:
cdict = call_dict[g][m]["copies"][c]
call_df.append([cdict["chrom"], cdict["capture_start"],
cdict["capture_end"]])
call_df = pd.DataFrame(call_df, columns=["chrom", "capture_start",
"capture_end"])
    # create a function that generates contigs by merging MIP capture
    # regions that are within "merge_distance" of each other.
def get_contig(g):
intervals = zip(g["capture_start"], g["capture_end"])
return pd.DataFrame(merge_overlap(
[list(i) for i in intervals], spacer=merge_distance))
# create contigs per chromosome
contigs = call_df.groupby("chrom").apply(get_contig)
contigs = contigs.reset_index()
contigs.rename(columns={"level_1": "contig", 0: "contig_capture_start",
1: "contig_capture_end"}, inplace=True)
contigs["contig_name"] = contigs["chrom"] + "_" + contigs["contig"].astype(
str)
# we'll call freebayes on each contig by providing a region string in the
# form chrx:begin-end. Create those strings for each contig with some
# padding. It is important to check that we don't end up with a start
# position of <1 or end position longer than chom length.
# Begin by adding chromosome length to contig info.
# get reference chromosome lengths
genome_file = get_file_locations()[settings["species"]]["fasta_genome"]
reference_lengths = {}
genome_sam = pysam.FastaFile(genome_file)
for r in genome_sam.references:
reference_lengths[r] = genome_sam.get_reference_length(r)
contigs["chromosome_length"] = contigs["chrom"].map(reference_lengths)
contigs["region_start"] = contigs["contig_capture_start"] - contig_padding
contigs.loc[contigs["region_start"] < 1, "region_start"] = 1
contigs["region_end"] = contigs["contig_capture_end"] + contig_padding
contigs["region_end"] = contigs[
["region_end", "chromosome_length"]].min(axis=1).values
contigs["region"] = contigs["chrom"] + ":" + (
contigs["region_start"]).astype(str) + "-" + (
contigs["region_end"]).astype(str)
# we'll force calls on targeted variants if so specified
if targets_file is not None:
# each contig must include at least one of the targets, otherwise
# freebayes throws an error. So we'll load the targets and add the
# targets option to only those contigs that contain targets
targets = pd.read_table(targets_file)
# merge targets and contigs dataframes to determine which contigs
# contain targets. chrom will be used as the common column name
targets["chrom"] = targets["CHROM"]
targets = targets.merge(contigs)
# remove rows where chrom is shared but target position is outside
        # of contig boundaries.
targets = targets.loc[
(targets["contig_capture_start"] <= targets["POS"])
& (targets["POS"] <= targets["contig_capture_end"])]
targets["contains_targets"] = True
# merge only two columns of the targets df to contigs so that
# the only shared column is contig_name. More than one target can
# be in a single contig, so we need to drop duplicates from targets.
contigs = contigs.merge(targets[
["contig_name", "contains_targets"]].drop_duplicates(), how="left")
contigs["contains_targets"].fillna(False, inplace=True)
# create a targets.vcf file for freebayes
targets_vcf = os.path.join(wdir, "targets.vcf")
with open(targets_vcf, "w") as outfile:
outfile.write('##fileformat=VCFv4.2\n')
outfile.write(
'##FILTER=<ID=PASS,Description="All filters passed">\n')
outfile.write('##INFO=<ID=TR,Number=.,Type=String,Description'
'="Targeted variant.">\n')
vcf_fields = ["ID", "QUAL", "FILTER"]
for vf in vcf_fields:
targets[vf] = "."
targets["INFO"] = "TR"
vcf_fields = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL",
"FILTER", "INFO"]
targets = targets.rename(columns={"CHROM": "#CHROM"})[vcf_fields]
targets.sort_values(["#CHROM", "POS"]).to_csv(
outfile, sep="\t", index=False)
# bgzip and index
res = subprocess.run(["bgzip", "-f", targets_vcf],
stderr=subprocess.PIPE)
if res.returncode != 0:
print("Error in compressing targets.vcf file", res.stderr)
targets_vcf = targets_vcf + ".gz"
res = subprocess.run(["tabix", "-s", "1", "-b", "2", "-e", "2", "-f",
targets_vcf], stderr=subprocess.PIPE)
if res.returncode != 0:
print("Error in indexing targets.vcf.gz file ", res.stderr)
else:
contigs["contains_targets"] = False
# create a contig dictionary from the contigs dataframe
# this dict will be passed to the worker function for parallelization
chrom_dict = {}
gb = contigs.groupby("chrom")
for g in gb.groups.keys():
gr = gb.get_group(g)
chrom_dict[g] = gr[["contig_name", "region",
"contains_targets"]].set_index(
"contig_name").to_dict(orient="index")
# populate the contigs dictionary for freebayes parameters
# start with options to be added for each contig
# get fasta genome location
genome_fasta = get_file_locations()[settings["species"]]["fasta_genome"]
# specify fasta genome file
options.extend(["-f", genome_fasta])
    # add bam files if they are specified. Nothing should be added to
    # options after the bam files.
if bam_files is not None:
options.extend(bam_files)
if bam_list is not None:
options.extend(["-L", bam_list])
# create a file list in the bam_dir that has full path to all bam files
# if all bam files are to be used
else:
bam_list = os.path.join(bam_dir, "bamlist.txt")
with open(bam_list, "w") as outfile:
for f in os.scandir(bam_dir):
if os.path.splitext(f.name)[1] == ".bam":
outfile.write(f.path + "\n")
options.extend(["-L", bam_list])
# add minimum base quality parameter to options if not already provided
if ("--min-base-quality" not in options) and ("-q" not in options):
options.extend(["-q", str(min_base_quality)])
    # create a list for keeping all contig vcf file paths to concatenate
# them at the end.
contig_vcf_paths = []
# create a similar list for zipped vcf files
contig_vcf_gz_paths = []
# create a list of per contig dictionary to feed to multiprocessing
# function apply_async
contig_dict_list = []
# create the contigs vcf directory
cvcfs_dir = os.path.join(wdir, "contig_vcfs")
if not os.path.exists(cvcfs_dir):
os.makedirs(cvcfs_dir)
# update contig_dict with contig specific options
for chrom in chrom_dict:
for contig_name in chrom_dict[chrom]:
contig_dict = chrom_dict[chrom][contig_name]
################################################################
# create contig specific options and
# add contigs region string (chrx:begin-end)
region = contig_dict["region"]
contig_options = ["-r", region]
# add contigs vcf file name
contig_vcf = os.path.join(wdir, "contig_vcfs",
contig_name + ".vcf")
contig_dict["vcf_path"] = contig_vcf
# add output file to the freebayes options
contig_options.extend(["-v", contig_vcf])
# add contig vcf path to the list
contig_vcf_paths.append(contig_vcf)
# add contigs vcf.gz file name
contig_vcf_gz = os.path.join(wdir, "contig_vcfs",
contig_name + ".vcf.gz")
contig_vcf_gz_paths.append(contig_vcf_gz)
contig_dict["vcf_gz_path"] = contig_vcf_gz
# if contig includes targets, we'll force calls on those
if contig_dict["contains_targets"]:
contig_options.extend(["-@", targets_vcf])
# we'll add the contig specific options to the beginning of
# the options list in case bam files were added to the options
# and they must stay at the end because they are positional args.
contig_dict["options"] = contig_options + options
# add the contig dict to contig dict list
contig_dict_list.append(contig_dict)
# create a processor pool for parallel processing
pool = Pool(int(settings["processorNumber"]))
# create a results container for the return values from the worker function
results = []
errors = []
# run the freebayes worker program in parallel
pool.map_async(freebayes_worker, contig_dict_list, callback=results.extend,
error_callback=errors.extend)
# join and close the processor pool.
pool.close()
pool.join()
# compare the length of the results object and the number of contigs
# print an error message if they are not the same
if len(contig_dict_list) != (len(results) + len(errors)):
print(("Number of contigs, {}, is not the same as number of results "
"from the variant caller, {}, plus number of errors, {}. "
"This means some calls have failed silently. "
"Results and errors should be inspected.").format(
len(contig_dict_list), len(results), len(errors)))
# check each contig's variant call results for errors and warnings
# open files to save errors and warnings
with open(errors_file, "w") as ef, open(warnings_file, "wb") as wf:
        # keep a count of warnings and errors
error_count = 0
warning_count = 0
for res in results:
for r in res:
try:
r.check_returncode()
except subprocess.CalledProcessError as e:
error_count += 1
ef.write(str(e) + "\n")
if verbose:
print("Error in freebayes calls: ", e)
# print if any warnings were issued
if len(r.stderr) > 0:
warning_count += 1
wf.write(r.stderr + b"\n")
if verbose:
print("Warning in freebayes calls: ", r.stderr)
    # if errors are not printed but present, print a message to indicate
    # the presence of errors/warnings
if not verbose:
if error_count > 0:
print(("Errors were encountered in freebayes calls."
" Please inspect {} for errors.").format(errors_file))
if warning_count > 0:
print(("There were warnings in freebayes calls."
" Please inspect {} for warnings.").format(
warnings_file))
if len(errors) > 0:
print(("There were {} calls that failed").format(len(errors)))
    # concatenate contig vcfs. The number of contigs may be high, so we'll
    # write the vcf paths to a file and bcftools will read from that file
cvcf_paths_file = os.path.join(wdir, "contig_vcfs", "vcf_file_list.txt")
with open(cvcf_paths_file, "w") as outfile:
outfile.write("\n".join(contig_vcf_gz_paths) + "\n")
subprocess.run(["bcftools", "concat", "-f", cvcf_paths_file, "-Oz",
"-o", vcf_file], check=True)
subprocess.run(["bcftools", "index", "-f", vcf_file], check=True)
# fix vcf header if --gvcf option has been used
if "--gvcf" in options:
temp_vcf_path = os.path.join(wdir, "temp.vcf.gz")
vcf_reheader(os.path.basename(vcf_file), temp_vcf_path, wdir=wdir)
old_vcf_path = os.path.join(wdir, "unfixed.vcf.gz")
subprocess.run(["mv", vcf_file, old_vcf_path])
subprocess.run(["mv", temp_vcf_path, vcf_file])
subprocess.run(["bcftools", "index", "-f", vcf_file], check=True)
return (contig_dict_list, results, errors)
def freebayes_worker(contig_dict):
"""Run freebayes program with the specified options.
Run freebayes program with the specified options and return a
subprocess.CompletedProcess object.
"""
options = contig_dict["options"]
command = ["freebayes"]
command.extend(options)
# run freebayes command piping the output
fres = subprocess.run(command, stderr=subprocess.PIPE)
    # check the return code of the freebayes run. If successful, continue.
if fres.returncode == 0:
# bgzip the vcf output, using the freebayes output as bgzip input
vcf_path = contig_dict["vcf_path"]
gres = subprocess.run(["bgzip", "-f", vcf_path],
stderr=subprocess.PIPE)
        # make sure the bgzip process completed successfully
if gres.returncode == 0:
# index the vcf.gz file
vcf_gz_path = contig_dict["vcf_gz_path"]
ires = subprocess.run(["bcftools", "index", "-f", vcf_gz_path],
stderr=subprocess.PIPE)
# return the CompletedProcess objects
return (fres, gres, ires)
else:
return (fres, gres)
# if freebayes call failed, return the completed process object
# instead of attempting to zip the vcf file which does not exist if
# freebayes failed.
else:
return (fres, )
def vcf_reheader(vcf_file, fixed_vcf_file, wdir="/opt/analysis/"):
"""Fix vcf header QA/QR fields.
When --gvcf option is used in freebayes variant calling pipeline,
the header of the vcf file comes out incorrect for QA/QR fields number
type, Integer instead of Float. This function fixes those lines from
the header and creates a new vcf file with the correct header.
"""
# get the current header
vcf_path = os.path.join(wdir, vcf_file)
header = subprocess.Popen(["bcftools", "view", "-h", vcf_path],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
com = header.communicate()
if header.returncode != 0:
print("Failed to extract vcf header. Header will not be fixed.")
return
    # convert the header byte string to text and create a list of lines
head = com[0].decode("utf-8").split("\n")
# create a new header with fixed fields
new_head = []
for line in head:
if ("ID=QA" in line) or ("ID=QR" in line):
new_head.append(line.replace("Type=Integer", "Type=Float"))
else:
new_head.append(line)
new_header_path = os.path.join(wdir, "new_vcf_header.txt")
with open(new_header_path, "w") as outfile:
outfile.write("\n".join(new_head) + "\n")
fixed_vcf_path = os.path.join(wdir, fixed_vcf_file)
subprocess.run(["bcftools", "reheader", "-h", new_header_path,
vcf_path, "-o", fixed_vcf_path], check=True)
return
def gatk(options):
"""GATK wrapper function.
Run gatk program with the given options. Return the subprocess result.
"""
return subprocess.run(["gatk", *options], stderr=subprocess.PIPE)
def gatk_file_prep(bam_dir="/opt/analysis/padded_bams",
fastq_dir="/opt/analysis/padded_fastqs",
targets_file=None,
settings=None, settings_file=None,
errors_file="/opt/analysis/gatk_file_prep_output.txt"):
"""Prepare files for calling variants for MIP data using gatk.
A mapped haplotype file must be present in the working directory. This
is generated during haplotype processing. Per sample fastqs and bams
will be created. Fastqs are generated with a default 20 bp
padding on each side of the haplotype. This assumes that there were no
errors where the MIP arms bind to the DNA. It may cause some false negative
calls where there was imperfect binding, but it is crucial for determining
variants close to the MIP arms.
Parameters
----------
bam_dir: str/path, /opt/analysis/padded_bams
path to the directory where per sample bam files are or where they
will be created if align=True.
fastq_dir: str/path, /opt/analysis/padded_fastqs
path to the directory where per sample fastq files are or where they
will be created if align=True.
settings: dict, None
Analysis settings dictionary. Either this or settings_file must
be provided.
settings_file: str/path, None
Path to the analysis settings file. Either this or the settings dict
must be provided.
targets_file: str/path, None
Path to targets file to force calls on certain locations even if
those variants do not satisfy filter criteria. It must be a tab
separated text file with minimum columns CHROM, POS, REF, ALT.
    errors_file: str/path, /opt/analysis/gatk_file_prep_output.txt
        file to save gatk file preparation errors.
"""
# get the analysis settings
# check if both settings and the settings file are None:
if (settings is None) and (settings_file is None):
print("settings or settings file must be provided for freebayes_call.")
return
else:
if settings is None:
settings = get_analysis_settings(settings_file)
else:
settings = copy.deepcopy(settings)
# get the working directory from settings
wdir = settings["workingDir"]
# load mapped haplotypes file. This file has the genomic locations
# of the haplotypes in mip data
mapped_haplotypes_file = os.path.join(wdir, "mapped_haplotypes.csv")
# get the mip data file location. This file has per sample haplotype
# information including counts.
mipster_file = os.path.join(wdir, settings["mipsterFile"])
# create fastq files from MIP data. One read per UMI will be created.
generate_mapped_fastqs(fastq_dir, mipster_file,
mapped_haplotypes_file, settings["species"],
pro=int(settings["processorNumber"]))
# if there is a targets file provided, we'll create a hypothetical
# sample that has all of the targeted variants. This way, a variant site
# for each target will be created in the final vcf file even if a
# variant was not observed in the data.
if targets_file is not None:
        # load the targets as a dataframe, converting field names to
        # the field names used in a haplotypes file.
targets = pd.read_table(targets_file).rename(
columns={"CHROM": "Chrom", "POS": "capture_start",
"ALT": "haplotype_sequence",
"mutation_name": "haplotype_ID"})
# fill in orientation and copy number information for all targets.
targets["orientation"] = "forward"
targets["mapped_copy_number"] = 1
targets["capture_end"] = (targets["capture_start"]
+ targets["REF"].apply(len) - 1)
# create a haplotype file for the targeted mutations
haplotype_fields = ['capture_end', 'capture_start', 'Chrom',
'orientation', 'haplotype_ID',
'haplotype_sequence', 'mapped_copy_number']
mutant_haplotypes = "/opt/analysis/mutant_haplotypes.csv"
targets[haplotype_fields].to_csv(mutant_haplotypes, index=False)
# create a hypothetical sample that has all mutations and a
# corresponding mip data file that shows a UMI count of 20
# for each observation
targets["sample_name"] = "control_mutant"
targets["sequence_quality"] = targets["haplotype_sequence"].apply(
lambda a: "".join(["H" for i in range(len(a))]))
targets["barcode_count"] = 20
data_fields = ["sample_name", 'haplotype_ID', "haplotype_sequence",
'sequence_quality', 'barcode_count']
mutant_data_file = "/opt/analysis/mutant_data.tsv"
targets[data_fields].to_csv(mutant_data_file, index=False, sep="\t")
# create a fastq file for the "control_mutant" sample
padding = 100
generate_mapped_fastqs(fastq_dir, mutant_data_file,
mutant_haplotypes, settings["species"],
pro=int(settings["processorNumber"]),
pad_size=padding)
# map per sample fastqs to the reference genome, creating bam files.
# bam files will have sample groups added, which is required for
# calling variants across the samples.
bwa_multi([], "bam", fastq_dir, bam_dir,
settings["bwaOptions"], settings["species"],
int(settings["processorNumber"]),
int(settings["processorNumber"]))
# create an intervals file to be used in gatk call
intervals_bed = "/opt/analysis/intervals.bed"
call_file = settings["callInfoDictionary"]
with open(call_file) as infile:
call_dict = json.load(infile)
# create a dataframe that has the genomic coordinates of each MIP
probe_info = []
for g in call_dict:
for m in call_dict[g]:
for c in call_dict[g][m]["copies"]:
cdict = call_dict[g][m]["copies"][c]
probe_info.append([cdict["chrom"], cdict["capture_start"],
cdict["capture_end"]])
probe_info = pd.DataFrame(probe_info, columns=["chrom", "capture_start",
"capture_end"])
probe_info["bed_start"] = probe_info["capture_start"] - 200
probe_info["bed_end"] = probe_info["capture_end"] + 200
probe_info[["chrom", "bed_start", "bed_end"]].to_csv(
        intervals_bed, index=False, header=False, sep="\t")
intervals_list = "/opt/analysis/intervals.list"
genome_dict = get_file_locations()[settings["species"]]["genome_dict"]
interval_call = gatk(["BedToIntervalList", "-I", intervals_bed,
"-O", intervals_list, "-SD", genome_dict])
# check the return code and if not 0 print warning
if interval_call.returncode != 0:
print(("An error ocurred when creating the intervals list. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(interval_call.stderr)
def gatk_haplotype_caller(
options, bam_dir, settings,
errors_file="/opt/analysis/gatk_haplotype_caller_output.txt"):
genome_fasta = get_file_locations()[settings["species"]]["fasta_genome"]
intervals_list = "/opt/analysis/intervals.list"
haplotype_caller_opts = ["HaplotypeCaller", "-R", genome_fasta,
"--native-pair-hmm-threads", "1",
"-L", intervals_list] + options
# scan the bam directory and get file paths. Assign an output name
# for each file (gvcf output)
bam_files = []
for f in os.scandir(bam_dir):
if os.path.splitext(f.name)[1] == ".bam":
base_name = os.path.splitext(f.name)[0]
gvcf = os.path.join(bam_dir, base_name + ".g.vcf.gz")
bam_files.append([f.path, gvcf])
pool = NoDaemonProcessPool(int(settings["processorNumber"]))
results = []
errors = []
for bam in bam_files:
io_options = ["-I", bam[0], "-O", bam[1]]
pool.apply_async(gatk, (haplotype_caller_opts + io_options, ),
callback=results.append, error_callback=errors.append)
pool.close()
pool.join()
if len(errors) > 0:
print(("An error ocurred during haplotype calling . "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
for e in errors:
                outfile.write(str(e).encode("utf-8") + b"\n")
for r in results:
if r.returncode != 0:
print(("An error ocurred when creating the intervals list. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(r.stderr)
return
def genotype_gvcfs(settings, bam_dir, options, gdb, vcf_file,
sample_map=None, keep_control_mutant=False,
errors_file="/opt/analysis/gatk_genotype_gvcfs_output.txt"):
if sample_map is None:
# scan the bam directory and get file paths. Assign an output name
# for each file (gvcf output)
bam_files = []
for f in os.scandir(bam_dir):
if os.path.splitext(f.name)[1] == ".bam":
base_name = os.path.splitext(f.name)[0]
gvcf = os.path.join(bam_dir, base_name + ".g.vcf.gz")
bam_files.append([f.path, gvcf])
sample_map = os.path.join(settings["workingDir"], "sample_map.txt")
with open(sample_map, "w") as outfile:
for f in bam_files:
sample_name = ".".join(os.path.basename(f[0]).split(".")[:-2])
outfile.write(sample_name + "\t" + f[1] + "\n")
intervals_list = "/opt/analysis/intervals.list"
gdb_path = os.path.join("/opt/analysis/", gdb)
gdb_import = ["--java-options", "-Xmx32G", "GenomicsDBImport",
"--genomicsdb-workspace-path", gdb_path,
"--sample-name-map", sample_map,
"-L", intervals_list,
"--max-num-intervals-to-import-in-parallel",
settings["processorNumber"]]
gdb_result = gatk(gdb_import)
if gdb_result.returncode != 0:
print(("An error ocurred when during genomics DB import. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(gdb_result.stderr)
# genotype gvcfs
genome_fasta = get_file_locations()[settings["species"]][
"fasta_genome"]
gdb = "gendb://" + gdb
if keep_control_mutant:
temp_vcf_file = vcf_file
else:
temp_vcf_file = "/opt/analysis/temp.vcf.gz"
genotype_gvcfs = ["GenotypeGVCFs", "-R", genome_fasta,
"-V", gdb, "-O", temp_vcf_file, "-L", intervals_list]
genotypes = gatk(genotype_gvcfs + options)
if genotypes.returncode != 0:
print(("An error ocurred during genotyping GVCFs. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(genotypes.stderr)
# remove control mutant sample if requested
if not keep_control_mutant:
res = subprocess.run(["bcftools", "view", "-s^control_mutant",
"-Oz", "-o", vcf_file, temp_vcf_file,
"--force-samples"],
stderr=subprocess.PIPE)
if res.returncode != 0:
print(("An error ocurred while removing control mutant. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(res.stderr)
# index the final vcf file
res = subprocess.run(["bcftools", "index", "-f", vcf_file],
stderr=subprocess.PIPE)
if res.returncode != 0:
print(("An error ocurred while indexing the final vcf file. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(res.stderr)
def vcf_to_tables_fb(vcf_file, settings=None, settings_file=None,
annotate=True, geneid_to_genename=None,
target_aa_annotation=None, aggregate_aminoacids=False,
target_nt_annotation=None, aggregate_nucleotides=False,
decompose_options=[], annotated_vcf=False,
aggregate_none=False, min_site_qual=-1,
min_target_site_qual=-1, min_genotype_qual=-1,
min_alt_qual=-1, min_ref_qual=-1, min_mean_alt_qual=-1,
min_mean_ref_qual=-1, output_prefix=""):
"""Create various tables from a vcf file.
Create various tables from a vcf file generated by the freebayes
program. There are 3 different types of count output for each variant:
variant count, reference count and coverage. The vcf file will be split
into biallelic variants. Table versions of the input vcf will be created
but the info fields will be limited to the mandatory vcf fields and some
    annotation data if available.
In addition to the original vcf table, aa change tables can be generated.
These will be generated by filtering the vcf to missense variants only,
decomposing block substitutions (haplotypes) and combining the counts for
the same aminoacid changes. This operation is specifically intended for
    generating data for targeted missense mutations and reports only those.
    All other variants, including complex variants that contain targeted
    variants, will not be reported. Finally, one specific mutation (dhps-437)
    will have
reference counts instead of variant counts if present. This is because this
drug resistance variant is encoded by the 3d7 reference sequence.
Parameters
----------
settings: dict, None
Analysis settings dictionary. Either this or settings_file must
be provided.
settings_file: str/path, None
Path to the analysis settings file. Either this or the settings dict
must be provided.
annotate: bool, True
Annotate variant file. This is required for protein level analysis.
vcf_file: str/path
Starting vcf file.
    geneid_to_genename: str/path, None.
        Path to a tab separated text file that maps gene ids to gene names.
Column names must be gene_id and gene_name. Gene IDs
will populate the Gene field if this file is not provided.
target_aa_annotation: str/path, None.
Path to a tab separated text file with targeted variant information to
annotate and label targeted amino acid changes.
It must have gene_name, aminoacid_change, and mutation_name columns.
Amino acid changes should be represented as refAAPosAltAA. refAA and
AltAA must be three letter amino acid codes.
This file is required for targeted protein variant labeling.
target_nt_annotation: str/path, None.
Path to a tab separated text file with targeted variant information to
annotate and label targeted nucleotide changes.
It must have CHROM, POS, REF, ALT, NAME columns.
This file is required for targeted nucleotide variant labeling.
aggregate_aminoacids: bool, False
whether counts for same amino acids should be aggregated. This involves
decomposing multi amino acid changes for missense variants. If amino
acid based targets will be annotated, based on a provided annotation
dictionary, aggregation step must be completed. Targeted mutations
that are part of complex events (indels, stop loss/gain etc.) will not
be labeled as targeted.
aggregate_nucleotides: bool, False
whether the counts for nucleotide changes should be aggregated. This
involves decomposing all variants to the smallest units possible,
breaking all haplotype data. The level of decomposition should be
specified with the decompose_options parameter.
aggregate_none: bool, False.
        Do not aggregate counts; save the original (annotated if requested)
        vcf file as 3 count tables. The three aggregation options are
        compatible with each other and can be used all at once.
decompose_options: list, []
if aggregate nucleotides option is selected, these options will be
passed to vt program. "-a" for decomposing variants containing indels,
for example. "-p" for keeping phase information. Any option to vt
decompose_blocksub would be valid. By default indels will not be
decomposed.
annotated_vcf: bool, False
is the provided vcf file annotated using snpEff. These annotations
will be used if no count aggregation is to be done and annotate option
is False.
min_site_qual: float, -1
Filter variants with QUAL values less than this value if the site is
not a targeted site. If targeted, the site will be kept regardless of
the qual value for the site. freebayes manual indicates that
simulations showed a value between 1-30 would be good. So a minimum
value of 1 here would clean up most junk sites.
min_target_site_qual: float, -1
If a variant site is targeted but the site qual is lower than this,
reset the alternate observation counts to 0. It may be best to leave
this at the default value since there is usually additional evidence
        that a targeted variant exists in a sample compared to a de novo
variant.
"""
# get the analysis settings
# check if both settings and the settings file are None:
if (settings is None) and (settings_file is None):
print("settings or settings file must be provided for freebayes_call.")
return
else:
if settings is None:
settings = get_analysis_settings(settings_file)
else:
settings = copy.deepcopy(settings)
# get the working directory from settings
wdir = settings["workingDir"]
# All postprocessing steps require biallelic variant representation.
# so we'll use bcftools to split multiallelics to their own lines.
genome_fasta = get_file_locations()[settings["species"]]["fasta_genome"]
vcf_path = os.path.join(wdir, vcf_file)
split_vcf_path = os.path.join(wdir, output_prefix + "split." + vcf_file)
subprocess.run(["bcftools", "norm", "-f", genome_fasta, "-m-both",
vcf_path, "-Oz", "-o", split_vcf_path], check=True,
stderr=subprocess.PIPE)
subprocess.run(["bcftools", "index", "-f", split_vcf_path], check=True,
stderr=subprocess.PIPE)
# Will protein level aggregation be performed on the variants?
# This will only be done for simple missense variants but it is important
# to annotate the vcf file before breaking down the haplotypes.
if annotate:
annotated_vcf_path = os.path.join(wdir, output_prefix + "split.ann."
+ vcf_file)
res = annotate_vcf_file(settings, split_vcf_path, annotated_vcf_path)
if res != 0:
print("Annotating the vcf file failed.")
return
else:
annotated_vcf_path = split_vcf_path
if aggregate_aminoacids:
if not (annotate or annotated_vcf):
print("annotate option must be set to true or an annotadet vcf "
"file must be provided and annotated_vcf option must be "
"set to true for amino acid level aggregation. \n"
"Exiting!")
return
# check if a target annotation dict is provided.
target_annotation_dict = {}
if target_aa_annotation is not None:
taa = pd.read_table(target_aa_annotation).set_index(
["gene_name", "aminoacid_change"]).to_dict(orient="index")
for k in taa.keys():
target_annotation_dict[k] = taa[k]["mutation_name"]
# check if a gene id to gene name file is provided
gene_ids = {}
if geneid_to_genename is not None:
gids = pd.read_table(geneid_to_genename).set_index("gene_id")
gids = gids.to_dict(orient="index")
for g in gids:
gene_ids[g] = gids[g]["gene_name"]
# load annotated vcf file
variants = allel.read_vcf(annotated_vcf_path, fields=["*"],
alt_number=1,
transformers=allel.ANNTransformer())
# allel import provides a variants dictionary with keys such as
# variants/AD, variants/POS for variant level information
# the values are arrays with each element corresponding to one variant.
# similarly, calldata/GT type keys hold the genotype level data.
#############################################################
# Freebayes vcfs have AO and RO counts for alt and ref allele depths
# but GATK has a combined AD depth. Create AO and RO from AD if
# needed
try:
variants["calldata/AO"]
except KeyError:
variants["calldata/RO"] = variants["calldata/AD"][:, :, 0]
variants["calldata/AO"] = variants["calldata/AD"][:, :, 1]
# find missense variant locations in the data. We are going to split
# multi amino acid changes for missense variants only for target
# annotation and count aggregation.
missense = ["missense_variant" == variant for variant
in variants["variants/ANN_Annotation"]]
        # specify fields of interest from the INFO fields
variant_fields = ["ANN_Gene_ID", "ANN_HGVS_p", "ANN_Annotation",
"QUAL"]
variant_fields = ["variants/" + v for v in variant_fields]
# specify fields of interest from individual level data
# that is basically the count data for tables. AO: alt allele count,
# RO ref count, DP: coverage.
call_data_fields = ['calldata/AO', 'calldata/RO', 'calldata/DP',
'calldata/GT', 'calldata/GQ', 'calldata/QA',
'calldata/QR']
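        # collapse the genotype array (variants x samples x ploidy) into a
        # per-sample allele dosage by summing over ploidy: 0 hom-ref, 1 het,
        # 2 hom-alt; missing calls (-1/-1) sum to -2.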
variants["calldata/GT"] = variants["calldata/GT"].sum(axis=2)
# zip variant level information together, so we have a single value
# for each variant
variant_data = list(zip(*[variants[v] for v in variant_fields]))
# so now we have a list of length equal to variant number.
# each item is a tuple such as ('PF3D7_0104300', 'Gln107Leu') or
# ('PF3D7_0104300', 'AspGluAsp144HisGlnTyr'). We'll split these
# compound SNVs later.
# get count data for missense variants
call_data = list(zip(*[variants[c] for c in call_data_fields]))
# first item of the above list is alt counts, then ref counts and
# coverage.
#############################
# split the compound mutations
split_variants = []
split_calls = []
for i in range(len(missense)):
mv = variant_data[i][:3]
# get the aa change such as AspGluAsp144HisGlnTyr
aa_change = mv[1]
# if no aa change, skip
if aa_change == "":
continue
try:
# if a mapping dict is present, add the gene name
# this would get Pfubp1 from PF3D7_0104300, for example
gene_name = gene_ids[mv[0]]
except KeyError:
gene_name = mv[0]
# get site quality, remove those not satisfying min_site_qual
# unless they are targeted mutations
site_qual = float(variant_data[i][3])
if missense[i]:
# get the position of the change (144 above)
aa_pos = int("".join([c for c in aa_change if c.isdigit()]))
# split the aa change to reference aminoacid sequence and
# alt amino acid sequence.
aa_split = aa_change.split(str(aa_pos))
reference = aa_split[0]
alternate = aa_split[1]
# aa changes are in 3 letter format. Loop through each aa and
# split to single aa changes.
for j in range(0, len(reference), 3):
new_pos = int(aa_pos + j/3)
                    # take the 3 letter codes of the reference and alternate
                    # amino acids at this position.
new_reference = reference[j:j+3]
new_alternate = alternate[j:j+3]
new_change = new_reference + str(new_pos) + new_alternate
try:
# if this variant is in the targets, annotate it so.
mut_name = target_annotation_dict[
(gene_name, new_change)]
targeted_mutation = "Yes"
# reset alt observation counts to 0 if quality is low
if site_qual < min_target_site_qual:
call_data[i][0][:] = 0
except KeyError:
# remove low quality non-target alleles as well as
# synonymous changes
if ((site_qual < min_site_qual)
or (new_reference == new_alternate)):
continue
mut_name = gene_name + "-" + new_change
targeted_mutation = "No"
                    # add the split variant information to the split
                    # variants list
split_variants.append(mv + (new_change, gene_name,
mut_name, targeted_mutation))
# add the individual level data to split calls list.
split_calls.append(call_data[i])
else:
try:
# if this variant is in the targets, annotate it as such.
mut_name = target_annotation_dict[
(gene_name, aa_change)]
targeted_mutation = "Yes"
if site_qual < min_target_site_qual:
call_data[i][0][:] = 0
except KeyError:
# remove low qual or synonymous changes
if ((site_qual < min_site_qual)
or (mv[2] == "synonymous_variant")):
continue
mut_name = gene_name + "-" + aa_change
targeted_mutation = "No"
# add compound variant data to split variant data
split_variants.append(mv + (aa_change, gene_name,
mut_name, targeted_mutation))
# add the individual level data to split calls list.
split_calls.append(call_data[i])
# get individual level data
genotype_quals = call_data[i][4]
ao_count = call_data[i][0]
alt_quals = call_data[i][5]
average_alt_quals = alt_quals / ao_count
ro_count = call_data[i][1]
ref_quals = call_data[i][6]
average_ref_quals = ref_quals / ro_count
gq_mask = genotype_quals < min_genotype_qual
qa_mask = alt_quals < min_alt_qual
qr_mask = ref_quals < min_ref_qual
av_qa_mask = average_alt_quals < min_mean_alt_qual
av_qr_mask = average_ref_quals < min_mean_ref_qual
# replace count data for individuals failing quality thresholds
# alt allele count AO
call_data[i][0][qa_mask] = 0
call_data[i][0][av_qa_mask] = 0
# ref allele count RO
call_data[i][1][qr_mask] = 0
call_data[i][1][av_qr_mask] = 0
# reset coverage for gq failure
call_data[i][2][gq_mask] = 0
# reset genotypes for gq failure
call_data[i][3][gq_mask] = -2
# create a multiindex for the variant df that we'll create next
index = pd.MultiIndex.from_tuples(
split_variants, names=["Gene ID", "Compound Change", "ExonicFunc",
"AA Change", "Gene", "Mutation Name",
"Targeted"])
# get alt counts
variant_counts = pd.DataFrame(np.array(split_calls)[:, 0],
columns=variants["samples"],
index=index).replace(-1, 0)
# get reference counts
reference_counts = pd.DataFrame(np.array(split_calls)[:, 1],
columns=variants["samples"],
index=index).replace(-1, 0)
# get coverage depth
coverage = pd.DataFrame(np.array(split_calls)[:, 2],
columns=variants["samples"],
index=index).replace(-1, 0)
# combine counts for same changes
grouping_keys = ["Gene ID", "Gene", "Mutation Name", "ExonicFunc",
"AA Change", "Targeted"]
        # (-1 NA values assigned by allel were replaced with 0 above)
        # sum alt counts
mutation_counts = variant_counts.groupby(grouping_keys).sum()
        # take the min of ref counts
mutation_refs = reference_counts.groupby(grouping_keys).min()
# take the max of coverage counts
mutation_coverage = coverage.groupby(grouping_keys).max()
# due to aggregating aa changes, ref counts can be overcounted even
# if the minimum ref count is taken for the aggregate. The reason for
# this is that each nucleotide variant's reference observation count
# may include the alternate alleles for another nucleotide variant
# that codes for the same aa change. So we'll set the ref counts
# to coverage - alt count where ref count exceeds this value
diff_count = mutation_coverage - mutation_counts
ref_difference = (mutation_refs > diff_count).sum()
# get the variant indices where ref count exceeds coverage - alt count
exceed_index = ref_difference.loc[ref_difference > 0].index
mutation_refs.loc[:, exceed_index] = diff_count.loc[:, exceed_index]
# get genotypes as called by the variant caller
gt_calls = pd.DataFrame((np.array(split_calls)[:, 3]),
columns=variants["samples"],
index=index)
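        # combine the summed genotypes of rows that code for the same aa
        # change: any het or mixed ref/alt evidence -> 1, all-ref -> 0,
        # all-alt -> 2, otherwise missing (-1).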
def combine_gt(g):
if 1 in g.values:
return 1
elif 0 in g.values:
if 2 in g.values:
return 1
else:
return 0
elif 2 in g.values:
return 2
else:
return -1
gt_calls = gt_calls.groupby(grouping_keys).agg(combine_gt)
# for one pf mutation alt count will be replaced with ref count
# because reference allele is drug resistant
dhps_key = ("<KEY>", "dhps", "dhps-Gly437Ala",
"missense_variant", "Gly437Ala", "Yes")
dhps_new_key = ("<KEY>", "dhps", "dhps-Ala437Gly",
"missense_variant", "Ala437Gly", "Yes")
try:
mutation_counts.loc[dhps_new_key, :] = mutation_refs.loc[
dhps_key, :]
mutation_refs.loc[dhps_new_key, :] = mutation_counts.loc[
dhps_key, :]
mutation_coverage.loc[dhps_new_key, :] = mutation_coverage.loc[
dhps_key, :]
gt_calls.loc[dhps_new_key, :] = gt_calls.loc[
dhps_key, :].replace({2: 0, 0: 2})
gt_calls.drop(dhps_key, inplace=True)
mutation_counts.drop(dhps_key, inplace=True)
mutation_refs.drop(dhps_key, inplace=True)
mutation_coverage.drop(dhps_key, inplace=True)
mutation_counts = mutation_counts.sort_index()
mutation_refs = mutation_refs.sort_index()
mutation_coverage = mutation_coverage.sort_index()
gt_calls = gt_calls.sort_index()
except KeyError:
pass
# save count tables
mutation_counts.T.to_csv(os.path.join(wdir, output_prefix
+ "alternate_AA_table.csv"))
mutation_refs.T.to_csv(os.path.join(wdir, output_prefix
+ "reference_AA_table.csv"))
mutation_coverage.T.to_csv(os.path.join(wdir, output_prefix
+ "coverage_AA_table.csv"))
gt_calls.T.to_csv(os.path.join(wdir, output_prefix
+ "genotypes_AA_table.csv"))
if aggregate_nucleotides:
# aggregating counts of nucleotides requires decomposing block
# substitutions, at a minimum. If desired, complex variants involving
# indels can be decomposed as well.
decomposed_vcf = os.path.join(wdir, output_prefix
+ "decomposed." + vcf_file)
# prepare vt decompose command
comm = ["vt", "decompose_blocksub"] + decompose_options
comm.append(split_vcf_path)
comm.extend(["-o", decomposed_vcf])
# run decompose
subprocess.run(comm, check=True)
subprocess.run(["bcftools", "index", "-f", decomposed_vcf], check=True)
# load decomposed vcf file
variants = allel.read_vcf(decomposed_vcf, fields=["*"], alt_number=1)
# Freebayes vcfs have AO and RO counts for alt and ref allele depths
# but GATK has a combined AD depth. Create AO and RO from AD if
# needed
try:
variants["calldata/AO"]
except KeyError:
variants["calldata/RO"] = variants["calldata/AD"][:, :, 0]
variants["calldata/AO"] = variants["calldata/AD"][:, :, 1]
        # specify fields of interest from the INFO fields
variant_fields = ["CHROM", "POS", "REF", "ALT", "QUAL"]
variant_fields = ["variants/" + v for v in variant_fields]
# specify fields of interest from individual level data
# that is basically the count data for tables. AO: alt allele count,
# RO ref count, DP: coverage.
call_data_fields = ['calldata/AO', 'calldata/RO', 'calldata/DP',
'calldata/GT', 'calldata/GQ', 'calldata/QA',
'calldata/QR']
variants["calldata/GT"] = variants["calldata/GT"].sum(axis=2)
# zip variant level information together, so we have a single value
# for each variant
variant_data = list(zip(*[variants[v] for v in variant_fields]))
# get count data for the variants
call_data = list(zip(*[variants[c] for c in call_data_fields]))
# check if a target annotation dict is provided.
target_annotation_dict = {}
if target_nt_annotation is not None:
taa =
|
pd.read_table(target_nt_annotation)
|
pandas.read_table
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390:
|
pd.Timestamp("2013-05-27 00:00:00")
|
pandas.Timestamp
|
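The prompt above ends in a long run of consecutive daily pandas.Timestamp values. As a minimal, purely illustrative sketch (the variable names are not taken from the row), the same daily span can be produced with pd.date_range:

import pandas as pd

daily = pd.date_range(start="2012-10-03", end="2013-05-27", freq="D")
assert daily[0] == pd.Timestamp("2012-10-03 00:00:00")
assert daily[-1] == pd.Timestamp("2013-05-27 00:00:00")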
"""Module for de-duplicating arrays of strings."""
import re
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
STOP_TOKENS = r'[\W_]+|(corporation$)|(corp.$)|(corp$)|(incorporated$)|(inc.$)|(inc$)|(company$)|(common$)|(com$)'
Data = Union[List, pd.Series, np.ndarray]
class StringCluster(BaseEstimator, TransformerMixin):
"""
Transformer for de-duplicating an array-like container of strings.
Attributes
----------
ngram_size: int
Size of ngrams to use in TfidfVectorizer.
threshold: float
Threshold to determine similarities; only samples above this number are flagged as similar.
stop_tokens: re.Pattern
RegEx pattern of stop tokens for use in TfidfVectorizer.
vec: TfidfVectorizer
Scikit-Learn TfidfVectorizer.
similarity_: np.ndarray
Array of pairwise cosine similarities between the input strings (or between the input and the master list, when one is supplied).
labels_: np.ndarray
Array holding, for each input string, the index of its most similar sample.
Methods
-------
fit(X: Data, y: Optional[Data] = None)
Fit the transformer to data.
transform(X: Data, y: Optional[Data] = None)
Transform the data.
fit_transform(X: Data, y: Optional[Data] = None, **fit_params)
Fit and transform the data.
"""
def __init__(self, ngram_size: int = 2, threshold: float = 0.8, stop_tokens: str = r'[\W_]+'):
"""
Instantiate a StringCluster object.
Parameters
----------
ngram_size: int
Size of ngrams to use in TfidfVectorizer; default 2.
threshold: float
Threshold to determine similarities; default 0.8; must be between [0, 1].
stop_tokens: str
RegEx pattern of stop tokens for use in TfidfVectorizer; default r'[\W_]+'; compiled to an re.Pattern internally.
"""
self.ngram_size = ngram_size
self.threshold = threshold
self.stop_tokens = re.compile(stop_tokens)
self.vec = TfidfVectorizer(analyzer='char_wb', ngram_range=(ngram_size, ngram_size))
def fit(self, X: Data, y: Optional[Data] = None) -> "StringCluster":
"""
Fit the transformer to data.
Parameters
----------
X: Data
Array like object containing duplicated strings.
y: Optional[Data]
Optional array like object containing 'master list' of values to map similar samples to.
Returns
-------
StringCluster
Self.
"""
self.similarity_ = self._get_cosine_similarity(X, y)
self.labels_ = self._get_labels()
return self
def transform(self, X: Data, y: Optional[Data] = None) -> pd.Series:
"""
Transform data.
Parameters
----------
X: Data
Array like object containing duplicated strings.
y: Optional[Data]
Optional array like object containing 'master list' of values to map similar samples to.
Returns
-------
pd.Series
Pandas Series of de-duplicated values.
"""
if not hasattr(self, 'labels_'):
raise AttributeError(".fit() method must be called before .transform() method.")
if y is not None:
return pd.Series(y)[self.labels_].reset_index(drop=True)
return
|
pd.Series(X)
|
pandas.Series
|
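A hedged sketch of the similarity machinery StringCluster appears to rely on; the private helpers _get_cosine_similarity and _get_labels are not shown in the row, so the data and variable names below are illustrative only. Character-ngram tf-idf vectors are compared with a linear kernel (cosine similarity for L2-normalised rows), and each input is mapped to its closest entry in a master list, mirroring pd.Series(y)[labels].reset_index(drop=True):

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

names = pd.Series(["Acme Corp", "ACME Corporation", "Globex Inc"])
master = pd.Series(["Acme Corporation", "Globex Inc"])
vec = TfidfVectorizer(analyzer="char_wb", ngram_range=(2, 2))
sim = linear_kernel(vec.fit_transform(names), vec.transform(master))
labels = sim.argmax(axis=1)  # index of the most similar master entry for each name
deduped = master[labels].reset_index(drop=True)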
import time
import string
import pickle
import itertools
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
# from nltk.corpus import stopwords
from sklearn.linear_model import SGDClassifier, SGDRegressor, LogisticRegression, LinearRegression
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from scipy.stats import spearmanr
#from tqdm.notebook import tqdm
#from nltk.tokenize import RegexpTokenizer
#from gensim.models import KeyedVectors
#with open('datasets/ME2020/out_features/train_features_avMEVQAframe.pkl', 'rb') as f:
with open('memad_content_segmentation/first_results.pkl', 'rb') as f:
data = pickle.load(f)
print(data)
#print('yoooooooooooooooooooooooooooooo')
print(data['pooled_output_mul'][0])
print(len(data['pooled_output_v'][0]))
print('coucou')
#print('yaaaaaaaaaaaaaaaaaaa')
#with open('datasets/ME2020/out_features/dev_features6framesMEVQA.pkl', 'rb') as f:
#data_bis = pickle.load(f)
#print('yoooooooooooooooooooooooooooooo')
#print(len(data_bis['pooled_output_mul'][0]))
#print('yaaaaaaaaaaaaaaaaaaa')
#data=data_bis
#data.update(data_bis)
#print(len(data))
df_data = pd.read_csv('scores_v2.csv', )
#df_data_dev=pd.read_csv('dev_scores.csv')
#pd.concat([df_data,df_data_dev])
#print(df_data)
X_vilbert = []
#Y = []
#for i, entry in df_data.iterrows():
#y = (entry['part_1_scores'], entry['part_2_scores'])
#Y.append(y)
def enumerate_models(models):
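# Expand each (model class, hyperparameter grid) pair into one estimator
# instance per configuration in the Cartesian product of the grid values.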
instances = []
for model_name, (model, hyperparameters) in models.items():
configs = {}
if len(hyperparameters) > 0:
params, vals = list(hyperparameters.keys()), list(hyperparameters.values())
configs = [dict(zip(params, vv)) for vv in list(itertools.product(*vals))]
for config in configs:
m = model(**config)
instances.append(m)
else:
instances.append(model())
return instances
regression_models = {
# 'LogisticRegression': (LogisticRegression, {"C": [1e3, 1, 1e-3], "penalty": ['l1', 'l2', 'elasticnet']}),
#'LinearRegression': (LinearRegression, {}),
#'MLPRegressor': (MLPRegressor, {'alpha': [1e-3, 1e-5, 1e-7], 'hidden_layer_sizes': [(10,), (50,), (100,)]}),
#'SGDRegressor': (SGDRegressor, {'alpha': [0.00001, 0.0001, 0.1,]}),
#'SVR': (SVR, {'kernel': ['linear', 'poly', 'rbf'], "C": [1e3, 1., 1e-3]})
'SVR': (SVR, {'kernel': ['rbf'], "C": [ 1e-3]})
}
#X = {'pooled_output_mul': data['pooled_output_mul']}
X=data
folds = {}
Y_st = df_data['part_1_scores']
#print(Y_st)
Y_lt = df_data['part_2_scores']
Y_id=df_data['video_id']
#print(Y_lt)
#for k in range(len(X)):
#print(X['targets'][k])
for k in X:
folds[k] = {}
for regressor in enumerate_models(regression_models):
model_name = str(regressor)
folds[k][model_name] = []
kf = KFold(n_splits=6, shuffle=True, random_state=42)  # random_state only takes effect with shuffle=True
print('Training', model_name, '..')
for i, (train_index, test_index) in enumerate(kf.split(X[k])):
print('Fold #'+ str(i), end='.. ')
t = time.time()
X_train, X_test = X[k][train_index], X[k][test_index]
y_train, y_test = Y_st[train_index], Y_st[test_index]
id_train, id_test = Y_id[train_index], Y_id[test_index]
regressor.fit(X_train.cpu(), y_train)
y_pred = regressor.predict(X_test.cpu())
folds[k][model_name].append((y_pred, y_test,id_test))
print(f'done! ({time.time() - t:.2f} secs)')
t = time.time()
folds_lt = {}
for k in X:
folds_lt[k] = {}
for regressor in enumerate_models(regression_models):
model_name = str(regressor)
folds_lt[k][model_name] = []
kf = KFold(n_splits=6, shuffle=True, random_state=42)  # random_state only takes effect with shuffle=True
print('Training', model_name, '..')
for i, (train_index, test_index) in enumerate(kf.split(X[k])):
print('Fold #'+ str(i), end='.. ')
t = time.time()
X_train, X_test = X[k][train_index], X[k][test_index]
y_train, y_test = Y_lt[train_index], Y_lt[test_index]  # long-term scores for both splits
id_train, id_test = Y_id[train_index], Y_id[test_index]
regressor.fit(X_train.cpu(), y_train)
y_pred = regressor.predict(X_test.cpu())
folds_lt[k][model_name].append((y_pred, y_test,id_test))
print(f'done! ({time.time() - t:.2f} secs)')
t = time.time()
spearman = lambda x,y: spearmanr(x, y).correlation
results_st=
|
pd.DataFrame(columns=['prediction','gt','id'])
|
pandas.DataFrame
|
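The completion above starts an empty results frame. A self-contained sketch (toy numbers, not the experiment's data) of how the per-fold (y_pred, y_test, id_test) tuples collected earlier are typically flattened into such a DataFrame and scored with Spearman correlation:

import numpy as np
import pandas as pd
from scipy.stats import spearmanr

fold_results = [(np.array([0.2, 0.7, 0.4]), np.array([0.1, 0.9, 0.5]), np.array([3, 7, 11]))]
rows = [
    {"prediction": p, "gt": g, "id": i}
    for y_pred, y_test, id_test in fold_results
    for p, g, i in zip(y_pred, y_test, id_test)
]
results = pd.DataFrame(rows, columns=["prediction", "gt", "id"])
print(spearmanr(results["prediction"], results["gt"]).correlation)  # 1.0 for this toy data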
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, _np_version_under1p9)
from pandas.tseries.index import Timestamp
from pandas.types.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData, tm.TestCase):
def test_quantile(self):
from numpy import percentile
q = self.ts.quantile(0.1)
self.assertEqual(q, percentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
self.assertEqual(q, pd.to_timedelta('24:00:00'))
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
self.assertTrue(result is pd.NaT)
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assertRaisesRegexp(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
from numpy import percentile
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([percentile(self.ts.valid(), 10),
percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
tm.assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index(
[], dtype=float))
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
self.assertEqual(q, percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
self.assertEqual(q1, percentile(self.ts.valid(), 10))
# test with and without interpolation keyword
self.assertEqual(q, q1)
def test_quantile_interpolation_dtype(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
if not _np_version_under1p9:
raise nose.SkipTest("Numpy version is greater than 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
self.assertEqual(q, percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
self.assertEqual(q1, percentile(self.ts.valid(), 10))
# interpolation other than linear
expErrMsg = "Interpolation methods other than "
with tm.assertRaisesRegexp(ValueError, expErrMsg):
self.ts.quantile(0.9, interpolation='nearest')
# object dtype
with tm.assertRaisesRegexp(ValueError, expErrMsg):
q = Series(self.ts, dtype=object).quantile(0.7,
interpolation='higher')
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
self.assertEqual(result, expected)
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
self.assertTrue(np.isnan(res))
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
index=[0.2, 0.3]))
def test_quantile_box(self):
cases = [[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
[pd.Timedelta('1 days'),
|
pd.Timedelta('2 days')
|
pandas.Timedelta
|
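A minimal sketch of the timedelta quantile behaviour the test above exercises, with values chosen only for illustration:

import pandas as pd

tds = pd.Series(pd.to_timedelta(["1 days", "2 days", "3 days"]))
assert tds.quantile(0.5) == pd.Timedelta("2 days")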
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 10 00:10:23 2022
@author: <NAME>
Adapted from <NAME>
"""
r"""
Forward Model
"""
# Standard Library imports
import gzip
import numpy as np
import pandas as pd
import xarray as xr
# Third party imports
from collections import OrderedDict
# Semi-local imports
import name_qch4_couple.io
# Local imports
import routines
# Function used to create the emissions map
# For Section 2.9.: factor_q = a (Table B.1); factor_s = b (Table B.1); factor_sce = 0
# For Section 2.11.: factor_q = 1; factor_s = 1; factor_sce - Table 2.4
def read_Qsink(dates_tHour, factor_q, factor_s, factor_sce):
grid_info = routines.define_grid()
nlat = grid_info['nlat']
nlon = grid_info['nlon']
Qfiles_H2 = OrderedDict([
(0, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_AWB.nc', 'CO_emissions', '1M'],
]),
(1, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_ENE.nc', 'CO_emissions', '1M'],
]),
(2, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_REF.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_IND.nc', 'CO_emissions', '1M'],
]),
(3, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_CDS.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_CRS.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_TRO.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_LTO.nc', 'CO_emissions', '1M'],
]),
(4, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_RCO.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_PRO.nc', 'CO_emissions', '1M'],
]),
(5, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_FFF.nc', 'CO_emissions', '1M'],
]),
(6, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_SWD.nc', 'CO_emissions', '1M'],
]),
(7, [
['inputs/emissions/biomass/gfed_2012.nc', 'H2_emissions', '1M'],
]),
(8, [
['inputs/emissions/prior_edgar_v6_0_PRO_GAS.nc', 'CH4_emissions', '1M'],
]),
])
Q_factor = {
0: [0.0357],
1: [0.0143],
2: [0.0143],
3: [0.0357],
4: [0.0217],
5: [0.0143],
6: [0.005],
7: [1],
8: [factor_sce],
}
Q = np.zeros((nlat, nlon))
for s, vs in Qfiles_H2.items():
for v in vs:
with xr.open_dataset(v[0]) as ds_read:
with ds_read.load() as Q_in:
t_Q = Q_in['time']
if v[2] == '1Y':
t = np.datetime64(
dates_tHour[0].floor('d').replace(month=1, day=1)
)
t_in = min(t_Q, key=lambda x: abs(x - t))
else:
t = np.datetime64(
dates_tHour[0].floor('d').replace(day=1)
)
t_in = min(
t_Q[t_Q.dt.month==dates_tHour[0].month],
key=lambda x: abs(x - t)
)
Q += Q_in[v[1]].sel(time=t_in).values * Q_factor[s] * 1.e3 * factor_q # kg -> g
lwfile = 'inputs/sink/land_mask.nc'
with xr.open_dataset(lwfile) as ds_read:
lwin = ds_read.load()
soil_sink = np.array(lwin.lo_land)/100 * -0.000000005 * factor_s
Q += soil_sink
return Q
def r_decc(fpath):
odata = pd.read_csv(
fpath,
usecols=lambda x: x.lower() in ['time', 'h2_ppb'],
index_col=['time'],
skipinitialspace=True,
parse_dates=['time']
).dropna()
odata.columns = odata.columns.str.lower()
return odata
def read_obs(timestamps, site, factor, resample='1H'):
date = timestamps[0].strftime('%Y-%m')
t0 = timestamps[0].strftime('%Y-%m-%d %H')
t1 = timestamps[-1].strftime('%Y-%m-%d %H')
if site == 'WAO':
ifile = 'inputs/obs/WAO_H2_oct2021.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'MHD_10magl':
ifile = 'inputs/baseline/MHD_2018.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'bas':
ifile = 'outputs/models/mhd_bas/chi0_proc.csv'
col_or_no = 'chi0p_H2'
sigma_col_or_no = 0.2
elif site == 'mod':
ifile = f'outputs/scenarios/new_merged/merged_wao_scenario_{factor}.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'bas_mhd':
ifile = 'outputs/scenarios/merged/merged_bas_mhd.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'bas_wao':
ifile = 'outputs/scenarios/merged/merged_bas_wao.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
else:
ifile = False
col_or_no = np.nan
sigma_col_or_no = np.nan
if ifile:
all_obs_raw = r_decc(ifile).sort_index().loc[t0:t1]
obs_raw = all_obs_raw[col_or_no]
sigma_obs_raw = (all_obs_raw[sigma_col_or_no]
if isinstance(sigma_col_or_no, str) else
pd.Series(sigma_col_or_no, index=all_obs_raw.index))
if isinstance(col_or_no, str):
obs = (obs_raw
if resample is False else
obs_raw.resample('1H').mean().reindex(timestamps))
else:
obs = pd.Series(col_or_no, index=timestamps)
if isinstance(sigma_col_or_no, str) or isinstance(col_or_no, str):
sigma_obs = (
sigma_obs_raw
if resample is False else
sigma_obs_raw.resample('1H').apply(
lambda x: np.sum(x**2)).reindex(timestamps))
else:
sigma_obs = pd.Series(sigma_col_or_no, index=timestamps)
return obs, sigma_obs
def read_baseline(timestamps, site, btype="default"):
date = timestamps[0].strftime('%Y-%m')
year = timestamps[0].strftime('%Y')
if site == 'MHD_10magl':
if btype == 'default':
chi0file = (
'outputs/baseline/baseline-MHD_10magl-h2-2018.nc'
)
with xr.open_dataset(chi0file) as ds_read: #put as
with ds_read.load() as ds:
chi0 = ds.chi_H2.sel(time=date).to_series()
var_chi0 = ds.var_chi_H2.sel(time=date).to_series()
elif btype == 'intem':
if timestamps[0] < pd.to_datetime('2020-07'):
bmonth = '2020-07'
bflag = 1
elif timestamps[0] > pd.to_datetime('2020-12'):
bmonth = '2020-12'
bflag = 2
else:
bmonth = date
bflag = 0
m_sta = (pd.to_datetime(bmonth)).date().strftime('%Y%m%d')
m_end = (
pd.to_datetime(bmonth)+pd.tseries.offsets.MonthEnd(0)
).date().strftime('%Y%m%d')
chi0file = (
'/home/ec5/hpc-work/data_archive/decc/'
'EUROPE_UKV_HFD_100magl/Pos_CH4/'
'H1_C_MHT1T2R1ANH1B2CBWB_ch4_OBUSEXL_4h_Fnc10_'
f'{m_sta}-{m_end}_average_f.gz'
)
with gzip.open(chi0file, mode='r') as chi0in:
chi0_0all = pd.read_csv(
chi0in, sep=' ', skipinitialspace=True,
skiprows=[5], header=4,
parse_dates={'datetime': ['YYYY', 'MM', 'DD', 'HH', 'MI']},
#converters={
# 'datetime': lambda Y, m, d, H, M:
# pd.to_datetime(f'{Y} {m} {d} {H} {M}',
# format='%Y %m %d %H %M'),
# },
#index_col='datetime'
)
chi0_0all.index = pd.to_datetime(
chi0_0all['datetime'], format='%Y %m %d %H %M')
chi0_0 = chi0_0all['BasePos']
var_chi0_0 = chi0_0all['BasePos']
if bflag == 1:
chi0 = pd.Series(chi0_0.iloc[-1], index=timestamps)
var_chi0 =
|
pd.Series(var_chi0_0.iloc[-1], index=timestamps)
|
pandas.Series
|
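The completion fills in a constant baseline series over the request timestamps. A minimal sketch of that pattern (the index span and the value 500.0 are made up):

import pandas as pd

timestamps = pd.date_range("2020-07-01", periods=3, freq="H")
chi0 = pd.Series(500.0, index=timestamps)  # one constant baseline value per timestamp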
# License: Apache-2.0
from gators.feature_generation.one_hot import OneHot
from pandas.testing import assert_frame_equal
import numpy as np
import pytest
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.DataFrame(np.array(list('qweqwrasd')).reshape(
3, 3), columns=list('ABC'))
X_expected = pd.DataFrame({
'A': {0: 'q', 1: 'q', 2: 'a'},
'B': {0: 'w', 1: 'w', 2: 's'},
'C': {0: 'e', 1: 'r', 2: 'd'},
'A__onehot__q': {0: True, 1: True, 2: False},
'A__onehot__a': {0: False, 1: False, 2: True},
'B__onehot__w': {0: True, 1: True, 2: False},
'B__onehot__s': {0: False, 1: False, 2: True},
'C__onehot__e': {0: True, 1: False, 2: False},
'C__onehot__d': {0: False, 1: False, 2: True}}
)
categories_dict = {'A': ['q', 'a'], 'B': ['w', 's'], 'C': ['e', 'd']}
obj = OneHot(categories_dict=categories_dict).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_names():
X = pd.DataFrame(np.array(list('qweqwrasd')).reshape(
3, 3), columns=list('ABC'))
X_expected = pd.DataFrame(
{'A': {0: 'q', 1: 'q', 2: 'a'},
'B': {0: 'w', 1: 'w', 2: 's'},
'C': {0: 'e', 1: 'r', 2: 'd'},
'Aq': {0: True, 1: True, 2: False},
'Aa': {0: False, 1: False, 2: True},
'Bw': {0: True, 1: True, 2: False},
'Bs': {0: False, 1: False, 2: True},
'Ce': {0: True, 1: False, 2: False},
'Cd': {0: False, 1: False, 2: True}})
column_names = ['Aq', 'Aa', 'Bw', 'Bs', 'Ce', 'Cd']
categories_dict = {'A': ['q', 'a'], 'B': ['w', 's'], 'C': ['e', 'd']}
obj = OneHot(
categories_dict=categories_dict, column_names=column_names).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_ks():
X = ks.DataFrame(np.array(list('qweqwrasd')).reshape(
3, 3), columns=list('ABC'))
X_expected = pd.DataFrame({
'A': {0: 'q', 1: 'q', 2: 'a'},
'B': {0: 'w', 1: 'w', 2: 's'},
'C': {0: 'e', 1: 'r', 2: 'd'},
'A__onehot__q': {0: True, 1: True, 2: False},
'A__onehot__a': {0: False, 1: False, 2: True},
'B__onehot__w': {0: True, 1: True, 2: False},
'B__onehot__s': {0: False, 1: False, 2: True},
'C__onehot__e': {0: True, 1: False, 2: False},
'C__onehot__d': {0: False, 1: False, 2: True}}
)
categories_dict = {'A': ['q', 'a'], 'B': ['w', 's'], 'C': ['e', 'd']}
obj = OneHot(categories_dict=categories_dict).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_names_ks():
X = ks.DataFrame(np.array(list('qweqwrasd')).reshape(
3, 3), columns=list('ABC'))
X_expected = pd.DataFrame(
{'A': {0: 'q', 1: 'q', 2: 'a'},
'B': {0: 'w', 1: 'w', 2: 's'},
'C': {0: 'e', 1: 'r', 2: 'd'},
'Aq': {0: True, 1: True, 2: False},
'Aa': {0: False, 1: False, 2: True},
'Bw': {0: True, 1: True, 2: False},
'Bs': {0: False, 1: False, 2: True},
'Ce': {0: True, 1: False, 2: False},
'Cd': {0: False, 1: False, 2: True}})
column_names = ['Aq', 'Aa', 'Bw', 'Bs', 'Ce', 'Cd']
categories_dict = {'A': ['q', 'a'], 'B': ['w', 's'], 'C': ['e', 'd']}
obj = OneHot(
categories_dict=categories_dict, column_names=column_names).fit(X)
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new.iloc[:, -3:], X_expected.iloc[:, -3:])
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas().iloc[:, -3:], X_expected.iloc[:, -3:])
def test_pd_np(data):
obj, X, X_expected = data
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
|
assert_frame_equal(X_new, X_expected)
|
pandas.testing.assert_frame_equal
|
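A minimal sketch of the frame assertion used throughout these fixtures, assuming nothing beyond pandas.testing:

import pandas as pd
from pandas.testing import assert_frame_equal

left = pd.DataFrame({"A__onehot__q": [True, True, False]})
right = pd.DataFrame({"A__onehot__q": [True, True, False]})
assert_frame_equal(left, right)  # raises AssertionError on any mismatch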
import sys
import pandas as pd
import yfinance
from datetime import datetime
import os
from os import path
import numpy as np
import requests
import time
#The input file lists the scrips (stock symbols) requiring analysis
print('Enter your filename:')
stocklistFileHandler = open(input())
stocklist = set()
#Add each scrip to a set for iteration
for line in stocklistFileHandler:
if len(line.strip()) > 0:
stocklist.add(line.strip())
stocklistFileHandler.close()
fileExt = datetime.today().strftime('%Y%m%d%H%M%S')
folderExt = datetime.today().strftime('%Y%m%d%H%M%S')
#Creating output folder if not exist the format is YYYYMMDDHHMMSS
try:
os.mkdir(folderExt)
except OSError:
print ("Creation of the directory %s failed" % folderExt)
else:
print ("Successfully created the directory %s " % folderExt)
#Dataframe where the summary for the stock is stored
analysisResult =
|
pd.DataFrame(columns = ['Stock', 'Close', '12EMA', '26EMA', 'FastMACD', 'SignalMACD', 'MACDHist', 'ADX', 'RSI', 'CloseSlope', '12EMASlope', '26EMASlope', 'FastMACDSlope', 'SignalMACDSlope', 'MACDHistSlope', 'ADXSlope', 'BuyIndicator'])
|
pandas.DataFrame
|
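A minimal sketch of how per-stock summary rows are typically appended to a column-only frame like analysisResult; the ticker and numbers below are placeholders:

import pandas as pd

summary = pd.DataFrame(columns=["Stock", "Close", "RSI"])
summary.loc[len(summary)] = ["AAPL", 150.0, 55.2]  # append one summary row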
import QUANTAXIS as QA
import pandas as pd
import json
import datetime
from QUANTAXIS.QAUtil import QASETTING
from QUANTAXIS.TSData.TSRawdata import TSRawdata
def TS_fetch_stock_day_adv(code, start, end):
#get all history data from tdx
# date = datetime.date.today()
# data=QA.QAFetch.QATdx.QA_fetch_get_stock_day('00001','2017-01-01','2019-01-31')
#get data from local database
data = QA.QA_fetch_stock_day_adv(code=code, start=start, end=end)
result = data.data
result = result.sort_index(ascending=True)
result = result.reset_index(level=1)
result = result.drop(columns='code')
result['date'] = result.index
result = result.rename(columns={'close': 'y'})
# print(result)
rawdata = TSRawdata(result)
# print(rawdata.data)
return rawdata
#upload to mongodb
# outcome = rawdata.data
#
# client = QASETTING.client
# database = client['mydatabase']
# datacol = database[code+str(datetime.date.today())]
# outcome = date2str(outcome)
# datacol.insert_many(outcome)
def getrawfrommongodb(start,end,databaseid,collectionid,client = QASETTING.client):
database = client[databaseid]
datacol = database[collectionid]
cursor = datacol.find()
outcome = pd.DataFrame(list(cursor))
outcome = outcome.drop(columns = '_id')
outcome['datetime'] =
|
pd.to_datetime(outcome['datetime'])
|
pandas.to_datetime
|
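A self-contained sketch of the conversion the completion performs, with made-up documents standing in for the MongoDB cursor:

import pandas as pd

docs = [{"datetime": "2019-01-30", "y": 10.2}, {"datetime": "2019-01-31", "y": 10.4}]
outcome = pd.DataFrame(docs)
outcome["datetime"] = pd.to_datetime(outcome["datetime"])  # object strings -> datetime64[ns]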
import logging
import pandas as pd
from common.constant.df_from_csv import KWDF, NGDF
from common.word_format.word_formatter import WordFormatter
import re
from core.nlp.response_generator.product.cct.reaction_generator import SP_I_DF
class TextKwDFGenerator:
def __call__(self, text_df):
w_toks = WordFormatter.Df2WToks(text_df, column_name="base_form")
try:
matched_list = []
for sidx, sent in enumerate(w_toks):
for widx, word in enumerate(sent):
kw_type = self.__find_keywords_from_csv(text_df, sidx, widx, word)
if kw_type == 'EMPHASIS':
matched_list.append([sidx, widx, word, 'emphasis'])
elif kw_type == 'KEYWORD':
matched_list.append([sidx, widx, word, '-'])
if all('-' not in i for i in matched_list):
return None
except Exception:
logging.exception('Error at: ' + str(__name__))
return None
try:
text_kw_df =
|
pd.DataFrame(matched_list)
|
pandas.DataFrame
|
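A hedged sketch of the DataFrame built from matched_list; the row does not name the columns, so the labels below are assumptions for illustration:

import pandas as pd

matched_list = [[0, 2, "really", "emphasis"], [1, 0, "deadline", "-"]]
text_kw_df = pd.DataFrame(matched_list, columns=["sidx", "widx", "word", "kw_flag"])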
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import lrange, lzip, range
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas.util.testing as tm
def test_equals(idx):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(idx):
# GH9947, GH10637
index_a = idx
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_equals_multi(idx):
assert idx.equals(idx)
assert not idx.equals(idx.values)
assert idx.equals(Index(idx.values))
assert idx.equal_levels(idx)
assert not idx.equals(idx[:-1])
assert not idx.equals(idx[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], codes=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], codes=index.codes[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_codes = np.array([0, 0, 1, 2, 2, 3])
minor_codes = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes])
assert not idx.equals(index)
assert not idx.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_codes = np.array([0, 0, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes])
assert not idx.equals(index)
def test_identical(idx):
mi = idx.copy()
mi2 = idx.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_equals_operator(idx):
# GH9785
assert (idx == idx).all()
def test_equals_missing_values():
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_is_():
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_is_all_dates(idx):
assert not idx.is_all_dates
def test_is_numeric(idx):
# MultiIndex is never numeric
assert not idx.is_numeric()
def test_multiindex_compare():
# GH 21149
# Ensure comparison operations for MultiIndex with nlevels == 1
# behave consistently with those for MultiIndex with nlevels > 1
midx = pd.MultiIndex.from_product([[0, 1]])
# Equality self-test: MultiIndex object vs self
expected = pd.Series([True, True])
result = pd.Series(midx == midx)
tm.assert_series_equal(result, expected)
# Greater than comparison: MultiIndex object vs self
expected =
|
pd.Series([False, False])
|
pandas.Series
|
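A minimal sketch of the comparison-broadcasting behaviour test_multiindex_compare checks:

import pandas as pd

midx = pd.MultiIndex.from_product([[0, 1]])
result = pd.Series(midx == midx)  # elementwise equality wrapped in a Series
expected = pd.Series([True, True])
assert result.equals(expected)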
# Starter code for multiple regressors implemented by <NAME>
# Source code based on Forecasting Favorites, 1owl
# https://www.kaggle.com/the1owl/forecasting-favorites , version 10
# Part II
import numpy as np
import pandas as pd
from sklearn import preprocessing, linear_model, metrics
import gc; gc.enable()
import random
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import TheilSenRegressor, BayesianRidge
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
import time
np.random.seed(1122)
# store the total processing time
start_time = time.time()
tcurrent = start_time
print('Multiple regressors - Neural network (MLP), Bayesian Ridge, Bagging(4x) and XGBoost (2x)\n')
print('Datasets reading')
# read datasets
dtypes = {'id':'int64', 'item_nbr':'int32', 'store_nbr':'int8', 'onpromotion':str}
data = {
#R 'tra': pd.read_csv('../input/train.csv', dtype=dtypes, parse_dates=['date']),
'tra': pd.read_csv('../input/processed/train_4r.csv', dtype=dtypes, parse_dates=['date']),
'tes': pd.read_csv('../input/test.csv', dtype=dtypes, parse_dates=['date']),
'ite': pd.read_csv('../input/items.csv'),
'sto': pd.read_csv('../input/stores.csv'),
'trn': pd.read_csv('../input/transactions.csv', parse_dates=['date']),
'hol': pd.read_csv('../input/holidays_events.csv', dtype={'transferred':str}, parse_dates=['date']),
'oil': pd.read_csv('../input/oil.csv', parse_dates=['date']),
}
# dataset processing
print('Datasets processing')
# Filter the training data to contain only august starting from the day 16
# which is reasonable since the test period is 2017-08-16 until 2017-08-31
#R train = data['tra'][(data['tra']['date'].dt.month == 8) & (data['tra']['date'].dt.day > 15)]
train = data['tra']
del data['tra']; gc.collect();
target = train['unit_sales'].values
target[target < 0.] = 0.
train['unit_sales'] = np.log1p(target)
def df_lbl_enc(df):
for c in df.columns:
if df[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
df[c] = lbl.fit_transform(df[c])
print(c)
return df
def df_transform(df):
df['date'] = pd.to_datetime(df['date'])
df['yea'] = df['date'].dt.year
df['mon'] = df['date'].dt.month
df['day'] = df['date'].dt.day
df['date'] = df['date'].dt.dayofweek # Replace!!!
df['onpromotion'] = df['onpromotion'].map({'False': 0, 'True': 1})
df['perishable'] = df['perishable'].map({0:1.0, 1:1.25})
df = df.fillna(-1)
return df
data['ite'] = df_lbl_enc(data['ite'])
train = pd.merge(train, data['ite'], how='left', on=['item_nbr'])
test = pd.merge(data['tes'], data['ite'], how='left', on=['item_nbr'])
del data['tes']; gc.collect();
del data['ite']; gc.collect();
train = pd.merge(train, data['trn'], how='left', on=['date','store_nbr'])
test = pd.merge(test, data['trn'], how='left', on=['date','store_nbr'])
del data['trn']; gc.collect();
target = train['transactions'].values
target[target < 0.] = 0.
train['transactions'] = np.log1p(target)
data['sto'] = df_lbl_enc(data['sto'])
train =
|
pd.merge(train, data['sto'], how='left', on=['store_nbr'])
|
pandas.merge
|
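A self-contained sketch of the left-merge pattern used above, with toy frames rather than the Favorita data:

import pandas as pd

train = pd.DataFrame({"store_nbr": [1, 2], "unit_sales": [3.0, 4.0]})
stores = pd.DataFrame({"store_nbr": [1, 2], "cluster": [13, 8]})
train = pd.merge(train, stores, how="left", on=["store_nbr"])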
"""Tests for time-related quality control functions."""
from datetime import datetime
import pytz
import pytest
import pandas as pd
from pandas.util.testing import assert_series_equal
from pvanalytics.quality import time
@pytest.fixture
def times():
"""One hour in Mountain Standard Time at 10 minute intervals.
Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
"""
MST = pytz.timezone('MST')
return pd.date_range(start=datetime(2018, 6, 15, 12, 0, 0, tzinfo=MST),
end=datetime(2018, 6, 15, 13, 0, 0, tzinfo=MST),
freq='10T')
def test_timestamp_spacing_date_range(times):
"""An index generated by pd.date_range has the expected spacing."""
assert_series_equal(
time.spacing(times, times.freq),
pd.Series(True, index=times)
)
def test_timestamp_spacing_one_timestamp(times):
"""An index with only one timestamp has uniform spacing."""
assert_series_equal(
time.spacing(times[[0]], times.freq),
pd.Series(True, index=[times[0]])
)
def test_timestamp_spacing_one_missing(times):
"""The timestamp following a missing timestamp will be marked False."""
assert_series_equal(
time.spacing(times[[0, 2, 3]], times.freq),
pd.Series([True, False, True], index=times[[0, 2, 3]])
)
def test_timestamp_spacing_too_frequent(times):
"""Timestamps with too high frequency will be marked False."""
assert_series_equal(
time.spacing(times, '30min'),
pd.Series([True] + [False] * (len(times) - 1), index=times)
)
def _get_sunrise(location, tz):
# Get sunrise times for 2020
days = pd.date_range(
start='1/1/2020',
end='1/1/2021',
freq='D',
tz=tz
)
return location.get_sun_rise_set_transit(
days, method='spa'
).sunrise
@pytest.mark.parametrize("tz, observes_dst", [('MST', False),
('America/Denver', True)])
def test_has_dst(tz, observes_dst, albuquerque):
sunrise = _get_sunrise(albuquerque, tz)
dst = time.has_dst(sunrise, 'America/Denver')
expected = pd.Series(False, index=sunrise.index)
expected.loc['2020-03-08'] = observes_dst
expected.loc['2020-11-01'] = observes_dst
assert_series_equal(
expected,
dst,
check_names=False
)
@pytest.mark.parametrize("tz, observes_dst", [('MST', False),
('America/Denver', True)])
def test_has_dst_input_series_not_localized(tz, observes_dst, albuquerque):
sunrise = _get_sunrise(albuquerque, tz)
sunrise = sunrise.tz_localize(None)
expected = pd.Series(False, index=sunrise.index)
expected.loc['2020-03-08'] = observes_dst
expected.loc['2020-11-01'] = observes_dst
dst = time.has_dst(sunrise, 'America/Denver')
assert_series_equal(
expected,
dst
)
@pytest.mark.parametrize("tz, observes_dst", [('MST', False),
('America/Denver', True)])
@pytest.mark.parametrize("freq", ['15T', '30T', 'H'])
def test_has_dst_rounded(tz, freq, observes_dst, albuquerque):
sunrise = _get_sunrise(albuquerque, tz)
# With rounding to 1-hour timestamps we need to reduce how many
# days we look at.
window = 7 if freq != 'H' else 1
expected = pd.Series(False, index=sunrise.index)
expected.loc['2020-03-08'] = observes_dst
expected.loc['2020-11-01'] = observes_dst
dst = time.has_dst(
sunrise.dt.round(freq),
'America/Denver',
window=window
)
assert_series_equal(expected, dst, check_names=False)
def test_has_dst_missing_data(albuquerque):
sunrise = _get_sunrise(albuquerque, 'America/Denver')
sunrise.loc['3/5/2020':'3/10/2020'] = pd.NaT
sunrise.loc['7/1/2020':'7/20/2020'] = pd.NaT
# Doesn't raise since both sides still have some data
expected = pd.Series(False, index=sunrise.index)
expected['3/8/2020'] = True
expected['11/1/2020'] = True
assert_series_equal(
time.has_dst(sunrise, 'America/Denver'),
expected
)
missing_all_before = sunrise.copy()
missing_all_after = sunrise.copy()
missing_all_before.loc['3/1/2020':'3/5/2020'] = pd.NaT
missing_all_after.loc['3/8/2020':'3/14/2020'] = pd.NaT
missing_data_message = r'No data at .*\. ' \
r'Consider passing a larger `window`.'
# Raises for missing data before transition date
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(missing_all_before, 'America/Denver')
# Raises for missing data after transition date
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(missing_all_after, 'America/Denver')
# Raises for missing data before and after the shift date
sunrise.loc['3/1/2020':'3/7/2020'] = pd.NaT
sunrise.loc['3/9/2020':'3/14/2020'] = pd.NaT
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(sunrise, 'America/Denver')
with pytest.warns(UserWarning, match=missing_data_message):
result = time.has_dst(sunrise, 'America/Denver', missing='warn')
expected.loc['3/8/2020'] = False
assert_series_equal(expected, result)
sunrise.loc['3/1/2020':'3/14/2020'] = pd.NaT
with pytest.warns(UserWarning, match=missing_data_message):
result = time.has_dst(sunrise, 'America/Denver', missing='warn')
assert_series_equal(expected, result)
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(sunrise, 'America/Denver')
def test_has_dst_gaps(albuquerque):
sunrise = _get_sunrise(albuquerque, 'America/Denver')
sunrise.loc['3/5/2020':'3/10/2020'] = pd.NaT
sunrise.loc['7/1/2020':'7/20/2020'] = pd.NaT
sunrise.dropna(inplace=True)
expected = pd.Series(False, index=sunrise.index)
expected['11/1/2020'] = True
assert_series_equal(
time.has_dst(sunrise, 'America/Denver'),
expected
)
def test_has_dst_no_dst_in_date_range(albuquerque):
sunrise = _get_sunrise(albuquerque, 'America/Denver')
july = sunrise['2020-07-01':'2020-07-31']
february = sunrise['2020-02-01':'2020-03-05']
expected_july = pd.Series(False, index=july.index)
expected_march = pd.Series(False, index=february.index)
assert_series_equal(
expected_july,
time.has_dst(july, 'America/Denver')
)
assert_series_equal(
expected_march,
time.has_dst(february, 'MST')
)
@pytest.fixture(scope='module', params=['H', '15T', 'T'])
def midday(request, albuquerque):
solar_position = albuquerque.get_solarposition(
pd.date_range(
start='1/1/2020', end='3/1/2020', closed='left',
tz='MST', freq=request.param
)
)
mid_day = (solar_position['zenith'] < 87).groupby(
solar_position.index.date
).apply(
lambda day: (day[day].index.min()
+ ((day[day].index.max() - day[day].index.min()) / 2))
)
mid_day = mid_day.dt.hour * 60 + mid_day.dt.minute
mid_day.index = pd.DatetimeIndex(mid_day.index, tz='MST')
return mid_day
def requires_ruptures(test):
"""Skip `test` if ruptures is not installed."""
try:
import ruptures # noqa: F401
has_ruptures = True
except ImportError:
has_ruptures = False
return pytest.mark.skipif(
not has_ruptures, reason="requires ruptures")(test)
@requires_ruptures
def test_shift_ruptures_no_shift(midday):
"""Daytime mask with no time-shifts yields a series with 0s for
shift amounts."""
shift_mask, shift_amounts = time.shifts_ruptures(
midday, midday
)
assert not shift_mask.any()
assert_series_equal(
shift_amounts,
pd.Series(0, index=midday.index, dtype='int64'),
check_names=False
)
@requires_ruptures
def test_shift_ruptures_positive_shift(midday):
"""Every day shifted 1 hour later yields a series with shift
of 60 for each day."""
shifted = _shift_between(
midday, 60,
start='2020-01-01',
end='2020-02-29'
)
expected_shift_mask = pd.Series(False, index=midday.index)
expected_shift_mask['2020-01-01':'2020-02-29'] = True
shift_mask, shift_amounts = time.shifts_ruptures(shifted, midday)
|
assert_series_equal(shift_mask, expected_shift_mask, check_names=False)
|
pandas.util.testing.assert_series_equal
|
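A minimal sketch of the series assertion these tests rely on; note the import below uses the current pandas.testing location, whereas the row's api column records the older pandas.util.testing path:

import pandas as pd
from pandas.testing import assert_series_equal

expected = pd.Series([True, False])
result = pd.Series([True, False])
assert_series_equal(result, expected, check_names=False)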
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
# All of these dates are on the same day of the week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertEqual(rng.inferred_freq, '-1A-JAN')
def test_non_datetimeindex(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [
|
tm.makeStringIndex(10)
|
pandas.util.testing.makeStringIndex
|
"""
Preprocessing lambda function for IPAC-CLABSI
Purpose
-------
Read the Excel file, split it into separate patient files, generate the 7-day
Infection Window plots and the total timeline plot per patient, and save each
patient CSV into the S3 bucket.
Patient files are saved as {mrn}.csv in the source-csv folder, which triggers
the job-creation lambda, which in turn triggers the SageMaker Ground Truth module.
-----------------
* csv - tabular data with the patient information
"""
import os
import json
from datetime import timedelta
import datetime
import io
from io import StringIO
from matplotlib.pylab import plt
import pandas as pd
import boto3
s3_path = os.environ.get('S3_raw')
patient_processed = os.environ.get('patient_bucket')
def write_dataframe_to_csv_on_s3(dataframe, filename, bucket):
"""
Write a dataframe to a CSV file on S3.
Parameters
----------
dataframe : pandas.DataFrame
Dataframe to write
filename : str
Destination key (path) within the bucket
bucket : str
Name of the target S3 bucket
"""
# Create buffer
csv_buffer = StringIO()
# Write dataframe to buffer
dataframe.to_csv(csv_buffer)
# Create S3 object
s3_resource = boto3.resource("s3")
# Write buffer to S3 object
s3_resource.Object(bucket, filename).put(
Body=csv_buffer.getvalue(),
ServerSideEncryption="aws:kms",
)
def relative_time_in_days(end_date, start_date):
"""
Return the difference between two dates, in days.
"""
try:
difference = (end_date - start_date).days
except ValueError:
difference = 0
return difference
def plot_timeline(dataframe, patient):
"""
Generate the timeline plot for a patient
Columns
=======
['encntr_num', 'nursing_unit_short_desc',
'beg_effective_dt_tm','end_effective_dt_tm',
'facility_name_src', 'collection_dt_tm',
'mrn', 'encntr_type_desc_src_at_collection',
'admit_dt_tm', 'clinical_event_code_desc_src',
'collection_date_id', 'loc_room_desc_src_at_collection',
'loc_bed_desc_src_at_collection', 'disch_dt_tm',
'disch_disp_desc_src', 'lab_result',
'med_service_desc_src_at_collection',
'nursing_unit_desc_at_collection',
'nursing_unit_short_desc_at_collection',
'organism',
'result_interpretation_desc_src',
'specimen_type_desc_src', 'transfer_in_to_collect',
'transfer_out_to_collect','ce_dynamic_label_id',
'doc_set_name_result', 'encntr_id',
'first_activity_start_dt_tm',
'first_catheter_type_result',
'first_dressing_type_result',
'first_site_result',
'last_activity_end_dt_tm',
'line_tube_drain_insertion_seq',
'line_insert_to_collection',
'line_remove_to_collect',
'last_temperature_result_pre_collection',
'name_last','name_first',
'birth_date_id','gender_desc_src','bc_phn',
'home_addr_patient_postal_code_forward_sortation_area']
Datetime events
===============
- beg_effective_dt_tm = Nursing unit (ICU) admission date
- end_effective_dt_tm = Nursing unit (ICU) discharge date
- collection_dt_tm = Positive blood collection date
- admit_dt_tm = Admission date (begin of stay)
- disch_dt_tm = Discharge date (end of stay)
- first_activity_start_dt_tm = Catheter insertion
- last_activity_end_dt_tm = Catheter removal
"""
print('Generating timeline plot for {}'.format(patient))
# Convert all datetime values to datetime
datetime_column_names = [
'beg_effective_dt_tm',
'end_effective_dt_tm',
'collection_dt_tm',
'admit_dt_tm',
'disch_dt_tm',
'first_activity_start_dt_tm',
'last_activity_end_dt_tm',
]
# Convert all dates to datetime format; the input data is mm/dd/yyyy
for column_name in datetime_column_names:
dataframe[column_name] = pd.to_datetime(
dataframe[column_name], errors='coerce', format='%m/%d/%Y')
#
fig, axis = plt.subplots(figsize=(
12, 3 + len(dataframe['collection_dt_tm'].unique()) / 4), dpi=300)
collection_times = []
plotted_organisms = []
x_scale_label = {}
y_scale_label = []
dates = {}
# Generate a list of organisms so that the same organism is always
# shown in the same color
unique_organisms = []
for index in dataframe.index:
organism = dataframe.loc[index, 'organism']
unique_organisms.append(organism)
# Iterate through all records and add them to the plot
for index in dataframe.index:
# Organism found for this record
organism = dataframe.loc[index, 'organism']
# Calculate the relative date from admission
day = {
key: relative_time_in_days(
dataframe.loc[index, key], sorted(dataframe['admit_dt_tm'])[0])
for key in datetime_column_names
}
# 3 bar graph plots: patient visit, nursing unit, central line
bar_graphs = {
'Patient visit': {
'start': 'admit_dt_tm',
'stop': 'disch_dt_tm',
'y': 0,
'color': [0.8, 0.8, 0.8],
},
dataframe.loc[index, 'nursing_unit_short_desc']: {
'start': 'beg_effective_dt_tm',
'stop': 'end_effective_dt_tm',
'y': 1,
'color': [0.6, 0.6, 0.6],
},
'Central line': {
'start': 'first_activity_start_dt_tm',
'stop': 'last_activity_end_dt_tm',
'y': 2,
'color': [0.4, 0.4, 0.4],
},
}
# One type of markers for the positive blood collection dates
marker_graphs = {
'Blood collection': {
'start': 'collection_dt_tm',
'y': 0,
'color': [0.8, 0.2, 0.2],
},
}
# Bar graphs: patient visit, nursing unit, central line
for label in bar_graphs:
period = (
dataframe.loc[index, bar_graphs[label]['start']],
dataframe.loc[index, bar_graphs[label]['stop']]
)
# Do not plot the same period twice
if label not in dates:
dates[label] = []
if period not in dates[label]:
# Bar plot for the period
axis.bar(
[day[bar_graphs[label]['start']]],
[0.8],
width=day[bar_graphs[label]['stop']] -
day[bar_graphs[label]['start']],
bottom=bar_graphs[label]['y'] + 0.1,
color=bar_graphs[label]['color'],
# edgecolor='w',
# linewidth=4,
align='edge',
)
# Put markers at the start and stop dates, so that if one date is
# missing the period can still be seen.
axis.plot(
[day[bar_graphs[label]['start']]],
[bar_graphs[label]['y'] + 0.5],
'k>',
)
axis.plot(
[day[bar_graphs[label]['stop']]],
[bar_graphs[label]['y'] + 0.5],
'k<',
)
dates[label].append(period)
x_scale_label[day[bar_graphs[label]['start']]] = dataframe.loc[
index, bar_graphs[label]['start']]
x_scale_label[day[bar_graphs[label]['stop']]] = dataframe.loc[
index, bar_graphs[label]['stop']]
if label not in y_scale_label:
y_scale_label.append(label)
for label in marker_graphs:
# Blood collection
if float(
day[marker_graphs[
label]['start']]) not in collection_times:
if organism not in plotted_organisms:
axis.plot(
[day[marker_graphs[label]['start']]],
[marker_graphs[label]['y'] + 0.5],
marker='o',
markersize=14,
linestyle='',
color=plt.cm.tab10(unique_organisms.index(organism)),
label=organism.replace(', ',"\n"),
)
plotted_organisms.append(organism)
else:
axis.plot(
[day[marker_graphs[label]['start']]],
[marker_graphs[label]['y'] + 0.5],
marker='o',
markersize=14,
linestyle='',
color=plt.cm.tab10(unique_organisms.index(organism)),
)
axis.plot(
[day[marker_graphs[label]['start']]],
[marker_graphs[label]['y'] + 0.5],
'wo',
markersize=5,
color='0.8'
)
collection_times.append(
float(day[marker_graphs[label]['start']]))
x_scale_label[day[
marker_graphs[label]['start']]] = dataframe.loc[
index, marker_graphs[label]['start']]
if label not in dates:
dates[label] = []
dates[label].append(day[marker_graphs[label]['start']])
axis.set_yticks([value + 0.5 for value in range(len(y_scale_label))])
axis.set_yticklabels(y_scale_label)
axis.set_ylim(0, len(y_scale_label))
axis.set_xticks(list(x_scale_label.keys()))
axis.set_xticklabels([
str(value)[:10] for value in x_scale_label.values()], rotation=90)
axis.set_xlabel('Date')
axis.set_axisbelow(True)
plt.legend(
bbox_to_anchor=(1.04, 1), loc='upper left',
ncol=1, title='Positive blood sample')
plt.tight_layout()
buf = io.BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
image = buf.read()
s3_resource = boto3.resource("s3")
# saving the patient total timeline
# plots to processed/images/patient/timeline.png
filename = f'images/{patient}/timeline.png'
bucket = os.environ['patient_bucket']
print('Timeline plot path for patient {}: {}'.format(patient, filename))
s3_resource.Object(bucket, filename).put(
Body=image, ServerSideEncryption="aws:kms")
def get_start_end_time(dataframe):
"""
Create middle_time, start_time and end_time columns (one Infection Window
per collection date) used to build the Infection Window plots.
Parameters
----------
dataframe : pandas.DataFrame
Patient dataframe with a collection_dt_tm column
Returns
-------
dataframe : pandas.DataFrame
Dataframe with the added middle_time, start_time and end_time columns
"""
for i in dataframe.index:
dataframe.loc[i, 'middle_time'] = dataframe.loc[
i, 'collection_dt_tm']
dataframe.loc[i, 'start_time'] = dataframe.loc[
i, 'collection_dt_tm'] - timedelta(days=3)
dataframe.loc[i, 'end_time'] = dataframe.loc[
i, 'collection_dt_tm'] + timedelta(days=3)
return dataframe
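# Illustrative example (not part of the original module): for a positive blood
# collection on 2021-05-10, the Infection Window spans three days on either side:
# row with collection_dt_tm = 2021-05-10
# -> start_time = 2021-05-07, middle_time = 2021-05-10, end_time = 2021-05-13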
def estimate_text_size(text):
"""
Provide a text size estimate based on the length of the text
Parameters
----------
text: string
Text meant to print on the IWP plot
Returns
-------
fontsize: float
Estimated best fontsize
"""
fontsize = 12
if len(text) > 50:
fontsize -= (len(text) - 50) / 5
if fontsize < 5:
fontsize = 5
return fontsize
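# Worked example (illustrative): a 30-character label keeps the default
# fontsize of 12; a 90-character label gives 12 - (90 - 50) / 5 = 4, which is
# clamped to the minimum of 5.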
def generate_iwp_plot(dataframe, temperature, plot_index, patient):
"""
Generate individual IWP plot for each positive blood collection.
"""
dataframe = dataframe.copy()
# Convert all datetime values to datetime
datetime_column_names = [
'beg_effective_dt_tm',
'end_effective_dt_tm',
'collection_dt_tm',
'admit_dt_tm',
'disch_dt_tm',
'first_activity_start_dt_tm',
'last_activity_end_dt_tm',
]
# Convert all dates to datetime format; the input data is mm/dd/yyyy
for column_name in datetime_column_names:
dataframe[column_name] = pd.to_datetime(
dataframe[column_name], errors='coerce',
# format='%m/%d/%Y',
)
collection_date = dataframe.loc[plot_index, 'collection_dt_tm']
day3 = pd.Timedelta(days=3)
fig, axis = plt.subplots(
2, 1, True, False,
figsize=(7, 7), dpi=150,
gridspec_kw={'height_ratios': [1, 2.5]},
)
# Generate the temperature plot - top portion
# Fever limit; at or above this value the temperature marker is drawn in red
temperature_limit = 38.0
# Mark the temperature limit (38 C) with a solid line
axis[0].plot_date(
[collection_date - day3, collection_date + day3],
[temperature_limit, temperature_limit],
'k-',
color='0.4',
)
# Plot all temperature information
for temperature_index in temperature.index:
temp_date = temperature.loc[temperature_index, 'event_end_dt_tm']
value = temperature.loc[temperature_index, 'result_val']
try:
# Above limit the marker is red
if value < temperature_limit:
markercolor = '0.4'
else:
markercolor = [0.8, 0.2, 0.2]
# Plot the dates - temperature information
axis[0].plot_date(
temp_date, value, 'wo',
markeredgecolor=markercolor,
markersize=6,
markeredgewidth=4,
)
except ValueError:
print('failure in plotting temperature')
# Plot catheter start and end
if not (pd.isnull(dataframe.loc[plot_index, 'first_activity_start_dt_tm'])
or pd.isnull(dataframe.loc[
plot_index, 'last_activity_end_dt_tm'])):
axis[1].plot_date(
[dataframe.loc[plot_index, 'first_activity_start_dt_tm'],
dataframe.loc[plot_index, 'last_activity_end_dt_tm']],
[0.1, 0.1],
'k-',
color='0.8',
linewidth=60,
)
catheter_information = ' - '.join((
str(dataframe.loc[plot_index, 'first_site_result']),
str(dataframe.loc[plot_index, 'first_catheter_type_result']),
))
axis[1].text(
collection_date - pd.Timedelta(days=2),
0.09,
catheter_information,
size=estimate_text_size(catheter_information),
)
# Plot nursing unit start and end
if not (pd.isnull(dataframe.loc[plot_index, 'beg_effective_dt_tm'])
or pd.isnull(dataframe.loc[plot_index, 'end_effective_dt_tm'])):
axis[1].plot_date(
[dataframe.loc[plot_index, 'beg_effective_dt_tm'],
dataframe.loc[plot_index, 'end_effective_dt_tm']],
[0.3, 0.3],
'k-',
color='0.8',
linewidth=60,
)
nursing_information = ' - '.join((
str(dataframe.loc[plot_index,
'nursing_unit_short_desc_at_collection']),
str(dataframe.loc[plot_index,
'med_service_desc_src_at_collection']),
))
axis[1].text(
collection_date - pd.Timedelta(days=2),
0.29,
nursing_information,
size=estimate_text_size(nursing_information),
)
# Helper line for organism and collection dates
axis[1].plot_date(
[dataframe.loc[plot_index, 'collection_dt_tm'] for _ in range(2)],
[0.5, 0.63],
'k-',
color='0.8',
)
# Plot all collection dates
for index in dataframe.index:
axis[1].plot_date(
[dataframe.loc[index, 'collection_dt_tm']],
[0.5],
'ko',
color='0.4',
markersize=16
)
# Corresponding organism
organism_information = dataframe.loc[plot_index, 'organism']
axis[1].text(
collection_date - datetime.timedelta(days=1.2),
0.65,
organism_information,
size=12,
)
# Axis settings
axis[0].set_ylabel('Temperature /C')
axis[0].set_ylim(35, 41)
axis[0].set_yticks(range(35, 42))
axis[0].set_yticklabels(['{}.0'.format(value) for value in range(35, 42)])
axis[0].grid(axis='y', linestyle='-')
collection_date =
|
pd.to_datetime(collection_date)
|
pandas.to_datetime
|
#! /usr/bin/env python3
import re
import math
import json
import inspect
import pkg_resources
import numpy as np
import pandas as pd
from time import time
from joblib import Parallel, delayed
from typing import Any, Dict, List, Optional, Union
from pathlib import Path
from pkg_resources import resource_filename
from pandas.api.types import CategoricalDtype
from .utils import fpath, _mywrap
pkg_resources.require("pandas>=0.21.0")
def convert_med(
pcts: Union[str, List[str]] = ['0001', '01', '05', '100'],
years: Union[int, List[int]] = range(2001, 2013),
data_types: Union[str, List[str]] = ['carc', 'opc', 'bsfab', 'med'],
rg_size: float = 2.5,
parquet_engine: str = 'pyarrow',
compression_type: str = 'SNAPPY',
manual_schema: bool = False,
ehic_xw: bool = True,
n_jobs: int = 6,
med_dta: str = '/disk/aging/medicare/data',
med_pq:
str = '/disk/agebulk3/medicare.work/doyle-DUA51929/barronk-DUA51929/raw/pq'
) -> None: # yapf: disable
"""Convert Medicare Stata files to parquet
Args:
pcts: percent samples to convert
years: file years to convert
data_types:
types of data files to convert
- ``bsfab`` (`Beneficiary Summary File, Base segment`_)
- ``bsfcc`` (`Beneficiary Summary File, Chronic Conditions segment`_)
- ``bsfcu`` (`Beneficiary Summary File, Cost & Use segment`_)
- ``bsfd`` (`Beneficiary Summary File, National Death Index segment`_)
- ``carc`` (`Carrier File, Claims segment`_)
- ``carl`` (`Carrier File, Line segment`_)
- ``den`` (Denominator File)
- ``dmec`` (`Durable Medical Equipment File, Claims segment`_)
- ``dmel`` (`Durable Medical Equipment File, Line segment`_)
- ``hhac`` (`Home Health Agency File, Claims segment`_)
- ``hhar`` (`Home Health Agency File, Revenue Center segment`_)
- ``hosc`` (`Hospice File, Claims segment`_)
- ``hosr`` (`Hospice File, Revenue Center segment`_)
- ``ipc`` (`Inpatient File, Claims segment`_)
- ``ipr`` (`Inpatient File, Revenue Center segment`_)
- ``med`` (`MedPAR File`_)
- ``opc`` (`Outpatient File, Claims segment`_)
- ``opr`` (`Outpatient File, Revenue Center segment`_)
- ``snfc`` (`Skilled Nursing Facility File, Claims segment`_)
- ``snfr`` (`Skilled Nursing Facility File, Revenue Center segment`_)
- ``xw`` (Crosswalks files for ``ehic`` - ``bene_id``)
.. _`Beneficiary Summary File, Base segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#base-abcd-segment_2
.. _`Beneficiary Summary File, Chronic Conditions segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#chronic-conditions-segment_2
.. _`Beneficiary Summary File, Cost & Use segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#cost-and-use-segment_1
.. _`Beneficiary Summary File, National Death Index segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#national-death-index-segment_1
.. _`Carrier File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/carrier-rif/#carrier-rif_1
.. _`Carrier File, Line segment`: https://kylebarron.github.io/medicare-documentation/resdac/carrier-rif/#line-file
.. _`Durable Medical Equipment File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/dme-rif/#durable-medical-equipment-rif_1
.. _`Durable Medical Equipment File, Line segment`: https://kylebarron.github.io/medicare-documentation/resdac/dme-rif/#line-file
.. _`Home Health Agency File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/hha-rif/#home-health-agency-rif_1
.. _`Home Health Agency File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/hha-rif/#revenue-center-file
.. _`Hospice File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/hospice-rif/#hospice-rif_1
.. _`Hospice File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/hospice-rif/#revenue-center-file
.. _`Inpatient File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/ip-rif/#inpatient-rif_1
.. _`Inpatient File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/ip-rif/#revenue-center-file
.. _`MedPAR File`: https://kylebarron.github.io/medicare-documentation/resdac/medpar-rif/#medpar-rif_1
.. _`Outpatient File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/op-rif/#outpatient-rif_1
.. _`Outpatient File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/op-rif/#revenue-center-file
.. _`Skilled Nursing Facility File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/snf-rif/#skilled-nursing-facility-rif_1
.. _`Skilled Nursing Facility File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/snf-rif/#revenue-center-file
rg_size: size in GB of each Parquet row group
parquet_engine: either 'fastparquet' or 'pyarrow'
compression_type: 'SNAPPY' or 'GZIP'
manual_schema: whether to create manual parquet schema. Doesn't
always work.
ehic_xw: Merge bene_id onto old files with ehic
n_jobs: number of processes to use
med_dta: top of tree for medicare stata files
med_pq: top of tree to output new parquet files
"""
if type(pcts) is str:
pcts = [pcts]
elif type(pcts) is list:
pass
else:
raise TypeError('pcts must be string or list of strings')
if type(years) is int:
years = [years]
elif type(years) is list:
pass
elif type(years) is range:
pass
else:
raise TypeError('years must be int, range, or list of ints')
if type(data_types) is str:
data_types = [data_types]
elif type(data_types) is list:
pass
else:
raise TypeError('data_types must be string or list of strings')
data_list = [[x, y, z] for x in pcts for y in years for z in data_types]
# Drop 100% carrier:
# data_list = [
# x for x in data_list if not (x[2] == 'carc') & (x[0] == '100')]
# Or:
# Replace 100% carrier with 20% carrier:
data_list = [['20', x[1], x[2]]
if ((x[2] == 'carc') & (x[0] == '100')) else x
for x in data_list]
# Make sure list is unique:
data_list = sorted([list(x) for x in set(tuple(y) for y in data_list)])
Parallel(n_jobs=n_jobs)(
delayed(_convert_med)(
*i,
rg_size=rg_size,
parquet_engine=parquet_engine,
compression_type=compression_type,
manual_schema=manual_schema,
ehic_xw=ehic_xw,
med_dta=med_dta,
med_pq=med_pq) for i in data_list)
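# Example call (a sketch; the percent sample, years and job count below are
# illustrative choices, not package defaults):
# convert_med(pcts='01', years=range(2008, 2011), data_types=['bsfab', 'med'],
#             n_jobs=4)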
def _convert_med(
pct: str,
year: int,
data_type: Union[str, List[str]],
rg_size: float = 2.5,
parquet_engine: str = 'pyarrow',
compression_type: str = 'SNAPPY',
manual_schema: bool = False,
ehic_xw: bool = True,
med_dta: str = '/disk/aging/medicare/data',
med_pq:
str = '/disk/agebulk3/medicare.work/doyle-DUA51929/barronk-DUA51929/raw/pq'
) -> None: # yapf: disable
"""Convert a single Medicare file to parquet format.
Args:
pct: percent sample to convert
year: year of data to convert
data_type:
type of data files to convert
- ``bsfab`` Beneficiary Summary File, Base segment
- ``bsfcc`` Beneficiary Summary File, Chronic Conditions segment
- ``bsfcu`` Beneficiary Summary File, Cost & Use segment
- ``bsfd`` Beneficiary Summary File, National Death Index segment
- ``carc`` Carrier File, Claims segment
- ``carl`` Carrier File, Line segment
- ``den`` Denominator File
- ``dmec`` Durable Medical Equipment File, Claims segment
- ``dmel`` Durable Medical Equipment File, Line segment
- ``hhac`` Home Health Agency File, Claims segment
- ``hhar`` Home Health Agency File, Revenue Center segment
- ``hosc`` Hospice File, Claims segment
- ``hosr`` Hospice File, Revenue Center segment
- ``ipc`` Inpatient File, Claims segment
- ``ipr`` Inpatient File, Revenue Center segment
- ``med`` MedPAR File
- ``opc`` Outpatient File, Claims segment
- ``opr`` Outpatient File, Revenue Center segment
- ``snfc`` Skilled Nursing Facility File, Claims segment
- ``snfr`` Skilled Nursing Facility File, Revenue Center segment
- ``xw`` Crosswalks files for ``ehic`` - ``bene_id``
rg_size: size in GB of each Parquet row group
parquet_engine: either 'fastparquet' or 'pyarrow'
compression_type: 'SNAPPY' or 'GZIP'
manual_schema: whether to create manual parquet schema. Doesn't
always work.
med_dta: canonical path for raw medicare dta files
med_pq: top of tree to output new parquet files
ehic_xw: Merge bene_id onto old files with ehic
Returns:
nothing. Writes parquet file to disk.
Raises:
NameError if data_type is not one of the above
"""
if type(pct) != str:
raise TypeError('pct must be str')
if type(year) != int:
raise TypeError('year must be int')
infile = fpath(percent=pct, year=year, data_type=data_type, dta=True)
outfile = fpath(
percent=pct, year=year, data_type=data_type, dta=False, pq_path=med_pq)
if not data_type.startswith('bsf'):
# TODO Refactor this into separate function.
path = resource_filename(
'medicare_utils', f'metadata/xw/{data_type}.json')
try:
with open(path) as f:
varnames = json.load(f)
except OSError:
varnames = {}
rename_dict = {}
for varname, names in varnames.items():
n = {k: v for k, v in names.items() if k == str(year)}
if n:
rename_dict[n[str(year)]['name']] = varname
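# Illustrative (assumed) shape of the variable-name crosswalk JSON read above:
# {"bene_id": {"2001": {"name": "BENE_ID"}, "2002": {"name": "bid"}}, ...}
# so rename_dict ends up mapping the year-specific raw column name to the
# canonical variable name.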
if rename_dict:
# Remove items from dict that map to duplicate values
# Can't save a parquet file where multiple cols have same name
rev_rename_dict = {}
for key, value in rename_dict.items():
rev_rename_dict.setdefault(value, set()).add(key)
dups = [key for key, val in rev_rename_dict.items() if len(val) > 1]
for k, v in rename_dict.copy().items():
if v in dups:
rename_dict.pop(k)
else:
print(f'Year not in variable dictionary: {year}')
rename_dict = None
else:
rename_dict = None
# Make folder path if it doesn't exist
folder = Path(outfile).parents[0]
folder.mkdir(exist_ok=True, parents=True)
msg = f"""\
Starting {data_type} conversion
- Percent: {pct}
- Year: {year}
"""
print(_mywrap(msg))
if ehic_xw and (year <= 2005) and not (data_type.startswith('bsf')):
ehic_xw = fpath(pct, year, 'xw_bsf', pq_path=med_pq)
if not Path(ehic_xw).is_file():
ehic_xw = fpath(pct, year, 'xw_bsf', dta=True, dta_path=med_dta)
else:
ehic_xw = None
try:
convert_file(
infile=infile,
outfile=outfile,
rename_dict=rename_dict,
rg_size=rg_size,
parquet_engine=parquet_engine,
compression_type=compression_type,
manual_schema=manual_schema,
ehic_xw=ehic_xw)
except:
pass
def convert_file(
infile: str,
outfile: str,
rename_dict: Dict[str, str] = None,
rg_size: float = 2.5,
parquet_engine: str = 'pyarrow',
compression_type: str = 'SNAPPY',
manual_schema: bool = False,
ehic_xw: Optional[str] = None) -> None:
"""Convert arbitrary Stata file to Parquet format
Args:
infile: path of file to read from
outfile: path of file to export to
rename_dict: keys should be initial variable names; values should
be new variable names
rg_size: Size in GB of the individual row groups
parquet_engine: either ``pyarrow`` or ``fastparquet``
compression_type: Compression algorithm to use. Can be ``SNAPPY`` or
``GZIP``.
manual_schema: Create parquet schema manually. For use with
pyarrow; doesn't always work
ehic_xw: Merge bene_id onto old files with ehic
Returns:
Writes .parquet file to disk.
"""
if parquet_engine == 'pyarrow':
import pyarrow as pa
import pyarrow.parquet as pq
elif parquet_engine == 'fastparquet':
import fastparquet as fp
t0 = time()
infile = Path(infile)
# File name without suffix
infile_stub = infile.stem
# Extension
infile_type = infile.suffix[1:]
# Set row group size. The following makes an even multiple of row groups
# as close as possible to the given `rg_size`
file_size = infile.stat().st_size / (1024 ** 3)
n_rg = round(file_size / rg_size)
if n_rg == 0:
n_rg += 1
nrow_total = pd.read_stata(infile, iterator=True).nobs
nrow_rg = math.ceil(nrow_total / n_rg)
gb_per_rg = file_size / n_rg
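# Worked example (hypothetical numbers): a 7.6 GB .dta file with rg_size=2.5
# gives n_rg = round(7.6 / 2.5) = 3 row groups of ~2.53 GB each; with
# 30,000,000 rows, nrow_rg = ceil(30,000,000 / 3) = 10,000,000 rows per group.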
msg = f"""\
Row groups:
- {n_rg} of size {gb_per_rg:.2f} GB
Beginning scanning dtypes of file:
- infile: {infile_stub}.{infile_type}
- time: {(time() - t0) / 60:.2f} minutes
"""
print(_mywrap(msg))
if parquet_engine == 'pyarrow':
dtypes = _scan_file(infile, categorical=False)
elif parquet_engine == 'fastparquet':
dtypes = _scan_file(infile, categorical=True)
if rename_dict is not None:
for old_name, new_name in rename_dict.items():
try:
dtypes[new_name] = dtypes.pop(old_name)
except KeyError:
pass
msg = f"""\
Finished scanning dtypes of file
- infile: {infile_stub}.{infile_type}
- time: {(time() - t0) / 60:.2f} minutes
"""
print(_mywrap(msg))
if ehic_xw:
ehic_xw = Path(ehic_xw)
if ehic_xw.suffix == '.parquet':
xw = pd.read_parquet(ehic_xw, columns=['ehic', 'bene_id'])
elif ehic_xw.suffix == '.dta':
xw = pd.read_stata(ehic_xw, columns=['ehic', 'bene_id'])
xw = xw.set_index('ehic')
itr =
|
pd.read_stata(infile, chunksize=nrow_rg)
|
pandas.read_stata
|
"""
The SRTM dataset is a 90 m DEM computed from satellite data.
Slope and aspect are calculated using:
<NAME>., 1981. Hill shading and the reflectance map. Proceedings of
the IEEE 69, 14–47. doi:10.1109/PROC.1981.11918
"""
import numpy as np
import pandas as pd
import xarray as xr
import richdem as rd
def generate_slope_aspect():
"""Generate slope and aspect from DEM."""
dem_ds = xr.open_dataset(
'/Users/kenzatazi/Downloads/GMTED2010_15n015_00625deg.nc')
dem_ds = dem_ds.assign_coords(
{'nlat': dem_ds.latitude, 'nlon': dem_ds.longitude})
dem_ds = dem_ds.sel(nlat=slice(29, 34), nlon=slice(75, 83))
elev_arr = dem_ds.elevation.values
elev_rd_arr = rd.rdarray(elev_arr, no_data=np.nan)
slope_rd_arr = rd.TerrainAttribute(elev_rd_arr, attrib='slope_riserun')
slope_arr = np.array(slope_rd_arr)
aspect_rd_arr = rd.TerrainAttribute(elev_rd_arr, attrib='aspect')
aspect_arr = np.array(aspect_rd_arr)
dem_ds['slope'] = (('nlat', 'nlon'), slope_arr)
dem_ds['aspect'] = (('nlat', 'nlon'), aspect_arr)
streamlined_dem_ds = dem_ds[['elevation', 'slope', 'aspect']]
streamlined_dem_ds.to_netcdf('_Data/SRTM_data.nc')
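# Note (an assumption about the richdem output, not stated in the original):
# 'slope_riserun' returns slope as rise/run; if slope in degrees is needed it
# can be obtained with np.degrees(np.arctan(slope_arr)).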
def find_slope(station):
"""Return slope for given station."""
dem_ds = xr.open_dataset('Data/SRTM_data.nc')
all_station_dict =
|
pd.DataFrame('Data/gauge_info.csv')
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 13:59:04 2021
@author: saidsa
"""
from copy import deepcopy
import pandas as pd
import numpy as np
from numpy.linalg import inv
from Stock import Stock
from scipy.stats import norm
def normalize (x):
#Scaling positive values so that they sum up to 1
x[x>0.001] = x[x>0.001] / x[x>0.001].sum()
#Scaling negative values so that they sum up to -1
x[x<-0.001] = x[x<-0.001] / - x[x<-0.001].sum()
return x
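# Illustrative example (toy numbers): normalize(pd.Series([0.2, 0.3, -0.1]))
# rescales the positive entries to sum to 1 (0.4, 0.6) and the negative
# entries to sum to -1 (-1.0), giving a 1x-long / 1x-short exposure.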
def stock_obj_arr_to_return_mat(stock_obj_arr):
output_dict = {}
for s in stock_obj_arr:
output_dict[s.ticker] = s['PriceClose']
price_df = pd.DataFrame(output_dict)
returns_df = price_df.pct_change(1)
return returns_df.dropna()
def stock_obj_arr_to_signal_mat(stock_obj_arr, signal_func):
output_dict = {}
for s in stock_obj_arr:
output_dict[s.ticker] = signal_func(s)
signal_df = pd.DataFrame(output_dict)
return signal_df.dropna()
def return_mat_to_rolling_var_covar_dict(returns_mat, window=126,
shrinkage_factor=0.8):
var_covar_ts = {}
for i in range(len(returns_mat)-window):
dt = returns_mat.index[i+window]
ret_mat = returns_mat.iloc[i:i+window, :]
var_cov = ret_mat.cov() * 252
# Apply the shrinkage factor:
# shrink the off-diagonal terms (covariances) toward zero while keeping
# the variances unchanged, which makes the matrix easier to invert
var_cov = var_cov * shrinkage_factor + \
(1.0-shrinkage_factor)*np.diag(np.diag(var_cov))
var_covar_ts[dt] = var_cov
return var_covar_ts
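# Worked shrinkage example (toy 2x2 matrix): with shrinkage_factor=0.8 and
# annualized var_cov = [[0.04, 0.02], [0.02, 0.09]], the shrunk matrix is
# 0.8 * var_cov + 0.2 * diag(diag(var_cov)) = [[0.04, 0.016], [0.016, 0.09]],
# i.e. variances are unchanged and covariances are scaled by 0.8.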
def invert_var_covar_dict(var_covar_ts_dict):
inv_var_covar_ts = deepcopy(var_covar_ts_dict)
for dt, var_cov_mat in var_covar_ts_dict.items():
# Update the values in place inside inv_var_covar_ts so the DataFrame
# structure (index/columns) is preserved when taking the inverse
inv_var_covar_ts[dt].loc[:, :] = inv(var_cov_mat)
return inv_var_covar_ts
def MVOpt_LS_Fixed_risk(r, Sig, s, Sig_inv = None):
# r is the returns vector for a given day
# Sig is the var_covar mat for a given day
# s is a fixed level of risk for the portfolio
# Given a returns vector, a var-covar matrix, and a specified level of risk,
# construct a long-short portfolio that maximizes expected return with
# respect to the weights, subject to the weights summing to 0 (constraint 1)
# and the portfolio variance being equal to s (constraint 2).
if Sig_inv is None:
Sig_inv = inv(Sig)
o = np.ones_like(r)
lam_2_num = o.T.dot(Sig_inv).dot(r)
lam_2_den = o.T.dot(Sig_inv).dot(o)
lam_2 = lam_2_num / lam_2_den
r_lam2_1 = r - lam_2 * o
lam_1_mat_prod = r_lam2_1.T.dot(Sig_inv).dot(r_lam2_1)
lam_1 = np.sqrt(lam_1_mat_prod/(4 * s))
w = (1/(2*lam_1)) * Sig_inv.dot(r_lam2_1)
return w
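# Sanity-check sketch (toy inputs, not part of the original module):
# r_toy = np.array([0.05, 0.02, -0.01])
# Sig_toy = np.diag([0.04, 0.09, 0.16])
# w_toy = MVOpt_LS_Fixed_risk(r_toy, Sig_toy, s=0.35)
# The closed-form solution satisfies w_toy.sum() == 0 (dollar neutral) and
# w_toy @ Sig_toy @ w_toy == s (the requested risk level).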
def MVOpt_LS_Fixed_risk_beta(r, Sig, s, beta, Sig_inv = None):
# Adds a 3rd constraint: weights . beta = 0 => a beta-hedged portfolio.
# The beta vector has the same shape as returns: for a given day we have a
# vector of betas and a vector of returns.
if Sig_inv is None:
Sig_inv = inv(Sig)
o = np.ones_like(r)
o_sig_o = o.T.dot(Sig_inv).dot(o)
o_sig_b = o.T.dot(Sig_inv).dot(beta)
b_sig_o = beta.T.dot(Sig_inv).dot(o)
b_sig_b = beta.T.dot(Sig_inv).dot(beta)
o_sig_r = o.T.dot(Sig_inv).dot(r)
b_sig_r = beta.T.dot(Sig_inv).dot(r)
lam_2, lam_3 = inv([[o_sig_o, o_sig_b], [b_sig_o, b_sig_b]]).dot([o_sig_r, b_sig_r])
r_lam_2_lam_3 = r - (lam_2 * o) - (lam_3 * beta)
r_sig_r = r_lam_2_lam_3.T.dot(Sig_inv).dot(r_lam_2_lam_3)
lam_1 = np.sqrt(r_sig_r / (4 * s))
w = (1/ (2 * lam_1)) * Sig_inv.dot(r_lam_2_lam_3)
return w
def MVOpt_L_Min_Var(Sig, Sig_inv = None):
if Sig_inv is None:
Sig_inv = inv(Sig)
o = np.ones(Sig.shape[0])
lam_1 = 1/ (o.T.dot(Sig_inv).dot(o))
w = lam_1 * Sig_inv.dot(o)
return w
class MeanVarianceOptimization(object):
def __init__(self, stock_arr, s = 0.35, shrinkage_factor=0.80):
self.stock_arr = [Stock(s) if isinstance(s, str) else s for s in stock_arr]
self.s = s
self.shrinkage_factor = shrinkage_factor
self.returns_df = stock_obj_arr_to_return_mat(self.stock_arr)
self.returns_shifted_df = self.returns_df.shift(1)
self.var_covar_ts = return_mat_to_rolling_var_covar_dict(self.returns_df,
window=126,
shrinkage_factor=self.shrinkage_factor)
self.inv_var_covar_ts = invert_var_covar_dict(var_covar_ts_dict=self.var_covar_ts)
self.expected_returns_df = self.returns_df.rolling(window = 126).mean().shift(1).dropna()*252
self.weights_df = self.build_weights()
def build_weights(self):
weights_dict = {}
for dt, Sig_inv in self.inv_var_covar_ts.items():
# For a given day, construct the portfolio weights
r = self.expected_returns_df.loc[dt,:]
Sig = self.var_covar_ts[dt]
w = MVOpt_LS_Fixed_risk(r = r, Sig = Sig, s = self.s, Sig_inv = Sig_inv)
weights_dict[dt] = w
weights_df = pd.DataFrame(weights_dict).T
return weights_df
class MinVarianceOptimization(object):
def __init__(self, stock_arr, shrinkage_factor=0.80, window=126):
self.stock_arr = [Stock(s) if isinstance(s, str) else s for s in stock_arr]
self.shrinkage_factor = shrinkage_factor
self.window = window
self.returns_df = stock_obj_arr_to_return_mat(self.stock_arr)
self.returns_shifted_df = self.returns_df.shift(1)
self.var_covar_ts = return_mat_to_rolling_var_covar_dict(self.returns_df,
window=self.window,
shrinkage_factor=self.shrinkage_factor)
self.inv_var_covar_ts = invert_var_covar_dict(var_covar_ts_dict=self.var_covar_ts)
self.weights_df = self.build_weights()
def build_weights(self):
weights_dict = {}
for dt, Sig_inv in self.inv_var_covar_ts.items():
# For a given day, construct the portfolio weights
Sig = self.var_covar_ts[dt]
w = MVOpt_L_Min_Var(Sig = Sig, Sig_inv = Sig_inv)
weights_dict[dt] = w
weights_df = pd.DataFrame(weights_dict).T
return weights_df
class SimpleBlackLitterman(object):
def __init__(self, stock_arr, signal_func_arr, signal_view_ret_arr,
A=1.0, tau=1.0, shrinkage_factor=0.80):
self.stock_arr = [Stock(s) if isinstance(s, str) else s for s in stock_arr]
self.signal_func_arr = signal_func_arr
self.signal_view_ret_arr = signal_view_ret_arr
self.A = A
self.tau = tau
self.shrinkage_factor = shrinkage_factor
self.returns_df = stock_obj_arr_to_return_mat(self.stock_arr)
self.returns_shifted_df = self.returns_df.shift(1)
self.weights_df = self.build_weights()
self.weights_shifted_df = self.weights_df.shift(1)
self.var_covar_ts = return_mat_to_rolling_var_covar_dict(self.returns_df,
window=126,
shrinkage_factor=self.shrinkage_factor)
self.inv_var_covar_ts = invert_var_covar_dict(var_covar_ts_dict=self.var_covar_ts)
self.implied_returns_df = self.generate_implied_returns()
self.signal_df_dict = {'signal_'+str(i):self.build_signal_df(sf).dropna() \
for i, sf in enumerate(self.signal_func_arr)}
self.signal_ts_dict = self.generate_signal_ts_dict()
self.link_mat_ts = self.generate_link_mats()
self.view_var_covar_ts = self.generate_view_var_covar_mats()
self.view_inv_var_covar_ts = self.generate_view_inv_var_covar_mats()
self.black_litterman_weights_df = self.generate_black_litterman_weights()
def build_weights(self):
output_dict = {}
for s in self.stock_arr:
output_dict[s.ticker] = s['PriceClose'] * s['ShareIssued']
marketcap_df = pd.DataFrame(output_dict).dropna()
weights_df = marketcap_df.apply(lambda x: normalize(x), axis = 1)
return weights_df
def generate_implied_returns(self):
implied_returns_dict = {}
for dt, var_cov_mat in self.var_covar_ts.items():
if dt in self.weights_shifted_df.index:
weigts_arr = self.weights_shifted_df.loc[dt, :]
implied_returns_arr = self.A*var_cov_mat.dot(weigts_arr)
implied_returns_dict[dt] = implied_returns_arr
implied_returns_df =
|
pd.DataFrame(implied_returns_dict)
|
pandas.DataFrame
|
import unittest
import pandas as pd
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_GCToo_logger
import cmapPy.pandasGEXpress.GCToo as GCToo
logger = logging.getLogger(setup_GCToo_logger.LOGGER_NAME)
class TestGctoo(unittest.TestCase):
def test_init(self):
# Create test data
meth_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
cov_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
row_metadata_df = pd.DataFrame([["rhd_A", "rhd_B"], ["rhd_C", "rhd_D"]],
index=["A", "B"], columns=["rhd1", "rhd2"])
col_metadata_df = pd.DataFrame(["chd_a", "chd_b", "chd_c"],
index=["a", "b", "c"], columns=["chd1"])
# happy path, no multi-index
my_gctoo1 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df)
self.assertTrue(my_gctoo1.multi_index_meth_df == None,
'Expected no multi-index DataFrame but found {}'.format(my_gctoo1.multi_index_meth_df))
# happy path, with multi-index
my_gctoo2 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df, make_multiindex = True)
# doesn't pass due to visibility of pd.core?
# self.assertTrue(isinstance(my_gctoo2.multi_index_meth_df.index, pd.core.index.MultiIndex),
# "Expected a multi_index DataFrame but instead found {}". format(my_gctoo2.multi_index_meth_df))
#happy path, no metadata provided
my_gctoo3 = GCToo.GCToo(meth_df, cov_df)
self.assertIsNotNone(my_gctoo3.row_metadata_df)
self.assertIsNotNone(my_gctoo3.col_metadata_df)
def test__setattr__(self):
# case 1: not init yet, should just run __init__
# Create test data
meth_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
cov_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
row_metadata_df = pd.DataFrame([["rhd_A", "rhd_B"], ["rhd_C", "rhd_D"]],
index=["A", "B"], columns=["rhd1", "rhd2"])
col_metadata_df = pd.DataFrame(["chd_a", "chd_b", "chd_c"],
index=["a", "b", "c"], columns=["chd1"])
## happy path, no multi-index
my_gctoo1 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,col_metadata_df=col_metadata_df)
## reset row_metadata_df: happy case
new_rid_order = ["B", "A"]
new_row_meta1 = my_gctoo1.row_metadata_df.copy().reindex(new_rid_order)
# shouldn't have any problems re-setting row_meta
my_gctoo1.row_metadata_df = new_row_meta1
pd.util.testing.assert_frame_equal(my_gctoo1.row_metadata_df, row_metadata_df)
## reset row_metadata_df: to not a DF
new_row_meta2 = "this is my new row metadata"
with self.assertRaises(Exception) as context:
my_gctoo1.row_metadata_df = new_row_meta2
self.assertTrue("expected Pandas DataFrame, got something else" in str(context.exception))
## reset row_metadata_df: non-matching index values
new_row_meta3 = my_gctoo1.row_metadata_df.copy()
new_row_meta3.index = ["thing1", "thing2"]
with self.assertRaises(Exception) as context:
my_gctoo1.row_metadata_df = new_row_meta3
self.assertTrue("The rids are inconsistent between data_df and row_metadata_df" in str(context.exception))
## reset row_metadata_df: not unique index values
new_row_meta4 = my_gctoo1.row_metadata_df.copy()
new_row_meta4.index = ["A", "A"]
with self.assertRaises(Exception) as context:
my_gctoo1.row_metadata_df = new_row_meta4
self.assertTrue("Index values must be unique" in str(context.exception))
my_gctoo2 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df)
## reset col_metadata_df: happy case
new_cid_order = ["c", "a", "b"]
new_col_meta1 = my_gctoo2.col_metadata_df.copy().reindex(new_cid_order)
# shouldn't have any problems
my_gctoo2.col_metadata_df = new_col_meta1
pd.util.testing.assert_frame_equal(my_gctoo2.col_metadata_df, col_metadata_df)
## reset col_metadata_df: to not a DF
new_col_meta2 = "this is my new col metadata"
with self.assertRaises(Exception) as context:
my_gctoo2.col_metadata_df = new_col_meta2
self.assertTrue("expected Pandas DataFrame, got something else" in str(context.exception))
## reset col_metadata_df: non-matching index values
new_col_meta3 = my_gctoo2.col_metadata_df.copy()
new_col_meta3.index = ["thing1", "thing2", "thing3"]
with self.assertRaises(Exception) as context:
my_gctoo2.col_metadata_df = new_col_meta3
self.assertTrue("The cids are inconsistent between data_df and col_metadata_df" in str(context.exception))
## reset col_metadata_df: not unique index values
new_col_meta4 = my_gctoo2.col_metadata_df.copy()
new_col_meta4.index = ["a", "b", "a"]
with self.assertRaises(Exception) as context:
my_gctoo2.col_metadata_df = new_col_meta4
self.assertTrue("Index values must be unique" in str(context.exception))
my_gctoo3 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df)
## reset data_df: happy case
new_data_df1_tmp_m = my_gctoo3.meth_df.copy().reindex(new_rid_order)
new_data_df1_tmp_c = my_gctoo3.cov_df.copy().reindex(new_rid_order)
new_data_df1_m = new_data_df1_tmp_m.reindex(columns=new_cid_order)
new_data_df1_c = new_data_df1_tmp_c.reindex(columns=new_cid_order)
# shouldn't have problems
my_gctoo3.meth_df = new_data_df1_m
my_gctoo3.cov_df = new_data_df1_c
# resetting data_df means rearranging the row and col meta dfs
pd.util.testing.assert_frame_equal(my_gctoo3.meth_df, new_data_df1_m)
pd.util.testing.assert_frame_equal(my_gctoo3.cov_df, new_data_df1_c)
print(my_gctoo3.col_metadata_df)
print(new_col_meta1)
print(my_gctoo3.row_metadata_df)
print(new_row_meta1)
pd.util.testing.assert_frame_equal(my_gctoo3.col_metadata_df, new_col_meta1)
pd.util.testing.assert_frame_equal(my_gctoo3.row_metadata_df, new_row_meta1)
## reset data_df: row_meta doesn't match
new_data_df2 = my_gctoo3.meth_df.copy()
new_data_df2.index = ["blah", "boop"]
with self.assertRaises(Exception) as context:
my_gctoo3.meth_df = new_data_df2
self.assertTrue("The rids are inconsistent between data_df and row_metadata_df" in str(context.exception))
## reset data_df: col_meta doesn't match
new_data_df3 = my_gctoo3.meth_df.copy()
new_data_df3.columns = ["x", "y", "z"]
with self.assertRaises(Exception) as context:
my_gctoo3.meth_df = new_data_df3
self.assertTrue("The cids are inconsistent between data_df and col_metadata_df" in str(context.exception))
my_gctoo4 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df, make_multiindex= True)
## try to reset multi-index (shouldn't work)
new_multi_index = my_gctoo4.multi_index_meth_df.copy()
with self.assertRaises(Exception) as context:
my_gctoo1.multi_index_meth_df = new_multi_index
self.assertTrue("Cannot reassign value of multi_index_meth_df attribute;" in str(context.exception))
## reset src
my_gctoo1.src = "other_src"
self.assertTrue(my_gctoo1.src == "other_src",
("src should just be re-set with object's set_attr method but doesn't appear to be") +
(" expected {} but found {}").format("other_src", my_gctoo1.src))
## reset version
my_gctoo1.version = "other_version"
self.assertTrue(my_gctoo1.version == "other_version",
("version should just be re-set with object's set_attr method but doesn't appear to be") +
("expected {} but found {}").format("other_version", my_gctoo1.version))
## needs rearrangement upon initializing
my_gctoo5 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df, col_metadata_df=new_col_meta1)
pd.util.testing.assert_frame_equal(my_gctoo5.col_metadata_df, col_metadata_df)
def test_check_df(self):
not_unique_data_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "a"])
not_unique_rhd = pd.DataFrame([["rhd_A", "rhd_B"], ["rhd_C", "rhd_D"]],
index=["A", "B"], columns=["rhd1", "rhd1"])
# cids in data_df are not unique
with self.assertRaises(Exception) as context:
GCToo.GCToo(meth_df=not_unique_data_df,
row_metadata_df=pd.DataFrame(index=["A","B"]),
col_metadata_df=pd.DataFrame(index=["a","b","c"]))
print(str(not_unique_data_df.columns))
self.assertTrue(str(not_unique_data_df.columns) in str(context.exception))
# rhds are not unique in row_metadata_df
with self.assertRaises(Exception) as context:
GCToo.GCToo(meth_df=pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=["A","B"], columns=["a","b","c"]),
row_metadata_df=not_unique_rhd,
col_metadata_df=pd.DataFrame(index=["a","b","c"]))
self.assertTrue("'rhd1' 'rhd1'" in str(context.exception))
def test_assemble_multi_index_df(self):
# TODO: Add test of only row ids present as metadata
# TODO: Add test of only col ids present as metadata
g = GCToo.GCToo(meth_df = pd.DataFrame({10:range(13,16), 11:range(16,19), 12:range(19,22)}, index=range(4,7)),
cov_df = pd.DataFrame({10:range(13,16), 11:range(16,19), 12:range(19,22)}, index=range(4,7)),
row_metadata_df=pd.DataFrame({"a":range(3)}, index=range(4,7)),
col_metadata_df=pd.DataFrame({"b":range(7,10)}, index=range(10,13)),
make_multiindex = True)
assert "a" in g.multi_index_meth_df.index.names, g.multi_index_meth_df.index.names
assert "rid" in g.multi_index_meth_df.index.names, g.multi_index_meth_df.index.names
assert "b" in g.multi_index_meth_df.columns.names, g.multi_index_meth_df.columns.names
assert "cid" in g.multi_index_meth_df.columns.names, g.multi_index_meth_df.columns.names
r = g.multi_index_meth_df.xs(7, level="b", axis=1)
logger.debug("r: {}".format(r))
assert r.xs(4, level="rid", axis=0).values[0][0] == 13, r.xs(4, level="rid", axis=0).values[0][0]
assert r.xs(5, level="rid", axis=0).values[0][0] == 14, r.xs(5, level="rid", axis=0).values[0][0]
assert r.xs(6, level="rid", axis=0).values[0][0] == 15, r.xs(6, level="rid", axis=0).values[0][0]
def test_multi_index_df_to_component_dfs(self):
mi_df_index = pd.MultiIndex.from_arrays(
[["D", "E"], [-666, -666], ["dd", "ee"]],
names=["rid", "rhd1", "rhd2"])
mi_df_columns = pd.MultiIndex.from_arrays(
[["A", "B", "C"], [1, 2, 3], ["Z", "Y", "X"]],
names=["cid", "chd1", "chd2"])
mi_df = pd.DataFrame(
[[1, 3, 5], [7, 11, 13]],
index=mi_df_index, columns=mi_df_columns)
e_row_metadata_df = pd.DataFrame(
[[-666, "dd"], [-666, "ee"]],
index=pd.Index(["D", "E"], name="rid"),
columns=pd.Index(["rhd1", "rhd2"], name="rhd"))
e_col_metadata_df = pd.DataFrame(
[[1, "Z"], [2, "Y"], [3, "X"]],
index=pd.Index(["A", "B", "C"], name="cid"),
columns=pd.Index(["chd1", "chd2"], name="chd"))
e_data_df = pd.DataFrame(
[[1, 3, 5], [7, 11, 13]],
index=pd.Index(["D", "E"], name="rid"),
columns=pd.Index(["A", "B", "C"], name="cid"))
(data_df, row_df, col_df) = GCToo.multi_index_df_to_component_dfs(mi_df)
self.assertTrue(col_df.equals(e_col_metadata_df))
self.assertTrue(row_df.equals(e_row_metadata_df))
self.assertTrue(data_df.equals(e_data_df))
# edge case: if the index (or column) of the multi-index has only one
# level, it becomes a regular index
mi_df_index_plain = pd.MultiIndex.from_arrays(
[["D", "E"]], names=["rid"])
mi_df2 = pd.DataFrame(
[[1, 3, 5], [7, 11, 13]],
index=mi_df_index_plain, columns=mi_df_columns)
# row df should be empty
e_row_df2 =
|
pd.DataFrame(index=["D", "E"])
|
pandas.DataFrame
|
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
# invalid
msg = "invalid unit abbreviation"
with pytest.raises(ValueError, match=msg):
Timedelta("1foo")
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
Timedelta("foo")
def test_full_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
d1 = np.timedelta64(1, "D")
assert Timedelta("1days") == conv(d1)
assert Timedelta("1days,") == conv(d1)
assert Timedelta("- 1days,") == -conv(d1)
assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s"))
assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.01") == conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s"))
assert Timedelta("1days, 06:00:01") == conv(
d1 + np.timedelta64(6 * 3600 + 1, "s")
)
assert Timedelta("1days, 06:00:01.01") == conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
# invalid
msg = "have leftover units"
with pytest.raises(ValueError, match=msg):
Timedelta("- 1days, 00")
def test_pickle(self):
v = Timedelta("1 days 10:11:12.0123456")
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, "D")
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)]
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, "ns")
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == iNaT + 1
assert max_td.value == lib.i8max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, "ns")) is NaT
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
min_td - Timedelta(2, "ns")
with pytest.raises(OverflowError, match=msg):
max_td + Timedelta(1, "ns")
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, "ns")
assert td is NaT
with pytest.raises(OverflowError, match=msg):
Timedelta(min_td.value - 2, "ns")
with pytest.raises(OverflowError, match=msg):
Timedelta(max_td.value + 1, "ns")
def test_total_seconds_precision(self):
# GH 19458
assert Timedelta("30S").total_seconds() == 30.0
assert Timedelta("0").total_seconds() == 0.0
assert Timedelta("-2S").total_seconds() == -2.0
assert Timedelta("5.324S").total_seconds() == 5.324
assert (
|
Timedelta("30S")
|
pandas.Timedelta
|
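The row above targets pandas.Timedelta. A minimal, self-contained sketch of the constructor and total_seconds() behaviour the truncated test exercises; the values are illustrative only.

import pandas as pd

td = pd.Timedelta("30S")                        # string construction
assert td == pd.Timedelta(30, unit="s")         # numeric construction with a unit
assert td.total_seconds() == 30.0               # exact float seconds (GH 19458)
assert pd.Timedelta("-2S").total_seconds() == -2.0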
"""
This module contains the Connector class.
Every data fetching action should begin with instantiating this Connector class.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from jinja2 import Environment, StrictUndefined, Template
from requests import Request, Response, Session
from ..errors import UnreachableError
from .config_manager import config_directory, ensure_config
from .errors import RequestError
from .implicit_database import ImplicitDatabase, ImplicitTable
INFO_TEMPLATE = Template(
"""{% for tb in tbs.keys() %}
Table {{dbname}}.{{tb}}
Parameters
----------
{% if tbs[tb].required_params %}{{", ".join(tbs[tb].required_params)}} required {% endif %}
{% if tbs[tb].optional_params %}{{", ".join(tbs[tb].optional_params)}} optional {% endif %}
Examples
--------
>>> dc.query({{", ".join(["\\\"{}\\\"".format(tb)] + tbs[tb].joined_query_fields)}})
>>> dc.show_schema("{{tb}}")
{% endfor %}
"""
)
class Connector:
"""
    This is the main class of the data_connector component.
    Initialize the Connector class as shown in the example code below.
Parameters
----------
config_path
The path to the config. It can be hosted, e.g. "yelp", or from
local filesystem, e.g. "./yelp"
auth_params
The parameter for authentication, e.g. OAuth2
kwargs
Additional parameters
Example
-------
>>> from dataprep.data_connector import Connector
>>> dc = Connector("yelp", auth_params={"access_token":access_token})
"""
_impdb: ImplicitDatabase
_vars: Dict[str, Any]
_auth_params: Dict[str, Any]
_session: Session
_jenv: Environment
def __init__(
self,
config_path: str,
auth_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
self._session = Session()
if (
config_path.startswith(".")
or config_path.startswith("/")
or config_path.startswith("~")
):
path = Path(config_path).resolve()
else:
# From Github!
ensure_config(config_path)
path = config_directory() / config_path
self._impdb = ImplicitDatabase(path)
self._vars = kwargs
self._auth_params = auth_params or {}
self._jenv = Environment(undefined=StrictUndefined)
def _fetch(
self,
table: ImplicitTable,
auth_params: Optional[Dict[str, Any]],
kwargs: Dict[str, Any],
) -> Response:
method = table.method
url = table.url
req_data: Dict[str, Dict[str, Any]] = {
"headers": {},
"params": {},
"cookies": {},
}
merged_vars = {**self._vars, **kwargs}
if table.authorization is not None:
table.authorization.build(req_data, auth_params or self._auth_params)
for key in ["headers", "params", "cookies"]:
if getattr(table, key) is not None:
instantiated_fields = getattr(table, key).populate(
self._jenv, merged_vars
)
req_data[key].update(**instantiated_fields)
if table.body is not None:
# TODO: do we support binary body?
instantiated_fields = table.body.populate(self._jenv, merged_vars)
if table.body_ctype == "application/x-www-form-urlencoded":
req_data["data"] = instantiated_fields
elif table.body_ctype == "application/json":
req_data["json"] = instantiated_fields
else:
raise UnreachableError
resp: Response = self._session.send( # type: ignore
Request(
method=method,
url=url,
headers=req_data["headers"],
params=req_data["params"],
json=req_data.get("json"),
data=req_data.get("data"),
cookies=req_data["cookies"],
).prepare()
)
if resp.status_code != 200:
raise RequestError(status_code=resp.status_code, message=resp.text)
return resp
@property
def table_names(self) -> List[str]:
"""
Return all the names of the available tables in a list.
Note
----
We abstract each website as a database containing several tables.
        For example, in Spotify, we have an artist table and an album table.
"""
return list(self._impdb.tables.keys())
def info(self) -> None:
"""
Show the basic information and provide guidance for users to issue queries.
"""
# get info
tbs: Dict[str, Any] = {}
for cur_table in self._impdb.tables:
table_config_content = self._impdb.tables[cur_table].config
params_required = []
params_optional = []
example_query_fields = []
count = 1
for k, val in table_config_content["request"]["params"].items():
if isinstance(val, bool) and val:
params_required.append(k)
example_query_fields.append(f"""{k}="word{count}\"""")
count += 1
elif isinstance(val, bool):
params_optional.append(k)
tbs[cur_table] = {}
tbs[cur_table]["required_params"] = params_required
tbs[cur_table]["optional_params"] = params_optional
tbs[cur_table]["joined_query_fields"] = example_query_fields
# show table info
print(
INFO_TEMPLATE.render(
ntables=len(self.table_names), dbname=self._impdb.name, tbs=tbs
)
)
def show_schema(self, table_name: str) -> pd.DataFrame:
"""
This method shows the schema of the table that will be returned,
so that the user knows what information to expect.
Parameters
----------
table_name
The table name.
Returns
-------
pd.DataFrame
The returned data's schema.
Note
----
The schema is defined in the configuration file.
The user can either use the default one or change it by editing the configuration file.
"""
print(f"table: {table_name}")
table_config_content = self._impdb.tables[table_name].config
schema = table_config_content["response"]["schema"]
new_schema_dict: Dict[str, List[Any]] = {}
new_schema_dict["column_name"] = []
new_schema_dict["data_type"] = []
for k in schema.keys():
new_schema_dict["column_name"].append(k)
new_schema_dict["data_type"].append(schema[k]["type"])
return
|
pd.DataFrame.from_dict(new_schema_dict)
|
pandas.DataFrame.from_dict
|
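The row above targets pandas.DataFrame.from_dict. A minimal sketch with an illustrative schema dict shaped like the one show_schema builds; the column names and types are made up.

import pandas as pd

schema = {"column_name": ["id", "name"], "data_type": ["int", "string"]}
df = pd.DataFrame.from_dict(schema)                     # dict keys become columns
df_t = pd.DataFrame.from_dict(schema, orient="index")   # dict keys become rows instead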
import argparse
import pandas as pd
import numpy as np
import seaborn as sns
from datetime import datetime
from itertools import cycle
from collections import Counter
import os
import pickle
import re
import sys
import function
def figsize_column(scale, height_ratio=1.0):
fig_width_pt = 433 # Get this from LaTeX using \the\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean * height_ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
def figsize_text(scale, height_ratio=1.0):
fig_width_pt = 433 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean * height_ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
pgf_with_latex = { # setup matplotlib to use latex for output
    "pgf.texsystem": "pdflatex", # change this if using xetex or luatex
    "text.usetex": True, # use LaTeX to write all text
    "font.family": "serif",
    "font.serif": [], # blank entries should cause plots to inherit fonts from the document
    "font.sans-serif": [],
    "font.monospace": [],
    "axes.labelsize": 9,
    "font.size": 9,
    "legend.fontsize": 9,
    "xtick.labelsize": 9,
    "ytick.labelsize": 9,
    "figure.figsize": figsize_column(1.0), # default fig size of 0.9 textwidth
    "pgf.preamble": [
        r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
]
}
sns.set_style("whitegrid", pgf_with_latex)
sns.set_context("paper")
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
LABELS = {
'strategy': 'Strategy',
'pressure_max': 'Max. Affinity Pressure',
'instance': 'Instance',
'first_assigned': 'First Assigned',
'rotation': 'Full Rotations',
'fullrotations': 'Rotational Diversity',
'rel_profit': 'Profit (% of FOP)',
'nb_agents': 'Agents',
'nb_tasks': 'Tasks',
'agentavail': 'Avail. Agents',
'taskavail': 'Avail. Tasks'
}
STRATEGY_LONG = {
'profit': 'Profit',
'affinity': 'Affinity',
'switch3': 'Switch@3',
'switch2': 'Switch@2',
'wppshared': 'WPP/s',
'wppind': 'WPP',
'productcomb': 'Product Combination'
}
STRATEGY_SHORT = {
'profit': 'FOP',
'affinity': 'FOA',
'switch1': 'OS/1',
'switch2': 'OS/2',
'switch3': 'OS/3',
'switch4': 'OS/4',
'switch10': 'OS/10',
'switch20': 'OS/20',
'switch30': 'OS/30',
'switch40': 'OS/40',
'wppshared': 'WPP/s',
'wppind': 'WPP',
'productcomb': 'PC',
'profit-limit': 'FOP',
'affinity-limit': 'FOA',
'switch1-limit': 'OS/1',
'switch2-limit': 'OS/2',
'switch3-limit': 'OS/3',
'switch4-limit': 'OS/4',
'switch10-limit': 'OS/10',
'switch20-limit': 'OS/20',
'switch30-limit': 'OS/30',
'switch40-limit': 'OS/40',
'wppshared-limit': 'WPP/s',
'wppind-limit': 'WPP',
'productcomb-limit': 'PC'
}
FULL_LABELS = {**LABELS, **STRATEGY_LONG}
instance_cache = {}
def read_results(log_filename, ignore_errors=False):
try:
assign_filename = log_filename.replace('_log.csv', '_assignment.csv')
instance = load_instance(log_filename)
log_df = read_csv(log_filename, ignore_errors)
assign_df = read_csv(assign_filename, ignore_errors)
stats_df = assignment_statistics(assign_df, instance)
nb_tasks = len(instance['tasks'])
nb_agents = len(instance['agents'])
agentavail = instance['aa_perc']
taskavail = instance['ta_perc']
log_df['nb_tasks'] = nb_tasks
log_df['nb_agents'] = nb_agents
log_df['agentavail'] = agentavail
log_df['taskavail'] = taskavail
assign_df['nb_tasks'] = nb_tasks
assign_df['nb_agents'] = nb_agents
assign_df['agentavail'] = agentavail
assign_df['taskavail'] = taskavail
stats_df['nb_tasks'] = nb_tasks
stats_df['nb_agents'] = nb_agents
stats_df['agentavail'] = agentavail
stats_df['taskavail'] = taskavail
return log_df, assign_df, stats_df
except:
print(log_filename)
raise
def read_csv(filename, ignore_errors=False):
try:
return pd.read_csv(filename, sep=';')
except Exception as e:
print(filename, e)
if not ignore_errors:
raise
def identify_ignored_tasks(no_tasks, outfiles):
executions = {i: [] for i in range(1, no_tasks + 1)}
for of in sorted(outfiles):
cycleid = int(of.rsplit('_', 2)[-2])
_, _, _, _, assignments = function.load_instance(of)
for assigned in assignments.values():
for taskid in assigned:
executions[taskid].append(cycleid)
ignored_tasks = [taskid for taskid, execs in executions.items() if len(execs) == 0]
return ignored_tasks
def limitation(df, ax=None):
gdf = df[['instance', 'assigned', 'utilization']].groupby('instance', as_index=False).max()
task_limited = gdf[gdf.assigned > 0.99]
agent_limited = gdf[gdf.utilization > 0.99]
print('Task-limited: %d (%.2f)\n' % (len(task_limited), len(task_limited) / len(gdf) * 100))
print('Agent-limited: %d (%.2f)\n' % (len(agent_limited), len(agent_limited) / len(gdf) * 100))
print(gdf)
def assignment_statistics(df, instance):
task_columns = df.columns[~df.columns.isin(['instance', 'strategy', 'cycle', 'run'])]
tdf = df[task_columns]
# Cycle in which the task was first assigned to any agent; does not consider unavailability
first_assigned = tdf.apply(lambda s: df.cycle[s.gt(0).idxmax()] if s.gt(0).any() else np.nan) # - s.eq(-1).sum())
# first_assigned /= tdf.shape[0]
nb_actual_assignments = (tdf > 0).sum()
nb_possible_assignments = (tdf >= 0).sum()
nb_seen_agents = tdf[tdf > 0].nunique()
wm = instance['weight_mat']
nb_comp_agents = (wm[:, 1:] > 0).sum(axis=1)
    # DF with relative frequencies of assignments between tasks and agents
# Rows: Agents (-1: Task unavailable, 0: Unassigned), Columns: Tasks
# NaN: Assignment not possible
freq = pd.DataFrame(index=np.arange(-1, wm.shape[1]))
freq = freq.join(tdf.apply(lambda x: x.value_counts(normalize=True)))
wm2 = np.zeros((wm.shape[1] + 1, wm.shape[0]))
wm2[1:, :] = wm.T
freq[freq.isnull() & (wm2 > 0)] = 0
# How even are tasks distributed? -> mean absolute deviation of assignment distribution
stddev = freq[freq.index > 0].mad()
stddev[stddev.isnull()] = 0
# Max./Avg./Min. no. of cycles between assignment to the same agent
cycle_stats = tdf.apply(lambda s: pd.Series(assignment_cycle_stats(s)))
cycle_stats = cycle_stats.T
assert (cycle_stats.shape == (len(task_columns), 3))
# Full Rotations: Min. no. of assignments to one agent
def fullrotations(s):
sf = s[s > 0]
if len(sf) > 0:
return pd.Series(Counter(sf).most_common()[-1][1])
else:
return
|
pd.Series(0)
|
pandas.Series
|
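The row above targets pandas.Series. A minimal sketch of the wrap-a-scalar pattern used by the truncated fullrotations helper; the sample values are made up.

import pandas as pd
from collections import Counter

s = pd.Series([2, 1, 2, 0, 2, 1])    # agent id per cycle; 0 means unassigned
sf = s[s > 0]
# count of the least frequently assigned agent, wrapped in a one-element Series (here: 2)
result = pd.Series(Counter(sf).most_common()[-1][1]) if len(sf) > 0 else pd.Series(0)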
import os
import csv
import shutil
import hashlib
import tempfile
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem, MACCSkeys
from rdkit.Chem import MolFromSmiles
from padelpy import padeldescriptor # required to calculate KlekotaRothFingerPrint
from metstab_shap.config import csv_section, utils_section
DATA = 'DATA'
test = 'test'
def load_data(data_config, fingerprint, morgan_nbits=None):
datasets = []
indices = []
this_start = 0
for path in sorted(data_config[DATA].values()):
x, y, smiles = preprocess_dataset(path=path, data_config=data_config,
fingerprint=fingerprint, morgan_nbits=morgan_nbits)
datasets.append((x, y, smiles))
indices.append((this_start, this_start+len(y)))
this_start += len(y)
x = np.vstack([el[0] for el in datasets])
y = np.hstack([el[1] for el in datasets])
smiles = np.hstack([el[2] for el in datasets])
cv_split = get_cv_split(indices)
# test set
test_x, test_y, test_smiles = preprocess_dataset(path=data_config[utils_section][test],
data_config=data_config,
fingerprint=fingerprint,
morgan_nbits=morgan_nbits)
return x, y, cv_split, test_x, test_y, smiles, test_smiles
def load_data_from_df(dataset_paths, smiles_index, y_index, skip_line=False, delimiter=',', scale=None, average=None):
"""
Load multiple files from csvs, concatenate and return smiles and ys
:param dataset_paths: list: paths to csv files with data
:param smiles_index: int: index of the column with smiles
:param y_index: int: index of the column with the label
:param skip_line: boolean: True if the first line of the file contains column names, False otherwise
    :param delimiter: delimiter used in the csv
:param scale: should y be scaled? (useful with skewed distributions of y)
:param average: if the same SMILES appears multiple times how should its values be averaged?
:return: (smiles, labels) - np.arrays
"""
# column names present in files?
header = 0 if skip_line else None
# load all files
dfs = []
for data_path in dataset_paths:
dfs.append(pd.read_csv(data_path, delimiter=delimiter, header=header))
# merge
data_df = pd.concat(dfs)
# scaling ys
if scale is not None:
if 'sqrt' == scale.lower().strip():
data_df.iloc[:, y_index] = np.sqrt(data_df.iloc[:, y_index])
elif 'log' == scale.lower().strip():
data_df.iloc[:, y_index] = np.log(1 + data_df.iloc[:, y_index])
else:
raise NotImplementedError(f"Scale {scale} is not implemented.")
# averaging when one smiles has multiple values
if average is not None:
smiles_col = data_df.iloc[:, smiles_index].name
y_col = data_df.iloc[:, y_index].name
        data_df = data_df.loc[:, [smiles_col, y_col]] # from now on: smiles is column 0, y_col is column 1; other columns are dropped
smiles_index = 0
y_index = 1
if 'median' == average.lower().strip():
data_df[y_col] = data_df[y_col].groupby(data_df[smiles_col]).transform('median')
else:
raise NotImplementedError(f"Averaging {average} is not implemented.")
# breaking into x and y
data_df = data_df.values
data_x = data_df[:, smiles_index]
data_y = data_df[:, y_index]
if data_y.dtype == np.float64:
data_y = data_y.astype(np.float32)
return data_x, data_y
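# Hypothetical call of load_data_from_df, assuming a headerless CSV with SMILES in
# column 0 and the label in column 1 (the file name below is illustrative only):
# smiles, labels = load_data_from_df(['data/dataset_fold0.csv'], smiles_index=0,
#                                    y_index=1, skip_line=False, scale='log',
#                                    average='median')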
def preprocess_dataset(path, data_config, fingerprint, morgan_nbits=None):
"""Calculate representation for each smiles in the dataset."""
if fingerprint == 'morgan':
assert morgan_nbits is not None, 'Parameter `morgan_nbits` must be set when using Morgan fingerprint.'
smiles, labels = load_data_from_df([path,], **data_config[csv_section])
x = []
y = []
calculated_smiles = []
# we go smiles by smiles because some compounds make rdkit throw errors
for this_smiles, this_label in zip(smiles, labels):
try:
mol = Chem.MolFromSmiles(this_smiles)
if fingerprint == 'morgan':
fp = AllChem.GetMorganFingerprintAsBitVect(mol, 6, nBits=morgan_nbits)
fp = [int(i) for i in fp.ToBitString()]
elif fingerprint == 'maccs':
fp = MACCSkeys.GenMACCSKeys(mol)
fp = np.array(fp)[1:] # index 0 is unset
elif fingerprint == 'krfp':
fp = krfp(this_smiles)
else:
pass # unknown fingerprint
x.append(fp)
y.append(this_label)
calculated_smiles.append(this_smiles)
except Exception as e:
print('exp', e)
return np.array(x), np.array(y), calculated_smiles
def krfp(smi):
"""Calculate Klekota-Roth fingerprint using padelpy."""
# Warning: as this function uses padel it requires descriptors.xml to be
# in the running directory and have KlekotaRothFingerprinter set to true
# we don't want to copy and remove the descriptors.xml file for each smiles
# separately, so we check if it exists and if it has the proper content
cwd = os.getcwd()
descriptors_filename = 'descriptors.xml'
descriptors_hash = 'f6145f57ff346599b907b044316c4e71'
try:
with open(os.path.join(cwd, descriptors_filename), 'r') as desc_file:
desc_file_content = desc_file.read()
m = hashlib.md5()
m.update(desc_file_content.encode('utf-8'))
if m.hexdigest() == descriptors_hash:
pass # descriptors.xml exists and has the right content
else:
# the file exists but it has a wrong content
raise RuntimeError("The descriptors.xml was found in the running directory but its content doesn't match the prototype content. Aborting.")
except FileNotFoundError:
# the file doesn't exist, we have to create it
src_directory = os.path.dirname(os.path.realpath(__file__))
shutil.copyfile(os.path.join(src_directory, descriptors_filename),
os.path.join(cwd, descriptors_filename))
# # #
# # # descriptors.xml exists and looks good, we can continue with calculating the representation
# on prometheus we use SCRATCH, everywhere else the default location is fine
with tempfile.TemporaryDirectory(dir=os.getenv('SCRATCH', None)) as tmpdirname:
smi_file = os.path.join(tmpdirname, "molecules.smi")
with open(smi_file, 'w') as sf:
sf.write(smi)
out = os.path.join(tmpdirname, "out.csv")
padeldescriptor(mol_dir=smi_file, d_file=out, fingerprints=True, retainorder=True)
fp =
|
pd.read_csv(out)
|
pandas.read_csv
|
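The row above targets pandas.read_csv. A minimal sketch reading a small in-memory CSV, used here as a stand-in for padel's out.csv (the real output has many more fingerprint columns).

import io
import pandas as pd

csv_text = "Name,FP1,FP2,FP3\nmolecules,0,1,0\n"
fp_df = pd.read_csv(io.StringIO(csv_text))
bits = fp_df.iloc[0, 1:].astype(int).tolist()   # drop the Name column -> [0, 1, 0]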
import os
import numpy as np
import pandas as pd
import scipy.special
import uproot
import oyaml as yaml
from . import geometry
__all__ = [
"create_hdf_from_root",
"convert",
"convert_table_to_fixed",
"basic_query",
"basic_eval",
"rename_column",
"object_cross_cleaning",
"shift_2dvector",
"object_cross_dphi",
"mindphi",
"weight_sigmoid",
"object_groupby",
"histogram",
]
def create_hdf_from_root(path, *cfgs):
for cfg in cfgs:
outpath = os.path.join(
cfg["output"]["direc"],
os.path.splitext(os.path.basename(path))[0]+".h5",
)
if os.path.exists(outpath):
os.remove(outpath)
for cfg in cfgs:
with open(cfg["dataset"]["cfg"], 'r') as f:
dataset_cfg = yaml.safe_load(f)["datasets"]
# find cfg for current path
path_cfg = None
for dataset in dataset_cfg:
if path in dataset["files"]:
path_cfg = dataset
break
outpath = os.path.join(
cfg["output"]["direc"],
os.path.splitext(os.path.basename(path))[0]+".h5",
)
for df in uproot.pandas.iterate(path, cfg["tree"], **cfg["iterate_kwargs"]):
df = df.astype(cfg.get("dtypes", {}))
for key in cfg["dataset"]["keys"]:
df[key] = path_cfg[key]
df.to_hdf(
outpath, cfg["output"]["tree"],
format='table', append=True,
complib='zlib', complevel=9,
)
def convert(path, trees, outdir, kwargs):
for tree in trees:
new_path = os.path.join(
outdir, os.path.basename(path),
)
pd.read_hdf(path, tree).to_hdf(
new_path, tree,
**kwargs,
)
def convert_table_to_fixed(path, *tables):
"""Simply read in a dataframe and output as a fixed table for quicker IO"""
with pd.HDFStore(path) as store:
for table in tables:
if table in store:
df = store.select(table)
# event index and/or object ID is saved in columns. Keep the dataframe
# index as a separate thing. Unsure the best way to go, but the output
# is a different file to the input right now
df.reset_index(drop=True).to_hdf(
path.replace(".h5", "_v2.h5"), table, format='fixed',
append=False, complib='zlib', complevel=9,
)
def basic_query(path, *cfgs):
"""Apply a query string to a dataframe and output into the same file"""
for cfg in cfgs:
df = (
pd.read_hdf(path, cfg["input"])
.query(cfg["query"])
.reset_index(drop=True)
)
# Reset object_id for tables with multiple parent_event rows
if "object_id" in df.columns:
df["object_id"] = df.groupby("parent_event", sort=False).cumcount()
df.to_hdf(
path, cfg["output"], format='fixed', append=False,
complib='zlib', complevel=9,
)
def basic_eval(path, *cfgs):
for cfg in cfgs:
df = pd.read_hdf(path, cfg["input"])
for eval_str in cfg["evals"]:
df.eval(eval_str, inplace=True)
df.to_hdf(
path, cfg["output"], format='fixed', append=False,
complib='zlib', complevel=9,
)
def rename_column(path, *cfgs):
for cfg in cfgs:
df = (
pd.read_hdf(path, cfg["input"])
.rename(columns=cfg["rename"])
)
df.to_hdf(
path, cfg["output"], format='fixed', append=False,
complib='zlib', complevel=9,
)
def drop_column(path, *cfgs):
for cfg in cfgs:
df = (
pd.read_hdf(path, cfg["input"])
.drop(cfg["drop"], axis=1)
)
df.to_hdf(
path, cfg["output"], format='fixed', append=False,
complib='zlib', complevel=9,
)
def object_cross_cleaning(path, *cfgs):
"""
Remove objects from the input collection which match to any objects in any
of the reference collections through a distance match in eta-phi space with
a configurable cut.
"""
for cfg in cfgs:
# Retain the original dataframe for writing the skimmed version out
df_orig = pd.read_hdf(path, cfg["input"]["table"])
df = df_orig[[
"parent_event", "object_id",
            cfg["input"]["variable"].format("eta"),
            cfg["input"]["variable"].format("phi"),
]].sort_values(["parent_event", "object_id"]).copy(deep=True)
df.columns = ["parent_event", "object_id1", "eta1", "phi1"]
df["matched"] = False
for cfg_ref in cfg["references"]:
df_ref = pd.read_hdf(path, cfg_ref["table"])
df_ref = df_ref.query(cfg_ref["query"])[[
"parent_event", "object_id",
cfg_ref["variable"].format("eta"),
cfg_ref["variable"].format("phi"),
]].sort_values(["parent_event", "object_id"])
df_ref.columns = ["parent_event", "object_id2", "eta2", "phi2"]
df_cross = df.merge(df_ref, on='parent_event', how='left')
dr = geometry.deltar(
df_cross.eval("eta1-eta2").values,
df_cross.eval("phi1-phi2").values,
)
# Remove warnings by making sure nans fail. I.e. inf < 0.4 is always
# False
dr[np.isnan(dr)] = np.inf
df_cross["matched"] = (dr < cfg["distance"])
# Default set to no match (False) and then take the logical OR with
# any matched object of this particular reference
df["matched"] = df["matched"] | (
df_cross.groupby(["parent_event", "object_id1"], sort=False)["matched"]
.any().values
)
df_orig[cfg["output"]["variable"]] = (~df["matched"])
df_orig.to_hdf(
path, cfg["output"]["table"], format='fixed', append=False,
complib='zlib', complevel=9,
)
def shift_2dvector(path, **kwargs):
# Save original for output
df_orig = pd.read_hdf(path, kwargs["input"])
for cfg in kwargs["args"]:
df = df_orig[[
cfg["variables"]["pt"], cfg["variables"]["phi"],
]].copy(deep=True)
df.columns = ["pt", "phi"]
px, py = geometry.radial_to_cartesian2d(
df["pt"].values, df["phi"].values,
)
for cfg_shift in cfg["shifters"]:
df_shift =
|
pd.read_hdf(path, cfg_shift["table"]["name"])
|
pandas.read_hdf
|
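The row above targets pandas.read_hdf. A minimal round-trip sketch; it requires the optional 'tables' package, and the file name and key below are illustrative only.

import pandas as pd

df = pd.DataFrame({"pt": [30.0, 55.2], "phi": [0.1, -1.4]})
df.to_hdf("example.h5", key="met", format="fixed")   # write a small table
same = pd.read_hdf("example.h5", "met")              # read it back by key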
import pandas as pd
df1 = pd.read_csv('/tigress/np5/all_df.csv', index_col=0)
df2 = pd.read_csv('/tigress/np5/all_df_params.csv', index_col=0)
df3 = pd.read_csv('/tigress/np5/df_fit_params.csv', index_col=0)
df =
|
pd.merge(df1, df2, left_index=True, right_index=True, how='inner')
|
pandas.merge
|
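The row above targets pandas.merge. A minimal sketch of an index-on-index inner merge like the one in the truncated script, using tiny made-up frames.

import pandas as pd

df_a = pd.DataFrame({"x": [1, 2, 3]}, index=["a", "b", "c"])
df_b = pd.DataFrame({"y": [10, 30]}, index=["a", "c"])
merged = pd.merge(df_a, df_b, left_index=True, right_index=True, how="inner")
# merged keeps only index labels present in both frames: "a" and "c"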
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# 3/28/2013
# by nthmost (<EMAIL>)
import pandas
from bitstring import ReadError
from .base_parser_class import InteropBinParser
class InteropControlMetrics(InteropBinParser):
__version = 0.1
supported_versions = [1]
codename = 'control'
def _init_variables(self):
self.data = { 'lane': [],
'tile': [],
'read': [],
'control_str': [],
'index_str': [],
'clusters': []
}
def parse_binary(self):
bs = self.bs
# Control Metrics (ControlMetricsOut.bin)
# Contains pull out information for Illumina in-line sample controls
# Format:
# byte 0: file version number (1) bytes (variable length): record:
# 2 bytes: lane number (uint16)
# 2 bytes: tile number (uint16)
# 2 bytes: read number (uint16)
# 2 bytes: number bytes X for control name(uint16)
# X bytes: control name string (string in UTF8Encoding)
# 2 bytes: number bytes Y for index name(uint16)
# Y bytes: index name string (string in UTF8Encoding)
# 4 bytes: num of clusters identified as control (uint32)
self.apparent_file_version = bs.read('uintle:8') # version number of binary
self.check_version(self.apparent_file_version)
try:
while True:
self.data['lane'].append(bs.read('uintle:16'))
self.data['tile'].append(bs.read('uintle:16'))
self.data['read'].append(bs.read('uintle:16'))
# next 2 bytes: expected control name length in bytes.
nextbytes = bs.read('uintle:16')
self.data['control_str'].append(bs.read('bytes:%i' % (nextbytes)))
# next 2 bytes: expected index name length in bytes.
nextbytes = bs.read('uintle:16')
self.data['index_str'].append(bs.read('bytes:%i' % (nextbytes)))
self.data['clusters'].append(bs.read('uintle:32'))
except ReadError:
pass
self.df =
|
pandas.DataFrame(self.data)
|
pandas.DataFrame
|
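The row above targets pandas.DataFrame. A minimal sketch building a frame from a dict of equal-length lists, the same shape as the parser's self.data; the field values are made up.

import pandas as pd

data = {"lane": [1, 1], "tile": [1101, 1102], "clusters": [523, 488]}
df = pd.DataFrame(data)          # one column per key, rows aligned by position
total = df["clusters"].sum()     # 1011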
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 17:28:04 2020
@author: shlomi
"""
from PW_paths import work_yuval
from matplotlib import rcParams
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from PW_paths import savefig_path
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from PW_stations import produce_geo_gnss_solved_stations
tela_results_path = work_yuval / 'GNSS_stations/tela/rinex/30hr/results'
tela_solutions = work_yuval / 'GNSS_stations/tela/gipsyx_solutions'
sound_path = work_yuval / 'sounding'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
dem_path = work_yuval / 'AW3D30'
era5_path = work_yuval / 'ERA5'
hydro_path = work_yuval / 'hydro'
ceil_path = work_yuval / 'ceilometers'
aero_path = work_yuval / 'AERONET'
climate_path = work_yuval / 'climate'
df_gnss = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
st_order_climate = [x for x in df_gnss.dropna().sort_values(
['groups_climate', 'lat', 'lon'], ascending=[1, 0, 0]).index]
rc = {
'font.family': 'serif',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large'}
for key, val in rc.items():
rcParams[key] = val
# sns.set(rc=rc, style='white')
seasonal_colors = {'DJF': 'tab:blue',
'SON': 'tab:red',
'JJA': 'tab:green',
'MAM': 'tab:orange',
'Annual': 'tab:purple'}
def get_twin(ax, axis):
assert axis in ("x", "y")
siblings = getattr(ax, f"get_shared_{axis}_axes")().get_siblings(ax)
for sibling in siblings:
if sibling.bbox.bounds == ax.bbox.bounds and sibling is not ax:
return sibling
return None
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
"""
Returns a string representation of the scientific
notation of the given number formatted for use with
LaTeX or Mathtext, with specified number of significant
decimal digits and precision (number of decimal digits
to show). The exponent to be used can also be specified
explicitly.
"""
from math import floor, log10
if exponent is None:
exponent = int(floor(log10(abs(num))))
coeff = round(num / float(10**exponent), decimal_digits)
if precision is None:
precision = decimal_digits
return r"${0:.{2}f}\cdot10^{{{1:d}}}$".format(coeff, exponent, precision)
def utm_from_lon(lon):
"""
utm_from_lon - UTM zone for a longitude
    Not right for some polar regions (Norway, Svalbard, Antarctica)
:param float lon: longitude
:return: UTM zone number
:rtype: int
"""
from math import floor
return floor((lon + 180) / 6) + 1
def scale_bar(ax, proj, length, location=(0.5, 0.05), linewidth=3,
units='km', m_per_unit=1000, bounds=None):
"""
http://stackoverflow.com/a/35705477/1072212
ax is the axes to draw the scalebar on.
proj is the projection the axes are in
location is center of the scalebar in axis coordinates ie. 0.5 is the middle of the plot
length is the length of the scalebar in km.
linewidth is the thickness of the scalebar.
units is the name of the unit
m_per_unit is the number of meters in a unit
"""
import cartopy.crs as ccrs
from matplotlib import patheffects
# find lat/lon center to find best UTM zone
try:
x0, x1, y0, y1 = ax.get_extent(proj.as_geodetic())
except AttributeError:
if bounds is not None:
x0, x1, y0, y1 = bounds
# Projection in metres
utm = ccrs.UTM(utm_from_lon((x0+x1)/2))
# Get the extent of the plotted area in coordinates in metres
x0, x1, y0, y1 = ax.get_extent(utm)
# Turn the specified scalebar location into coordinates in metres
sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]
# Generate the x coordinate for the ends of the scalebar
bar_xs = [sbcx - length * m_per_unit/2, sbcx + length * m_per_unit/2]
# buffer for scalebar
buffer = [patheffects.withStroke(linewidth=5, foreground="w")]
# Plot the scalebar with buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, path_effects=buffer)
# buffer for text
buffer = [patheffects.withStroke(linewidth=3, foreground="w")]
# Plot the scalebar label
t0 = ax.text(sbcx, sbcy, str(length) + ' ' + units, transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
left = x0+(x1-x0)*0.05
# Plot the N arrow
t1 = ax.text(left, sbcy, u'\u25B2\nN', transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
# Plot the scalebar without buffer, in case covered by text buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, zorder=3)
return
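# Hypothetical usage sketch of scale_bar (assumes a cartopy GeoAxes `ax` in a
# PlateCarree projection; the extent and length below are illustrative only):
# import cartopy.crs as ccrs
# fig, ax = plt.subplots(subplot_kw={'projection': ccrs.PlateCarree()})
# ax.set_extent([34, 36, 29, 34])
# scale_bar(ax, ccrs.PlateCarree(), 50, location=(0.5, 0.05), units='km')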
@ticker.FuncFormatter
def lon_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$W'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$E'.format(abs(x))
elif x == 0:
return r'0$\degree$'
@ticker.FuncFormatter
def lat_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$S'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$N'.format(abs(x))
elif x == 0:
return r'0$\degree$'
def align_yaxis_np(ax1, ax2):
"""Align zeros of the two axes, zooming them out by same ratio"""
import numpy as np
axes = np.array([ax1, ax2])
extrema = np.array([ax.get_ylim() for ax in axes])
tops = extrema[:,1] / (extrema[:,1] - extrema[:,0])
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [a[::-1] for a in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
extrema[0,1] = extrema[0,0] + tot_span * (extrema[0,1] - extrema[0,0])
extrema[1,0] = extrema[1,1] + tot_span * (extrema[1,0] - extrema[1,1])
[axes[i].set_ylim(*extrema[i]) for i in range(2)]
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# miny, maxy = ax2.get_ylim()
# ax2.set_ylim(miny+dy, maxy+dy)
def get_legend_labels_handles_title_seaborn_histplot(ax):
old_legend = ax.legend_
handles = old_legend.legendHandles
labels = [t.get_text() for t in old_legend.get_texts()]
title = old_legend.get_title().get_text()
return handles, labels, title
def alignYaxes(axes, align_values=None):
'''Align the ticks of multiple y axes
Args:
axes (list): list of axes objects whose yaxis ticks are to be aligned.
Keyword Args:
align_values (None or list/tuple): if not None, should be a list/tuple
of floats with same length as <axes>. Values in <align_values>
define where the corresponding axes should be aligned up. E.g.
[0, 100, -22.5] means the 0 in axes[0], 100 in axes[1] and -22.5
in axes[2] would be aligned up. If None, align (approximately)
the lowest ticks in all axes.
Returns:
new_ticks (list): a list of new ticks for each axis in <axes>.
        A new set of ticks is computed for each axis in <axes>, all with equal
        length.
'''
from matplotlib.pyplot import MaxNLocator
import numpy as np
nax = len(axes)
ticks = [aii.get_yticks() for aii in axes]
if align_values is None:
aligns = [ticks[ii][0] for ii in range(nax)]
else:
if len(align_values) != nax:
raise Exception(
"Length of <axes> doesn't equal that of <align_values>.")
aligns = align_values
bounds = [aii.get_ylim() for aii in axes]
# align at some points
ticks_align = [ticks[ii]-aligns[ii] for ii in range(nax)]
# scale the range to 1-100
ranges = [tii[-1]-tii[0] for tii in ticks]
lgs = [-np.log10(rii)+2. for rii in ranges]
igs = [np.floor(ii) for ii in lgs]
log_ticks = [ticks_align[ii]*(10.**igs[ii]) for ii in range(nax)]
# put all axes ticks into a single array, then compute new ticks for all
comb_ticks = np.concatenate(log_ticks)
comb_ticks.sort()
locator = MaxNLocator(nbins='auto', steps=[1, 2, 2.5, 3, 4, 5, 8, 10])
new_ticks = locator.tick_values(comb_ticks[0], comb_ticks[-1])
new_ticks = [new_ticks/10.**igs[ii] for ii in range(nax)]
new_ticks = [new_ticks[ii]+aligns[ii] for ii in range(nax)]
# find the lower bound
idx_l = 0
for i in range(len(new_ticks[0])):
if any([new_ticks[jj][i] > bounds[jj][0] for jj in range(nax)]):
idx_l = i-1
break
# find the upper bound
idx_r = 0
for i in range(len(new_ticks[0])):
if all([new_ticks[jj][i] > bounds[jj][1] for jj in range(nax)]):
idx_r = i
break
# trim tick lists by bounds
new_ticks = [tii[idx_l:idx_r+1] for tii in new_ticks]
# set ticks for each axis
for axii, tii in zip(axes, new_ticks):
axii.set_yticks(tii)
return new_ticks
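# Hypothetical usage sketch of alignYaxes (the data and axes below are illustrative):
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# ax1.plot(range(10), [3 * x for x in range(10)], color='tab:blue')
# ax2.plot(range(10), [50 * x - 120 for x in range(10)], color='tab:red')
# alignYaxes([ax1, ax2], align_values=[0, 0])   # align the two zeros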
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1 - y2) / 2, v2)
adjust_yaxis(ax1, (y2 - y1) / 2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny * (maxy + dy) / (miny + dy)
else:
nmaxy = maxy
nminy = maxy * (miny + dy) / (maxy + dy)
ax.set_ylim(nminy + v, nmaxy + v)
def qualitative_cmap(n=2):
import matplotlib.colors as mcolors
if n == 2:
colorsList = [mcolors.BASE_COLORS['r'], mcolors.BASE_COLORS['g']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 4:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 5:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m'],
mcolors.BASE_COLORS['b']]
cmap = mcolors.ListedColormap(colorsList)
return cmap
def caption(text, color='blue', **kwargs):
from termcolor import colored
print(colored('Caption:', color, attrs=['bold'], **kwargs))
print(colored(text, color, attrs=['bold'], **kwargs))
return
def adjust_lightness(color, amount=0.5):
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
def produce_colors_for_pwv_station(scope='annual', zebra=False,
as_dict=False, as_cat_dict=False):
import pandas as pd
stns = group_sites_to_xarray(scope=scope)
cdict = {'coastal': 'tab:blue',
'highland': 'tab:green',
'eastern': 'tab:orange'}
if as_cat_dict:
return cdict
# for grp, color in cdict.copy().items():
# cdict[grp] = to_rgba(get_named_colors_mapping()[
# color], alpha=1)
ds = stns.to_dataset('group')
colors = []
for group in ds:
sts = ds[group].dropna('GNSS').values
for i, st in enumerate(sts):
color = cdict.get(group)
if zebra:
if i % 2 != 0:
# rgba = np.array(rgba)
# rgba[-1] = 0.5
color = adjust_lightness(color, 0.5)
colors.append(color)
# colors = [item for sublist in colors for item in sublist]
stns = stns.T.values.ravel()
stns = stns[~pd.isnull(stns)]
if as_dict:
colors = dict(zip(stns, colors))
return colors
def fix_time_axis_ticks(ax, limits=None, margin=15):
import pandas as pd
import matplotlib.dates as mdates
if limits is not None:
ax.set_xlim(*pd.to_datetime(limits))
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(mdates.MonthLocator())
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
return ax
def plot_qflux_climatotlogy_israel(path=era5_path, save=True, reduce='mean',
plot_type='uv'):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
ds = xr.load_dataset(path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
if plot_type == 'uv':
f1 = ds['q'] * ds['u']
f2 = ds['q'] * ds['v']
elif plot_type == 'md':
qu = ds['q'] * ds['u']
qv = ds['q'] * ds['v']
f1 = np.sqrt(qu**2 + qv**2)
f2 = np.rad2deg(np.arctan2(qv, qu))
if reduce == 'mean':
f1_clim = f1.groupby('time.month').mean().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').mean().mean(
'longitude').mean('latitude')
center = 0
cmap = 'bwr'
elif reduce == 'std':
f1_clim = f1.groupby('time.month').std().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').std().mean(
'longitude').mean('latitude')
center = None
cmap = 'viridis'
ds_clim = xr.concat([f1_clim, f2_clim], 'direction')
ds_clim['direction'] = ['zonal', 'meridional']
if plot_type == 'md':
fg, axes = plt.subplots(1, 2, figsize=(14, 7))
f1_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[0])
f2_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[1])
else:
fg = ds_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(
levels=41,
yincrease=False,
cmap=cmap,
center=center,
col='direction',
figsize=(
15,
6))
fg.fig.suptitle('Moisture flux climatology over Israel')
# fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# qu_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[0], cmap='bwr', center=0)
# qv_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[1], cmap='bwr', center=0)
fg.fig.subplots_adjust(top=0.923,
bottom=0.102,
left=0.058,
right=0.818,
hspace=0.2,
wspace=0.045)
if save:
filename = 'moisture_clim_from_ERA5_over_israel.png'
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_mean_std_count(da_ts, time_reduce='hour', reduce='mean',
count_factor=1):
    """plot mean, std and count of Xarray dataarray time-series"""
    import xarray as xr
    import seaborn as sns
cmap = sns.color_palette("colorblind", 2)
time_dim = list(set(da_ts.dims))[0]
grp = '{}.{}'.format(time_dim, time_reduce)
if reduce == 'mean':
mean = da_ts.groupby(grp).mean()
elif reduce == 'median':
mean = da_ts.groupby(grp).median()
std = da_ts.groupby(grp).std()
mean_plus_std = mean + std
mean_minus_std = mean - std
count = da_ts.groupby(grp).count()
if isinstance(da_ts, xr.Dataset):
dvars = [x for x in da_ts.data_vars.keys()]
assert len(dvars) == 2
secondary_y = dvars[1]
else:
secondary_y = None
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 15))
mean_df = mean.to_dataframe()
if secondary_y is not None:
axes[0] = mean_df[dvars[0]].plot(
ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
ax2mean = mean_df[secondary_y].plot(
ax=axes[0],
linewidth=2.0,
marker='s',
color=cmap[1],
secondary_y=True)
h1, l1 = axes[0].get_legend_handles_labels()
h2, l2 = axes[0].right_ax.get_legend_handles_labels()
handles = h1 + h2
labels = l1 + l2
axes[0].legend(handles, labels)
axes[0].fill_between(mean_df.index.values,
mean_minus_std[dvars[0]].values,
mean_plus_std[dvars[0]].values,
color=cmap[0],
alpha=0.5)
ax2mean.fill_between(
mean_df.index.values,
mean_minus_std[secondary_y].values,
mean_plus_std[secondary_y].values,
color=cmap[1],
alpha=0.5)
ax2mean.tick_params(axis='y', colors=cmap[1])
else:
mean_df.plot(ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
axes[0].fill_between(
mean_df.index.values,
mean_minus_std.values,
mean_plus_std.values,
color=cmap[0],
alpha=0.5)
axes[0].grid()
count_df = count.to_dataframe() / count_factor
count_df.plot.bar(ax=axes[1], rot=0)
axes[0].xaxis.set_tick_params(labelbottom=True)
axes[0].tick_params(axis='y', colors=cmap[0])
fig.tight_layout()
if secondary_y is not None:
return axes, ax2mean
else:
return axes
def plot_seasonal_histogram(da, dim='sound_time', xlim=None, xlabel=None,
suptitle=''):
fig_hist, axs = plt.subplots(2, 2, sharex=False, sharey=True,
figsize=(10, 8))
seasons = ['DJF', 'MAM', 'JJA', 'SON']
cmap = sns.color_palette("colorblind", 4)
for i, ax in enumerate(axs.flatten()):
da_season = da.sel(
{dim: da['{}.season'.format(dim)] == seasons[i]}).dropna(dim)
ax = sns.distplot(da_season, ax=ax, norm_hist=False,
color=cmap[i], hist_kws={'edgecolor': 'k'},
axlabel=xlabel,
label=seasons[i])
ax.set_xlim(xlim)
ax.legend()
# axes.set_xlabel('MLH [m]')
ax.set_ylabel('Frequency')
fig_hist.suptitle(suptitle)
fig_hist.tight_layout()
return axs
def plot_two_histograms_comparison(x, y, bins=None, labels=['x', 'y'],
ax=None, colors=['b', 'r']):
import numpy as np
import matplotlib.pyplot as plt
x_w = np.empty(x.shape)
x_w.fill(1/x.shape[0])
y_w = np.empty(y.shape)
y_w.fill(1/y.shape[0])
if ax is None:
fig, ax = plt.subplots()
ax.hist([x, y], bins=bins, weights=[x_w, y_w], color=colors,
label=labels)
ax.legend()
return ax
def plot_diurnal_wind_hodograph(path=ims_path, station='TEL-AVIV-COAST',
season=None, cmax=None, ax=None):
import xarray as xr
from metpy.plots import Hodograph
# import matplotlib
import numpy as np
colorbar = False
# from_list = matplotlib.colors.LinearSegmentedColormap.from_list
cmap = plt.cm.get_cmap('hsv', 24)
# cmap = from_list(None, plt.cm.jet(range(0,24)), 24)
U = xr.open_dataset(path / 'IMS_U_israeli_10mins.nc')
V = xr.open_dataset(path / 'IMS_V_israeli_10mins.nc')
u_sta = U[station]
v_sta = V[station]
u_sta.load()
v_sta.load()
if season is not None:
print('{} season selected'.format(season))
u_sta = u_sta.sel(time=u_sta['time.season'] == season)
v_sta = v_sta.sel(time=v_sta['time.season'] == season)
u = u_sta.groupby('time.hour').mean()
v = v_sta.groupby('time.hour').mean()
if ax is None:
colorbar = True
fig, ax = plt.subplots()
    if cmax is None:
        max_uv = max(max(u.values), max(v.values)) + 1
    else:
        max_uv = cmax
h = Hodograph(component_range=max_uv, ax=ax)
h.add_grid(increment=0.5)
# hours = np.arange(0, 25)
lc = h.plot_colormapped(u, v, u.hour, cmap=cmap,
linestyle='-', linewidth=2)
#ticks = np.arange(np.min(hours), np.max(hours))
# cb = fig.colorbar(lc, ticks=range(0,24), label='Time of Day [UTC]')
if colorbar:
cb = ax.figure.colorbar(lc, ticks=range(
0, 24), label='Time of Day [UTC]')
# cb.ax.tick_params(length=0)
if season is None:
ax.figure.suptitle('{} diurnal wind Hodograph'.format(station))
else:
ax.figure.suptitle(
'{} diurnal wind Hodograph {}'.format(station, season))
ax.set_xlabel('North')
ax.set_ylabel('East')
ax.set_title('South')
ax2 = ax.twinx()
ax2.tick_params(axis='y', right=False, labelright=False)
ax2.set_ylabel('West')
# axcb = fig.colorbar(lc)
return ax
def plot_MLR_GNSS_PW_harmonics_facetgrid(path=work_yuval, season='JJA',
n_max=2, ylim=None, scope='diurnal',
save=True, era5=False, leg_size=15):
"""
Parameters
----------
path : TYPE, optional
DESCRIPTION. The default is work_yuval.
season : TYPE, optional
DESCRIPTION. The default is 'JJA'.
n_max : TYPE, optional
DESCRIPTION. The default is 2.
ylim : TYPE, optional
the ylimits of each panel use [-6,8] for annual. The default is None.
scope : TYPE, optional
DESCRIPTION. The default is 'diurnal'.
save : TYPE, optional
DESCRIPTION. The default is True.
era5 : TYPE, optional
DESCRIPTION. The default is False.
leg_size : TYPE, optional
DESCRIPTION. The default is 15.
Returns
-------
None.
"""
import xarray as xr
from aux_gps import run_MLR_harmonics
from matplotlib.ticker import AutoMinorLocator
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
sns.set_style('whitegrid')
sns.set_style('ticks')
geo = produce_geo_gnss_solved_stations(add_distance_to_coast=True, plot=False)
if scope == 'diurnal':
cunits = 'cpd'
ticks = np.arange(0, 23, 3)
xlabel = 'Hour of day [UTC]'
elif scope == 'annual':
cunits = 'cpy'
ticks = np.arange(1, 13, 1)
xlabel = 'month'
print('producing {} harmonics plot.'.format(scope))
if era5:
harmonics = xr.load_dataset(path / 'GNSS_PW_era5_harmonics_{}.nc'.format(scope))
else:
harmonics = xr.load_dataset(path / 'GNSS_PW_harmonics_{}.nc'.format(scope))
# sites = sorted(list(set([x.split('_')[0] for x in harmonics])))
# da = xr.DataArray([x for x in range(len(sites))], dims='GNSS')
# da['GNSS'] = sites
sites = group_sites_to_xarray(upper=False, scope=scope)
sites_flat = [x for x in sites.values.flatten()]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
try:
harm_site = harmonics[[x for x in harmonics if site in x]]
if site in ['nrif']:
leg_loc = 'upper center'
elif site in ['yrcm', 'ramo']:
leg_loc = 'lower center'
# elif site in ['katz']:
# leg_loc = 'upper right'
else:
leg_loc = None
if scope == 'annual':
leg_loc = 'upper left'
ax, handles, labels = run_MLR_harmonics(harm_site, season=season,
cunits=cunits,
n_max=n_max, plot=True, ax=ax,
legend_loc=leg_loc, ncol=1,
legsize=leg_size, lw=2.5,
legend_S_only=True)
ax.set_xlabel(xlabel, fontsize=16)
if ylim is not None:
ax.set_ylim(*ylim)
ax.tick_params(axis='x', which='major', labelsize=18)
# if scope == 'diurnal':
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=18)
ax.yaxis.tick_left()
ax.xaxis.set_ticks(ticks)
ax.grid()
ax.set_title('')
ax.set_ylabel('')
ax.grid(axis='y', which='minor', linestyle='--')
# get this for upper legend:
# handles, labels = ax.get_legend_handles_labels()
if scope == 'annual':
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
label_coord = [0.52, 0.87]
fs = 18
elif scope == 'diurnal':
site_label = site.upper()
label_coord = [0.1, 0.85]
fs = 20
ax.text(*label_coord, site_label,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes, fontsize=fs)
if j == 0:
ax.set_ylabel('PWV anomalies [mm]', fontsize=16)
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
except TypeError:
print('{}, {} axis off'.format(i, j))
ax.set_axis_off()
# for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
# harm_site = harmonics[[x for x in harmonics if sites[i] in x]]
# if site in ['elat', 'nrif']:
# loc = 'upper center'
# text = 0.1
# elif site in ['elro', 'yrcm', 'ramo', 'slom', 'jslm']:
# loc = 'upper right'
# text = 0.1
# else:
# loc = None
# text = 0.1
# ax = run_MLR_diurnal_harmonics(harm_site, season=season, n_max=n_max, plot=True, ax=ax, legend_loc=loc)
# ax.set_title('')
# ax.set_ylabel('PW anomalies [mm]')
# if ylim is not None:
# ax.set_ylim(ylim[0], ylim[1])
# ax.text(text, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# for i, ax in enumerate(fg.axes.flatten()):
# if i > (da.GNSS.telasize-1):
# ax.set_axis_off()
# pass
    # add an upper legend shared by all facets:
S_labels = labels[:-2]
S_labels = [x.split(' ')[0] for x in S_labels]
last_label = 'Mean PWV anomalies'
sum_label = labels[-2].split("'")[1]
S_labels.append(sum_label)
S_labels.append(last_label)
fg.fig.legend(handles=handles, labels=S_labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.032,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
if era5:
filename = 'pw_era5_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
else:
filename = 'pw_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_gustiness(path=work_yuval, ims_path=ims_path, site='tela',
ims_site='HAIFA-TECHNION', season='JJA', month=None, pts=7,
ax=None):
import xarray as xr
import numpy as np
g = xr.open_dataset(
ims_path / 'IMS_G{}_israeli_10mins_daily_anoms.nc'.format(pts))[ims_site]
g.load()
    if season is not None and month is not None:
        raise ValueError('Please pick either season or month, not both.')
    if season is not None:
        g = g.sel(time=g['time.season'] == season)
        label = 'Gustiness {} IMS station in {} season'.format(
            site, season)
    elif month is not None:
        g = g.sel(time=g['time.month'] == month)
        label = 'Gustiness {} IMS station in {} month'.format(
            site, month)
# date = groupby_date_xr(g)
# # g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
# g_anoms = g.groupby(date) - g.groupby(date).mean('time')
# g_anoms = g_anoms.reset_coords(drop=True)
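    # hourly diurnal composite of the gustiness anomalies, scaled by 100 for display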
G = g.groupby('time.hour').mean('time') * 100.0
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
Gline = G.plot(ax=ax, color='b', marker='o', label='Gustiness')
ax.set_title(label)
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
# ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_hourly_anoms_thresh_50_homogenized.nc')[site]
    pw = pw.load().dropna('time')
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
elif month is not None:
pw = pw.sel(time=pw['time.month'] == month)
# date = groupby_date_xr(pw)
# pw = pw.groupby(date) - pw.groupby(date).mean('time')
# pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
PWline = pw.plot.line(ax=axpw, color='tab:green',
marker='s', label='PW ({})'.format(season))
axpw.axhline(0, color='k', linestyle='--')
lns = Gline + PWline
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
return lns
def plot_gustiness_facetgrid(path=work_yuval, ims_path=ims_path,
season='JJA', month=None, save=True):
import xarray as xr
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124'}
da = xr.DataArray([x for x in gnss_ims_dict.values()], dims=['GNSS'])
da['GNSS'] = [x for x in gnss_ims_dict.keys()]
to_remove = ['kabr', 'nzrt', 'katz', 'elro', 'klhv', 'yrcm', 'slom']
sites = [x for x in da['GNSS'].values if x not in to_remove]
da = da.sel(GNSS=sites)
gnss_order = ['bshm', 'mrav', 'drag', 'csar', 'yosh', 'dsea', 'tela', 'jslm',
'nrif', 'alon', 'ramo', 'elat']
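    # reorder the stations to follow gnss_order for the facet layout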
df = da.to_dataframe('gnss')
da = df.reindex(gnss_order).to_xarray()['gnss']
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
lns = plot_gustiness(path=path, ims_path=ims_path,
ims_site=gnss_ims_dict[site],
site=site, season=season, month=month, ax=ax)
labs = [l.get_label() for l in lns]
if site in ['tela', 'alon', 'dsea', 'csar', 'elat', 'nrif']:
ax.legend(lns, labs, loc='upper center', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
elif site in ['drag']:
ax.legend(lns, labs, loc='upper right', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
else:
ax.legend(lns, labs, loc='best', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
ax.set_title('')
ax.set_ylabel(r'G anomalies $\times$$10^{2}$')
# ax.text(.8, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
for i, ax in enumerate(fg.axes.flatten()):
if i > (da.GNSS.size-1):
ax.set_axis_off()
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.974,
bottom=0.053,
left=0.041,
right=0.955,
hspace=0.15,
wspace=0.3)
filename = 'gustiness_israeli_gnss_pw_diurnal_{}.png'.format(season)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_fft_diurnal(path=work_yuval, save=True):
import xarray as xr
import numpy as np
import matplotlib.ticker as tck
sns.set_style("whitegrid",
{'axes.grid': True,
'xtick.bottom': True,
'font.family': 'serif',
'ytick.left': True})
sns.set_context('paper')
power = xr.load_dataset(path / 'GNSS_PW_power_spectrum_diurnal.nc')
power = power.to_array('site')
sites = [x for x in power.site.values]
fg = power.plot.line(col='site', col_wrap=4,
sharex=False, figsize=(20, 18))
fg.set_xlabels('Frequency [cpd]')
fg.set_ylabels('PW PSD [dB]')
ticklabels = np.arange(0, 7)
for ax, site in zip(fg.axes.flatten(), sites):
sns.despine()
ax.set_title('')
ax.set_xticklabels(ticklabels)
# ax.tick_params(axis='y', which='minor')
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.set_xlim(0, 6.5)
ax.set_ylim(70, 125)
ax.grid(True)
ax.grid(which='minor', axis='y')
ax.text(.8, .85, site.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
fg.fig.tight_layout()
filename = 'power_pw_diurnal.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_rinex_availability_with_map(path=work_yuval, gis_path=gis_path,
scope='diurnal', ims=True,
dem_path=dem_path, fontsize=18, save=True):
# TODO: add box around merged stations and removed stations
# TODO: add color map labels to stations removed and merged
from aux_gps import gantt_chart
import xarray as xr
import pandas as pd
import geopandas as gpd
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
from matplotlib.colors import ListedColormap
from aux_gps import path_glob
sns.set_style('whitegrid')
sns.set_style('ticks')
print('{} scope selected.'.format(scope))
fig = plt.figure(figsize=(20, 15))
# grid = plt.GridSpec(1, 2, width_ratios=[
# 5, 2], wspace=0.1)
grid = plt.GridSpec(1, 2, width_ratios=[
5, 3], wspace=0.05)
ax_gantt = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_map = fig.add_subplot(grid[0, 1]) # plt.subplot(122)
# fig, ax = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(20, 6))
# RINEX gantt chart:
if scope == 'diurnal':
file = path_glob(path, 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')[-1]
elif scope == 'annual':
file = path / 'GNSS_PW_monthly_thresh_50.nc'
ds = xr.open_dataset(file)
just_pw = [x for x in ds if 'error' not in x]
ds = ds[just_pw]
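    # stack the stations into a single DataArray and clip the record at the end of 2019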
da = ds.to_array('station').sel(time=slice(None,'2019'))
da['station'] = [x.upper() for x in da.station.values]
ds = da.to_dataset('station')
# reorder for annual, coastal, highland and eastern:
stns = group_sites_to_xarray(scope='annual', upper=True).T.values.ravel()
stns = stns[~pd.isnull(stns)]
ds = ds[stns]
# colors:
colors = produce_colors_for_pwv_station(scope=scope, zebra=False)
title = 'Daily RINEX files availability for the Israeli GNSS stations'
ax_gantt = gantt_chart(
ds,
ax=ax_gantt,
fw='bold', grid=True,
title='', colors=colors,
pe_dict=None, fontsize=fontsize, linewidth=24, antialiased=False)
years_fmt = mdates.DateFormatter('%Y')
# ax_gantt.xaxis.set_major_locator(mdates.YearLocator())
ax_gantt.xaxis.set_major_locator(mdates.YearLocator(4))
ax_gantt.xaxis.set_minor_locator(mdates.YearLocator(1))
ax_gantt.xaxis.set_major_formatter(years_fmt)
# ax_gantt.xaxis.set_minor_formatter(years_fmt)
ax_gantt.tick_params(axis='x', labelrotation=0)
# Israel gps ims map:
ax_map = plot_israel_map(
gis_path=gis_path, ax=ax_map, ticklabelsize=fontsize)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
# scale_bar(ax_map, 50)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level',
size=fontsize, weight='normal')
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
# removed = ['hrmn', 'nizn', 'spir']
# removed = ['hrmn']
if scope == 'diurnal':
removed = ['hrmn', 'gilb', 'lhav']
elif scope == 'annual':
removed = ['hrmn', 'gilb', 'lhav']
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
# gps.loc[removed, :].plot(ax=ax_map, color='black', edgecolor='black', marker='s',
# alpha=1.0, markersize=25, facecolor='white')
# gps.loc[merged, :].plot(ax=ax_map, color='black', edgecolor='r', marker='s',
# alpha=0.7, markersize=25)
gps_stations = gps_list # [x for x in gps.index]
# to_plot_offset = ['mrav', 'klhv', 'nzrt', 'katz', 'elro']
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
# geo_annotate(ax_map, gps_normal_anno.lon, gps_normal_anno.lat,
# gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# geo_annotate(ax_map, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax_map, color='black', edgecolor='black',
marker='x', linewidth=2, zorder=2)
geo_annotate(ax_map, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=fontsize - 2, colorupdown=False)
# plt.legend(['GNSS \nreceiver sites',
# 'removed \nGNSS sites',
# 'merged \nGNSS sites',
# 'radiosonde\nstation'],
# loc='upper left', framealpha=0.7, fancybox=True,
# handletextpad=0.2, handlelength=1.5)
if ims:
print('getting IMS temperature stations metadata...')
ims = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims.plot(ax=ax_map, marker='o', edgecolor='tab:orange', alpha=1.0,
markersize=35, facecolor="tab:orange", zorder=1)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
print('getting solved GNSS israeli stations metadata...')
plt.legend(['GNSS \nstations',
'radiosonde\nstation', 'IMS stations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
else:
plt.legend(['GNSS \nstations',
'radiosonde\nstation'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
fig.subplots_adjust(top=0.95,
bottom=0.11,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2)
# plt.legend(['IMS stations', 'GNSS stations'], loc='upper left')
filename = 'rinex_israeli_gnss_map_{}.png'.format(scope)
# caption('Daily RINEX files availability for the Israeli GNSS station network at the SOPAC/GARNER website')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_means_box_plots(path=work_yuval, thresh=50, kind='box',
x='month', col_wrap=5, ylimits=None, twin=None,
twin_attrs=None,
xlimits=None, anoms=True, bins=None,
season=None, attrs_plot=True, save=True, ds_input=None):
import xarray as xr
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
attrs = [x.attrs for x in pw.data_vars.values()]
if x == 'month':
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# pw = pw.resample(time='MS').mean('time')
elif x == 'hour':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
if twin is not None:
twin = twin.groupby('time.month') - \
twin.groupby('time.month').mean('time')
twin = twin.reset_coords(drop=True)
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
elif x == 'day':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_daily_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
            pw = pw.groupby('time.dayofyear') - \
                pw.groupby('time.dayofyear').mean('time')
if season is not None:
if season != 'all':
print('{} season is selected'.format(season))
pw = pw.sel(time=pw['time.season'] == season)
all_seas = False
if twin is not None:
twin = twin.sel(time=twin['time.season'] == season)
else:
print('all seasons selected')
all_seas = True
else:
all_seas = False
for i, da in enumerate(pw.data_vars):
pw[da].attrs = attrs[i]
if not attrs_plot:
attrs = None
if ds_input is not None:
        # be careful: ds_input overrides the PWV dataset loaded above
pw = ds_input
fg = plot_multi_box_xr(pw, kind=kind, x=x, col_wrap=col_wrap,
ylimits=ylimits, xlimits=xlimits, attrs=attrs,
bins=bins, all_seasons=all_seas, twin=twin,
twin_attrs=twin_attrs)
attrs = [x.attrs for x in pw.data_vars.values()]
for i, ax in enumerate(fg.axes.flatten()):
try:
mean_years = float(attrs[i]['mean_years'])
# print(i)
# print(mean_years)
except IndexError:
ax.set_axis_off()
pass
if kind != 'hist':
[fg.axes[x, 0].set_ylabel('PW [mm]')
for x in range(len(fg.axes[:, 0]))]
# [fg.axes[-1, x].set_xlabel('month') for x in range(len(fg.axes[-1, :]))]
fg.fig.subplots_adjust(top=0.98,
bottom=0.05,
left=0.025,
right=0.985,
hspace=0.27,
wspace=0.215)
if season is not None:
filename = 'pw_{}ly_means_{}_seas_{}.png'.format(x, kind, season)
else:
filename = 'pw_{}ly_means_{}.png'.format(x, kind)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_interannual_MLR_results(path=climate_path, fontsize=16, save=True):
import matplotlib.pyplot as plt
from climate_works import run_best_MLR
# rds = xr.load_dataset(path / 'best_MLR_interannual_gnss_pwv.nc')
model_lci, rdf_lci = run_best_MLR(plot=False, heatmap=False, keep='lci',
add_trend=True)
rds_lci = model_lci.results_
model_eofi, rdf_eofi = run_best_MLR(plot=False, heatmap=False, keep='eofi',
add_trend=False)
rds_eofi = model_eofi.results_
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 7))
origln = rds_lci['original'].plot.line('k-.', ax=axes[0], linewidth=1.5)
predln_lci = rds_lci['predict'].plot.line('b-', ax=axes[0], linewidth=1.5)
predln_eofi = rds_eofi['predict'].plot.line(
'g-', ax=axes[0], linewidth=1.5)
r2_lci = rds_lci['r2_adj'].item()
r2_eofi = rds_eofi['r2_adj'].item()
axes[0].legend(origln+predln_lci+predln_eofi, ['mean PWV (12m-mean)', 'MLR with LCI (Adj R$^2$:{:.2f})'.format(
r2_lci), 'MLR with EOFs (Adj R$^2$:{:.2f})'.format(r2_eofi)], fontsize=fontsize-2)
axes[0].grid()
axes[0].set_xlabel('')
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].grid(which='minor', color='k', linestyle='--')
residln_lci = rds_lci['resid'].plot.line('b-', ax=axes[1])
residln_eofi = rds_eofi['resid'].plot.line('g-', ax=axes[1])
axes[1].legend(residln_lci+residln_eofi, ['MLR with LCI',
'MLR with EOFs'], fontsize=fontsize-2)
axes[1].grid()
axes[1].set_ylabel('Residuals [mm]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
axes[1].xaxis.set_major_locator(mdates.YearLocator(2))
axes[1].xaxis.set_minor_locator(mdates.YearLocator(1))
axes[1].xaxis.set_major_formatter(years_fmt)
axes[1].grid(which='minor', color='k', linestyle='--')
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
axes[1].figure.autofmt_xdate()
fig.tight_layout()
fig.subplots_adjust()
if save:
filename = 'pw_interannual_MLR_comparison.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_annual_pw(path=work_yuval, fontsize=20, labelsize=18, compare='uerra',
ylim=[7.5, 40], save=True, kind='violin', bins=None, ds=None,
add_temperature=False):
"""kind can be violin or hist, for violin choose ylim=7.5,40 and for hist
choose ylim=0,0.3"""
import xarray as xr
import pandas as pd
import numpy as np
from synoptic_procedures import slice_xr_with_synoptic_class
gnss_filename = 'GNSS_PW_monthly_thresh_50.nc'
# gnss_filename = 'first_climatol_try.nc'
pw = xr.load_dataset(path / gnss_filename)
df_annual = pw.to_dataframe()
hue = None
if compare is not None:
df_annual = prepare_reanalysis_monthly_pwv_to_dataframe(
path, re=compare, ds=ds)
hue = 'source'
if not add_temperature:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind=kind,
fg=None,
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, hue=hue,
save=False, bins=bins)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
filename = 'pw_annual_means_{}.png'.format(kind)
else:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind='mean_month',
fg=None, ticklabelcolor='tab:blue',
ylim=[10, 31], color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, hue=None,
save=False, bins=None)
# tmm = xr.load_dataset(path / 'GNSS_TD_monthly_1996_2020.nc')
tmm = xr.load_dataset(path / 'IMS_T/GNSS_TD_daily.nc')
tmm = tmm.groupby('time.month').mean()
dftm = tmm.to_dataframe()
# dftm.columns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
sites = group_sites_to_xarray(scope='annual')
sites_flat = sites.values.ravel()
# sites = sites[~pd.isnull(sites)]
for i, ax in enumerate(fg.axes.flat):
if pd.isnull(sites_flat[i]):
continue
twinax = ax.twinx()
twinax.plot(dftm.index.values, dftm[sites_flat[i]].values, color='tab:red',
markersize=10, marker='s', lw=1, markerfacecolor="None",
label='Temperature')
# dftm[sites[i]].plot(ax=twinax, color='r', markersize=10,
# marker='s', lw=1, markerfacecolor="None")
twinax.set_ylim(5, 37)
twinax.set_yticks(np.arange(5, 40, 10))
twinax.tick_params(axis='y', which='major', labelcolor='tab:red',
labelsize=labelsize)
if sites_flat[i] in sites.sel(group='eastern'):
twinax.set_ylabel(r'Temperature [$\degree$ C]',
fontsize=labelsize)
# fg.fig.canvas.draw()
# twinax.xaxis.set_ticks(np.arange(1, 13))
# twinax.tick_params(axis='x', which='major', labelsize=labelsize-2)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = twinax.get_legend_handles_labels()
labels = ['PWV', 'Surface Temperature']
fg.fig.legend(handles=lines+lines2, labels=labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.97,
bottom=0.029,
left=0.049,
right=0.96,
hspace=0.15,
wspace=0.17)
filename = 'pw_annual_means_temperature.png'
if save:
if compare is not None:
filename = 'pw_annual_means_{}_with_{}.png'.format(kind, compare)
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_multi_box_xr(pw, kind='violin', x='month', sharex=False, sharey=False,
col_wrap=5, ylimits=None, xlimits=None, attrs=None,
bins=None, all_seasons=False, twin=None, twin_attrs=None):
import xarray as xr
pw = pw.to_array('station')
if twin is not None:
twin = twin.to_array('station')
fg = xr.plot.FacetGrid(pw, col='station', col_wrap=col_wrap, sharex=sharex,
sharey=sharey)
for i, (sta, ax) in enumerate(zip(pw['station'].values, fg.axes.flatten())):
pw_sta = pw.sel(station=sta).reset_coords(drop=True)
if all_seasons:
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'DJF')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='o')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'MAM')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='^')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'JJA')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='s')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'SON')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='x')
df = pw_sta.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='d')
if sta == 'nrif' or sta == 'elat':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper center', framealpha=0.5, fancybox=True)
elif sta == 'yrcm' or sta == 'ramo':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper right', framealpha=0.5, fancybox=True)
else:
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='best', framealpha=0.5, fancybox=True)
else:
# if x == 'hour':
# # remove seasonal signal:
# pw_sta = pw_sta.groupby('time.dayofyear') - pw_sta.groupby('time.dayofyear').mean('time')
# elif x == 'month':
# # remove daily signal:
# pw_sta = pw_sta.groupby('time.hour') - pw_sta.groupby('time.hour').mean('time')
df = pw_sta.to_dataframe(sta)
if twin is not None:
twin_sta = twin.sel(station=sta).reset_coords(drop=True)
twin_df = twin_sta.to_dataframe(sta)
else:
twin_df = None
if attrs is not None:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i],
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
else:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None,
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
return fg
def plot_box_df(df, x='month', title='TELA', marker='o',
ylabel=r'IWV [kg$\cdot$m$^{-2}$]', ax=None, kind='violin',
ylimits=(5, 40), xlimits=None, attrs=None, bins=None, twin_df=None,
twin_attrs=None):
# x=hour is experimental
import seaborn as sns
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
# df = da_ts.to_dataframe()
if x == 'month':
df[x] = df.index.month
pal = sns.color_palette("Paired", 12)
elif x == 'hour':
df[x] = df.index.hour
if twin_df is not None:
twin_df[x] = twin_df.index.hour
# df[x] = df.index
pal = sns.color_palette("Paired", 12)
y = df.columns[0]
if ax is None:
fig, ax = plt.subplots()
if kind is None:
df = df.groupby(x).mean()
df.plot(ax=ax, legend=False, marker=marker)
if twin_df is not None:
twin_df = twin_df.groupby(x).mean()
twinx = ax.twinx()
twin_df.plot.line(ax=twinx, color='r', marker='s')
ax.axhline(0, color='k', linestyle='--')
if twin_attrs is not None:
twinx.set_ylabel(twin_attrs['ylabel'])
align_yaxis(ax, 0, twinx, 0)
ax.set_xlabel('Time of day [UTC]')
elif kind == 'violin':
sns.violinplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
gridsize=250, inner='quartile', scale='area')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'box':
kwargs = dict(markerfacecolor='r', marker='o')
sns.boxplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
whis=1.0, flierprops=kwargs, showfliers=False)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'hist':
if bins is None:
bins = 15
a = df[y].dropna()
sns.distplot(ax=ax, a=a, norm_hist=True, bins=bins, axlabel='PW [mm]')
xmean = df[y].mean()
xmedian = df[y].median()
std = df[y].std()
sk = skew(df[y].dropna().values)
kurt = kurtosis(df[y].dropna().values)
# xmode = df[y].mode().median()
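        # interpolate the distribution curve drawn by distplot to get the marker heights at the mean and median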
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean, color='r', linestyle='--')
ax.vlines(x=xmedian, ymin=0, ymax=ymed, color='g', linestyle='-')
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
# ax.legend(['Mean:{:.1f}'.format(xmean),'Median:{:.1f}'.format(xmedian),'Mode:{:.1f}'.format(xmode)])
ax.legend(['Mean: {:.1f}'.format(xmean),
'Median: {:.1f}'.format(xmedian)])
ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(
std, sk, kurt), transform=ax.transAxes)
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.yaxis.grid(True, which='minor', linestyle='--', linewidth=1, alpha=0.7)
ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
title = ax.get_title().split('=')[-1].strip(' ')
if attrs is not None:
mean_years = float(attrs['mean_years'])
ax.set_title('')
ax.text(.2, .85, y.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if kind is not None:
if kind != 'hist':
ax.text(.22, .72, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
ax.yaxis.tick_left()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
if ylimits is not None:
ax.set_ylim(*ylimits)
if twin_attrs is not None:
twinx.set_ylim(*twin_attrs['ylimits'])
align_yaxis(ax, 0, twinx, 0)
if xlimits is not None:
ax.set_xlim(*xlimits)
return ax
def plot_means_pw(load_path=work_yuval, ims_path=ims_path, thresh=50,
col_wrap=5, means='hour', save=True):
import xarray as xr
import numpy as np
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
if means == 'hour':
# remove long term monthly means:
pw_clim = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw_clim = pw_clim.groupby('time.{}'.format(means)).mean('time')
else:
pw_clim = pw.groupby('time.{}'.format(means)).mean('time')
# T = xr.load_dataset(
# ims_path /
# 'GNSS_5mins_TD_ALL_1996_2020.nc')
# T_clim = T.groupby('time.month').mean('time')
attrs = [x.attrs for x in pw.data_vars.values()]
fg = pw_clim.to_array('station').plot(col='station', col_wrap=col_wrap,
color='b', marker='o', alpha=0.7,
sharex=False, sharey=True)
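    # right-most facet column indices (only used by the commented-out twin temperature axis below)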
col_arr = np.arange(0, len(pw_clim))
right_side = col_arr[col_wrap-1::col_wrap]
for i, ax in enumerate(fg.axes.flatten()):
title = ax.get_title().split('=')[-1].strip(' ')
try:
mean_years = float(attrs[i]['mean_years'])
ax.set_title('')
ax.text(.2, .85, title.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
ax.text(.2, .73, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
# ax_t = ax.twinx()
# T_clim['{}'.format(title)].plot(
# color='r', linestyle='dashed', marker='s', alpha=0.7,
# ax=ax_t)
# ax_t.set_ylim(0, 30)
fg.fig.canvas.draw()
# labels = [item.get_text() for item in ax_t.get_yticklabels()]
# ax_t.yaxis.set_ticklabels([])
# ax_t.tick_params(axis='y', color='r')
# ax_t.set_ylabel('')
# if i in right_side:
# ax_t.set_ylabel(r'Surface temperature [$\degree$C]', fontsize=10)
# ax_t.yaxis.set_ticklabels(labels)
# ax_t.tick_params(axis='y', labelcolor='r', color='r')
# show months ticks and grid lines for pw:
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid()
# ax.legend([ax.lines[0], ax_t.lines[0]], ['PW', 'T'],
# loc='upper right', fontsize=10, prop={'size': 8})
# ax.legend([ax.lines[0]], ['PW'],
# loc='upper right', fontsize=10, prop={'size': 8})
except IndexError:
pass
# change bottom xticks to 1-12 and show them:
# fg.axes[-1, 0].xaxis.set_ticks(np.arange(1, 13))
[fg.axes[x, 0].set_ylabel('PW [mm]') for x in range(len(fg.axes[:, 0]))]
# adjust subplots:
fg.fig.subplots_adjust(top=0.977,
bottom=0.039,
left=0.036,
right=0.959,
hspace=0.185,
wspace=0.125)
filename = 'PW_{}_climatology.png'.format(means)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_gnss_radiosonde_monthly_means(sound_path=sound_path, path=work_yuval,
times=['2014', '2019'], sample='MS',
gps_station='tela', east_height=5000):
import xarray as xr
from aux_gps import path_glob
import pandas as pd
file = path_glob(sound_path, 'bet_dagan_phys_PW_Tm_Ts_*.nc')
phys = xr.load_dataset(file[0])['PW']
if east_height is not None:
file = path_glob(sound_path, 'bet_dagan_edt_sounding*.nc')
east = xr.load_dataset(file[0])['east_distance']
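        # resample the eastward sonde drift and select the level nearest to east_height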
east = east.resample(sound_time=sample).mean().sel(
Height=east_height, method='nearest')
east_df = east.reset_coords(drop=True).to_dataframe()
if times is not None:
phys = phys.sel(sound_time=slice(*times))
ds = phys.resample(sound_time=sample).mean(
).to_dataset(name='Bet-dagan-radiosonde')
ds = ds.rename({'sound_time': 'time'})
gps = xr.load_dataset(
path / 'GNSS_PW_thresh_50_homogenized.nc')[gps_station]
if times is not None:
gps = gps.sel(time=slice(*times))
ds[gps_station] = gps.resample(time=sample).mean()
df = ds.to_dataframe()
# now plot:
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
# [x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
# for x in axes]
df.columns = ['Bet dagan soundings', '{} GNSS station'.format(gps_station)]
sns.lineplot(data=df, markers=['o', 's'], linewidth=2.0, ax=axes[0])
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = (df.iloc[:, 1] - df.iloc[:, 0]).rename('Residuals')
sns.lineplot(data=df_r, color='k', marker='o', linewidth=1.5, ax=axes[1])
if east_height is not None:
ax_east = axes[1].twinx()
sns.lineplot(data=east_df, color='red',
marker='x', linewidth=1.5, ax=ax_east)
ax_east.set_ylabel(
'East drift at {} km altitude [km]'.format(east_height / 1000.0))
axes[1].axhline(y=0, color='r')
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return ds
def plot_wetz_example(path=tela_results_path, plot='WetZ', fontsize=16,
save=True):
from aux_gps import path_glob
import matplotlib.pyplot as plt
from gipsyx_post_proc import process_one_day_gipsyx_output
filepath = path_glob(path, 'tela*_smoothFinal.tdp')[3]
if plot is None:
df, meta = process_one_day_gipsyx_output(filepath, True)
return df, meta
else:
df, meta = process_one_day_gipsyx_output(filepath, False)
if not isinstance(plot, str):
            raise ValueError('Please pick only one field to plot, e.g., WetZ')
error_plot = '{}_error'.format(plot)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
desc = meta['desc'][plot]
unit = meta['units'][plot]
df[plot].plot(ax=ax, legend=False, color='k')
ax.fill_between(df.index, df[plot] - df[error_plot],
df[plot] + df[error_plot], alpha=0.5)
ax.grid()
# ax.set_title('{} from station TELA in {}'.format(
# desc, df.index[100].strftime('%Y-%m-%d')))
ax.set_ylabel('WetZ [{}]'.format(unit), fontsize=fontsize)
ax.set_xlabel('Time [UTC]', fontsize=fontsize)
ax.tick_params(which='both', labelsize=fontsize)
ax.grid('on')
fig.tight_layout()
filename = 'wetz_tela_daily.png'
    caption('{} from station TELA in {}. Note the error estimation from the GipsyX software (filled)'.format(
        desc, df.index[100].strftime('%Y-%m-%d')))
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_figure_3(path=tela_solutions, year=2004, field='WetZ',
middle_date='11-25', zooms=[10, 3, 0.5], save=True):
from gipsyx_post_proc import analyse_results_ds_one_station
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
dss = xr.open_dataset(path / 'TELA_ppp_raw_{}.nc'.format(year))
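    # gather the indices of the per-segment daily solutions stored as separate variables (e.g. 'WetZ-0', 'WetZ-1', ...)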
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
da = analyse_results_ds_one_station(dss, field=field, plot=False)
fig, axes = plt.subplots(ncols=1, nrows=3, sharex=False, figsize=(16, 10))
for j, ax in enumerate(axes):
start = pd.to_datetime('{}-{}'.format(year, middle_date)
) - pd.Timedelta(zooms[j], unit='D')
end = pd.to_datetime('{}-{}'.format(year, middle_date)
) + pd.Timedelta(zooms[j], unit='D')
daa = da.sel(time=slice(start, end))
for i, ppp in enumerate(ds):
ds['{}-{}'.format(field, i)].plot(ax=ax, linewidth=3.0)
daa.plot.line(marker='.', linewidth=0., ax=ax, color='k')
axes[j].set_xlim(start, end)
axes[j].set_ylim(daa.min() - 0.5, daa.max() + 0.5)
try:
axes[j - 1].axvline(x=start, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
axes[j - 1].axvline(x=end, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
except IndexError:
pass
units = ds.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
ax.grid()
# fig.suptitle(
# '30 hours stitched {} for GNSS station {}'.format(
# desc, sta), fontweight='bold')
fig.tight_layout()
caption('20, 6 and 1 days of zenith wet delay in 2004 from the TELA GNSS station for the top, middle and bottom figures respectively. The colored segments represent daily solutions while the black dots represent smoothed mean solutions.')
filename = 'zwd_tela_discon_panel.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
# fig.subplots_adjust(top=0.95)
return axes
def plot_figure_3_1(path=work_yuval, data='zwd'):
import xarray as xr
from aux_gps import plot_tmseries_xarray
from PW_stations import load_gipsyx_results
if data == 'zwd':
tela = load_gipsyx_results('tela', sample_rate='1H', plot_fields=None)
label = 'ZWD [cm]'
title = 'Zenith wet delay derived from GPS station TELA'
ax = plot_tmseries_xarray(tela, 'WetZ')
elif data == 'pw':
ds = xr.open_dataset(path / 'GNSS_hourly_PW.nc')
tela = ds['tela']
label = 'PW [mm]'
title = 'Precipitable water derived from GPS station TELA'
ax = plot_tmseries_xarray(tela)
ax.set_ylabel(label)
ax.set_xlim('1996-02', '2019-07')
ax.set_title(title)
ax.set_xlabel('')
ax.figure.tight_layout()
return ax
def plot_ts_tm(path=sound_path, model='TSEN',
times=['2007', '2019'], fontsize=14, save=True):
"""plot ts-tm relashonship"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
from PW_stations import ML_Switcher
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from sounding_procedures import get_field_from_radiosonde
models_dict = {'LR': 'Linear Regression',
'TSEN': 'Theil–Sen Regression'}
# sns.set_style('whitegrid')
pds = xr.Dataset()
Ts = get_field_from_radiosonde(path=sound_path, field='Ts',
data_type='phys', reduce=None, times=times,
plot=False)
Tm = get_field_from_radiosonde(path=sound_path, field='Tm',
data_type='phys', reduce='min', times=times,
plot=False)
pds['Tm'] = Tm
pds['Ts'] = Ts
pds = pds.dropna('sound_time')
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
pds.plot.scatter(
x='Ts',
y='Tm',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.grid()
ml = ML_Switcher()
fit_model = ml.pick_model(model)
X = pds.Ts.values.reshape(-1, 1)
y = pds.Tm.values
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
ax.plot(X, predict, c='r')
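    # Bevis et al. (1992) Ts-Tm relation, Tm = 0.72 * Ts + 70.0, plotted for comparison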
bevis_tm = pds.Ts.values * 0.72 + 70.0
ax.plot(pds.Ts.values, bevis_tm, c='purple')
ax.legend(['{} ({:.2f}, {:.2f})'.format(models_dict.get(model),
coef, inter), 'Bevis 1992 et al. (0.72, 70.0)'], fontsize=fontsize-4)
# ax.set_xlabel('Surface Temperature [K]')
# ax.set_ylabel('Water Vapor Mean Atmospheric Temperature [K]')
ax.set_xlabel('Ts [K]', fontsize=fontsize)
ax.set_ylabel('Tm [K]', fontsize=fontsize)
ax.set_ylim(265, 320)
ax.tick_params(labelsize=fontsize)
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = predict - y
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", 'edgecolor': 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(y, predict))
print(rmean, rmse)
r2 = r2_score(y, predict)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[K]')
textstr = '\n'.join(['n={}'.format(pds.Ts.size),
'RMSE: ', '{:.2f} K'.format(rmse)]) # ,
# r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# axin1.text(0.2, 0.9, 'n={}'.format(pds.Ts.size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.78, 0.9, 'RMSE: {:.2f} K'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
axin1.set_xlim(-15, 15)
fig.tight_layout()
filename = 'Bet_dagan_ts_tm_fit_{}-{}.png'.format(times[0], times[1])
    caption('Water vapor mean temperature (Tm) vs. surface temperature (Ts) of the Bet-Dagan radiosonde station. Ordinary least squares linear fit (red) yields the residual distribution with RMSE of 4 K. Bevis (1992) model is plotted (purple) for comparison.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_pw_tela_bet_dagan_scatterplot(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
cats=None,
times=['2007', '2019'], wv_name='pw',
r2=False, fontsize=14,
save=True):
"""plot the PW of Bet-Dagan vs. PW of gps station"""
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# sns.set_style('white')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path, sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
tpw = 'tpw_bet_dagan'
ds = ds[[tpw, 'tela_pw']].dropna('time')
ds = ds.sel(time=slice(*times))
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
ds.plot.scatter(x=tpw,
y='tela_pw',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
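    # 1:1 reference line (y = x)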
ax.plot(ds[tpw], ds[tpw], c='r')
ax.legend(['y = x'], loc='upper right', fontsize=fontsize)
if wv_name == 'pw':
ax.set_xlabel('PWV from Bet-Dagan [mm]', fontsize=fontsize)
ax.set_ylabel('PWV from TELA GPS station [mm]', fontsize=fontsize)
elif wv_name == 'iwv':
ax.set_xlabel(
r'IWV from Bet-Dagan station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.set_ylabel(
r'IWV from TELA GPS station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.grid()
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = ds.tela_pw.values - ds[tpw].values
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", "edgecolor": 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(ds[tpw].values, ds.tela_pw.values))
r2s = r2_score(ds[tpw].values, ds.tela_pw.values)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[mm]')
ax.tick_params(labelsize=fontsize)
if wv_name == 'pw':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse)])
elif wv_name == 'iwv':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(rmse)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
#
# axin1.text(0.2, 0.95, 'n={}'.format(ds[tpw].size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.3, 0.85, 'bias: {:.2f} mm'.format(rmean),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.35, 0.75, 'RMSE: {:.2f} mm'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# fig.suptitle('Precipitable Water comparison for the years {} to {}'.format(*times))
fig.tight_layout()
caption(
        'PW from TELA GNSS station vs. PW from Bet-Dagan radiosonde station in {}-{}. A 45 degree line is plotted (red) for comparison. Note the skew in the residual distribution with an RMSE of 4.37 mm.'.format(times[0], times[1]))
# fig.subplots_adjust(top=0.95)
filename = 'Bet_dagan_tela_pw_compare_{}-{}.png'.format(times[0], times[1])
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ds
def plot_tela_bet_dagan_comparison(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
times=['2007', '2020'], cats=None,
compare='pwv',
save=True):
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
# sns.set_style('whitegrid')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path,
sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
ds = ds.dropna('time')
ds = ds.sel(time=slice(*times))
if compare == 'zwd':
df = ds[['zwd_bet_dagan', 'tela']].to_dataframe()
elif compare == 'pwv':
df = ds[['tpw_bet_dagan', 'tela_pw']].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
df.columns = ['Bet-Dagan soundings', 'TELA GNSS station']
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = (df.iloc[:, 0] - df.iloc[:, 1]).rename('Residuals')
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if compare == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif compare == 'pwv':
axes[0].set_ylabel('Precipitable Water Vapor [mm]')
axes[1].set_ylabel('Residuals [mm]')
# axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS satation')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate(
'changed sonde type from VIZ MK-II to PTU GPS',
(mdates.date2num(sonde_change_x),
10),
xytext=(
15,
15),
textcoords='offset points',
arrowprops=dict(
arrowstyle='fancy',
color='red'),
color='red')
# axes[1].set_aspect(3)
[x.set_xlim(*[pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
filename = 'Bet_dagan_tela_{}_compare.png'.format(compare)
    caption('Top: zenith wet delay from the Bet-Dagan radiosonde station (blue circles) and from the TELA GNSS station (orange x) in 2007-2019. Bottom: residuals. Note that the residuals become more constrained from 08-2013, probably due to an equipment change.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def plot_israel_map_from_shape_file(gis_path=gis_path):
import geopandas as gpd
agr = gpd.read_file(gis_path/'ISR_agriculture_districts.shp')
isr = gpd.GeoSeries(agr.geometry.unary_union)
isr.crs = agr.crs
isr = isr.to_crs(epsg=4326)
return isr
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None):
"""general nice map for israel, need that to plot stations,
and temperature field on top of it"""
import geopandas as gpd
import contextily as ctx
import seaborn as sns
import cartopy.crs as ccrs
sns.set_style("ticks", rc=rc)
isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
# isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
crs_epsg = ccrs.epsg('3857')
# crs_epsg = ccrs.epsg('2039')
if ax is None:
# fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg},
# figsize=(6, 15))
bounds = isr_with_yosh.geometry.total_bounds
extent = [bounds[0], bounds[2], bounds[1], bounds[3]]
# ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg)
# ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg)
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
else:
isr_with_yosh.plot(alpha=0.0, ax=ax)
ctx.add_basemap(
ax,
source=ctx.providers.Stamen.TerrainBackground,
crs='epsg:4326')
ax.xaxis.set_major_locator(ticker.MaxNLocator(2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_major_formatter(lat_formatter)
ax.xaxis.set_major_formatter(lon_formatter)
ax.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=ticklabelsize)
# scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds)
return ax
def plot_israel_with_stations(gis_path=gis_path, dem_path=dem_path, ims=True,
gps=True, radio=True, terrain=True, alt=False,
ims_names=False, gps_final=False, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import geopandas as gpd
ax = plot_israel_map(gis_path)
station_names = []
legend = []
if ims:
print('getting IMS temperature stations metadata...')
ims_t = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims_t.plot(ax=ax, color='red', edgecolor='black', alpha=0.5)
station_names.append('ims')
legend.append('IMS stations')
if ims_names:
geo_annotate(ax, ims_t.lon, ims_t.lat,
ims_t['name_english'], xytext=(3, 3), fmt=None,
c='k', fw='normal', fs=7, colorupdown=False)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
if gps:
print('getting solved GNSS israeli stations metadata...')
gps_df = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
        if gps_final:
            to_drop = ['gilb', 'lhav', 'hrmn', 'nizn', 'spir']
            gps_final_stations = [x for x in gps_df.index if x not in to_drop]
            gps = gps_df.loc[gps_final_stations, :]
        else:
            gps = gps_df
        gps.plot(ax=ax, color='k', edgecolor='black', marker='s')
gps_stations = [x for x in gps.index]
to_plot_offset = ['gilb', 'lhav']
# [gps_stations.remove(x) for x in to_plot_offset]
gps_normal_anno = gps.loc[gps_stations, :]
# gps_offset_anno = gps.loc[to_plot_offset, :]
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
if alt:
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.alt, xytext=(4, -6), fmt='{:.0f}',
c='k', fw='bold', fs=9, colorupdown=False)
# geo_annotate(ax, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('gps')
legend.append('GNSS stations')
if terrain:
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax.set_xlabel('')
ax.set_ylabel('')
if radio: # plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax, color='black', edgecolor='black',
marker='+')
geo_annotate(ax, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('radio')
legend.append('radiosonde')
if legend:
plt.legend(legend, loc='upper left')
plt.tight_layout()
plt.subplots_adjust(bottom=0.05)
if station_names:
station_names = '_'.join(station_names)
else:
station_names = 'no_stations'
filename = 'israel_map_{}.png'.format(station_names)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_zwd_lapse_rate(path=work_yuval, fontsize=18, model='TSEN', save=True):
from PW_stations import calculate_zwd_altitude_fit
df, zwd_lapse_rate = calculate_zwd_altitude_fit(path=path, model=model,
plot=True, fontsize=fontsize)
    if save:
        filename = 'zwd_lapse_rate.png'
        plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_ims_T_lapse_rate(ims_path=ims_path, dt='2013-10-19T22:00:00',
fontsize=16, save=True):
from aux_gps import path_glob
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# from matplotlib import rc
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
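        # ordinary least squares fit of temperature vs. station altitude; the slope yields the lapse rate (converted below from degC/m to degC/km)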
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# rc('text', usetex=False)
# rc('text',latex.unicode=False)
glob_str = 'IMS_TD_israeli_10mins*.nc'
file = path_glob(ims_path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
time_dim = list(set(ds.dims))[0]
# slice to a starting year(1996?):
ds = ds.sel({time_dim: slice('1996', None)})
# years = sorted(list(set(ds[time_dim].dt.year.values)))
# get coords and alts of IMS stations:
T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])
# T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])
# T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])
print('loading IMS_TD of israeli stations 10mins freq..')
# transform to dataframe and add coords data to df:
tdf = ds.to_dataframe()
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
dt = pd.to_datetime(dt)
# prepare the ims coords and temp df(Tloc_df) and the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(tdf, dt, T_alts, 'auto')
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',
scatter_kws={'color': 'k'}, ax=ax_lapse)
# suptitle = dt.strftime('%Y-%m-%d %H:%M')
ax_lapse.set_xlabel('Altitude [m]', fontsize=fontsize)
ax_lapse.set_ylabel(r'Temperature [$\degree$C]', fontsize=fontsize)
ax_lapse.text(0.5, 0.95, r'Lapse rate: {:.2f} $\degree$C/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize,
transform=ax_lapse.transAxes, color='k')
ax_lapse.grid()
ax_lapse.tick_params(labelsize=fontsize)
# ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
fig.tight_layout()
filename = 'ims_lapse_rate_example.png'
    caption('Temperature vs. altitude at 10 PM on 2013-10-19 for all automatic 10-min IMS stations. The lapse rate is calculated using an ordinary least squares linear fit.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax_lapse
def plot_figure_9(hydro_path=hydro_path, gis_path=gis_path, pw_anom=False,
max_flow_thresh=None, wv_name='pw', save=True):
from hydro_procedures import get_hydro_near_GNSS
from hydro_procedures import loop_over_gnss_hydro_and_aggregate
import matplotlib.pyplot as plt
df = get_hydro_near_GNSS(
radius=5,
hydro_path=hydro_path,
gis_path=gis_path,
plot=False)
ds = loop_over_gnss_hydro_and_aggregate(df, pw_anom=pw_anom,
max_flow_thresh=max_flow_thresh,
hydro_path=hydro_path,
work_yuval=work_yuval, ndays=3,
plot=False, plot_all=False)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(10, 6))
for name in names:
ds.mean('station').mean('tide_start')[name].plot.line(
marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.grid()
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x, y, z)
for x, y, z in fmt])
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_xticklabels()]
xlabels = [x.replace('−', '') for x in labels]
ax.set_xticklabels(xlabels)
fig.canvas.draw()
if wv_name == 'pw':
if pw_anom:
ax.set_ylabel('PW anomalies [mm]')
else:
ax.set_ylabel('PW [mm]')
elif wv_name == 'iwv':
if pw_anom:
ax.set_ylabel(r'IWV anomalies [kg$\cdot$m$^{-2}$]')
else:
ax.set_ylabel(r'IWV [kg$\cdot$m$^{-2}$]')
fig.tight_layout()
# if pw_anom:
# title = 'Mean PW anomalies for tide stations near all GNSS stations'
# else:
# title = 'Mean PW for tide stations near all GNSS stations'
# if max_flow_thresh is not None:
# title += ' (max_flow > {} m^3/sec)'.format(max_flow_thresh)
# ax.set_title(title)
if pw_anom:
filename = 'hydro_tide_lag_pw_anom.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
else:
filename = 'hydro_tide_lag_pw.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_max{}.png'.format(
max_flow_thresh)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
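# Hedged usage sketch for plot_figure_9 (assumes the hydro and GIS datasets read
# by get_hydro_near_GNSS / loop_over_gnss_hydro_and_aggregate are available):
def _sketch_plot_figure_9_iwv_anom(max_flow_thresh=None):
    """Example call: the IWV-anomalies variant of Figure 9, without saving."""
    return plot_figure_9(pw_anom=True, max_flow_thresh=max_flow_thresh,
                         wv_name='iwv', save=False)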
def produce_table_1(removed=['hrmn', 'nizn', 'spir'], merged={'klhv': ['klhv', 'lhav'],
'mrav': ['gilb', 'mrav']}, add_location=False,
scope='annual', remove_distance=True):
"""for scope='diurnal' use removed=['hrmn'], add_location=True
and remove_distance=False"""
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
sites = group_sites_to_xarray(upper=False, scope=scope)
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
new = sites.T.values.ravel()
if scope == 'annual':
new = [x for x in new.astype(str) if x != 'nan']
df_gnss = df_gnss.reindex(new)
df_gnss['ID'] = df_gnss.index.str.upper()
pd.options.display.float_format = '{:.2f}'.format
df = df_gnss[['name', 'ID', 'lat', 'lon', 'alt', 'distance']]
df['alt'] = df['alt'].map('{:,.0f}'.format)
df['distance'] = df['distance'].astype(int)
cols = ['GNSS Station name', 'Station ID', 'Latitude [N]',
'Longitude [E]', 'Altitude [m a.s.l]', 'Distance from shore [km]']
df.columns = cols
if scope != 'annual':
df.loc['spir', 'GNSS Station name'] = 'Sapir'
if remove_distance:
df = df.iloc[:, 0:-1]
if add_location:
groups = group_sites_to_xarray(upper=False, scope=scope)
coastal = groups.sel(group='coastal').values
coastal = coastal[~pd.isnull(coastal)]
highland = groups.sel(group='highland').values
highland = highland[~pd.isnull(highland)]
eastern = groups.sel(group='eastern').values
eastern = eastern[~pd.isnull(eastern)]
df.loc[coastal, 'Location'] = 'Coastal'
df.loc[highland, 'Location'] = 'Highland'
df.loc[eastern, 'Location'] = 'Eastern'
if removed is not None:
df = df.loc[[x for x in df.index if x not in removed], :]
if merged is not None:
# note: station merging is not implemented here; when a merge mapping is
# supplied the table is returned as-is, without printing the LaTeX version.
return df
print(df.to_latex(index=False))
return df
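# Hedged usage sketch for produce_table_1, following the settings suggested in
# its docstring (assumes the GNSS station metadata is available):
def _sketch_produce_table_1_diurnal():
    """Example call: the diurnal-scope variant of Table 1."""
    return produce_table_1(removed=['hrmn'], add_location=True,
                           scope='diurnal', remove_distance=False)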
def produce_table_stats(thresh=50, add_location=True, add_height=True):
"""Produce the PWV statistics table (TODO: add a plot of SD vs. height with SE(SD) error bars)."""
from PW_stations import produce_pw_statistics
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
import xarray as xr
sites = group_sites_to_xarray(upper=False, scope='annual')
new = sites.T.values.ravel()
new = [x for x in new.astype(str) if x != 'nan']
pw_mm = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
pw_mm = pw_mm[new]
df = produce_pw_statistics(
thresh=thresh, resample_to_mm=False, pw_input=pw_mm)
if add_location:
cols = [x for x in df.columns]
cols.insert(1, 'Location')
gr_df = sites.to_dataframe('sites')
location = [gr_df[gr_df == x].dropna().index.values.item()[
1].title() for x in new]
df['Location'] = location
df = df[cols]
if add_height:
cols = [x for x in df.columns]
if add_location:
cols.insert(2, 'Height [m a.s.l]')
else:
cols.insert(1, 'Height [m a.s.l]')
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=False)
# pd.options.display.float_format = '{:.2f}'.format
df['Height [m a.s.l]'] = df_gnss['alt'].map('{:.0f}'.format)
df = df[cols]
print(df.to_latex(index=False))
return df
def plot_pwv_longterm_trend(path=work_yuval, model_name='LR', save=True,
fontsize=16, add_era5=True):
import matplotlib.pyplot as plt
from aux_gps import linear_fit_using_scipy_da_ts
# from PW_stations import ML_Switcher
import xarray as xr
from aux_gps import anomalize_xr
# model_name: 'TSEN' (Theil-Sen) or 'LR' (linear regression) for the linear fit
# load GNSS Israel:
# pw = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_mean = pw_anoms.to_array('station').mean('station')
pw_std = pw_anoms.to_array('station').std('station')
pw_weights = 1 / pw_anoms.to_array('station').count('station')
# add ERA5:
era5 = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_anoms = era5_anoms.sel(time=slice(
pw_mean.time.min(), pw_mean.time.max()))
era5_mean = era5_anoms.to_array('station').mean('station')
era5_std = era5_anoms.to_array('station').std('station')
# init linear models
# ml = ML_Switcher()
# model = ml.pick_model(model_name)
if add_era5:
fig, ax = plt.subplots(2, 1, figsize=(15, 7.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3652.5,
plot=False, ax=None, units=None, method='curve_fit', weights=pw_weights)
pwln = pw_mean.plot(ax=ax[0], color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax[0], color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_lo.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax[0].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[0].grid()
ax[0].set_xlabel('')
ax[0].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[0].tick_params(labelsize=fontsize)
trend1, trend_hi1, trend_lo1, slope1, slope_hi1, slope_lo1 = linear_fit_using_scipy_da_ts(era5_mean, model=model_name, slope_factor=3652.5,
plot=False, ax=None, units=None, method='curve_fit', weights=era5_std)
era5ln = era5_mean.plot(ax=ax[1], color='k', marker='o', linewidth=1.5)
trendln1 = trend1.plot(ax=ax[1], color='r', linewidth=2)
trend_hi1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_lo1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope1, slope_lo1, slope_hi1)
handles = era5ln+trendln1
labels = ['ERA5-mean']
labels.append(trend_label)
ax[1].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[1].grid()
ax[1].set_xlabel('')
ax[1].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[1].tick_params(labelsize=fontsize)
else:
fig, ax = plt.subplots(1, 1, figsize=(15, 5.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3652.5,
plot=False, ax=None, units=None)
pwln = pw_mean.plot(ax=ax, color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax, color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax, linewidth=1.5)
trend_lo.plot.line('r--', ax=ax, linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax.legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
fig.suptitle('PWV mean anomalies and linear trend',
fontweight='bold', fontsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_mean_trend_{}.png'.format(model_name)
plt.savefig(savefig_path / filename, orientation='portrait')
return ax
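# --- Hedged sketch (not part of the original analysis) ---
# The slope_factor passed to linear_fit_using_scipy_da_ts above converts a
# per-day slope into the mm/decade units quoted in the legend
# (365.25 days/year x 10 years = 3652.5 days/decade):
def _sketch_slope_per_day_to_per_decade(slope_per_day):
    """Convert a linear-trend slope from mm/day to mm/decade."""
    days_per_decade = 365.25 * 10  # = 3652.5
    return slope_per_day * days_per_decade
# e.g. a fitted slope of 1.2e-4 mm/day is ~0.44 mm/decade.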
def plot_trend_filled_pwv_and_era5_barh_plot(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
from PW_stations import process_mkt_from_dataset
import pandas as pd
import seaborn as sns
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
era5 = era5[[x for x in era5 if x in gnss]]
df_gnss = process_mkt_from_dataset(
gnss,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_gnss = add_location_to_GNSS_stations_dataframe(df_gnss)
df_gnss['sig'] = df_gnss['p'].astype(float) <= 0.05
df_era5 = process_mkt_from_dataset(
era5,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_era5 = add_location_to_GNSS_stations_dataframe(df_era5)
df_era5['sig'] = df_era5['p'].astype(float) <= 0.05
df = pd.concat([df_gnss, df_era5], keys=['GNSS', 'ERA5'])
df1 = df.unstack(level=0)
df = df1.stack().reset_index()
df.columns = ['station', '', 'p', 'Tau', 'slope', 'intercept', 'CI_5_low',
'CI_5_high', 'Location', 'sig']
sns.barplot(x="slope", y='station', hue='', data=df[df['sig']])
# df['slope'].unstack(level=0).plot(kind='barh', subplots=False, xerr=1)
return df
def produce_filled_pwv_and_era5_mann_kendall_table(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
df = add_comparison_to_mann_kendall_table(gnss, era5, 'GNSS', 'ERA5')
print(df.to_latex(header=False, index=False))
return df
def add_comparison_to_mann_kendall_table(ds1, ds2, name1='GNSS', name2='ERA5',
alpha=0.05):
df1 = produce_table_mann_kendall(ds1, alpha=alpha)
df2 = produce_table_mann_kendall(ds2, alpha=alpha)
df = df1['Site ID'].to_frame()
df[name1+'1'] = df1["Kendall's Tau"]
df[name2+'1'] = df2["Kendall's Tau"]
df[name1+'2'] = df1['P-value']
df[name2+'2'] = df2['P-value']
df[name1+'3'] = df1["Sen's slope"]
df[name2+'3'] = df2["Sen's slope"]
df[name1+'4'] = df1["Percent change"]
df[name2+'4'] = df2["Percent change"]
return df
def produce_table_mann_kendall(pwv_ds, alpha=0.05,
sort_by=['groups_annual', 'lat']):
from PW_stations import process_mkt_from_dataset
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import reduce_tail_xr
import xarray as xr
def table_process_df(df, means):
df_sites = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df_sites.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
# calculate percent changes from last decade means:
df['CI95'] = '(' + df['CI_95_low'].map('{:.2f}'.format).astype(
str) + ', ' + df['CI_95_high'].map('{:.2f}'.format).astype(str) + ')'
df['means'] = means
df['Pct_change'] = 100 * df['slope'] / df['means']
Pct_high = 100 * df['CI_95_high'] / df['means']
Pct_low = 100 * df['CI_95_low'] / df['means']
df['Pct_change_CI95'] = '(' + Pct_low.map('{:.2f}'.format).astype(
str) + ', ' + Pct_high.map('{:.2f}'.format).astype(str) + ')'
# df['Temperature change'] = df['Percent change'] / 7.0
df.drop(['means', 'CI_95_low', 'CI_95_high'], axis=1, inplace=True)
# station id is big:
df['id'] = df.index.str.upper()
# , 'Temperature change']]
df = df[['id', 'Tau', 'p', 'slope', 'CI95',
'Pct_change', 'Pct_change_CI95']]
# filter for non significant trends:
# df['slope'] = df['slope'][df['p'] < 0.05]
# df['Pct_change'] = df['Pct_change'][df['p'] < 0.05]
# df['CI95'] = df['CI95'][df['p'] < 0.05]
# df['Pct_change_CI95'] = df['Pct_change_CI95'][df['p'] < 0.05]
# format the p-values, Tau and Sen's slope columns for the table
# (use .loc indexing to avoid chained-assignment pitfalls):
df.loc[df['p'] < 0.001, 'p'] = '<0.001'
df.loc[df['p'] != '<0.001', 'p'] = df['p'][df['p'] !=
                                           '<0.001'].astype(float).map('{:,.3f}'.format)
df['Tau'] = df['Tau'].map('{:,.3f}'.format)
df['slope'] = df['slope'].map('{:,.2f}'.format)
df.loc[df['slope'] == 'nan', 'slope'] = '-'
df.columns = [
'Site ID',
"Kendall's Tau",
'P-value',
"Sen's slope", "Sen's slope CI 95%",
'Percent change', 'Percent change CI 95%'] # , 'Temperature change']
df['Percent change'] = df['Percent change'].map('{:,.1f}'.format)
df['Percent change'] = df[df["Sen's slope"] != '-']['Percent change']
df['Percent change'] = df['Percent change'].fillna('-')
df["Sen's slope CI 95%"] = df["Sen's slope CI 95%"].fillna(' ')
df['Percent change CI 95%'] = df['Percent change CI 95%'].fillna(' ')
df["Sen's slope"] = df["Sen's slope"].astype(
str) + ' ' + df["Sen's slope CI 95%"].astype(str)
df['Percent change'] = df['Percent change'].astype(
str) + ' ' + df['Percent change CI 95%'].astype(str)
df.drop(['Percent change CI 95%', "Sen's slope CI 95%"],
axis=1, inplace=True)
# df['Temperature change'] = df['Temperature change'].map('{:,.1f}'.format)
# df['Temperature change'] = df[df["Sen's slope"] != '-']['Temperature change']
# df['Temperature change'] = df['Temperature change'].fillna('-')
# last, reindex according to geography:
# gr = group_sites_to_xarray(scope='annual')
# new = [x for x in gr.T.values.ravel() if isinstance(x, str)]
new = [x for x in sites if x in df.index]
df = df.reindex(new)
return df
# if load_data == 'pwv-homo':
# print('loading homogenized (RH) pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# elif load_data == 'pwv-orig':
# print('loading original pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
# elif load_data == 'pwv-era5':
# print('loading era5 pwv dataset.')
# data = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
# if pwv_ds is not None:
# print('loading user-input pwv dataset.')
# data = pwv_ds
df = process_mkt_from_dataset(
pwv_ds,
alpha=alpha,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_mean = reduce_tail_xr(pwv_ds, reduce='mean', records=120,
return_df=True)
table = table_process_df(df, df_mean)
# print(table.to_latex(index=False))
return table
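# --- Hedged sketch (not part of the original analysis) ---
# table_process_df above expresses the Sen's slope as a percent change relative
# to the last-decade mean (reduce_tail_xr with records=120 monthly values,
# i.e. presumably a per-decade slope given factor=120). The bare arithmetic:
def _sketch_percent_change_per_decade(sen_slope, last_decade_mean):
    """Percent change implied by a Sen's slope, relative to the last-decade mean."""
    return 100.0 * sen_slope / last_decade_mean
# e.g. a slope of 0.5 mm/decade at a station with a 20 mm last-decade mean
# amounts to a 2.5 % change per decade.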
def plot_filled_and_unfilled_pwv_monthly_anomalies(pw_da, anomalize=True,
max_gap=6,
method='cubic',
ax=None):
from aux_gps import anomalize_xr
import matplotlib.pyplot as plt
import numpy as np
if anomalize:
pw_da = anomalize_xr(pw_da, 'MS')
max_gap_td = np.timedelta64(max_gap, 'M')
filled = pw_da.interpolate_na('time', method=method, max_gap=max_gap_td)
if ax is None:
fig, ax = plt.subplots(figsize=(15, 5))
filledln = filled.plot.line('b-', ax=ax)
origln = pw_da.plot.line('r-', ax=ax)
ax.legend(origln + filledln,
['original time series',
'filled using {} interpolation with max gap of {} months'.format(method,
max_gap)])
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV [mm]')
ax.set_title('PWV station {}'.format(pw_da.name.upper()))
return ax
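# --- Hedged sketch (not part of the original analysis) ---
# The function above fills gaps in a monthly PWV series with xarray's
# interpolate_na, limited to gaps no longer than max_gap months. A minimal,
# self-contained example on synthetic data (using an approximate day-based
# max_gap instead of calendar months):
def _sketch_fill_monthly_gaps(max_gap_months=6, method='cubic'):
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2000-01-01', periods=48, freq='MS')
    da = xr.DataArray(np.sin(np.arange(48) * 2 * np.pi / 12), dims='time',
                      coords={'time': time}, name='pwv_sketch')
    da[10:13] = np.nan   # 3-month gap -> short enough to be filled
    da[30:40] = np.nan   # 10-month gap -> left as NaN
    max_gap = np.timedelta64(30 * max_gap_months, 'D')
    return da.interpolate_na('time', method=method, max_gap=max_gap)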
def plot_pwv_statistic_vs_height(pwv_ds, stat='mean', x='alt', season=None,
ax=None, color='b'):
from PW_stations import produce_geo_gnss_solved_stations
import matplotlib.pyplot as plt
from aux_gps import calculate_std_error
import pandas as pd
if season is not None:
print('{} season selected'.format(season))
pwv_ds = pwv_ds.sel(time=pwv_ds['time.season'] == season)
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
if stat == 'mean':
pw_stat = pwv_ds.mean()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
elif stat == 'std':
pw_stat = pwv_ds.std()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
df[stat] = pd.Series(
pw_stat.to_array(
dim='gnss'),
index=pw_stat.to_array('gnss')['gnss'])
df['{}_error'.format(stat)] = pd.Series(pw_stat_error.to_array(
dim='gnss'), index=pw_stat_error.to_array('gnss')['gnss'])
if ax is None:
fig, ax = plt.subplots()
if x == 'alt':
ax.set_xlabel('Altitude [m a.s.l]')
elif x == 'distance':
ax.set_xlabel('Distance to sea shore [km]')
ax.set_ylabel('{} [mm]'.format(stat))
ax.errorbar(df[x],
df[stat],
df['{}_error'.format(stat)],
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color=color)
if season is not None:
ax.set_title('{} season'.format(season))
ax.grid()
return ax
def add_location_to_GNSS_stations_dataframe(df, scope='annual'):
import pandas as pd
# load location data:
gr = group_sites_to_xarray(scope=scope)
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
return df
def plot_peak_amplitude_altitude_long_term_pwv(path=work_yuval, era5=False,
add_a1a2=True, save=True, fontsize=16):
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from fitting_routines import fit_poly_model_xr
from aux_gps import remove_suffix_from_ds
from PW_stations import produce_geo_gnss_solved_stations
# load alt data, distance etc.,
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
df_geo = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
if era5:
dss = xr.load_dataset(path / 'GNSS_PW_ERA5_harmonics_annual.nc')
else:
dss = xr.load_dataset(path / 'GNSS_PW_harmonics_annual.nc')
dss = dss[[x for x in dss if '_params' in x]]
dss = remove_suffix_from_ds(dss)
df = dss.sel(cpy=1, params='ampl').reset_coords(drop=True).to_dataframe().T
df.columns = ['A1', 'A1std']
df = df.join(dss.sel(cpy=2, params='ampl').reset_coords(drop=True).to_dataframe().T)
# abs bc sometimes the fit get a sine amp negative:
df = np.abs(df)
df.columns = ['A1', 'A1std', 'A2', 'A2std']
df['A2A1'] = df['A2'] / df['A1']
a2a1std = np.sqrt((df['A2std']/df['A1'])**2 + (df['A2']*df['A1std']/df['A1']**2)**2)
df['A2A1std'] = a2a1std
# load location data:
gr = group_sites_to_xarray(scope='annual')
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
df['alt'] = df_geo['alt']
df = df.set_index('alt')
df = df.sort_index()
cdict = produce_colors_for_pwv_station(scope='annual', as_cat_dict=True)
cdict = dict(zip([x.capitalize() for x in cdict.keys()], cdict.values()))
if add_a1a2:
fig, axes = plt.subplots(2, 1, sharex=False, figsize=(8, 12))
ax = axes[0]
else:
ax = None
# colors=produce_colors_for_pwv_station(scope='annual')
ax = sns.scatterplot(data=df, y='A1', x='alt', hue='Location',
palette=cdict, ax=ax, s=100, zorder=20)
# ax.legend(prop={'size': fontsize})
x_coords = []
y_coords = []
colors = []
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
# linear fit:
x = df.index.values
y = df['A1'].values
p = fit_poly_model_xr(x, y, 1, plot=None, ax=None, return_just_p=True)
fit_label = r'Fitted line, slope: {:.2f} mm$\cdot$km$^{{-1}}$'.format(p[0] * -1000)
fit_poly_model_xr(x, y, 1, plot='manual', ax=ax, fit_label=fit_label)
ax.set_ylabel('PWV annual amplitude [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_yticks(np.arange(1, 6, 1))
if add_a1a2:
ax.set_xlabel('')
else:
ax.set_xlabel('GNSS station height [m a.s.l]')
ax.grid(True)
ax.legend(prop={'size': fontsize-3})
if add_a1a2:
# convert to percent:
df['A2A1'] = df['A2A1'].mul(100)
df['A2A1std'] = df['A2A1std'].mul(100)
ax = sns.scatterplot(data=df, y='A2A1', x='alt',
hue='Location', ax=axes[1],
legend=True, palette=cdict,
s=100, zorder=20)
x_coords = []
y_coords = []
colors = []
# ax.legend(prop={'size':fontsize+4}, fontsize=fontsize)
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A2A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
df_upper = df.iloc[9:]
y = df_upper['A2A1'].values
x = df_upper.index.values
p = fit_poly_model_xr(x, y, 1, return_just_p=True)
fit_label = r'Fitted line, slope: {:.1f} %$\cdot$km$^{{-1}}$'.format(p[0] * 1000)
p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
return_just_p=False, color='r',
fit_label=fit_label)
df_lower = df.iloc[:11]
mean = df_lower['A2A1'].mean()
std = df_lower['A2A1'].std()
stderr = std / np.sqrt(len(df_lower))
ci = 1.96 * stderr
ax.hlines(xmin=df_lower.index.min(), xmax=df_lower.index.max(), y=mean,
color='k', label='Mean ratio: {:.1f} %'.format(mean))
ax.fill_between(df_lower.index.values, mean + ci, mean - ci, color="#b9cfe7", edgecolor=None, alpha=0.6)
# y = df_lower['A2A1'].values
# x = df_lower.index.values
# p = fit_poly_model_xr(x, y, 1, return_just_p=True)
# fit_label = 'Linear Fit intercept: {:.2f} %'.format(p[1])
# p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
# return_just_p=False, color='k',
# fit_label=fit_label)
# arrange the legend a bit:
handles, labels = ax.get_legend_handles_labels()
h_stns = handles[1:4]
l_stns = labels[1:4]
h_fits = [handles[0] , handles[-1]]
l_fits = [labels[0], labels[-1]]
ax.legend(handles=h_fits+h_stns, labels=l_fits+l_stns, loc='upper left', prop={'size':fontsize-3})
ax.set_ylabel('PWV semi-annual to annual amplitude ratio [%]', fontsize=fontsize)
ax.set_xlabel('GNSS station height [m a.s.l]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.grid(True)
ax.set_yticks(np.arange(0, 100, 20))
fig.tight_layout()
if save:
filename = 'pwv_peak_amplitude_altitude.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
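# --- Hedged sketch (not part of the original analysis) ---
# The ratio plotted above is A2/A1 (semi-annual over annual amplitude); its
# uncertainty is propagated from the two amplitude errors with the first-order
# formula sigma(A2/A1) = sqrt((sA2/A1)**2 + (A2*sA1/A1**2)**2), as in the code:
def _sketch_amplitude_ratio_error(a1, a1_std, a2, a2_std):
    import numpy as np
    ratio = a2 / a1
    ratio_std = np.sqrt((a2_std / a1) ** 2 + (a2 * a1_std / a1 ** 2) ** 2)
    return ratio, ratio_std
# e.g. A1 = 4.0 +/- 0.2 mm and A2 = 1.0 +/- 0.1 mm give 0.25 +/- ~0.028,
# i.e. a ratio of 25 % with a ~2.8 % uncertainty.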
def plot_peak_hour_distance(path=work_yuval, season='JJA',
remove_station='dsea', fontsize=22, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import groupby_half_hour_xr
from aux_gps import xr_reindex_with_date_range
import xarray as xr
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.metrics import r2_score
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw.load()
pw = pw.sel(time=pw['time.season'] == season)
pw = pw.map(xr_reindex_with_date_range)
df = groupby_half_hour_xr(pw)
halfs = [df.isel(half_hour=x)['half_hour'] for x in df.argmax().values()]
names = [x for x in df]
dfh = pd.DataFrame(halfs, index=names)
geo = produce_geo_gnss_solved_stations(
add_distance_to_coast=True, plot=False)
geo['phase'] = dfh
geo = geo.dropna()
groups = group_sites_to_xarray(upper=False, scope='diurnal')
geo.loc[groups.sel(group='coastal').values, 'group'] = 'coastal'
geo.loc[groups.sel(group='highland').values, 'group'] = 'highland'
geo.loc[groups.sel(group='eastern').values, 'group'] = 'eastern'
fig, ax = plt.subplots(figsize=(14, 10))
ax.grid()
if remove_station is not None:
# note: `removed` is only defined when remove_station is given; the scatter
# and legend entries below assume the default 'dsea' removal.
removed = geo.loc[remove_station].to_frame().T
geo = geo.drop(remove_station, axis=0)
# lnall = sns.scatterplot(data=geo.loc[only], x='distance', y='phase', ax=ax, hue='group', s=100)
# geo['phase'] = pd.to_timedelta(geo['phase'], unit='H')
coast = geo[geo['group'] == 'coastal']
yerr = 1.0
lncoast = ax.errorbar(x=coast.loc[:,
'distance'],
y=coast.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='b')
# lncoast = ax.scatter(coast.loc[:, 'distance'], coast.loc[:, 'phase'], color='b', s=50)
highland = geo[geo['group'] == 'highland']
# lnhighland = ax.scatter(highland.loc[:, 'distance'], highland.loc[:, 'phase'], color='brown', s=50)
lnhighland = ax.errorbar(x=highland.loc[:,
'distance'],
y=highland.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='brown')
eastern = geo[geo['group'] == 'eastern']
# lneastern = ax.scatter(eastern.loc[:, 'distance'], eastern.loc[:, 'phase'], color='green', s=50)
lneastern = ax.errorbar(x=eastern.loc[:,
'distance'],
y=eastern.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='green')
lnremove = ax.scatter(
removed.loc[:, 'distance'], removed.loc[:, 'phase'], marker='x', color='k', s=50)
ax.legend([lncoast,
lnhighland,
lneastern,
lnremove],
['Coastal stations',
'Highland stations',
'Eastern stations',
'DSEA station'],
fontsize=fontsize)
params = np.polyfit(geo['distance'].values, geo.phase.values, 1)
params2 = np.polyfit(geo['distance'].values, geo.phase.values, 2)
x = np.linspace(0, 210, 100)
y = np.polyval(params, x)
y2 = np.polyval(params2, x)
r2 = r2_score(geo.phase.values, np.polyval(params, geo['distance'].values))
ax.plot(x, y, color='k')
textstr = '\n'.join([r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=fontsize,
verticalalignment='top', bbox=props)
# ax.plot(x,y2, color='green')
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel('Distance from shore [km]', fontsize=fontsize)
ax.set_ylabel('Peak hour [UTC]', fontsize=fontsize)
# mark ~16:40 UTC (approximately the local sunset hour in JJA)
ax.axhline(16.66, color='tab:orange', linewidth=2)
# change yticks to hours minuets:
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = [pd.to_timedelta(float(x), unit='H') for x in labels]
labels = ['{}:{}'.format(x.components[1], x.components[2])
if x.components[2] != 0 else '{}:00'.format(x.components[1]) for x in labels]
ax.set_yticklabels(labels)
fig.canvas.draw()
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if save:
filename = 'pw_peak_distance_shore.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
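# --- Hedged sketch (not part of the original analysis) ---
# The y-tick relabelling above converts decimal UTC hours to HH:MM strings via
# pandas timedeltas; the same conversion in isolation:
def _sketch_decimal_hour_to_hhmm(hour):
    import pandas as pd
    td = pd.to_timedelta(float(hour), unit='h')
    return '{}:{:02d}'.format(td.components.hours, td.components.minutes)
# e.g. _sketch_decimal_hour_to_hhmm(16.66) -> '16:39' (about 16:40 UTC).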
def plot_monthly_variability_heatmap_from_pwv_anomalies(load_path=work_yuval,
thresh=50, save=True,
fontsize=16,
                                                         sort_by=['groups_annual', 'alt']):
"""Heatmap of monthly PWV-anomaly STD per station; for latitude ordering use
sort_by=['groups_annual', 'lat'] with ascending=[1, 0]."""
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from calendar import month_abbr
from PW_stations import produce_geo_gnss_solved_stations
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 1]).index
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_anoms_thresh_{:.0f}.nc'.format(thresh))
df = anoms.groupby('time.month').std().to_dataframe()
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
# cols = [x for x in sites if x in df.columns]
df = df[sites]
df.columns = [x.upper() for x in df.columns]
fig = plt.figure(figsize=(14, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.91, 0.37, 0.02, 0.62]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
cmap='Reds',
vmin=df.min().min(),
vmax=df.max().max(),
annot=True,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies STD [mm]'},
annot_kws={'fontsize': fontsize}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
cbar_ax.tick_params(labelsize=fontsize)
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(
bottom='off',
labelbottom='off',
labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
df_mean = df.T.mean()
df_mean = df_mean.to_frame()
df_mean[1] = [month_abbr[x] for x in range(1, 13)]
df_mean.columns = ['std', 'month']
g = sns.barplot(data=df_mean, x='month', y='std', ax=ax_group, palette='Reds',
hue='std', dodge=False, linewidth=2.5)
g.legend_.remove()
ax_group.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
ax_group.grid(color='k', linestyle='--',
linewidth=1.5, alpha=0.5, axis='y')
ax_group.xaxis.set_tick_params(labelsize=fontsize)
ax_group.yaxis.set_tick_params(labelsize=fontsize)
ax_group.set_xlabel('', fontsize=fontsize)
# df.T.mean().plot(ax=ax_group, kind='bar', color='k', fontsize=fontsize, rot=0)
fig.tight_layout()
fig.subplots_adjust(right=0.906)
if save:
filename = 'pw_anoms_monthly_variability_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_monthly_means_anomalies_with_station_mean(load_path=work_yuval,
thresh=50, save=True,
anoms=None, agg='mean',
fontsize=16, units=None,
remove_stations=['nizn', 'spir'],
sort_by=['groups_annual', 'lat']):
import xarray as xr
import seaborn as sns
from palettable.scientific import diverging as divsci
import numpy as np
import matplotlib.dates as mdates
import pandas as pd
from aux_gps import anomalize_xr
from PW_stations import produce_geo_gnss_solved_stations
sns.set_style('whitegrid')
sns.set_style('ticks')
div_cmap = divsci.Vik_20.mpl_colormap
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
if anoms is None:
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
anoms = anomalize_xr(anoms, 'MS', units=units)
if remove_stations is not None:
anoms = anoms[[x for x in anoms if x not in remove_stations]]
df = anoms.to_dataframe()[:'2019']
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
cols = [x for x in sites if x in df.columns]
df = df[cols]
df.columns = [x.upper() for x in df.columns]
weights = df.count(axis=1).shift(periods=-1, freq='15D').astype(int)
fig = plt.figure(figsize=(20, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0.0225)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.95, 0.43, 0.0125, 0.45]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies [mm]'}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize-4)
cbar_ax.tick_params(labelsize=fontsize)
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(
bottom='off', labelbottom='off', labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
ax_heat.set_xlabel('')
if agg == 'mean':
ts = df.T.mean().shift(periods=-1, freq='15D')
elif agg == 'median':
ts = df.T.median().shift(periods=-1, freq='15D')
ts.index.name = ''
# dt_as_int = [x for x in range(len(ts.index))]
# xticks_labels = ts.index.strftime('%Y-%m').values[::6]
# xticks = dt_as_int[::6]
# xticks = ts.index
# ts.index = dt_as_int
ts.plot(ax=ax_group, color='k', fontsize=fontsize, lw=2)
barax = ax_group.twinx()
barax.bar(ts.index, weights.values, width=35, color='k', alpha=0.2)
barax.yaxis.set_major_locator(ticker.MaxNLocator(6))
barax.set_ylabel('Stations [#]', fontsize=fontsize-4)
barax.tick_params(labelsize=fontsize)
ax_group.set_xlim(ts.index.min(), ts.index.max() +
pd.Timedelta(15, unit='D'))
ax_group.set_ylabel('PWV {} anomalies [mm]'.format(agg), fontsize=fontsize-4)
# set ticks and align with heatmap axis (move by 0.5):
# ax_group.set_xticks(dt_as_int)
# offset = 1
# ax_group.xaxis.set(ticks=np.arange(offset / 2.,
# max(dt_as_int) + 1 - min(dt_as_int),
# offset),
# ticklabels=dt_as_int)
# move the lines also by 0.5 to align with heatmap:
# lines = ax_group.lines # get the lines
# [x.set_xdata(x.get_xdata() - min(dt_as_int) + 0.5) for x in lines]
# ax_group.xaxis.set(ticks=xticks, ticklabels=xticks_labels)
# ax_group.xaxis.set(ticks=xticks)
years_fmt = mdates.DateFormatter('%Y')
ax_group.xaxis.set_major_locator(mdates.YearLocator())
ax_group.xaxis.set_major_formatter(years_fmt)
ax_group.xaxis.set_minor_locator(mdates.MonthLocator())
# ax_group.xaxis.tick_top()
# ax_group.xaxis.set_ticks_position('both')
# ax_group.tick_params(axis='x', labeltop='off', top='on',
# bottom='on', labelbottom='on')
ax_group.grid()
# ax_group.axvline('2015-09-15')
# ax_group.axhline(2.5)
# plt.setp(ax_group.xaxis.get_majorticklabels(), rotation=45 )
fig.tight_layout()
fig.subplots_adjust(right=0.946)
if save:
filename = 'pw_monthly_means_anomaly_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return ts
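# --- Hedged sketch (not part of the original analysis) ---
# anomalize_xr is a project helper; for monthly ('MS') data its effect is
# presumably equivalent to removing the monthly climatology, i.e. the standard
# xarray groupby idiom used elsewhere in this module:
def _sketch_monthly_anomalies(da):
    """Subtract each calendar month's long-term mean from a monthly series."""
    clim = da.groupby('time.month').mean('time')
    return da.groupby('time.month') - clim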
def plot_grp_anomlay_heatmap(load_path=work_yuval, gis_path=gis_path,
thresh=50, grp='hour', remove_grp=None, season=None,
n_clusters=4, save=True, title=False):
import xarray as xr
import seaborn as sns
import numpy as np
from PW_stations import group_anoms_and_cluster
from aux_gps import geo_annotate
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import ListedColormap
from palettable.scientific import diverging as divsci
from PW_stations import produce_geo_gnss_solved_stations
div_cmap = divsci.Vik_20.mpl_colormap
dem_path = load_path / 'AW3D30'
def weighted_average(grp_df, weights_col='weights'):
return grp_df._get_numeric_data().multiply(
grp_df[weights_col], axis=0).sum() / grp_df[weights_col].sum()
df, labels_sorted, weights = group_anoms_and_cluster(
load_path=load_path, thresh=thresh, grp=grp, season=season,
n_clusters=n_clusters, remove_grp=remove_grp)
# create figure and subplots axes:
fig = plt.figure(figsize=(15, 10))
if title:
if season is not None:
fig.suptitle(
'Precipitable water {}ly anomalies analysis for {} season'.format(grp, season))
else:
fig.suptitle('Precipitable water {}ly anomalies analysis (Weighted KMeans {} clusters)'.format(
grp, n_clusters))
grid = plt.GridSpec(
2, 2, width_ratios=[
3, 2], height_ratios=[
4, 1], wspace=0.1, hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
ax_map = fig.add_subplot(grid[0:, 1]) # plt.subplot(122)
# get the cmap, zip it to the groups and produce a color dictionary:
# cmap = plt.get_cmap("Accent")
cmap = qualitative_cmap(n_clusters)
# cmap = plt.get_cmap("Set2_r")
# cmap = ListedColormap(cmap.colors[::-1])
groups = list(set(labels_sorted.values()))
palette = dict(zip(groups, [cmap(x) for x in range(len(groups))]))
label_cmap_dict = dict(zip(labels_sorted.keys(),
[palette[x] for x in labels_sorted.values()]))
cm = ListedColormap([x for x in palette.values()])
# plot heatmap and colorbar:
cbar_ax = fig.add_axes([0.57, 0.24, 0.01, 0.69]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': '[mm]'})
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(top='on', labeltop='on')
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=10)
# paint ytick labels with categorical cmap:
boxes = [dict(facecolor=x, boxstyle="square,pad=0.7", alpha=0.6)
for x in label_cmap_dict.values()]
ylabels = [x for x in ax_heat.yaxis.get_ticklabels()]
for label, box in zip(ylabels, boxes):
label.set_bbox(box)
# rotate xtick_labels:
# ax_heat.set_xticklabels(ax_heat.get_xticklabels(), rotation=0,
# fontsize=10)
# plot summed groups (with weights):
df_groups = df.T
df_groups['groups'] = pd.Series(labels_sorted)
df_groups['weights'] = weights
df_groups = df_groups.groupby('groups').apply(weighted_average)
df_groups.drop(['groups', 'weights'], axis=1, inplace=True)
df_groups.T.plot(ax=ax_group, linewidth=2.0, legend=False, cmap=cm)
if grp == 'hour':
ax_group.set_xlabel('hour (UTC)')
ax_group.grid()
group_limit = ax_heat.get_xlim()
ax_group.set_xlim(group_limit)
ax_group.set_ylabel('[mm]')
# set ticks and align with heatmap axis (move by 0.5):
ax_group.set_xticks(df.index.values)
offset = 1
ax_group.xaxis.set(ticks=np.arange(offset / 2.,
max(df.index.values) + 1 -
min(df.index.values),
offset),
ticklabels=df.index.values)
# move the lines also by 0.5 to align with heatmap:
lines = ax_group.lines # get the lines
[x.set_xdata(x.get_xdata() - min(df.index.values) + 0.5) for x in lines]
# plot israel map:
ax_map = plot_israel_map(gis_path=gis_path, ax=ax_map)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
im = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = fig.colorbar(im, ax=ax_map, **cbar_kwargs)
# cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
print('getting solved GNSS israeli stations metadata...')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
gps.index = gps.index.str.upper()
gps = gps.loc[[x for x in df.columns], :]
gps['group'] = pd.Series(labels_sorted)
gps.plot(ax=ax_map, column='group', categorical=True, marker='o',
edgecolor='black', cmap=cm, s=100, legend=True, alpha=1.0,
legend_kwds={'prop': {'size': 10}, 'fontsize': 14,
'loc': 'upper left', 'title': 'clusters'})
# ax_map.set_title('Groupings of {}ly anomalies'.format(grp))
# annotate station names in map:
geo_annotate(ax_map, gps.lon, gps.lat,
gps.index, xytext=(6, 6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
# plt.legend(['IMS stations', 'GNSS stations'],
# prop={'size': 10}, bbox_to_anchor=(-0.15, 1.0),
# title='Stations')
# plt.legend(prop={'size': 10}, loc='upper left')
# plt.tight_layout()
plt.subplots_adjust(top=0.92,
bottom=0.065,
left=0.065,
right=0.915,
hspace=0.19,
wspace=0.215)
filename = 'pw_{}ly_anoms_{}_clusters_with_map.png'.format(grp, n_clusters)
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return df
def plot_lomb_scargle(path=work_yuval, save=True):
from aux_gps import lomb_scargle_xr
import xarray as xr
pw_mm = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw_mm_median = pw_mm.to_array('station').median('station')
da = lomb_scargle_xr(
pw_mm_median.dropna('time'),
user_freq='MS',
kwargs={
'nyquist_factor': 1,
'samples_per_peak': 100})
plt.ylabel('')
plt.title('Lomb–Scargle periodogram')
plt.xlim([0, 4])
plt.grid()
filename = 'Lomb_scargle_monthly_means.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return da
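# --- Hedged sketch (not part of the original analysis) ---
# lomb_scargle_xr is a project helper; the underlying periodogram of an
# unevenly sampled series can be computed directly with astropy (assuming
# astropy is installed), using the same settings as above:
def _sketch_lomb_scargle(time_days, values):
    """Return (frequency, power) of a Lomb-Scargle periodogram."""
    from astropy.timeseries import LombScargle
    freq, power = LombScargle(time_days, values).autopower(
        nyquist_factor=1, samples_per_peak=100)
    return freq, power
# For monthly PWV means the dominant peak is expected at the annual frequency
# (1 cycle/year, i.e. ~1/365.25 cycles/day when time is given in days).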
def plot_vertical_climatology_months(path=sound_path, field='Rho_wv',
center_month=7):
from aux_gps import path_glob
import xarray as xr
ds = xr.open_dataset(
path /
'bet_dagan_phys_sounding_height_2007-2019.nc')[field]
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
day = ds.sel(sound_time=ds['sound_time.hour'] == 12).groupby(
'sound_time.month').mean('sound_time')
night = ds.sel(sound_time=ds['sound_time.hour'] == 00).groupby(
'sound_time.month').mean('sound_time')
next_month = center_month + 1
last_month = center_month - 1
day = day.sel(month=[last_month, center_month, next_month])
night = night.sel(month=[last_month, center_month, next_month])
for month in day.month:
h = day.sel(month=month)['H-Msl'].values
rh = day.sel(month=month).values
ax[0].semilogy(rh, h)
ax[0].set_title('noon')
ax[0].set_ylabel('height [m]')
ax[0].set_xlabel('{}, [{}]'.format(field, day.attrs['units']))
ax[0].legend([x for x in ax[0].lines], [x for x in day.month.values])
for month in night.month:
h = night.sel(month=month)['H-Msl'].values
rh = night.sel(month=month).values
ax[1].semilogy(rh, h)
ax[1].set_title('midnight')
ax[1].set_ylabel('height [m]')
ax[1].set_xlabel('{}, [{}]'.format(field, night.attrs['units']))
ax[1].legend([x for x in ax[1].lines], [x for x in night.month.values])
return day, night
def plot_global_warming_with_pwv_annual(climate_path=climate_path, work_path=work_yuval, fontsize=16):
import pandas as pd
import xarray as xr
import numpy as np
from aux_gps import anomalize_xr
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
sns.set_style('ticks')
df = pd.read_csv(climate_path/'GLB.Ts+dSST_2007.csv',
header=1, na_values='*******')
df = df.iloc[:19, :13]
df = df.melt(id_vars='Year')
df['time'] = pd.to_datetime(df['Year'].astype(
str)+'-'+df['variable'].astype(str))
df = df.set_index('time')
df = df.drop(['Year', 'variable'], axis=1)
df.columns = ['T']
df['T'] = pd.to_numeric(df['T'])
df = df.sort_index()
df.columns = ['AIRS-ST-Global']
# df = df.loc['2003':'2019']
# df = df.resample('AS').mean()
dss = xr.open_dataset(climate_path/'AIRS.2002-2021.L3.RetStd_IR031.v7.0.3.0.nc')
dss = dss.sel(time=slice('2003','2019'), Longitude=slice(34,36), Latitude=slice(34,29))
ds = xr.concat([dss['SurfAirTemp_A'], dss['SurfAirTemp_D']], 'dn')
ds['dn'] = ['day', 'night']
ds = ds.mean('dn')
ds -= ds.sel(time=slice('2007','2016')).mean('time')
anoms = anomalize_xr(ds, 'MS')
anoms = anoms.mean('Latitude').mean('Longitude')
df['AIRS-ST-Regional'] = anoms.to_dataframe('AIRS-ST-Regional')
# else:
# df = pd.read_csv(climate_path/'GLB.Ts+dSST.csv',
# header=1, na_values='***')
# df = df.iloc[:, :13]
# df = df.melt(id_vars='Year')
# df['time'] = pd.to_datetime(df['Year'].astype(
# str)+'-'+df['variable'].astype(str))
# df = df.set_index('time')
# df = df.drop(['Year', 'variable'], axis=1)
# df.columns = ['T']
# # df = df.resample('AS').mean()
# df = df.sort_index()
pw = xr.load_dataset(work_path/'GNSS_PW_monthly_anoms_thresh_50.nc')
# pw_2007_2016_mean = pw.sel(time=slice('2007','2016')).mean()
# pw -= pw_2007_2016_mean
pw = pw.to_array('s').mean('s')
pw_df = pw.to_dataframe('PWV')
# df['pwv'] = pw_df.resample('AS').mean()
df['PWV'] = pw_df
df = df.loc['2003': '2019']
df = df.resample('AS').mean()
fig, ax = plt.subplots(figsize=(15, 6))
ax = df.plot(kind='bar', secondary_y='PWV',
color=['tab:red', 'tab:orange', 'tab:blue'],
ax=ax, legend=False, rot=45)
twin = get_twin(ax, 'x')
align_yaxis_np(ax, twin)
# twin.set_yticks([-0.5, 0, 0.5, 1.0, 1.5])
# locator = ticker.MaxNLocator(6)
# ax.yaxis.set_major_locator(locator)
twin.yaxis.set_major_locator(ticker.MaxNLocator(6))
twin.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.set_ylabel(r'Surface Temperature anomalies [$\degree$C]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
twin.tick_params(labelsize=fontsize)
ax.set_xticklabels(np.arange(2003, 2020))
ax.grid(True)
# add legend:
handles, labels = [], []
for h, l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
for h, l in zip(*twin.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax.legend(handles, labels, prop={'size': fontsize-2}, loc='upper left')
ax.set_xlabel('')
fig.tight_layout()
return df
def plot_SST_med(sst_path=work_yuval/'SST', fontsize=16, loop=True):
import xarray as xr
import seaborn as sns
from aux_gps import lat_mean
import numpy as np
def clim_mean(med_sst):
sst = med_sst - 273.15
mean_sst = sst.mean('lon')
mean_sst = lat_mean(mean_sst)
mean_sst = mean_sst.groupby('time.dayofyear').mean()
return mean_sst
sns.set_style('whitegrid')
sns.set_style('ticks')
ds = xr.open_dataset(
sst_path/'med1-1981_2020-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc')
sst = ds['analysed_sst'].sel(time=slice('1997', '2019')).load()
whole_med_lon = [-5, 37]
whole_med_lat = [30, 40]
sst_w = sst.copy().sel(lat=slice(*whole_med_lat), lon=slice(*whole_med_lon))
sst_clim_w = clim_mean(sst_w)
df = sst_clim_w.to_dataframe('SST_whole_Med')
# now for emed:
for i, min_lon in enumerate(np.arange(23, 34, 1)):
e_med_lon = [min_lon, 37]
e_med_lat = [30, 40]
sst_e = sst.copy().sel(lat=slice(*e_med_lat), lon=slice(*e_med_lon))
sst_clim_e = clim_mean(sst_e)
df['SST_EMed_{}'.format(min_lon)] = sst_clim_e.to_dataframe()
# df['SST_EMed'] = sst_clim_e.to_dataframe()
if loop:
ax = df.idxmax().plot(kind='barh')
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.set_xlabel('month')
else:
ax = df.plot(lw=2, legend=True)
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(r'Temperature [$^{\circ}$C]', fontsize=fontsize)
ax.set_xlabel('month')
return df
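# --- Hedged sketch (not part of the original analysis) ---
# lat_mean is a project helper; a latitude average over a gridded SST field is
# usually area-weighted with cos(latitude). A minimal equivalent with xarray:
def _sketch_lat_weighted_mean(da, lat_dim='lat'):
    import numpy as np
    weights = np.cos(np.deg2rad(da[lat_dim]))
    return da.weighted(weights).mean(lat_dim)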
def plot_SST_med_with_PWV_S1_panel(path=work_yuval,
sst_path=work_yuval/'SST',
ims_path=ims_path,
stations=['tela', 'jslm'], fontsize=16, save=True):
from ims_procedures import gnss_ims_dict
import matplotlib.pyplot as plt
ims_stations = [gnss_ims_dict.get(x) for x in stations]
fig, axes = plt.subplots(1, len(stations), figsize=(15, 6))
for i, (pwv, ims) in enumerate(zip(stations, ims_stations)):
plot_SST_med_with_PWV_first_annual_harmonic(path=path,
                                                    sst_path=sst_path,
                                                    ims_path=ims_path,
                                                    station=pwv, ims_station=ims,
                                                    fontsize=fontsize, ax=axes[i],
                                                    save=False)
twin = get_twin(axes[i], 'x')
twin.set_ylim(-4.5, 4.5)
axes[i].set_ylim(8, 30)
fig.tight_layout()
if save:
filename = 'Med_SST_surface_temp_PWV_harmonic_annual_{}_{}.png'.format(
*stations)
plt.savefig(savefig_path / filename, orientation='portrait')
return
def plot_SST_med_with_PWV_first_annual_harmonic(path=work_yuval,
sst_path=work_yuval/'SST',
ims_path=ims_path,
station='tela', ims_station='TEL-AVIV-COAST',
fontsize=16, ax=None,
save=True):
import xarray as xr
from aux_gps import month_to_doy_dict
import pandas as pd
import numpy as np
from aux_gps import lat_mean
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
sns.set_style('ticks')
# load harmonics:
ds = xr.load_dataset(path/'GNSS_PW_harmonics_annual.nc')
# stns = group_sites_to_xarray(scope='annual').sel(group='coastal').values
# harms = []
# for stn in stns:
# da = ds['{}_mean'.format(stn)].sel(cpy=1)
# harms.append(da)
# harm_da = xr.concat(harms, 'station')
# harm_da['station'] = stns
harm_da = ds['{}_mean'.format(station)].sel(cpy=1).reset_coords(drop=True)
# harm_da = harm_da.reset_coords(drop=True)
harm_da['month'] = [month_to_doy_dict.get(
x) for x in harm_da['month'].values]
harm_da = harm_da.rename({'month': 'dayofyear'})
# df = harm_da.to_dataset('station').to_dataframe()
df = harm_da.to_dataframe(station)
# load surface temperature data:
# da = xr.open_dataset(ims_path/'GNSS_5mins_TD_ALL_1996_2020.nc')[station]
da = xr.open_dataset(ims_path / 'IMS_TD_israeli_10mins.nc')[ims_station]
da.load()
print(da.groupby('time.year').count())
# da += 273.15
da_mean = da.groupby('time.dayofyear').mean()
df['{}_ST'.format(station)] = da_mean.to_dataframe()
# add 366 dayofyear for visualization:
df366 = pd.DataFrame(df.iloc[0].values+0.01).T
df366.index = [366]
df366.columns = df.columns
df = df.append(df366)
ind = np.arange(1, 367)
df = df.reindex(ind)
df = df.interpolate('cubic')
# now load sst for MED
ds = xr.open_dataset(
sst_path/'med1-1981_2020-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc')
sst = ds['analysed_sst'].sel(time=slice('1997', '2019')).load()
# sst_mean = sst.sel(lon=slice(25,35)).mean('lon')
sst -= 273.15
sst_mean = sst.mean('lon')
sst_mean = lat_mean(sst_mean)
sst_clim = sst_mean.groupby('time.dayofyear').mean()
df['Med-SST'] = sst_clim.to_dataframe()
pwv_name = '{} PWV-S1'.format(station.upper())
ims_name = '{} IMS-ST'.format(station.upper())
df.columns = [pwv_name, ims_name, 'Med-SST']
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
# first plot temp:
df[[ims_name, 'Med-SST']].plot(ax=ax, color=['tab:red', 'tab:blue'],
style=['-', '-'], lw=2, legend=False)
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(r'Temperature [$^{\circ}$C]', fontsize=fontsize)
vl = df[[ims_name, 'Med-SST']].idxmax().to_frame('x')
vl['colors'] = ['tab:red', 'tab:blue']
vl['ymin'] = df[[ims_name, 'Med-SST']].min()
vl['ymax'] = df[[ims_name, 'Med-SST']].max()
print(vl)
ax.vlines(x=vl['x'], ymin=vl['ymin'], ymax=vl['ymax'],
colors=vl['colors'], zorder=0)
ax.plot(vl.iloc[0]['x'], vl.iloc[0]['ymax'], color=vl.iloc[0]['colors'],
linewidth=0, marker='o', zorder=15)
ax.plot(vl.iloc[1]['x'], vl.iloc[1]['ymax'], color=vl.iloc[1]['colors'],
linewidth=0, marker='o', zorder=15)
# ax.annotate(text='', xy=(213,15), xytext=(235,15), arrowprops=dict(arrowstyle='<->'), color='k')
# ax.arrow(213, 15, dx=21, dy=0, shape='full', color='k', width=0.25)
#p1 = patches.FancyArrowPatch((213, 15), (235, 15), arrowstyle='<->', mutation_scale=20)
# ax.arrow(217, 15, 16, 0, head_width=0.14, head_length=2,
# linewidth=2, color='k', length_includes_head=True)
# ax.arrow(231, 15, -16, 0, head_width=0.14, head_length=2,
# linewidth=2, color='k', length_includes_head=True)
start = vl.iloc[0]['x'] + 4
end = vl.iloc[1]['x'] - 4
mid = vl['x'].mean()
dy = vl.iloc[1]['x'] - vl.iloc[0]['x'] - 8
days = dy + 8
ax.arrow(start, 15, dy, 0, head_width=0.14, head_length=2,
linewidth=1.5, color='k', length_includes_head=True, zorder=20)
ax.arrow(end, 15, -dy, 0, head_width=0.14, head_length=2,
linewidth=1.5, color='k', length_includes_head=True, zorder=20)
t = ax.text(
mid, 15.8, "{} days".format(days), ha="center", va="center", rotation=0, size=12,
bbox=dict(boxstyle="round4,pad=0.15", fc="white", ec="k", lw=1), zorder=21)
twin = ax.twinx()
df[pwv_name].plot(ax=twin, color='tab:cyan', style='--', lw=2, zorder=0)
twin.set_ylabel('PWV annual anomalies [mm]', fontsize=fontsize)
ax.set_xlabel('month', fontsize=fontsize)
locator = ticker.MaxNLocator(7)
ax.yaxis.set_major_locator(locator)
twin.yaxis.set_major_locator(ticker.MaxNLocator(7))
# add legend:
handles, labels = [], []
for h, l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
for h, l in zip(*twin.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax.legend(handles, labels, prop={'size': fontsize-2}, loc='upper left')
# ax.right_ax.set_yticks(np.linspace(ax.right_ax.get_yticks()[0], ax.right_ax.get_yticks()[-1], 7))
twin.vlines(x=df[pwv_name].idxmax(), ymin=df[pwv_name].min(),
ymax=df[pwv_name].max(), colors=['tab:cyan'], ls=['--'], zorder=0)
twin.tick_params(labelsize=fontsize)
# plot points:
twin.plot(df[pwv_name].idxmax(), df[pwv_name].max(),
color='tab:cyan', linewidth=0, marker='o')
# fig.tight_layout()
if save:
filename = 'Med_SST_surface_temp_PWV_harmonic_annual_{}.png'.format(
station)
plt.savefig(savefig_path / filename, orientation='portrait')
return df
def plot_pw_lapse_rate_fit(path=work_yuval, model='TSEN', plot=True):
from PW_stations import produce_geo_gnss_solved_stations
import xarray as xr
from PW_stations import ML_Switcher
import pandas as pd
import matplotlib.pyplot as plt
pw = xr.load_dataset(path / 'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
df_gnss = produce_geo_gnss_solved_stations(plot=False)
df_gnss = df_gnss.loc[[x for x in pw.data_vars], :]
alt = df_gnss['alt'].values
# add mean to anomalies:
pw_new = pw.resample(time='MS').mean()
pw_mean = pw_new.mean('time')
# compute std:
# pw_std = pw_new.std('time')
pw_std = (pw_new.groupby('time.month') -
pw_new.groupby('time.month').mean('time')).std('time')
pw_vals = pw_mean.to_array().to_dataframe(name='pw')
pw_vals = pd.Series(pw_vals.squeeze()).values
pw_std_vals = pw_std.to_array().to_dataframe(name='pw')
pw_std_vals = pd.Series(pw_std_vals.squeeze()).values
ml = ML_Switcher()
fit_model = ml.pick_model(model)
y = pw_vals
X = alt.reshape(-1, 1)
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
pw_lapse_rate = abs(coef)*1000
if plot:
fig, ax = plt.subplots(1, 1, figsize=(16, 4))
ax.errorbar(x=alt, y=pw_vals, yerr=pw_std_vals,
marker='.', ls='', capsize=1.5, elinewidth=1.5,
markeredgewidth=1.5, color='k')
ax.grid()
ax.plot(X, predict, c='r')
ax.set_xlabel('meters a.s.l')
ax.set_ylabel('Precipitable Water [mm]')
ax.legend(['{} ({:.2f} [mm/km], {:.2f} [mm])'.format(model,
pw_lapse_rate, inter)])
return df_gnss['alt'], pw_lapse_rate
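# --- Hedged sketch (not part of the original analysis) ---
# ML_Switcher is a project helper that returns a regressor by name (e.g. the
# 'TSEN' model used above). The same PW-vs-altitude lapse-rate fit can be
# sketched directly with scikit-learn's Theil-Sen estimator:
def _sketch_pw_altitude_lapse_rate(alt_m, pw_mm):
    """Fit PW vs. altitude; return (lapse rate in mm/km, intercept in mm)."""
    import numpy as np
    from sklearn.linear_model import TheilSenRegressor
    model = TheilSenRegressor(random_state=42)
    model.fit(np.asarray(alt_m, dtype=float).reshape(-1, 1),
              np.asarray(pw_mm, dtype=float))
    return abs(model.coef_[0]) * 1000.0, float(model.intercept_)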
def plot_time_series_as_barplot(ts, anoms=False, ts_ontop=None):
# plt.style.use('fast')
time_dim = list(set(ts.dims))[0]
fig, ax = plt.subplots(figsize=(20, 6), dpi=150)
import matplotlib.dates as mdates
import matplotlib.ticker
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import pandas as pd
if not anoms:
# sns.barplot(x=ts[time_dim].values, y=ts.values, ax=ax, linewidth=5)
ax.bar(ts[time_dim].values, ts.values, linewidth=5, width=0.0,
facecolor='black', edgecolor='black')
# Series.plot.bar(ax=ax, linewidth=0, width=1)
else:
warm = 'tab:orange'
cold = 'tab:blue'
positive = ts.where(ts > 0).dropna(time_dim)
negative = ts.where(ts < 0).dropna(time_dim)
ax.bar(
positive[time_dim].values,
positive.values,
linewidth=3.0,
width=1.0,
facecolor=warm, edgecolor=warm, alpha=1.0)
ax.bar(
negative[time_dim].values,
negative.values,
width=1.0,
linewidth=3.0,
facecolor=cold, edgecolor=cold, alpha=1.0)
if ts_ontop is not None:
ax_twin = ax.twinx()
color = 'red'
ts_ontop.plot.line(color=color, linewidth=2.0, ax=ax_twin)
# we already handled the x-label with ax1
ax_twin.set_ylabel('PW [mm]', color=color)
ax_twin.tick_params(axis='y', labelcolor=color)
ax_twin.legend(['3-month running mean of PW anomalies'])
title_add = ' and the median Precipitable Water anomalies from Israeli GNSS sites'
l2 = ax_twin.get_ylim()
ax.set_ylim(l2)
else:
title_add = ''
ax.grid(None)
ax.set_xlim([pd.to_datetime('1996'), pd.to_datetime('2020')])
ax.set_title('Multivariate ENSO Index Version 2 {}'.format(title_add))
ax.set_ylabel('MEI.v2')
# ax.xaxis.set_major_locator(MultipleLocator(20))
# Change minor ticks to show every 5. (20/4 = 5)
# ax.xaxis.set_minor_locator(AutoMinorLocator(4))
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
ax.xaxis.set_major_locator(mdates.YearLocator(2))
ax.xaxis.set_minor_locator(mdates.YearLocator(1))
ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
ax.figure.autofmt_xdate()
# plt.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom=True, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
# labelbottom=True)
# fig.tight_layout()
plt.show()
return
def plot_tide_pw_lags(path=hydro_path, pw_anom=False, rolling='1H', save=True):
from aux_gps import path_glob
import xarray as xr
import numpy as np
file = path_glob(path, 'PW_tide_sites_*.nc')[-1]
if pw_anom:
file = path_glob(path, 'PW_tide_sites_anom_*.nc')[-1]
ds = xr.load_dataset(file)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(8, 6))
for name in names:
da = ds.mean('station').mean('tide_start')[name]
ser = da.to_series()
if rolling is not None:
ser = ser.rolling(rolling).mean()
time = (ser.index / np.timedelta64(1, 'D')).astype(float)
# ser = ser.loc[pd.Timedelta(-2.2,unit='D'):pd.Timedelta(1, unit='D')]
ser.index = time
ser.plot(marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days around tide event')
if pw_anom:
ax.set_ylabel('PWV anomalies [mm]')
else:
ax.set_ylabel('PWV [mm]')
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x.upper(), y, z)
for x, y, z in fmt])
ax.set_xlim([-3, 1])
ax.axvline(0, color='k', linestyle='--')
ax.grid()
filename = 'pw_tide_sites.png'
if pw_anom:
filename = 'pw_tide_sites_anom.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
# ax.xaxis.set_major_locator(mdates.HourLocator(interval=24)) # tick every two hours
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%H'))
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
# title = 'Mean PW for tide stations near all GNSS stations'
# ax.set_title(title)
return
def plot_profiler(path=work_yuval, ceil_path=ceil_path, title=False,
field='maxsnr', save=True):
import xarray as xr
from ceilometers import read_coastal_BL_levi_2011
from aux_gps import groupby_half_hour_xr
from calendar import month_abbr
df = read_coastal_BL_levi_2011(path=ceil_path)
ds = df.to_xarray()
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw['csar']
pw.load()
pw = pw.sel(time=pw['time.month'] == 7).dropna('time')
pw_size = pw.dropna('time').size
pwyears = [pw.time.dt.year.min().item(), pw.time.dt.year.max().item()]
pw_std = groupby_half_hour_xr(pw, reduce='std')['csar']
pw_hour = groupby_half_hour_xr(pw, reduce='mean')['csar']
pw_hour_plus = (pw_hour + pw_std).values
pw_hour_minus = (pw_hour - pw_std).values
    if field == 'maxsnr':
        mlh_hour = ds['maxsnr']
        mlh_std = ds['std_maxsnr']
        label = 'Max SNR'
    elif field == 'tv_inversion':
        mlh_hour = ds['tv_inversion']
        mlh_std = ds['std_tv200']
        label = 'Tv inversion'
    else:
        raise ValueError('field must be "maxsnr" or "tv_inversion", got {}'.format(field))
mlh_hour_minus = (mlh_hour - mlh_std).values
mlh_hour_plus = (mlh_hour + mlh_std).values
half_hours = pw_hour.half_hour.values
fig, ax = plt.subplots(figsize=(10, 8))
red = 'tab:red'
blue = 'tab:blue'
pwln = pw_hour.plot(color=blue, marker='s', ax=ax)
ax.fill_between(half_hours, pw_hour_minus,
pw_hour_plus, color=blue, alpha=0.5)
twin = ax.twinx()
mlhln = mlh_hour.plot(color=red, marker='o', ax=twin)
twin.fill_between(half_hours, mlh_hour_minus,
mlh_hour_plus, color=red, alpha=0.5)
pw_label = 'PW: {}-{}, {} ({} pts)'.format(
pwyears[0], pwyears[1], month_abbr[7], pw_size)
mlh_label = 'MLH: {}-{}, {} ({} pts)'.format(1997, 1999, month_abbr[7], 90)
# if month is not None:
# pwmln = pw_m_hour.plot(color='tab:orange', marker='^', ax=ax)
# pwm_label = 'PW: {}-{}, {} ({} pts)'.format(pw_years[0], pw_years[1], month_abbr[month], pw_month.dropna('time').size)
# ax.legend(pwln + mlhln + pwmln, [pw_label, mlh_label, pwm_label], loc=leg_loc)
# else:
ax.legend([pwln[0], mlhln[0]], [pw_label, mlh_label], loc='best')
# plt.legend([pw_label, mlh_label])
ax.tick_params(axis='y', colors=blue)
twin.tick_params(axis='y', colors=red)
ax.set_ylabel('PW [mm]', color=blue)
twin.set_ylabel('MLH [m]', color=red)
twin.set_ylim(400, 1250)
ax.set_xticks([x for x in range(24)])
ax.set_xlabel('Hour of day [UTC]')
ax.grid()
mlh_name = 'Hadera'
textstr = '{}, {}'.format(mlh_name, pw.name.upper())
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=10,
verticalalignment='top', bbox=props)
if title:
ax.set_title('The diurnal cycle of {} Mixing Layer Height ({}) and {} GNSS site PW'.format(
mlh_name, label, pw.name.upper()))
fig.tight_layout()
if save:
filename = 'PW_diurnal_with_MLH_csar_{}.png'.format(field)
plt.savefig(savefig_path / filename, orientation='landscape')
return ax
def plot_ceilometers(path=work_yuval, ceil_path=ceil_path, interpolate='6H',
fontsize=14, save=True):
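    ''' Plot diurnal PWV vs. ceilometer MLH for the TELA/TLV and JSLM/JR pairs.
    Ceilometer gaps are optionally filled with cubic interpolation (up to
    max_gap=interpolate), then each GNSS/ceilometer couple is drawn with
    twin_hourly_mean_plot on a 1x2 panel and the twin (MLH) axes are forced to
    a common y-range. Saving assumes the module-level savefig_path. '''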
import xarray as xr
from ceilometers import twin_hourly_mean_plot
from ceilometers import read_all_ceilometer_stations
import numpy as np
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[['tela', 'jslm', 'yrcm', 'nzrt', 'klhv', 'csar']]
pw.load()
ds = read_all_ceilometer_stations(path=ceil_path)
if interpolate is not None:
attrs = [x.attrs for x in ds.data_vars.values()]
ds = ds.interpolate_na('time', max_gap=interpolate, method='cubic')
for i, da in enumerate(ds):
ds[da].attrs.update(attrs[i])
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(15, 6))
couples = [['tela', 'TLV'], ['jslm', 'JR']]
twins = []
for i, ax in enumerate(axes.flatten()):
ax, twin = twin_hourly_mean_plot(pw[couples[i][0]],
ds[couples[i][1]],
month=None,
ax=ax,
title=False,
leg_loc='best', fontsize=fontsize)
twins.append(twin)
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
    twin_ylim_min = min(x.get_ylim()[0] for x in twins)
    twin_ylim_max = max(x.get_ylim()[1] for x in twins)
for twin in twins:
twin.set_ylim(twin_ylim_min, twin_ylim_max)
fig.tight_layout()
filename = 'PW_diurnal_with_MLH_tela_jslm.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fig
def plot_field_with_fill_between(da, dim='hour', mean_dim=None, ax=None,
color='b', marker='s'):
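    ''' Plot the mean of da along mean_dim as a function of dim, shaded by
    +/- 1 std. If mean_dim is not given, the first dimension other than dim is
    used. Returns the matplotlib line objects. '''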
if dim not in da.dims:
raise KeyError('{} not in {}'.format(dim, da.name))
if mean_dim is None:
mean_dim = [x for x in da.dims if dim not in x][0]
da_mean = da.mean(mean_dim)
da_std = da.std(mean_dim)
da_minus = da_mean - da_std
da_plus = da_mean + da_std
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
line = da_mean.plot(color=color, marker=marker, ax=ax)
ax.fill_between(da_mean[dim], da_minus, da_plus, color=color, alpha=0.5)
return line
def plot_mean_with_fill_between_std(da, grp='hour', mean_dim='time', ax=None,
color='b', marker='s', alpha=0.5):
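    ''' Group da by '<mean_dim>.<grp>' (e.g. 'time.hour'), plot the group mean
    and shade +/- 1 std around it.
    Example (with a hypothetical DataArray da that has a 'time' dimension):
        plot_mean_with_fill_between_std(da, grp='month', mean_dim='time') '''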
da_mean = da.groupby('{}.{}'.format(mean_dim, grp)
).mean('{}'.format(mean_dim))
da_std = da.groupby('{}.{}'.format(mean_dim, grp)
).std('{}'.format(mean_dim))
da_minus = da_mean - da_std
da_plus = da_mean + da_std
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
line = da_mean.plot(color=color, marker=marker, ax=ax)
ax.fill_between(da_mean[grp], da_minus, da_plus, color=color, alpha=alpha)
return line
def plot_hist_with_seasons(da_ts):
import seaborn as sns
fig, ax = plt.subplots(figsize=(10, 7))
sns.kdeplot(da_ts.dropna('time'), ax=ax, color='k')
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'DJF').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'MAM').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'JJA').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'SON').dropna('time'),
legend=False,
ax=ax,
shade=True)
    # legend labels follow the plotting order: all data, then DJF, MAM, JJA, SON
    plt.legend(['ALL', 'DJF', 'MAM', 'JJA', 'SON'])
return
def plot_diurnal_pw_all_seasons(path=work_yuval, season='ALL', synoptic=None,
fontsize=20, labelsize=18,
ylim=[-2.7, 3.3], save=True, dss=None):
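    ''' Plot the diurnal PWV anomaly cycle per GNSS site, split by season or by
    synoptic class. With season='ALL' the JJA/SON/DJF/MAM and annual curves are
    overlaid on the geographical facet grid from plot_pw_geographical_segments;
    with synoptic='ALL' the PT/RST/CL/H classes are used instead. A single
    figure-level legend is added on top. Saving assumes the module-level
    savefig_path. '''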
import xarray as xr
from synoptic_procedures import slice_xr_with_synoptic_class
if dss is None:
gnss_filename = 'GNSS_PW_thresh_50_for_diurnal_analysis_removed_daily.nc'
pw = xr.load_dataset(path / gnss_filename)
else:
pw = dss
df_annual = pw.groupby('time.hour').mean().to_dataframe()
if season is None and synoptic is None:
# plot annual diurnal cycle only:
fg = plot_pw_geographical_segments(df_annual, fg=None, marker='o', color='b',
ylim=ylim)
legend = ['Annual']
elif season == 'ALL' and synoptic is None:
df_jja = pw.sel(time=pw['time.season'] == 'JJA').groupby(
'time.hour').mean().to_dataframe()
df_son = pw.sel(time=pw['time.season'] == 'SON').groupby(
'time.hour').mean().to_dataframe()
df_djf = pw.sel(time=pw['time.season'] == 'DJF').groupby(
'time.hour').mean().to_dataframe()
df_mam = pw.sel(time=pw['time.season'] == 'MAM').groupby(
'time.hour').mean().to_dataframe()
fg = plot_pw_geographical_segments(
df_jja,
fg=None,
marker='s',
color='tab:green',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=0, label='JJA')
fg = plot_pw_geographical_segments(
df_son,
fg=fg,
marker='^',
color='tab:red',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=1, label='SON')
fg = plot_pw_geographical_segments(
df_djf,
fg=fg,
marker='x',
color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, zorder=2, label='DJF')
fg = plot_pw_geographical_segments(
df_mam,
fg=fg,
marker='+',
color='tab:orange',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=4, label='MAM')
fg = plot_pw_geographical_segments(df_annual, fg=fg, marker='d',
color='tab:purple', ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=3,
label='Annual')
elif season is None and synoptic == 'ALL':
df_pt = slice_xr_with_synoptic_class(
pw, path=path, syn_class='PT').groupby('time.hour').mean().to_dataframe()
df_rst = slice_xr_with_synoptic_class(
pw, path=path, syn_class='RST').groupby('time.hour').mean().to_dataframe()
df_cl = slice_xr_with_synoptic_class(
pw, path=path, syn_class='CL').groupby('time.hour').mean().to_dataframe()
df_h = slice_xr_with_synoptic_class(
pw, path=path, syn_class='H').groupby('time.hour').mean().to_dataframe()
fg = plot_pw_geographical_segments(
df_pt,
fg=None,
marker='s',
color='tab:green',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=0, label='PT')
fg = plot_pw_geographical_segments(
df_rst,
fg=fg,
marker='^',
color='tab:red',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=1, label='RST')
fg = plot_pw_geographical_segments(
df_cl,
fg=fg,
marker='x',
color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, zorder=2, label='CL')
fg = plot_pw_geographical_segments(
df_h,
fg=fg,
marker='+',
color='tab:orange',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=4, label='H')
fg = plot_pw_geographical_segments(df_annual, fg=fg, marker='d',
color='tab:purple', ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=3,
label='Annual')
sites = group_sites_to_xarray(False, scope='diurnal')
for i, (ax, site) in enumerate(zip(fg.axes.flatten(), sites.values.flatten())):
lns = ax.get_lines()
if site in ['yrcm']:
leg_loc = 'upper right'
elif site in ['nrif', 'elat']:
leg_loc = 'upper center'
elif site in ['ramo']:
leg_loc = 'lower center'
else:
leg_loc = None
# do legend for each panel:
# ax.legend(
# lns,
# legend,
# prop={
# 'size': 12},
# framealpha=0.5,
# fancybox=True,
# ncol=2,
# loc=leg_loc, fontsize=12)
lines_labels = [ax.get_legend_handles_labels() for ax in fg.fig.axes][0]
# lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
fg.fig.legend(lines_labels[0], lines_labels[1], prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
filename = 'pw_diurnal_geo_{}.png'.format(season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_climate_classification(path=climate_path, gis_path=gis_path,
fontsize=16):
import xarray as xr
from climate_works import read_climate_classification_legend
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
from matplotlib import colors
ras = xr.open_rasterio(path / 'Beck_KG_V1_present_0p0083.tif')
ds = ras.isel(band=0)
minx = 34.0
miny = 29.0
maxx = 36.5
maxy = 34.0
ds = ds.sortby('y')
ds = ds.sel(x=slice(minx, maxx), y=slice(miny, maxy))
ds = ds.astype(int)
ds = ds.reset_coords(drop=True)
ax_map = plot_israel_map(
gis_path=gis_path,
ax=None,
ticklabelsize=fontsize)
df = read_climate_classification_legend(path)
# get color pixels to dict:
d = df['color'].to_dict()
sort_idx = np.argsort([x for x in d.keys()])
idx = np.searchsorted([x for x in d.keys()], ds.values, sorter=sort_idx)
out = np.asarray([x for x in d.values()])[sort_idx][idx]
ds_as_color = xr.DataArray(out, dims=['y', 'x', 'c'])
ds_as_color['y'] = ds['y']
ds_as_color['x'] = ds['x']
ds_as_color['c'] = ['R', 'G', 'B']
# overlay with dem data:
# cmap = plt.get_cmap('terrain', 41)
# df_gnss = produce_geo_gnss_solved_stations(plot=False)
# c_colors = df.set_index('class_code').loc[df_gnss['code'].unique()]['color'].values
c_colors = df['color'].values
c_li = [c for c in c_colors]
c_colors = np.asarray(c_li)
c_colors = np.unique(ds_as_color.stack(coor=['x', 'y']).T.values, axis=0)
# remove black:
# c_colors = c_colors[:-1]
int_code = np.unique(ds.stack(coor=['x', 'y']).T.values, axis=0)
ticks = [df.loc[x]['class_code'] for x in int_code[1:]]
cc = [df.set_index('class_code').loc[x]['color'] for x in ticks]
cc_as_hex = [colors.rgb2hex(x) for x in cc]
tickd = dict(zip(cc_as_hex, ticks))
# ticks.append('Water')
# ticks.reverse()
bounds = [x for x in range(len(c_colors) + 1)]
chex = [colors.rgb2hex(x) for x in c_colors]
ticks = [tickd.get(x, 'Water') for x in chex]
cmap = colors.ListedColormap(chex)
norm = colors.BoundaryNorm(bounds, cmap.N)
# vmin = ds_as_color.min().item()
# vmax = ds_as_color.max().item()
im = ds_as_color.plot.imshow(
ax=ax_map,
alpha=.7,
add_colorbar=False,
cmap=cmap,
interpolation='antialiased',
origin='lower',
norm=norm)
# colours = im.cmap(im.norm(np.unique(ds_as_color)))
# chex = [colors.rgb2hex(x) for x in colours]
# cmap = colors.ListedColormap(chex)
# bounds=[x for x in range(len(colours))]
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(
im,
boundaries=bounds,
ticks=None,
ax=ax_map,
**cbar_kwargs)
cb.set_label(
label='climate classification',
size=fontsize,
weight='normal')
n = len(c_colors)
    tick_locs = np.arange(n) + 0.5
cb.set_ticks(tick_locs)
# set tick labels (as before)
cb.set_ticklabels(ticks)
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
# now for the gps stations:
gps = produce_geo_gnss_solved_stations(plot=False)
removed = ['hrmn', 'gilb', 'lhav', 'nizn', 'spir']
removed = []
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
gps_stations = gps_list
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
return
def group_sites_to_xarray(upper=False, scope='diurnal'):
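    ''' Return the GNSS site names arranged as a (7 sites x 3 groups)
    DataArray whose columns are the 'coastal', 'highland' and 'eastern' groups
    used by the geographical facet plots; upper controls upper/lower-case site
    names. Currently the 'diurnal' and 'annual' scopes hold the same grouping. '''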
import xarray as xr
import numpy as np
if scope == 'diurnal':
group1 = ['KABR', 'BSHM', 'CSAR', 'TELA', 'ALON', 'SLOM', 'NIZN']
group2 = ['NZRT', 'MRAV', 'YOSH', 'JSLM', 'KLHV', 'YRCM', 'RAMO']
group3 = ['ELRO', 'KATZ', 'DRAG', 'DSEA', 'SPIR', 'NRIF', 'ELAT']
elif scope == 'annual':
group1 = ['KABR', 'BSHM', 'CSAR', 'TELA', 'ALON', 'SLOM', 'NIZN']
group2 = ['NZRT', 'MRAV', 'YOSH', 'JSLM', 'KLHV', 'YRCM', 'RAMO']
group3 = ['ELRO', 'KATZ', 'DRAG', 'DSEA', 'SPIR', 'NRIF', 'ELAT']
if not upper:
group1 = [x.lower() for x in group1]
group2 = [x.lower() for x in group2]
group3 = [x.lower() for x in group3]
gr1 = xr.DataArray(group1, dims='GNSS')
gr2 = xr.DataArray(group2, dims='GNSS')
gr3 = xr.DataArray(group3, dims='GNSS')
gr1['GNSS'] = np.arange(0, len(gr1))
gr2['GNSS'] = np.arange(0, len(gr2))
gr3['GNSS'] = np.arange(0, len(gr3))
sites = xr.concat([gr1, gr2, gr3], 'group').T
sites['group'] = ['coastal', 'highland', 'eastern']
return sites
# def plot_diurnal_pw_geographical_segments(df, fg=None, marker='o', color='b',
# ylim=[-2, 3]):
# import xarray as xr
# import numpy as np
# from matplotlib.ticker import MultipleLocator
# from PW_stations import produce_geo_gnss_solved_stations
# geo = produce_geo_gnss_solved_stations(plot=False)
# sites = group_sites_to_xarray(upper=False, scope='diurnal')
# sites_flat = [x for x in sites.values.flatten() if isinstance(x, str)]
# da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
# da['GNSS'] = [x for x in range(len(da))]
# if fg is None:
# fg = xr.plot.FacetGrid(
# da,
# col='GNSS',
# col_wrap=3,
# sharex=False,
# sharey=False, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# try:
# site = sites.values[i, j]
# ax = fg.axes[i, j]
# df.loc[:, site].plot(ax=ax, marker=marker, color=color)
# ax.set_xlabel('Hour of day [UTC]')
# ax.yaxis.tick_left()
# ax.grid()
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.xaxis.set_ticks(np.arange(0, 23, 3))
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
## ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# site_label = '{} ({:.0f})'.format(site.upper(), geo.loc[site].alt)
# ax.text(.12, .85, site_label,
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# ax.yaxis.set_minor_locator(MultipleLocator(3))
# ax.yaxis.grid(
# True,
# which='minor',
# linestyle='--',
# linewidth=1,
# alpha=0.7)
## ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
# if ylim is not None:
# ax.set_ylim(*ylim)
# except KeyError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 0]):
# try:
## df[gr1].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 1]):
# try:
## df[gr2].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 2]):
# try:
## df[gr3].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
#
# fg.fig.tight_layout()
# fg.fig.subplots_adjust()
# return fg
def prepare_reanalysis_monthly_pwv_to_dataframe(path=work_yuval, re='era5',
ds=None):
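    ''' Stack monthly GNSS PWV and a reanalysis product into one long
    DataFrame. re selects the ERA5 or UERRA-HARMONIE monthly PWV file (or a
    custom dataset via ds); both sources get a 'month' column and are
    concatenated with a 'source' column ('GNSS' vs. the reanalysis name) for
    seaborn-style plotting. '''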
import xarray as xr
import pandas as pd
    if re == 'era5':
        reanalysis = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
        re_name = 'ERA5'
    elif re == 'uerra':
        reanalysis = xr.load_dataset(path / 'GNSS_uerra_monthly_PW.nc')
        re_name = 'UERRA-HARMONIE'
    elif re is not None and ds is not None:
        reanalysis = ds
        re_name = re
    else:
        raise ValueError('re must be "era5", "uerra", or a name given together with ds.')
df_re = reanalysis.to_dataframe()
df_re['month'] = df_re.index.month
    pw_mm = xr.load_dataset(
        path /
        'GNSS_PW_monthly_thresh_50_homogenized.nc')
df = pw_mm.to_dataframe()
df['month'] = df.index.month
# concat:
dff = pd.concat([df, df_re], keys=['GNSS', re_name])
dff['source'] = dff.index.get_level_values(0)
dff = dff.reset_index()
return dff
def plot_long_term_era5_comparison(path=work_yuval, era5_path=era5_path,
fontsize=16,
remove_stations=['nizn', 'spir'], save=True):
import xarray as xr
from aux_gps import anomalize_xr
# from aeronet_analysis import prepare_station_to_pw_comparison
# from PW_stations import ML_Switcher
# from aux_gps import get_julian_dates_from_da
# from scipy.stats.mstats import theilslopes
# TODO: add merra2, 3 panel plot and trend
# load GNSS Israel:
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_percent = anomalize_xr(pw, 'MS', verbose=False, units='%')
pw_percent = pw_percent.to_array('station').mean('station')
pw_mean = pw_anoms.to_array('station').mean('station')
pw_mean = pw_mean.sel(time=slice('1998', '2019'))
# load ERA5:
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_mean = era5_anoms.to_array('station').mean('station')
df = pw_mean.to_dataframe(name='GNSS')
# load MERRA2:
# merra2 = xr.load_dataset(
# path / 'MERRA2/MERRA2_TQV_israel_area_1995-2019.nc')['TQV']
# merra2_mm = merra2.resample(time='MS').mean()
# merra2_anoms = anomalize_xr(
# merra2_mm, time_dim='time', freq='MS', verbose=False)
# merra2_mean = merra2_anoms.mean('lat').mean('lon')
# load AERONET:
# if aero_path is not None:
# aero = prepare_station_to_pw_comparison(path=aero_path, gis_path=gis_path,
# station='boker', mm_anoms=True)
# df['AERONET'] = aero.to_dataframe()
era5_to_plot = era5_mean - 5
# merra2_to_plot = merra2_mean - 10
df['ERA5'] = era5_mean.to_dataframe(name='ERA5')
# df['MERRA2'] = merra2_mean.to_dataframe('MERRA2')
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
# df['GNSS'].plot(ax=ax, color='k')
# df['ERA5'].plot(ax=ax, color='r')
# df['AERONET'].plot(ax=ax, color='b')
pwln = pw_mean.plot.line('k-', marker='o', ax=ax,
linewidth=2, markersize=3.5)
era5ln = era5_to_plot.plot.line(
'k--', marker='s', ax=ax, linewidth=2, markersize=3.5)
# merra2ln = merra2_to_plot.plot.line(
# 'g-', marker='d', ax=ax, linewidth=2, markersize=2.5)
era5corr = df.corr().loc['GNSS', 'ERA5']
# merra2corr = df.corr().loc['GNSS', 'MERRA2']
handles = pwln + era5ln # + merra2ln
# labels = ['GNSS', 'ERA5, r={:.2f}'.format(
# era5corr), 'MERRA2, r={:.2f}'.format(merra2corr)]
labels = ['GNSS station average', 'ERA5 regional mean, r={:.2f}'.format(
era5corr)]
ax.legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
# if aero_path is not None:
# aeroln = aero.plot.line('b-.', ax=ax, alpha=0.8)
# aerocorr = df.corr().loc['GNSS', 'AERONET']
# aero_label = 'AERONET, r={:.2f}'.format(aerocorr)
# handles += aeroln
ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_xlabel('')
ax.grid()
ax = fix_time_axis_ticks(ax, limits=['1998-01', '2020-01'])
fig.tight_layout()
if save:
filename = 'pwv_long_term_anomalies_era5_comparison.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_long_term_anomalies_with_trends(path=work_yuval,
model_name='TSEN',
fontsize=16,
remove_stations=['nizn', 'spir'],
save=True,
add_percent=False): # ,aero_path=aero_path):
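    ''' Plot the station-averaged monthly PWV anomaly series with fitted
    trends. Trends (model_name='TSEN', presumably Theil-Sen) are fitted over
    1998-2019 and over 2010-2019, with confidence bounds and Mann-Kendall
    p-values in the legend (slopes reported per decade). With add_percent=True
    a second panel repeats the analysis in percent units. Saving assumes the
    module-level savefig_path. '''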
import xarray as xr
from aux_gps import anomalize_xr
from PW_stations import mann_kendall_trend_analysis
from aux_gps import linear_fit_using_scipy_da_ts
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_percent = anomalize_xr(pw, 'MS', verbose=False, units='%')
pw_percent = pw_percent.to_array('station').mean('station')
pw_mean = pw_anoms.to_array('station').mean('station')
pw_mean = pw_mean.sel(time=slice('1998', '2019'))
if add_percent:
fig, axes = plt.subplots(2, 1, figsize=(16, 10))
else:
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
axes = [ax, ax]
pwln = pw_mean.plot.line('k-', marker='o', ax=axes[0],
linewidth=2, markersize=5.5)
handles = pwln
labels = ['GNSS station average']
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_mean, model=model_name, slope_factor=3652.5, plot=False)
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
mann_pval = mann_kendall_trend_analysis(pw_mean).loc['p']
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) mm$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trendln = trend.plot(ax=axes[0], color='b', linewidth=2, alpha=1)
handles += trendln
trend_hi.plot.line('b--', ax=axes[0], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('b--', ax=axes[0], linewidth=1.5, alpha=0.8)
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_mean.sel(time=slice('2010', '2019')), model=model_name, slope_factor=3652.5, plot=False)
mann_pval = mann_kendall_trend_analysis(pw_mean.sel(time=slice('2010','2019'))).loc['p']
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
trendln = trend.plot(ax=axes[0], color='r', linewidth=2, alpha=1)
handles += trendln
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) mm$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trend_hi.plot.line('r--', ax=axes[0], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('r--', ax=axes[0], linewidth=1.5, alpha=0.8)
# ax.grid()
# ax.set_xlabel('')
# ax.set_ylabel('PWV mean anomalies [mm]')
# ax.legend(labels=[],handles=[trendln[0]])
# fig.tight_layout()
axes[0].legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].set_xlabel('')
axes[0].grid(True)
axes[0] = fix_time_axis_ticks(axes[0], limits=['1998-01', '2020-01'])
if add_percent:
pwln = pw_percent.plot.line('k-', marker='o', ax=axes[1],
linewidth=2, markersize=5.5)
handles = pwln
labels = ['GNSS station average']
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_percent, model=model_name, slope_factor=3652.5, plot=False)
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
mann_pval = mann_kendall_trend_analysis(pw_percent).loc['p']
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) %$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trendln = trend.plot(ax=axes[1], color='b', linewidth=2, alpha=1)
handles += trendln
trend_hi.plot.line('b--', ax=axes[1], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('b--', ax=axes[1], linewidth=1.5, alpha=0.8)
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_percent.sel(time=slice('2010', '2019')), model=model_name, slope_factor=3652.5, plot=False)
mann_pval = mann_kendall_trend_analysis(pw_percent.sel(time=slice('2010','2019'))).loc['p']
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
trendln = trend.plot(ax=axes[1], color='r', linewidth=2, alpha=1)
handles += trendln
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) %$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trend_hi.plot.line('r--', ax=axes[1], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('r--', ax=axes[1], linewidth=1.5, alpha=0.8)
# ax.grid()
# ax.set_xlabel('')
# ax.set_ylabel('PWV mean anomalies [mm]')
# ax.legend(labels=[],handles=[trendln[0]])
# fig.tight_layout()
axes[1].legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
axes[1].set_ylabel('PWV anomalies [%]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
axes[1].grid()
axes[1] = fix_time_axis_ticks(axes[1], limits=['1998-01', '2020-01'])
fig.tight_layout()
if save:
filename = 'pwv_station_averaged_trends.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_day_night_pwv_monthly_mean_std_heatmap(
path=work_yuval, day_time=['09:00', '15:00'], night_time=['17:00', '21:00'], compare=['day', 'std']):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
    pw = xr.load_dataset(path / 'GNSS_PW_thresh_50_homogenized.nc')
pw = pw[[x for x in pw if 'error' not in x]]
df = pw.to_dataframe()
sites = group_sites_to_xarray(upper=False, scope='annual')
coast = [x for x in sites.sel(group='coastal').dropna('GNSS').values]
high = [x for x in sites.sel(group='highland').dropna('GNSS').values]
east = [x for x in sites.sel(group='eastern').dropna('GNSS').values]
box_coast = dict(facecolor='cyan', pad=0.05, alpha=0.4)
box_high = dict(facecolor='green', pad=0.05, alpha=0.4)
box_east = dict(facecolor='yellow', pad=0.05, alpha=0.4)
color_dict = [{x: box_coast} for x in coast]
color_dict += [{x: box_high} for x in high]
color_dict += [{x: box_east} for x in east]
color_dict = dict((key, d[key]) for d in color_dict for key in d)
sites = sites.T.values.ravel()
sites_flat = [x for x in sites if isinstance(x, str)]
df = df[sites_flat]
df_mm = df.resample('MS').mean()
df_mm_mean = df_mm.groupby(df_mm.index.month).mean()
df_mm_std = df_mm.groupby(df_mm.index.month).std()
df_day = df.between_time(*day_time)
df_night = df.between_time(*night_time)
df_day_mm = df_day.resample('MS').mean()
df_night_mm = df_night.resample('MS').mean()
day_std = df_day_mm.groupby(df_day_mm.index.month).std()
night_std = df_night_mm.groupby(df_night_mm.index.month).std()
day_mean = df_day_mm.groupby(df_day_mm.index.month).mean()
night_mean = df_night_mm.groupby(df_night_mm.index.month).mean()
per_day_std = 100 * (day_std - df_mm_std) / df_mm_std
per_day_mean = 100 * (day_mean - df_mm_mean) / df_mm_mean
per_night_std = 100 * (night_std - df_mm_std) / df_mm_std
per_night_mean = 100 * (night_mean - df_mm_mean) / df_mm_mean
day_night = compare[0]
mean_std = compare[1]
fig, axes = plt.subplots(
1, 2, sharex=False, sharey=False, figsize=(17, 10))
cbar_ax = fig.add_axes([.91, .3, .03, .4])
if compare[1] == 'std':
all_heat = df_mm_std.T
day_heat = day_std.T
title = 'STD'
elif compare[1] == 'mean':
all_heat = df_mm_mean.T
day_heat = day_mean.T
title = 'MEAN'
vmax = max(day_heat.max().max(), all_heat.max().max())
vmin = min(day_heat.min().min(), all_heat.min().min())
sns.heatmap(all_heat, ax=axes[0], cbar=False, vmin=vmin, vmax=vmax,
annot=True, cbar_ax=None, cmap='Reds')
sns.heatmap(day_heat, ax=axes[1], cbar=True, vmin=vmin, vmax=vmax,
annot=True, cbar_ax=cbar_ax, cmap='Reds')
labels_1 = [x for x in axes[0].yaxis.get_ticklabels()]
[label.set_bbox(color_dict[label.get_text()]) for label in labels_1]
labels_2 = [x for x in axes[1].yaxis.get_ticklabels()]
[label.set_bbox(color_dict[label.get_text()]) for label in labels_2]
axes[0].set_title('All {} in mm'.format(title))
axes[1].set_title('Day only ({}-{}) {} in mm'.format(*day_time, title))
[ax.set_xlabel('month') for ax in axes]
fig.tight_layout(rect=[0, 0, .9, 1])
# fig, axes = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(df_mm_mean.T, annot=True, ax=axes[0])
# ax_mean.set_title('All mean in mm')
# ax_std = sns.heatmap(df_mm_std.T, annot=True, ax=axes[1])
# ax_std.set_title('All std in mm')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes]
# fig.tight_layout()
# fig_day, axes_day = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(per_day_mean.T, annot=True, cmap='bwr', center=0, ax=axes_day[0])
# ax_mean.set_title('Day mean - All mean in % from All mean')
# ax_std = sns.heatmap(per_day_std.T, annot=True, cmap='bwr', center=0, ax=axes_day[1])
# ax_std.set_title('Day std - All std in % from All std')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes_day]
# fig_day.tight_layout()
# fig_night, axes_night = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(per_night_mean.T, annot=True, cmap='bwr', center=0, ax=axes_night[0])
# ax_mean.set_title('Night mean - All mean in % from All mean')
# ax_std = sns.heatmap(per_night_std.T, annot=True, cmap='bwr', center=0, ax=axes_night[1])
# ax_std.set_title('Night std - All std in % from All std')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes_night]
# fig_night.tight_layout()
return fig
def plot_pw_geographical_segments(df, scope='diurnal', kind=None, fg=None,
marker='o', color='b', ylim=[-2, 3],
hue=None, fontsize=14, labelsize=10,
ticklabelcolor=None,
zorder=0, label=None, save=False, bins=None):
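    ''' Facet-grid plot of PWV per GNSS site, arranged by geographical group.
    Each panel corresponds to one site from group_sites_to_xarray(scope=...);
    kind selects the panel content: None (plot df[site] directly), 'violin',
    'violin+swarm', 'mean_month' or 'hist'. Panels are annotated with the site
    name and altitude. Saving assumes the module-level savefig_path. '''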
import xarray as xr
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
from matplotlib.ticker import MultipleLocator
from PW_stations import produce_geo_gnss_solved_stations
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import FormatStrFormatter
import seaborn as sns
scope_dict = {'diurnal': {'xticks': np.arange(0, 23, 3),
'xlabel': 'Hour of day [UTC]',
'ylabel': 'PWV anomalies [mm]',
'colwrap': 3},
'annual': {'xticks': np.arange(1, 13),
'xlabel': 'month',
'ylabel': 'PWV [mm]',
'colwrap': 3}
}
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
color_dict = produce_colors_for_pwv_station(scope=scope, zebra=False, as_dict=True)
geo = produce_geo_gnss_solved_stations(plot=False)
sites = group_sites_to_xarray(upper=False, scope=scope)
# if scope == 'annual':
# sites = sites.T
sites_flat = [x for x in sites.values.flatten() if isinstance(x, str)]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
if fg is None:
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=scope_dict[scope]['colwrap'],
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
if not isinstance(site, str):
ax.set_axis_off()
continue
else:
if kind is None:
df[site].plot(ax=ax, marker=marker, color=color,
zorder=zorder, label=label)
ax.xaxis.set_ticks(scope_dict[scope]['xticks'])
ax.grid(True, which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'violin':
                    if 'month' not in df.columns:
df['month'] = df.index.month
pal = sns.color_palette("Paired", 12)
sns.violinplot(ax=ax, data=df, x='month', y=df[site],
hue=hue,
fliersize=4, gridsize=250, inner='quartile',
scale='area')
ax.set_ylabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'violin+swarm':
                    if 'month' not in df.columns:
df['month'] = df.index.month
pal = sns.color_palette("Paired", 12)
pal = sns.color_palette("tab20")
sns.violinplot(ax=ax, data=df, x='month', y=df[site],
hue=None, color=color_dict.get(site), fliersize=4, gridsize=250, inner=None,
scale='width')
sns.swarmplot(ax=ax, data=df, x='month', y=df[site],
color="k", edgecolor="gray",
size=2.8)
ax.set_ylabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'mean_month':
                    if 'month' not in df.columns:
df['month'] = df.index.month
df_mean = df.groupby('month').mean()
df_mean[site].plot(ax=ax, color=color, marker='o', markersize=10, markerfacecolor="None")
ax.set_ylabel('')
ax.xaxis.set_ticks(scope_dict[scope]['xticks'])
ax.set_xlabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'hist':
if bins is None:
bins = 15
sns.histplot(ax=ax, data=df[site].dropna(),
line_kws={'linewidth': 3}, stat='density', kde=True, bins=bins)
ax.set_xlabel('PWV [mm]', fontsize=fontsize)
ax.grid(True)
ax.set_ylabel('')
xmean = df[site].mean()
xmedian = df[site].median()
std = df[site].std()
sk = skew(df[site].dropna().values)
kurt = kurtosis(df[site].dropna().values)
# xmode = df[y].mode().median()
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean,
color='r', linestyle='--', linewidth=3)
ax.vlines(x=xmedian, ymin=0, ymax=ymed,
color='g', linestyle='-', linewidth=3)
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
ax.legend(['Mean: {:.1f}'.format(
xmean), 'Median: {:.1f}'.format(xmedian)], fontsize=fontsize)
# ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(std, sk, kurt),transform=ax.transAxes, fontsize=fontsize)
ax.tick_params(axis='x', which='major', labelsize=labelsize)
if kind != 'hist':
ax.set_xlabel(scope_dict[scope]['xlabel'], fontsize=16)
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=labelsize)
# set minor y tick labels:
# ax.yaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
# ax.tick_params(axis='y', which='minor', labelsize=labelsize-8)
ax.yaxis.tick_left()
if j == 0:
if kind != 'hist':
ax.set_ylabel(scope_dict[scope]['ylabel'], fontsize=16)
else:
ax.set_ylabel('Frequency', fontsize=16)
# elif j == 1:
# if i>5:
# ax.set_ylabel(scope_dict[scope]['ylabel'], fontsize=12)
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
ax.text(.17, .87, site_label, fontsize=fontsize,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if ticklabelcolor is not None:
ax.tick_params(axis='y', labelcolor=ticklabelcolor)
# ax.yaxis.grid(
# True,
# which='minor',
# linestyle='--',
# linewidth=1,
# alpha=0.7)
# ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
if ylim is not None:
ax.set_ylim(*ylim)
# except KeyError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 0]):
# try:
# df[gr1].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 1]):
# try:
# df[gr2].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 2]):
# try:
# df[gr3].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
fg.fig.tight_layout()
fg.fig.subplots_adjust()
if save:
filename = 'pw_{}_means_{}.png'.format(scope, kind)
plt.savefig(savefig_path / filename, orientation='portrait')
# plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_PWV_comparison_GNSS_radiosonde(path=work_yuval, sound_path=sound_path,
save=True, fontsize=16):
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.patches as mpatches
import matplotlib
matplotlib.rcParams['lines.markeredgewidth'] = 1
sns.set_style('whitegrid')
sns.set_style('ticks')
pal = sns.color_palette("tab10", 2)
# load radiosonde:
radio = xr.load_dataarray(sound_path / 'bet_dagan_2s_sounding_PWV_2014-2019.nc')
radio = radio.rename({'sound_time': 'time'})
radio = radio.resample(time='MS').mean()
radio.name = 'radio'
dfr = radio.to_dataframe()
dfr['month'] = dfr.index.month
# load tela:
tela = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50.nc')['tela']
dfm = tela.to_dataframe(name='tela-pwv')
dfm = dfm.loc[dfr.index]
dfm['month'] = dfm.index.month
dff = pd.concat([dfm, dfr], keys=['GNSS-TELA', 'Radiosonde'])
dff['source'] = dff.index.get_level_values(0)
# dff['month'] = dfm.index.month
dff = dff.reset_index()
dff['pwv'] = dff['tela-pwv'].fillna(0)+dff['radio'].fillna(0)
dff = dff[dff['pwv'] != 0]
fig = plt.figure(figsize=(20, 6))
sns.set_style("whitegrid")
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
grid = plt.GridSpec(
1, 2, width_ratios=[
2, 1], wspace=0.1, hspace=0)
ax_ts = fig.add_subplot(grid[0]) # plt.subplot(221)
ax_v = fig.add_subplot(grid[1])
# fig, axes = plt.subplots(1, 2, figsize=(20, 6))
ax_v = sns.violinplot(data=dff, x='month', y='pwv',
fliersize=10, gridsize=250, ax=ax_v,
inner=None, scale='width', palette=pal,
hue='source', split=True, zorder=20)
[x.set_alpha(0.5) for x in ax_v.collections]
ax_v = sns.pointplot(x='month', y='pwv', data=dff, estimator=np.mean,
dodge=True, ax=ax_v, hue='source', color=None,
linestyles='-', markers=['s', 'o'], scale=0.7,
ci=None, alpha=0.5, zorder=0, style='source',edgecolor='k', edgewidth=0.4)
ax_v.get_legend().set_title('')
p1 = (mpatches.Patch(facecolor=pal[0], edgecolor='k', alpha=0.5))
p2 = (mpatches.Patch(facecolor=pal[1], edgecolor='k', alpha=0.5))
handles = [p1, p2]
ax_v.legend(handles=handles, labels=['GNSS-TELA', 'Radiosonde'],
loc='upper left', prop={'size': fontsize-2})
# ax_v.legend(loc='upper left', prop={'size': fontsize-2})
ax_v.tick_params(labelsize=fontsize)
ax_v.set_ylabel('')
ax_v.grid(True, axis='both')
ax_v.set_xlabel('month', fontsize=fontsize)
df = dfm['tela-pwv'].to_frame()
df.columns = ['GNSS-TELA']
df['Radiosonde'] = dfr['radio']
cmap = sns.color_palette("tab10", as_cmap=True)
df.plot(ax=ax_ts, style=['s-', 'o-'], cmap=cmap)
# df['GNSS-TELA'].plot(ax=ax_ts, style='s-', cmap=cmap)
# df['Radiosonde'].plot(ax=ax_ts, style='o-', cmap=cmap)
ax_ts.grid(True, axis='both')
ylim = ax_v.get_ylim()
ax_ts.set_ylim(*ylim)
ax_ts.set_ylabel('PWV [mm]', fontsize=fontsize)
ax_ts.set_xlabel('')
ax_ts.legend(loc='upper left', prop={'size': fontsize-2})
ax_ts.tick_params(labelsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_radio_comparison_violin+ts.png'
plt.savefig(savefig_path / filename, orientation='landscape',bbox_inches='tight')
return fig
def prepare_diurnal_variability_table(path=work_yuval, rename_cols=True):
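    ''' Build (and print as LaTeX) the diurnal-variability table per GNSS
    site. The annual/seasonal diurnal variability from
    calculate_diurnal_variability is reordered by geographical group and a
    'Location' column is added; group means are printed as a second table. '''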
from PW_stations import calculate_diurnal_variability
df = calculate_diurnal_variability()
gr = group_sites_to_xarray(scope='diurnal')
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
if rename_cols:
df.columns = ['Annual [%]', 'JJA [%]', 'SON [%]', 'DJF [%]', 'MAM [%]']
cols = [x for x in df.columns]
df['Location'] = geo
cols = ['Location'] + cols
df = df[cols]
df.index = df.index.str.upper()
print(df.to_latex())
print('')
print(df.groupby('Location').mean().to_latex())
return df
def prepare_harmonics_table(path=work_yuval, season='ALL',
scope='diurnal', era5=False, add_third=False):
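    ''' Summarize the diurnal (or annual) PWV harmonics per station in a
    table. For each station the amplitude, phase and explained variance of the
    first two (optionally three) harmonics are extracted from the pre-computed
    harmonics file, the explained variances come from run_MLR_harmonics, and
    the result is printed as LaTeX and returned as a DataFrame. '''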
import xarray as xr
from aux_gps import run_MLR_harmonics
import pandas as pd
import numpy as np
from calendar import month_abbr
if scope == 'diurnal':
cunits = 'cpd'
grp = 'hour'
grp_slice = [0, 12]
tunits = 'UTC'
elif scope == 'annual':
cunits = 'cpy'
grp = 'month'
grp_slice = [7, 12]
tunits = 'month'
if era5:
ds = xr.load_dataset(work_yuval / 'GNSS_PW_ERA5_harmonics_annual.nc')
else:
ds = xr.load_dataset(work_yuval / 'GNSS_PW_harmonics_{}.nc'.format(scope))
stations = list(set([x.split('_')[0] for x in ds]))
records = []
for station in stations:
        if 'season' in ds.dims:
            diu_ph = ds[station + '_mean'].sel({'season': season, cunits: 1}).idxmax()
            diu_amp = ds[station + '_mean'].sel({'season': season, cunits: 1}).max()
            semidiu_ph = ds[station +
                            '_mean'].sel({'season': season, cunits: 2, grp: slice(*grp_slice)}).idxmax()
            semidiu_amp = ds[station +
                             '_mean'].sel({'season': season, cunits: 2, grp: slice(*grp_slice)}).max()
else:
diu_ph = ds[station + '_mean'].sel({cunits: 1}).idxmax()
diu_amp = ds[station + '_mean'].sel({cunits: 1}).max()
semidiu_ph = ds[station +
'_mean'].sel({cunits: 2, grp: slice(*grp_slice)}).idxmax()
semidiu_amp = ds[station +
'_mean'].sel({cunits: 2, grp: slice(*grp_slice)}).max()
if add_third:
third_ph = ds[station +
'_mean'].sel({cunits: 3, grp: slice(*grp_slice)}).idxmax()
third_amp = ds[station +
'_mean'].sel({cunits: 3, grp: slice(*grp_slice)}).max()
        ds_for_MLR = ds[['{}'.format(station), '{}_mean'.format(station)]]
        harm_di = run_MLR_harmonics(
            ds_for_MLR, season=season, cunits=cunits, plot=False)
        if add_third:
            record = [station, diu_amp.item(), diu_ph.item(), harm_di[1],
                      semidiu_amp.item(), semidiu_ph.item(), harm_di[2],
                      third_amp.item(), third_ph.item(), harm_di[3],
                      harm_di[1] + harm_di[2] + harm_di[3]]
        else:
            record = [station, diu_amp.item(), diu_ph.item(), harm_di[1],
                      semidiu_amp.item(), semidiu_ph.item(), harm_di[2],
                      harm_di[1] + harm_di[2]]
records.append(record)
df = pd.DataFrame(records)
if add_third:
df.columns = ['Station', 'A1 [mm]', 'P1 [{}]'.format(tunits), 'V1 [%]', 'A2 [mm]',
'P2 [{}]'.format(tunits), 'V2 [%]', 'A3 [mm]', 'P3 [{}]'.format(tunits), 'V3 [%]', 'VT [%]']
else:
df.columns = ['Station', 'A1 [mm]', 'P1 [{}]'.format(tunits), 'V1 [%]', 'A2 [mm]',
'P2 [{}]'.format(tunits), 'V2 [%]', 'VT [%]']
df = df.set_index('Station')
gr = group_sites_to_xarray(scope=scope)
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
df.index = df.index.str.upper()
pd.options.display.float_format = '{:.1f}'.format
    if scope == 'annual':
        df['P1 [{}]'.format(tunits)] = df['P1 [{}]'.format(tunits)].astype(int).apply(lambda x: month_abbr[x])
        df['P2 [{}]'.format(tunits)] = df['P2 [{}]'.format(tunits)].astype(int).apply(lambda x: month_abbr[x])
        if add_third:
            df['P3 [{}]'.format(tunits)] = df['P3 [{}]'.format(tunits)].astype(int).apply(lambda x: month_abbr[x])
if add_third:
df = df[['Location', 'A1 [mm]', 'A2 [mm]', 'A3 [mm]', 'P1 [{}]'.format(tunits),
'P2 [{}]'.format(tunits),'P3 [{}]'.format(tunits), 'V1 [%]', 'V2 [%]', 'V3 [%]', 'VT [%]']]
else:
df = df[['Location', 'A1 [mm]', 'A2 [mm]', 'P1 [{}]'.format(tunits),
'P2 [{}]'.format(tunits), 'V1 [%]', 'V2 [%]', 'VT [%]']]
print(df.to_latex())
return df
def plot_station_mean_violin_plot(path=work_yuval,
remove_stations=['nizn','spir'],
fontsize=16, save=True):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(path / 'GNSS_PW_monthly_anoms_thresh_50.nc')
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_mean = pw.to_array('s').mean('s')
df = pw_mean.to_dataframe(name='pwv')
df['month'] = df.index.month
df['last_decade'] = df.index.year >= 2010
df['years'] = '1997-2009'
    df.loc[df['last_decade'], 'years'] = '2010-2019'
fig, axes = plt.subplots(2, 1, figsize=(12, 10))
# sns.histplot(pw_mean, bins=25, ax=axes[0], kde=True, stat='count')
# axes[0].set_xlabel('PWV anomalies [mm]')
# df = pw_mean.groupby('time.month').std().to_dataframe(name='PWV-SD')
# df.plot.bar(ax=axes[1], rot=0)
# axes[1].set_ylabel('PWV anomalies SD [mm]')
axes[0]= sns.violinplot(ax=axes[0], data=df, x='month', y='pwv', color='tab:purple',
fliersize=10, gridsize=250, inner=None, scale='width',
hue=None)
[x.set_alpha(0.8) for x in axes[0].collections]
sns.swarmplot(ax=axes[0], x="month", y='pwv', data=df,
color="k", edgecolor="gray",
hue=None, dodge=False)
colors = ["tab:blue", "tab:red"] # Set your custom color palette
blue_red = sns.set_palette(sns.color_palette(colors))
axes[1] = sns.violinplot(ax=axes[1], data=df, x='month', y='pwv',
palette=blue_red, fliersize=10, gridsize=250,
inner=None, scale='width',
hue='years', split=True)
sns.swarmplot(ax=axes[1], x="month", y='pwv', data=df,
size=4.5, color='k', edgecolor="gray", palette=None,
hue='years', dodge=True)
[x.set_alpha(0.8) for x in axes[1].collections]
# remove legend, reorder and re-plot:
axes[1].get_legend().remove()
handles, labels = axes[1].get_legend_handles_labels()
axes[1].legend(handles=handles[0:2], labels=labels[0:2],
loc='upper left', prop={'size': 16})
# upper legend:
color = axes[0].collections[0].get_facecolor()[0]
handle = (mpatches.Patch(facecolor=color, edgecolor='k', alpha=0.8))
axes[0].legend(handles=[handle], labels=['1997-2019'],
loc='upper left', prop={'size': 16})
axes[0].grid()
axes[1].grid()
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[1].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('month', fontsize=fontsize)
# draw 0 line:
axes[0].axhline(0, color='k', lw=2, zorder=0)
axes[1].axhline(0, color='k', lw=2, zorder=0)
# annotate extreme events :
axes[0].annotate('2015', xy=(9, 5.58), xycoords='data',
xytext=(8, 7), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='center',
fontsize=fontsize, fontweight='bold')
axes[0].annotate('2013', xy=(9, -5.8), xycoords='data',
xytext=(8, -7), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='center',
fontsize=fontsize, fontweight='bold')
axes[0].set_ylim(-10, 10)
axes[1].set_ylim(-10, 10)
fig.tight_layout()
fig.subplots_adjust(top=0.984,
bottom=0.078,
left=0.099,
right=0.988,
hspace=0.092,
wspace=0.175)
if save:
filename = 'pwv_inter-annual_violin+swarm.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_october_2015(path=work_yuval):
import xarray as xr
    pw_daily = xr.load_dataset(path /
                               'GNSS_PW_daily_thresh_50_homogenized.nc')
    pw = xr.load_dataset(path / 'GNSS_PW_thresh_50_homogenized.nc')
    pw = pw[[x for x in pw if '_error' not in x]]
    pw_daily = pw_daily[[x for x in pw_daily if '_error' not in x]]
fig, ax = plt.subplots(figsize=(20, 12))
ln1 = pw['tela'].sel(time=slice('2015-07', '2015-12')
).plot(linewidth=0.5, ax=ax)
ln2 = pw['jslm'].sel(time=slice('2015-07', '2015-12')
).plot(linewidth=0.5, ax=ax)
ln3 = pw_daily['tela'].sel(time=slice(
'2015-07', '2015-12')).plot(color=ln1[0].get_color(), linewidth=2.0, ax=ax)
ln4 = pw_daily['jslm'].sel(time=slice(
'2015-07', '2015-12')).plot(color=ln2[0].get_color(), linewidth=2.0, ax=ax)
ax.grid()
ax.legend(ln1+ln2+ln3+ln4, ['TELA-5mins',
'JSLM-5mins', 'TELA-daily', 'JSLM-daily'])
fig, ax = plt.subplots(figsize=(20, 12))
ln1 = pw['tela'].sel(time='2015-10').plot(ax=ax)
ln2 = pw['jslm'].sel(time='2015-10').plot(ax=ax)
ax.grid()
ax.legend(ln1+ln2, ['TELA-5mins', 'JSLM-5mins'])
fig, ax = plt.subplots(figsize=(20, 12))
ln1 = pw['tela'].sel(time=slice('2015-10-22', '2015-10-27')).plot(ax=ax)
ln2 = pw['jslm'].sel(time=slice('2015-10-22', '2015-10-27')).plot(ax=ax)
ax.grid()
ax.legend(ln1+ln2, ['TELA-5mins', 'JSLM-5mins'])
return ax
def plot_correlation_pwv_mean_anoms_and_qflux_anoms(era5_path=era5_path,
work_path=work_yuval,
anoms=None, pwv_mm=None,
all_months=False, mf='qf',
add_hline=None, title=None,
save=True,
remove_stations=['nizn', 'spir']):
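    ''' Correlate the mean GNSS PWV anomalies with moisture-flux (qf)
    anomalies per pressure level. The monthly moisture flux is augmented with
    its pressure integral (divided by 9.79, presumably the gravitational
    acceleration), correlated with the PWV anomaly series seasonally and
    annually, and shown either as a season-by-level heatmap (all_months=True)
    or as correlation-vs-level curves. Saving assumes the module-level
    savefig_path. '''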
import xarray as xr
from aux_gps import anomalize_xr
import matplotlib.pyplot as plt
from aux_gps import get_season_for_pandas_dtindex
from aux_gps import calculate_pressure_integral
import seaborn as sns
# first load pw and produce mean anomalies:
pw = xr.load_dataset(work_path/'GNSS_PW_monthly_thresh_50.nc')
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
if anoms is None:
pw_anoms = anomalize_xr(pw, 'MS')
pw_anoms_mean = pw_anoms.to_array('s').mean('s')
else:
pw_anoms_mean = pw[anoms]
if pwv_mm is not None:
pw_anoms_mean = pwv_mm
# now load qflux and resmaple to mm:
if anoms is None:
ds = xr.load_dataset(
era5_path/'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')
else:
ds = xr.load_dataset(work_path / 'GNSS_ERA5_qf_1996-2019.nc')
mf = anoms
qf_mm = ds[mf].resample(time='MS').mean()
# add pressure integral:
iqf = calculate_pressure_integral(qf_mm)/9.79
iqf = iqf.expand_dims('level')
iqf['level'] = ['integrated']
qf_mm = xr.concat([qf_mm.sortby('level'), iqf], 'level')
# now produce corr for each level:
dsl = [xr.corr(qf_mm.sel(level=x), pw_anoms_mean) for x in ds['level']][::-1]
dsl.append(xr.corr(qf_mm.sel(level='integrated'), pw_anoms_mean))
dsl = xr.concat(dsl, 'level')
# corr = xr.concat(dsl + [iqf], 'level')
corr_annual = xr.concat(dsl, 'level')
df = pw_anoms_mean.to_dataframe('pwv')
df = df.join(qf_mm.to_dataset('level').to_dataframe())
season = get_season_for_pandas_dtindex(df)
# corr = df.groupby(df.index.month).corr()['pwv'].unstack()
corr = df.groupby(season).corr()['pwv'].unstack()
corr = corr.drop('pwv', axis=1).T
corr = corr[['DJF','MAM','JJA','SON']]
corr['Annual'] = corr_annual.to_dataframe('Annual')
if all_months:
corr.index.name = 'season'
fig, ax = plt.subplots(figsize=(6, 9))
sns.heatmap(corr, annot=True, center=0, cmap='coolwarm', ax=ax, cbar_kws={
                    'label': 'Pearson correlation coefficient', 'aspect': 40})
ax.set_ylabel('pressure level [hPa]')
ax.set_xlabel('')
# add line to separate integrated from level
ax.hlines([37], *ax.get_xlim(), color='k')
# add boxes around maximal values:
ax.hlines([26], [1], [5], color='w', lw=0.5)
ax.hlines([27], [1], [5], color='w', lw=0.5)
ax.vlines([1, 2, 3, 4], 26, 27, color='w', lw=0.5)
ax.hlines([28], [0], [1], color='w', lw=0.5)
ax.hlines([29], [0], [1], color='w', lw=0.5)
ax.vlines([0, 1], 28, 29, color='w', lw=0.5)
fig.tight_layout()
filename = 'pwv_qflux_levels_correlations_months.png'
else:
# fig = plt.figure(figsize=(20, 6))
# gridax = plt.GridSpec(1, 2, width_ratios=[
# 10, 2], wspace=0.05)
# ax_level = fig.add_subplot(gridax[0, 1]) # plt.subplot(221)
# ax_ts = fig.add_subplot(gridax[0, 0]) # plt.subplot(122)
fig, ax = plt.subplots(figsize=(8, 6))
corr_annual = corr_annual.to_dataframe('Annual')
corr_annual.plot(ax=ax, lw=2, label='Annual', color=seasonal_colors['Annual'])
colors = [seasonal_colors[x] for x in corr.columns]
corr.iloc[0:37].plot(ax=ax, lw=2, color=colors)
# ax_level.yaxis.set_ticks_position("right")
# ax_level.yaxis.set_label_position("right")
ax.grid()
        ax.set_ylabel('Pearson correlation coefficient')
ax.set_xlabel('pressure level [hPa]')
if add_hline is not None:
ax.axvline(add_hline, color='k', lw=2)
int_corr = df[['pwv','integrated']].corr()['integrated']['pwv']
# ax.axhline(int_corr, color='r', linestyle='--', lw=2)
# df[['pwv', add_hline]].loc['1997':'2019'].plot(ax=ax_ts, secondary_y=add_hline)
filename = 'pwv_qflux_levels_correlations.png'
if title is not None:
fig.suptitle(title)
if save:
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_pwv_anomalies_histogram(path=work_yuval):
import xarray as xr
import numpy as np
import seaborn as sns
from scipy.stats import norm
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_anoms_thresh_50_homogenized.nc')
arr = pw.to_array('station').to_dataframe('pw').values.ravel()
arr_no_nans = arr[~np.isnan(arr)]
mu, std = norm.fit(arr_no_nans)
ax = sns.histplot(
arr_no_nans,
stat='density',
color='tab:orange',
alpha=0.5)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
ln = ax.plot(x, p, 'k', linewidth=2)
# x_std = x[(x>=-std) & (x<=std)]
# y_std = norm.pdf(x_std, mu, std)
# x_std2 = x[(x>=-2*std) & (x<=-std) | (x>=std) & (x<=2*std)]
# y_std2 = norm.pdf(x_std2, mu, std)
# ax.fill_between(x_std,y_std,0, alpha=0.7, color='b')
# ax.fill_between(x_std2,y_std2,0, alpha=0.7, color='r')
y_std = [norm.pdf(std, mu, std), norm.pdf(-std, mu, std)]
y_std2 = [norm.pdf(std * 2, mu, std), norm.pdf(-std * 2, mu, std)]
ln_std = ax.vlines([-std, std], ymin=[0, 0], ymax=y_std,
color='tab:blue', linewidth=2)
ln_std2 = ax.vlines([-std * 2, std * 2], ymin=[0, 0],
ymax=y_std2, color='tab:red', linewidth=2)
leg_labels = ['Normal distribution fit',
'1-Sigma: {:.2f} mm'.format(std),
'2-Sigma: {:.2f} mm'.format(2 * std)]
ax.legend([ln[0], ln_std, ln_std2], leg_labels)
ax.set_xlabel('PWV anomalies [mm]')
return ax
# def plot_quiver_panels(u, v, tcwv,
# times=['2015-10', '2013-10'], level=750):
# import matplotlib.pyplot as plt
# from matplotlib.colors import Normalize
# from mpl_toolkits.axes_grid1 import AxesGrid
# import matplotlib.cm as cm
# import pandas as pd
# from palettable.colorbrewer import sequential as seq_cmap
# from palettable.colorbrewer import diverging as div_cmap
# from aux_gps import anomalize_xr
# cmap_yl = seq_cmap.YlOrRd_9.mpl_colormap
# cmap_rb = div_cmap.PuOr_11.mpl_colormap
# cmap = cmap_rb
# times = pd.to_datetime(times)
# tcwv = slice_time_level_geo_field(tcwv, level=None, time=times,
# anoms=True,
# lats=[17, 47], lons=[17, 47])
# qu = slice_time_level_geo_field(u, level=750, time=times,
# anoms=True,
# lats=[17, 47], lons=[17, 47])
# qv = slice_time_level_geo_field(v, level=750, time=times,
# anoms=True,
# lats=[17, 47], lons=[17, 47])
# fig = plt.figure(figsize=(15, 5))
# # fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# grid = AxesGrid(fig, 111, # as in plt.subplot(111)
# nrows_ncols=(1, 2),
# axes_pad=0.15,
# share_all=True,
# cbar_location="right",
# cbar_mode="single",
# cbar_size="7%",
# cbar_pad=0.15,
# )
# # normalizer=Normalize(-6,6)
# vmax= abs(max(abs(tcwv.min().values), abs(tcwv.max().values)))
# vmin = -vmax
# print(vmin, vmax)
# # vmax = tcwv.max().item()
# cs1 = plot_contourf_field_with_map_overlay(tcwv.sel(time=times[0]), ax=grid[0],
# vmin=vmin, vmax=vmax, cmap=cmap,
# colorbar=False, title='2015-10',
# cbar_label='', extend=None,
# alpha=0.5, levels=21)
# cs2 = plot_contourf_field_with_map_overlay(tcwv.sel(time=times[1]), ax=grid[1],
# vmin=vmin, vmax=vmax, cmap=cmap,
# colorbar=False, title='2013-10',
# cbar_label='', extend=None,
# alpha=0.5, levels=21)
# cbar = grid[0].cax.colorbar(cs2)
# # cbar = grid.cbar_axes[0].colorbar(cs2)
# label = 'PWV anomalies [mm]'
# cbar.set_label_text(label)
# # for cax in grid.cbar_axes:
# # cax.toggle_label(False)
# # im=cm.ScalarMappable(norm=normalizer)
# return fig
# TODO: calculate long term monthly mean from slice and incorporate it easily:
def plot_quiver_panels(u, v, sf,
times=['2013-10', '2015-10'], level=750,
anoms=False, suptitle='', labelsize=12):
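    ''' Plot PWV (the scalar field sf) maps with overlaid moisture-flux
    arrows for the selected times. Panels show the requested times plus the
    long-term October climatology (clim_month=10 is currently hard-wired),
    over a 17-47 deg lat/lon window, with a dashed box over Israel and
    labelled gridlines. The flux components u, v are taken at the requested
    pressure level. '''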
import matplotlib.pyplot as plt
import pandas as pd
# from palettable.colorbrewer import sequential as seq_cmap
from palettable.colorbrewer import sequential as colorbrewer_seq
from palettable.scientific import sequential as scientific_seq
from palettable.cmocean import sequential as cmocean_seq
from palettable.cartocolors import sequential as seq_cmap
from palettable.cartocolors import diverging as div_cmap
import cartopy.crs as ccrs
import xarray as xr
cmap_seq = seq_cmap.BluYl_7.mpl_colormap
cmap_seq = colorbrewer_seq.Blues_9.mpl_colormap
cmap_div = div_cmap.Tropic_7.mpl_colormap
cmap_quiver = seq_cmap.SunsetDark_7.mpl_colormap
# cmap_quiver = colorbrewer_seq.YlOrRd_9.mpl_colormap
# cmap_quiver = scientific_seq.LaJolla_20.mpl_colormap
# cmap_quiver = cmocean_seq.Solar_20.mpl_colormap
cmap = cmap_seq
if anoms:
cmap = cmap_div
times_dt = pd.to_datetime(times)
cb_label = 'PWV [mm]'
tcwv = slice_time_level_geo_field(sf, level=None, time=times_dt,
anoms=anoms, clim_month=10,
lats=[17, 47], lons=[17, 47])
qu = slice_time_level_geo_field(u, level=750, time=times_dt,
anoms=anoms, clim_month=10,
lats=[17, 47], lons=[17, 47])
qv = slice_time_level_geo_field(v, level=750, time=times_dt,
anoms=anoms, clim_month=10,
lats=[17, 47], lons=[17, 47])
fg = plot_scaler_field_ontop_map_cartopy(tcwv, col='time', levels=21,
cmap=cmap, alpha=0.8, cbar_label=cb_label,
labelsize=labelsize, figsize=(18, 6))
fg = plot_vector_arrows_ontop_map_cartopy(qu, qv, lon_dim='longitude',
lat_dim='latitude', fg=fg,
qp=5, col='time', qkey=True,
cmap=cmap_quiver, zorder=20)
gdf = box_lat_lon_polygon_as_gpd(lat_bounds=[29, 34], lon_bounds=[34, 36])
for i, ax in enumerate(fg.axes.flat):
# add the box over Israel:
ax.add_geometries(gdf['geometry'].values, crs=ccrs.PlateCarree(),
edgecolor='k', linestyle='--', alpha=1, linewidth=2)
# add gridlines:
gl = ax.gridlines(alpha=0.5, color='k', linestyle='--', draw_labels=True,
dms=True, x_inline=False, y_inline=False, linewidth=1)
gl.top_labels = False
# gl.left_labels = False
gl.xlabel_style = {'size': labelsize, 'color': 'k'}
gl.ylabel_style = {'size': labelsize, 'color': 'k'}
if i == 0:
gl.right_labels = False
elif i == 1:
gl.right_labels = False
gl.left_labels = False
elif i == 2:
gl.right_labels = False
gl.left_labels = False
if i <= 1:
ax.set_title(times_dt[i].strftime('%b %Y'))
else:
ax.set_title('Mean Oct')
fg.fig.suptitle(suptitle)
fg.fig.subplots_adjust(top=0.899,
bottom=0.111,
left=0.03,
right=0.94,
hspace=0.17,
wspace=0.0)
return fg
def slice_time_level_geo_field(field, level=750, lat_dim='latitude',
lon_dim='longitude', time='2012-10',
level_dim='level', time_dim='time',
lats=[None, None], lons=[None, None],
anoms=False, clim_month=None):
from aux_gps import anomalize_xr
import pandas as pd
import xarray as xr
if level is not None:
field = field.sel({level_dim: level})
if field[lat_dim].diff(lat_dim).median() < 0:
lats = lats[::-1]
field = field.sel({lat_dim: slice(*lats), lon_dim: slice(*lons)}).load()
if time is not None and anoms and clim_month is None:
field = field.load()
field = anomalize_xr(field, freq='MS', time_dim=time_dim)
if time is not None and clim_month is None:
field = field.sel({time_dim: time})
elif time is None and clim_month is not None:
field = field.load()
field = field.groupby('{}.month'.format(
time_dim)).mean().sel(month=clim_month)
elif time is not None and clim_month is not None:
clim = field.groupby('{}.month'.format(time_dim)
).mean().sel(month=clim_month)
clim = clim.rename({'month': time_dim})
clim[time_dim] = pd.to_datetime(
'2200-{}'.format(clim_month), format='%Y-%m')
field = field.sel({time_dim: time})
field = xr.concat([field, clim], time_dim)
field = field.sortby(lat_dim).squeeze()
return field
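# A minimal, self-contained sketch of the selection/climatology pattern implemented in
# slice_time_level_geo_field above, run on synthetic data only (it does not read ERA5
# files and does not call the module function, so aux_gps is not required here):
def _demo_slice_and_climatology():
    import numpy as np
    import pandas as pd
    import xarray as xr
    times = pd.date_range('2013-01-01', '2016-12-01', freq='MS')
    lats = np.arange(17.0, 48.0)
    lons = np.arange(17.0, 48.0)
    field = xr.DataArray(np.random.rand(times.size, lats.size, lons.size),
                         coords=[times, lats, lons],
                         dims=['time', 'latitude', 'longitude'])
    # select two specific months:
    sel = field.sel(time=pd.to_datetime(['2013-10-01', '2015-10-01']))
    # long-term October climatology, tagged with a dummy timestamp so it can be
    # concatenated with the selected fields (same trick as in the function above):
    clim = field.groupby('time.month').mean().sel(month=10)
    clim = clim.expand_dims(time=[pd.Timestamp('2200-10-01')]).drop_vars('month')
    return xr.concat([sel, clim], 'time')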
# def plot_contourf_field_with_map_overlay(field, lat_dim='latitude',
# lon_dim='longitude', ax=None,
# vmin=None, vmax=None, cmap='viridis',
# colorbar=False, title=None,
# cbar_label='', extend=None,
# alpha=0.5, levels=11):
# import salem
# import matplotlib.pyplot as plt
# field = field.transpose(lon_dim, lat_dim, ...)
# if ax is None:
# f, ax = plt.subplots(figsize=(10, 8))
# # plot the salem map background, make countries in grey
# smap = field.salem.get_map(countries=False)
# smap.set_shapefile(countries=False, oceans=True, lakes=True, color='grey')
# smap.plot(ax=ax)
# # transform the coordinates to the map reference system and contour the data
# xx, yy = smap.grid.transform(field[lat_dim].values, field[lon_dim].values,
# crs=field.salem.grid.proj)
# cs = ax.contourf(xx, yy, field, cmap=cmap, levels=levels,
# alpha=alpha, vmin=vmin, vmax=vmax, extend=extend)
# if colorbar:
# f.colorbar(cs, ax=ax, aspect=40, label=cbar_label)
# if title is not None:
# ax.set_title(title)
# return cs
# def plot_quiver_ontop_map(u, v, ax=None, lat_dim='latitude',
# lon_dim='longitude', plot_map=False,
# qp=5, qkey=True):
# import salem
# import matplotlib.pyplot as plt
# import numpy as np
# u = u.transpose(lon_dim, lat_dim, ...)
# v = v.transpose(lon_dim, lat_dim, ...)
# if ax is None:
# f, ax = plt.subplots(figsize=(10, 8))
# # plot the salem map background, make countries in grey
# smap = u.salem.get_map(countries=False)
# smap.set_shapefile(countries=False, oceans=True, lakes=True, color='grey')
# if plot_map:
# smap.plot(ax=ax)
# # transform the coordinates to the map reference system and contour the data
# xx, yy = smap.grid.transform(u[lat_dim].values, u[lon_dim].values,
# crs=u.salem.grid.proj)
# # Quiver only every 7th grid point
# u = u[4::qp, 4::qp]
# v = v[4::qp, 4::qp]
# # transform their coordinates to the map reference system and plot the arrows
# xx, yy = smap.grid.transform(u[lat_dim].values, u[lon_dim].values,
# crs=u.salem.grid.proj)
# xx, yy = np.meshgrid(xx, yy)
# qu = ax.quiver(xx, yy, u.values, v.values)
# if qkey:
# qk = ax.quiverkey(qu, 0.7, 1.05, 2, '2 msec',
# labelpos='E', coordinates='axes')
# return ax
def plot_scaler_field_ontop_map_cartopy(field, col='time', levels=21,
cmap='bwr', alpha=1,
labelsize=14, figsize=(15, 6),
cbar_label=''):
import cartopy.crs as ccrs
import cartopy.feature as cfeature
fg = field.plot.contourf(levels=levels, col=col, transform=ccrs.PlateCarree(),
cmap=cmap, alpha=alpha, figsize=figsize, add_colorbar=False,
subplot_kws={"projection": ccrs.PlateCarree()})
    # add a colorbar axes: lower-left corner at (0.94, 0.1) in figure coordinates, width 0.01, height 0.8
    cbar_ax = fg.fig.add_axes([0.94, 0.1, 0.01, 0.8])
fg.add_colorbar(cax=cbar_ax, label=cbar_label)
for ax in fg.axes.flat:
# land_50m = cfeature.NaturalEarthFeature('physical', 'lakes', '10m',
# edgecolor='face',
# facecolor='b', alpha=0.3)
# ax.add_feature(land_50m, zorder=30)
# ax.add_feature(cfeature.LAKES.with_scale('110m'), facecolor='b')
# ax.add_image(tiler, 6)
ax.coastlines('50m')
# ax.background_img(extent=[17, 47, 17, 47])
ax.tick_params(axis="y", direction="out", length=8)
return fg
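# Hedged usage sketch for the faceted scalar-field plot above, using a random field in
# place of real PWV data (assumes cartopy and matplotlib are installed, as elsewhere in
# this module):
def _demo_scalar_field_facets():
    import numpy as np
    import pandas as pd
    import xarray as xr
    field = xr.DataArray(np.random.rand(2, 31, 31),
                         coords=[pd.to_datetime(['2013-10-01', '2015-10-01']),
                                 np.arange(17.0, 48.0), np.arange(17.0, 48.0)],
                         dims=['time', 'latitude', 'longitude'])
    return plot_scaler_field_ontop_map_cartopy(field, col='time', levels=11,
                                               cmap='viridis',
                                               cbar_label='demo field [a.u.]')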
def plot_vector_arrows_ontop_map_cartopy(u, v, lon_dim='longitude',
lat_dim='latitude', fg=None,
qp=5, col='time', qkey=True,
cmap=None, zorder=None):
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import cartopy.feature as cfeature
    import numpy as np
    scale = np.sqrt(u**2 + v**2).max().item()
if fg is None:
fg = plt.figure(figsize=(8, 10))
ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())
ax.add_feature(cfeature.LAND.with_scale('110m'))
# ax.add_image(tiler, 6)
ax.coastlines('50m')
gl = ax.gridlines(alpha=0.5, color='k', linestyle='--', draw_labels=True,
dms=True, x_inline=False, y_inline=False, linewidth=1)
        # Quiver only every qp-th grid point
u = u[4::qp, 4::qp]
v = v[4::qp, 4::qp]
x = u[lon_dim].values
y = u[lat_dim].values
# set displayed arrow length for longest arrow
displayed_arrow_length = 2
scale_factor = scale / displayed_arrow_length
ax.quiver(x, y, u, v, units='xy',
width=0.1, zorder=zorder,
scale=scale_factor, scale_units='xy',
transform=ccrs.PlateCarree())
return fg
for i, ax in enumerate(fg.axes.flat):
# set displayed arrow length for longest arrow
displayed_arrow_length = 2
scale_factor = scale / displayed_arrow_length
u1 = u.isel({col: i})
v1 = v.isel({col: i})
# colors1 = colors.isel({col: i})
        # Quiver only every qp-th grid point
u1 = u1[4::qp, 4::qp]
v1 = v1[4::qp, 4::qp]
colors = np.sqrt(u1**2 + v1**2) / scale
x = u1[lon_dim].values
y = u1[lat_dim].values
if cmap is not None:
q = ax.quiver(x, y, u1, v1, colors, units='xy',
width=0.1, cmap=cmap,
scale=scale_factor, scale_units='xy',
transform=ccrs.PlateCarree(),
zorder=zorder)
else:
q = ax.quiver(x, y, u1, v1, units='xy',
width=0.1, zorder=zorder,
scale=scale_factor, scale_units='xy',
transform=ccrs.PlateCarree())
if qkey:
qk = ax.quiverkey(q, 0.7, 1.05, 0.03, r'0.03 m$\cdot$sec$^{-1}$',
labelpos='E', coordinates='axes')
return fg
def box_lat_lon_polygon_as_gpd(lat_bounds=[29, 34], lon_bounds=[34, 36.5]):
import geopandas as gpd
from shapely.geometry import Point, LineString
point1 = [lon_bounds[0], lat_bounds[0]]
point2 = [lon_bounds[0], lat_bounds[1]]
point3 = [lon_bounds[1], lat_bounds[1]]
point4 = [lon_bounds[1], lat_bounds[0]]
line1 = LineString([Point(*point1), Point(*point2)])
line2 = LineString([Point(*point2), Point(*point3)])
line3 = LineString([Point(*point3), Point(*point4)])
line4 = LineString([Point(*point4), Point(*point1)])
geo_df = gpd.GeoDataFrame(geometry=[line1, line2, line3, line4])
return geo_df
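# An equivalent sketch using shapely's box helper: one closed polygon boundary instead
# of four separate LineStrings (shown for comparison only, not used by the module):
def box_lat_lon_boundary_as_gpd(lat_bounds=[29, 34], lon_bounds=[34, 36.5]):
    import geopandas as gpd
    from shapely.geometry import box
    poly = box(lon_bounds[0], lat_bounds[0], lon_bounds[1], lat_bounds[1])
    return gpd.GeoDataFrame(geometry=[poly.boundary])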
def plot_relative_wind_direction_frequency(station='tela', ims_path=ims_path,
clim=True):
import xarray as xr
import pandas as pd
wd_daily = xr.load_dataset(ims_path / 'GNSS_WD_daily.nc')[station]
    bins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_labels = ['N-NE', 'NE-E', 'E-SE',
'SE-S', 'S-SW', 'SW-W', 'W-NW', 'NW-N']
wd_daily = wd_daily.dropna('time')
cats = pd.cut(wd_daily.values, bins=bins, labels=bin_labels)
df = wd_daily.dropna('time').to_dataframe(name='WD')
df['month'] = df.index.month
df['year'] = df.index.year
df['months'] = df['year'].astype(str) + '-' + df['month'].astype(str)
cats = pd.Series(cats, index=df.index)
df['direction'] = cats
ndf = df.groupby([df['months'], df['direction']]).size().to_frame()
ndf = ndf.unstack()
ndf.columns = ndf.columns.droplevel()
ndf.index.name = 'time'
ndf.index = pd.to_datetime(ndf.index)
da = ndf.to_xarray()
return da
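# Stand-alone sketch of the direction-binning logic used above, run on synthetic daily
# wind directions (no GNSS/IMS files needed):
def _demo_wind_direction_binning():
    import numpy as np
    import pandas as pd
    rng = pd.date_range('2018-01-01', '2018-12-31', freq='D')
    wd = pd.Series(np.random.uniform(0, 360, rng.size), index=rng)
    bins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
    bin_labels = ['N-NE', 'NE-E', 'E-SE', 'SE-S', 'S-SW', 'SW-W', 'W-NW', 'NW-N']
    sector = pd.cut(wd, bins=bins, labels=bin_labels)
    # monthly counts of days per compass sector:
    return sector.groupby(wd.index.month).value_counts().unstack()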
def plot_multiparams_daily_pwv_single_time(station='tela', ims_path=ims_path,
climate_path=climate_path,
ts1='2013-09-15', days=47,
ts2='2015-09-15',
pwv_lim=[10, 45], dtr_lim=[6, 14.5],
wd_lim=[50, 320],
add_synoptics=['CL', 'RST', 'PT'],
save=True, fontsize=16):
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates
    import seaborn as sns
    import pandas as pd
import xarray as xr
import numpy as np
from calendar import month_abbr
from aux_gps import replace_time_series_with_its_group
from synoptic_procedures import read_synoptic_classification
sns.set_style('whitegrid')
sns.set_style('ticks')
dt1 = pd.date_range(ts1, periods=days)
# dt2 = pd.date_range(ts2, periods=days)
months = list(set(dt1.month))
year = list(set(dt1.year))[0] # just one year
dt1_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
# months = list(set(dt2.month))
# year = list(set(dt2.year))[0] # just one year
# dt2_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
pw_daily_all = xr.open_dataset(
work_yuval/'GNSS_PW_daily_thresh_50.nc')[station].load()
# pw_daily2 = pw_daily_all.sel(time=dt2)
pw_daily = pw_daily_all.sel(time=dt1)
dtr_daily_all = xr.load_dataset(ims_path /'GNSS_IMS_DTR_mm_israel_1996-2020.nc')[station]
dtr_daily = dtr_daily_all.sel(time=dt1)
# dtr_daily2 = dtr_daily_all.sel(time=dt2)
wd_daily_all = xr.load_dataset(ims_path /'GNSS_WD_daily.nc')[station]
wd_daily = wd_daily_all.sel(time=dt1)
# wd_daily2 = wd_daily_all.sel(time=dt2)
# wind directions:
# 0 north
# 45 northeast
# 90 east
# 135 southeast
# 180 south
# 225 southwest
# 270 west
# 315 northwest
# 360 north
fig, axes = plt.subplots(3, 1, figsize=(20, 10))
# twins = [ax.twiny() for ax in axes]
pwv_mm = replace_time_series_with_its_group(pw_daily, 'month')
# pwv_mm2 = replace_time_series_with_its_group(pw_daily2, 'month')
blue = 'k'
red = 'tab:red'
pwv1 = dt1_str + ' PWV'
# pwv2 = dt2_str + ' PWV'
pwv1_mm = pwv1 + ' monthly mean'
# pwv2_mm = pwv2 + ' monthly mean'
pw_daily.plot.line('-', color=blue, lw=2, ax=axes[0], label=pwv1)
# pw_daily2.plot.line('-', lw=2, color=red, ax=twins[0], label=pwv2)
pwv_mm.plot.line('--', lw=2, color=blue, ax=axes[0], label=pwv1_mm)
# pwv_mm2.plot.line('--', lw=2, color=red, ax=twins[0], label=pwv2_mm)
axes[0].set_ylabel('PWV [mm]', fontsize=fontsize)
hand, labl = axes[0].get_legend_handles_labels()
# hand2, labl2 = twins[0].get_legend_handles_labels()
# axes[0].legend(handles=hand+hand2, labels=labl+labl2)
axes[0].set_ylim(*pwv_lim)
wd_daily.plot.line('-', lw=2, color=blue, ax=axes[1])
# wd_daily2.plot.line('-', lw=2,color=red, ax=twins[1])
axes[1].set_ylabel(r'Wind Direction [$^{\circ}$]', fontsize=fontsize)
axes[1].set_ylabel('Wind Direction', fontsize=fontsize)
# axes[1].set_ylim(*wd_lim)
dtr_daily.plot.line('-', lw=2, color=blue, ax=axes[2])
# dtr_daily2.plot.line('-', lw=2, color=red, ax=twins[2])
axes[2].set_ylabel('Diurnal Temperature Range [K]', fontsize=fontsize)
axes[2].set_ylim(*dtr_lim)
[ax.xaxis.set_major_locator(mdates.DayLocator(interval=1)) for ax in axes]
# set formatter
[ax.xaxis.set_major_formatter(mdates.DateFormatter('%d')) for ax in axes]
[ax.grid(True) for ax in axes]
[ax.set_xlabel('') for ax in axes]
[ax.tick_params(labelsize=fontsize) for ax in axes]
xlim = [dt1[0]- pd.Timedelta(1, unit='d'), dt1[-1]+ pd.Timedelta(1, unit='d')]
[ax.set_xlim(*xlim) for ax in axes]
[ax.set_xticks(ax.get_xticks()[1:-1]) for ax in axes]
# for ax, twin in zip(axes, twins):
# ylims_low = min(min(ax.get_ylim()), min(twin.get_ylim()))
# ylims_high = max(max(ax.get_ylim()), max(twin.get_ylim()))
# ax.set_ylim(ylims_low, ylims_high)
wd_ticks = np.arange(45, 360, 45)
wind_labels = ['NE', 'E', 'SE', 'S', 'SW', 'W', 'NW']
lbl = []
for tick, label in zip(wd_ticks, wind_labels):
if len(label) == 1:
lbl.append(label + ' ' + str(tick))
elif len(label) == 2:
lbl.append(label + ' ' + str(tick))
# wind_label = [y + ' ' + str(x) for x,y in zip(wd_ticks, wind_labels)]
axes[1].set_yticks(wd_ticks)
axes[1].set_yticklabels(wind_labels, ha='left')
fig.canvas.draw()
yax = axes[1].get_yaxis()
# find the maximum width of the label on the major ticks
    pad = max(T.label1.get_window_extent().width for T in yax.majorTicks)
yax.set_tick_params(pad=pad-10)
if add_synoptics is not None:
df = read_synoptic_classification(climate_path, report=False)
ind = pw_daily.to_dataframe().index
df = df.loc[ind]
grp_dict = df.groupby('upper_class').groups
[grp_dict.pop(x) for x in grp_dict.copy().keys()
if x not in add_synoptics]
# add_ARSTs:
grp_dict['ARST'] = pd.DatetimeIndex(['2013-10-30', '2015-10-05',
'2015-10-19', '2015-10-20',
'2015-10-25', '2015-10-29'])
grp_dict['RST'] = grp_dict['RST'].difference(grp_dict['ARST'])
color_dict = {'CL': 'tab:green', 'ARST': 'tab:orange',
'RST': 'tab:orange', 'PT': 'tab:purple'}
alpha_dict = {'CL': 0.3, 'ARST': 0.6,
'RST': 0.3, 'PT': 0.3}
ylim0 = axes[0].get_ylim()
ylim1 = axes[1].get_ylim()
ylim2 = axes[2].get_ylim()
for key_class, key_ind in grp_dict.items():
color = color_dict[key_class]
alpha = alpha_dict[key_class]
# ecolor='k'
# edge_color = edge_dict[key_class]
# abbr = add_class_abbr(key_class)
# # abbr_count = month_counts.sel(syn_cls=key_class).sum().item()
# abbr_count = df[df['class'] == key_class].count().values[0]
# abbr_label = r'${{{}}}$: {}'.format(abbr, int(abbr_count))
# # for ind, row in df.iterrows():
# da_ts[da_ts['syn_class'] == key_class].plot.line(
# 'k-', lw=0, ax=ax, marker='o', markersize=20,
# markerfacecolor=color, markeredgewidth=2,
# markeredgecolor=edge_color, label=abbr_label)
axes[0].vlines(key_ind, ylim0[0], ylim0[1],
color=color, alpha=alpha, lw=20,
label=key_class)
axes[1].vlines(key_ind, ylim1[0], ylim1[1],
color=color, alpha=alpha, lw=20,
label=key_class)
axes[2].vlines(key_ind, ylim2[0], ylim2[1],
color=color, alpha=alpha, lw=20,
label=key_class)
handles, labels = axes[2].get_legend_handles_labels()
fig.legend(handles=handles, labels=labels, prop={'size': 16}, edgecolor='k',
framealpha=0.5, fancybox=False, facecolor='white',
ncol=4, fontsize=fontsize, loc='upper left', bbox_to_anchor=(0.05, 1.005),
bbox_transform=plt.gcf().transFigure)
# [twin.tick_params(axis='x',which='both', top=False, # ticks along the top edge are off
# labeltop=False) for twin in twins]
# [twin.set_xlabel('') for twin in twins]
# months = list(set(times_dt.month))
# year = list(set(times_dt.year))[0] # just one year
dt_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
# axes[2].set_xlabel(dt_str)
fig.suptitle('{} {}'.format(station.upper(),dt_str), fontsize=fontsize)
fig.tight_layout()
if save:
filename = '{}_multiparam_{}-{}.png'.format(station, '-'.join([str(x) for x in months]), year)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fig
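# Stand-alone sketch of the left-aligned y-tick-label trick used above: align labels at
# their left edge, then push them outwards by the widest label's pixel width
# (Tick.label1 is the current spelling of the tick-label attribute):
def _demo_left_aligned_yticklabels():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(range(10))
    ax.set_yticks([0, 3, 6, 9])
    ax.set_yticklabels(['NE', 'E', 'SE', 'S'], ha='left')
    fig.canvas.draw()
    yax = ax.get_yaxis()
    pad = max(t.label1.get_window_extent().width for t in yax.majorTicks)
    yax.set_tick_params(pad=pad)
    return fig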
def plot_synoptic_daily_on_pwv_daily_with_colors(climate_path=climate_path,
station='tela',ims_path=ims_path,
times=['2013-09-15',
'2015-09-15'],
days=47, add_era5=True,
add_dtr=True,
twin_ylims=None):
from synoptic_procedures import visualize_synoptic_class_on_time_series
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import matplotlib.dates as mdates
from calendar import month_abbr
# TODO: add option of plotting 3 stations and/without ERA5
times_dt = [pd.date_range(x, periods=days) for x in times]
if isinstance(station, list):
pw_daily = [xr.open_dataset(
work_yuval/'GNSS_PW_daily_thresh_50_homogenized.nc')[x].load() for x in station]
pw_daily = xr.merge(pw_daily)
add_mm = False
label = ', '.join([x.upper() for x in station])
ncol = 6
else:
pw_daily = xr.open_dataset(
work_yuval/'GNSS_PW_daily_thresh_50.nc')[station].load()
add_mm = True
label = station.upper()
ncol = 4
era5_hourly = xr.open_dataset(work_yuval/'GNSS_era5_hourly_PW.nc')[station]
era5_daily = era5_hourly.resample(time='D').mean().load()
dtr_daily = xr.load_dataset(work_yuval/'GNSS_ERA5_DTR_daily_1996-2020.nc')[station]
dtr_daily = xr.load_dataset(ims_path /'GNSS_IMS_DTR_mm_israel_1996-2020.nc')[station]
fig, axes = plt.subplots(len(times), 1, figsize=(20, 10))
leg_locs = ['upper right', 'lower right']
for i, ax in enumerate(axes.flat):
if add_era5:
second_da_ts = era5_daily.sel(time=times_dt[i])
elif add_dtr:
second_da_ts = dtr_daily.sel(time=times_dt[i])
else:
second_da_ts = None
visualize_synoptic_class_on_time_series(pw_daily.sel(time=times_dt[i]),
path=climate_path, ax=ax,
second_da_ts=second_da_ts,
leg_ncol=ncol,
leg_loc=leg_locs[i],
add_mm=add_mm,
twin=twin_ylims)
ax.set_ylabel('PWV [mm]')
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
# set formatter
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d'))
# set font and rotation for date tick labels
months = list(set(times_dt[i].month))
year = list(set(times_dt[i].year))[0] # just one year
dt_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
ax.set_title(dt_str, fontweight='bold', fontsize=14)
ax.set_xlabel('')
# set ylims :
ylims_low = [ax.get_ylim()[0] for ax in axes]
ylims_high = [ax.get_ylim()[1] for ax in axes]
[ax.set_ylim(min(ylims_low), max(ylims_high)) for ax in axes]
# set ylims in right_axes:
# ylims_low = [ax.right_ax.get_ylim()[0] for ax in axes]
# ylims_high = [ax.right_ax.get_ylim()[1] for ax in axes]
# [ax.right_ax.set_ylim(min(ylims_low), max(ylims_high)) for ax in axes]
# axes[0].right_ax.set_ylim(0,100)
if add_era5:
fig.suptitle(
'Daily PWV and synoptic class for {} station using GNSS(solid - monthly means in dot-dashed) and ERA5(dashed)'.format(label))
elif add_dtr:
fig.suptitle(
'Daily PWV and synoptic class for {} station using GNSS(solid - monthly means in dot-dashed) and DTR(dashed)'.format(label))
else:
fig.suptitle(
'Daily PWV and synoptic class for {} station using GNSS(solid)'.format(label))
fig.tight_layout()
return axes
def create_enhanced_qualitative_color_map(plot=True, alevels=[1, 0.75, 0.5, 0.25]):
import matplotlib.colors as cm
import seaborn as sns
colors = sns.color_palette('colorblind')
colors_with_alpha = [cm.to_rgba(colors[x]) for x in range(len(colors))]
new = []
for color in colors_with_alpha:
r = color[0]
g = color[1]
b = color[2]
for alev in alevels:
alpha = alev
new.append(tuple([r, g, b, alpha]))
if plot:
sns.palplot(new)
return new
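# Usage sketch for the palette helper above: each base color is repeated once per alpha
# level, so the result length is n_base_colors * len(alevels):
def _demo_enhanced_palette():
    import seaborn as sns
    colors = create_enhanced_qualitative_color_map(plot=False)
    n_base = len(sns.color_palette('colorblind'))
    assert len(colors) == 4 * n_base and len(colors[0]) == 4
    return colors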
def plot_IMS_wind_speed_direction_violins(ims_path=ims_path,
station='tela', save=True,
fontsize=16):
from ims_procedures import gnss_ims_dict
import seaborn as sns
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
sns.set_style('ticks')
pal = sns.color_palette(n_colors=4)
green = pal[2]
red = pal[3]
ims_station = gnss_ims_dict.get(station)
WS = xr.open_dataset(ims_path / 'IMS_WS_israeli_10mins.nc')[ims_station]
WD = xr.open_dataset(ims_path / 'IMS_WD_israeli_10mins.nc')[ims_station]
ws_mm = WS.resample(time='MS').mean().sel(time=slice('2014', '2019'))
wd_mm = WD.resample(time='MS').mean().sel(time=slice('2014', '2019'))
df = ws_mm.to_dataframe(name='Wind Speed')
df['Wind Direction'] = wd_mm.to_dataframe(name='Wind Direction')
df['month'] = df.index.month
fig, axes = plt.subplots(1, 2, figsize=(20, 7))
axes[0] = sns.violinplot(data=df, x='month', y='Wind Speed',
fliersize=10, gridsize=250, ax=axes[0],
inner=None, scale='width', color=green,
hue=None, split=False, zorder=20)
axes[1] = sns.violinplot(data=df, x='month', y='Wind Direction',
fliersize=10, gridsize=250, ax=axes[1],
inner=None, scale='width', color=red,
hue=None, split=False, zorder=20)
[x.set_alpha(0.5) for x in axes[0].collections]
[x.set_alpha(0.5) for x in axes[1].collections]
axes[0] = sns.pointplot(x='month', y='Wind Speed', data=df,
estimator=np.mean,
dodge=False, ax=axes[0], hue=None, color=green,
linestyles="None", markers=['s'], scale=0.7,
ci=None, alpha=0.5, zorder=0, style=None)
axes[1] = sns.pointplot(x='month', y='Wind Direction', data=df,
estimator=np.mean,
dodge=False, ax=axes[1], hue=None, color=red,
linestyles="None", markers=['o'], scale=0.7,
ci=None, alpha=0.5, zorder=0, style=None)
[ax.grid(True) for ax in axes]
wind_labels = ['SE', 'S', 'SW', 'W', 'NW']
wd_ticks = np.arange(135, 360, 45)
axes[1].set_yticks(wd_ticks)
axes[1].set_yticklabels(wind_labels, ha='left')
fig.canvas.draw()
yax = axes[1].get_yaxis()
# find the maximum width of the label on the major ticks
    pad = max(T.label1.get_window_extent().width for T in yax.majorTicks)
yax.set_tick_params(pad=pad-10)
axes[0].set_ylabel(r'Wind Speed [m$\cdot$sec$^{-1}$]')
fig.tight_layout()
    return fig
def plot_ERA5_wind_speed_direction_profiles_at_bet_dagan(ear5_path=era5_path,
save=True, fontsize=16):
import seaborn as sns
import xarray as xr
from aux_gps import convert_wind_direction
import numpy as np
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
sns.set_style('ticks')
pal = sns.color_palette(n_colors=4)
bd_lat = 32.01
bd_lon = 34.81
v = xr.open_dataset(era5_path/'ERA5_V_mm_EM_area_1979-2020.nc')
u = xr.open_dataset(era5_path/'ERA5_U_mm_EM_area_1979-2020.nc')
u = u.sel(expver=1)
v = v.sel(expver=1)
u1 = u.sel(latitude=bd_lat, longitude=bd_lon, method='nearest')
v1 = v.sel(latitude=bd_lat, longitude=bd_lon, method='nearest')
    u1 = u1.load().dropna('time')
    v1 = v1.load().dropna('time')
ws1, wd1 = convert_wind_direction(u=u1['u'], v=v1['v'])
ws1 = ws1.reset_coords(drop=True)
wd1 = wd1.reset_coords(drop=True)
levels = [1000, 900, 800, 700]
df_ws = ws1.sel(level=levels).to_dataframe('ws')
df_ws['level'] = df_ws.index.get_level_values(1)
df_ws['month'] = df_ws.index.get_level_values(0).month
df_wd = wd1.sel(level=levels).to_dataframe('wd')
df_wd['level'] = df_wd.index.get_level_values(1)
df_wd['month'] = df_wd.index.get_level_values(0).month
fig, axes = plt.subplots(2, 1, figsize=(8, 15))
axes[0] = sns.lineplot(data=df_ws, x='month', y='ws',
hue='level', markers=True,
style='level', markersize=10,
ax=axes[0], palette=pal)
axes[1] = sns.lineplot(data=df_wd, x='month', y='wd',
hue='level', markers=True,
style='level', markersize=10,
ax=axes[1], palette=pal)
axes[0].legend(title='pressure level [hPa]', prop={'size': fontsize-2}, loc='upper center')
axes[1].legend(title='pressure level [hPa]', prop={'size': fontsize-2}, loc='lower center')
[ax.grid(True) for ax in axes]
wind_labels = ['SE', 'S', 'SW', 'W', 'NW']
wd_ticks = np.arange(135, 360, 45)
axes[1].set_yticks(wd_ticks)
axes[1].set_yticklabels(wind_labels, ha='left')
fig.canvas.draw()
yax = axes[1].get_yaxis()
# find the maximum width of the label on the major ticks
    pad = max(T.label1.get_window_extent().width for T in yax.majorTicks)
yax.set_tick_params(pad=pad)
axes[0].set_ylabel(r'Wind Speed [m$\cdot$sec$^{-1}$]', fontsize=fontsize)
axes[1].set_ylabel('Wind Direction', fontsize=fontsize)
axes[1].set_xlabel('month', fontsize=fontsize)
mticks = np.arange(1, 13)
[ax.set_xticks(mticks) for ax in axes]
[ax.tick_params(labelsize=fontsize) for ax in axes]
fig.tight_layout()
fig.subplots_adjust(hspace=0.051)
if save:
filename = 'ERA5_wind_speed_dir_bet-dagan_profiles.png'
        plt.savefig(savefig_path / filename, orientation='portrait')
return fig
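# convert_wind_direction is imported from aux_gps above; a minimal stand-in using the
# usual meteorological convention (direction the wind blows FROM, degrees clockwise
# from north) could look like this -- an assumption about that helper's convention,
# not its actual implementation:
def _uv_to_speed_and_direction(u, v):
    import numpy as np
    ws = np.sqrt(u**2 + v**2)
    wd = ((180.0 / np.pi) * np.arctan2(u, v) + 180.0) % 360.0
    return ws, wd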
def plot_PWV_anomalies_groups_maps_with_mean(work_path=work_yuval, station='drag',
fontsize=16, save=True):
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from scipy.ndimage.filters import gaussian_filter
from PW_stations import produce_geo_gnss_solved_stations
sns.set_theme(style='ticks', font_scale=1.5)
cmap = 'jet' # sns.color_palette('terrain', as_cmap=True)
df = produce_geo_gnss_solved_stations(plot=False)
file = work_path/'GNSS_PW_thresh_0_hour_dayofyear_rest.nc'
pw = xr.open_dataset(file)
if isinstance(station, str):
st_mean = pw[station].mean('rest').expand_dims('station')
st_mean['station'] = [station.upper()]
data = gaussian_filter(st_mean, 5)
st_mean = st_mean.copy(data=data)
elif isinstance(station, list):
pws = [pw[x].mean('rest') for x in pw if x in station]
pws = [x.copy(data=gaussian_filter(x, 5)) for x in pws]
st_mean = xr.merge(pws)
st_mean = st_mean[station].to_array('station')
st_mean['station'] = [x.upper() for x in st_mean['station'].values]
alts = df.loc[station,'alt'].values
# drag = pw['drag'].mean('rest')
# elat = pw['elat'].mean('rest')
# dsea = pw['dsea'].mean('rest')
# da = xr.concat([drag, dsea, elat], 'station')
# da['station'] = ['DRAG', 'DSEA', 'ELAT']
n = st_mean['station'].size
st_mean = st_mean.transpose('dayofyear', 'hour', 'station')
norm = mcolors.Normalize(vmin=st_mean.min().item(), vmax=st_mean.max().item(),
clip=True)
fig = plt.figure(constrained_layout=False, figsize=(7, 13))
ratio = 1.0 / len(station)
bots = [1.0 - ratio*(x+1) for x in range(len(station))]
tops = [1 - x - 0.05 for x in reversed(bots)]
bots[-1] = 0.05
# custom tops and bots for 3 figures:
tops = [0.95, 0.6333333333339999, 0.3166666666673999]
bots = [0.6833333333339999, 0.3666666666673999, 0.05]
for i, st in enumerate(station):
gs = fig.add_gridspec(nrows=2, ncols=1, hspace=0, height_ratios=[3,1],
bottom=bots[i], top=tops[i], right=0.7)
ax_heat = fig.add_subplot(gs[0])
ax_bottom = fig.add_subplot(gs[1])
cf = st_mean.sel(station=st.upper()).plot.contourf(levels=41,
add_colorbar=False,
cmap=cmap, ax=ax_heat,
norm=norm)
st_mean.sel(station=st.upper()).mean('dayofyear').plot(ax=ax_bottom,
color='k', linewidth=2)
bottom_limit = ax_heat.get_xlim()
ax_bottom.set_xlim(bottom_limit)
ax_bottom.set_title('')
ax_bottom.yaxis.set_major_locator(tck.MaxNLocator(3))
ax_bottom.set_xlabel('')
ax_bottom.grid(True)
ax_heat.set_xlabel('')
ax_heat.tick_params(labelbottom=False)
ax_bottom.tick_params(top='on', labelsize=fontsize)
ax_bottom.set_ylabel('PWV [mm]', fontsize=fontsize)
ax_heat.set_yticks(np.arange(50, 400, 50))
title = ax_heat.get_title()
title = title + ' ({:.0f} m a.s.l)'.format(alts[i])
ax_heat.set_title(title)
ax_bottom.set_xlabel('Hour of Day [UTC]')
cbar_ax = fig.add_axes([0.80, 0.049, 0.05, 0.900])
cb = fig.colorbar(cf, cax=cbar_ax)
cb.set_ticks(np.arange(7, 31+2, 2))
# cb.ax.set_yticklabels(['{:.0f}'.format(x) for x in np.arange(9, 31+1, 1)], fontsize=16, weight='bold')
# cb.ax.tick_params()# labelsize=fontsize-2)
cb.set_label('PWV [mm]')#, size=fontsize-2)
if save:
filename = 'PWV_climatology_{}_stacked_groups_with_mean.png'.format('_'.join(station))
        plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_PWV_anomalies_groups_maps(work_path=work_yuval, station='drag',
fontsize=16, save=True):
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
from PW_stations import produce_geo_gnss_solved_stations
sns.set_style('whitegrid')
sns.set_style('ticks')
cmap = 'jet' # sns.color_palette('terrain', as_cmap=True)
df = produce_geo_gnss_solved_stations(plot=False)
file = work_path/'GNSS_PW_thresh_0_hour_dayofyear_rest.nc'
pw = xr.open_dataset(file)
if isinstance(station, str):
st_mean = pw[station].mean('rest').expand_dims('station')
st_mean['station'] = [station.upper()]
data = gaussian_filter(st_mean, 5)
st_mean = st_mean.copy(data=data)
elif isinstance(station, list):
pws = [pw[x].mean('rest') for x in pw if x in station]
pws = [x.copy(data=gaussian_filter(x, 5)) for x in pws]
st_mean = xr.merge(pws)
st_mean = st_mean[station].to_array('station')
st_mean['station'] = [x.upper() for x in st_mean['station'].values]
alts = df.loc[station,'alt'].values
# drag = pw['drag'].mean('rest')
# elat = pw['elat'].mean('rest')
# dsea = pw['dsea'].mean('rest')
# da = xr.concat([drag, dsea, elat], 'station')
# da['station'] = ['DRAG', 'DSEA', 'ELAT']
n = st_mean['station'].size
st_mean = st_mean.transpose('dayofyear', 'hour', 'station')
fg = st_mean.plot.contourf(levels=41, row='station', add_colorbar=False,
figsize=(7, 13), cmap=cmap)
for i, ax in enumerate(fg.fig.axes):
ax.set_yticks(np.arange(50, 400, 50))
ax.tick_params(labelsize=fontsize)
ax.set_ylabel('Day of Year', fontsize=fontsize)
title = ax.get_title()
title = title + ' ({:.0f} m a.s.l)'.format(alts[i])
ax.set_title(title, fontsize=fontsize)
fg.fig.axes[-1].set_xlabel('Hour of day [UTC]', fontsize=fontsize)
cbar_ax = fg.fig.add_axes([0.85, 0.074, 0.025, 0.905])
fg.add_colorbar(cax=cbar_ax)
cb = fg.cbar
cb.ax.tick_params(labelsize=fontsize-2)
cb.set_label('PWV [mm]', size=fontsize-2)
fg.fig.subplots_adjust(top=0.967,
bottom=0.075,
left=0.13,
right=0.83,
hspace=0.135,
wspace=0.195)
if save:
filename = 'PWV_climatology_{}_stacked_groups.png'.format('_'.join(station))
        plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_hydro_pwv_before_event_motivation(work_path=work_yuval,
hydro_path=hydro_path,
days_prior=3, fontsize=16,
save=True, smoothed=False):
import xarray as xr
from hydro_procedures import hydro_pw_dict
from hydro_procedures import produce_pwv_days_before_tide_events
from hydro_procedures import read_station_from_tide_database
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
def smooth_df(df):
import numpy as np
dfs = df.copy()
dfs.index = pd.to_timedelta(dfs.index, unit='d')
dfs = dfs.resample('15S').interpolate(method='cubic')
dfs = dfs.resample('5T').mean()
dfs = dfs.reset_index(drop=True)
dfs.index = np.linspace(df.index[0], df.index[-1], dfs.index.size)
return dfs
sns.set_style('whitegrid')
sns.set_style('ticks')
pw = xr.open_dataset(work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pws = [pw[x].load() for x in hydro_pw_dict.keys()]
dfs = [read_station_from_tide_database(hydro_pw_dict.get(x), hydro_path=hydro_path) for x in hydro_pw_dict.keys()]
df_list = []
for pw_da, df_da in zip(pws, dfs):
df, _, _ = produce_pwv_days_before_tide_events(pw_da, df_da,
plot=False,
days_prior=days_prior,
drop_thresh=0.5,
max_gap='12H')
df_list.append(df)
n_events = [len(x.columns) for x in df_list]
if smoothed:
df_list = [smooth_df(x) for x in df_list]
df_mean = pd.concat([x.T.mean().to_frame(x.columns[0].split('_')[0]) for x in df_list], axis=1)
fig, ax = plt.subplots(figsize=(8, 10))
labels = ['{}: mean from {} events'.format(x.upper(), y) for x,y in zip(df_mean.columns, n_events)]
for i, station in enumerate(df_mean.columns):
sns.lineplot(data=df_mean, y=station, x=df.index, ax=ax, label=labels[i], lw=4)
ax.grid(True)
ax.axvline(0, color='k', linestyle='--')
ax.set_xlabel('Days before/after tide event', fontsize=fontsize)
ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.legend(prop={'size': fontsize-2})
fig.tight_layout()
if save:
filename = 'PWV_anoms_dsea_drag_elat_{}_prior_tides.png'.format(days_prior)
        plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_typical_tide_event_with_PWV(work_path=work_yuval,
hydro_path=hydro_path,
station='yrcm',
days_prior=3, days_after=1, fontsize=16,
date='2018-04-27',
save=True, smoothed=True):
# possible dates: 2014-11-16T13:50, 2018-04-26T18:55
# best to show 2018-04-24-27,
# TODO: x-axis time hours, ylabels in color
# TODO: change flow to bars instead of lineplot
    import xarray as xr
    import pandas as pd
    import matplotlib.pyplot as plt
    import seaborn as sns
from hydro_procedures import hydro_pw_dict
from matplotlib.ticker import FormatStrFormatter
import numpy as np
def smooth_df(df):
dfs = df.copy()
# dfs.index = pd.to_timedelta(dfs.index, unit='d')
dfs = dfs.resample('15S').interpolate(method='cubic')
dfs = dfs.resample('5T').mean()
# dfs = dfs.reset_index(drop=True)
# dfs.index = np.linspace(df.index[0], df.index[-1], dfs.index.size)
return dfs
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
colors = sns.color_palette('tab10', n_colors=2)
# sns.set_style('whitegrid')
sns.set_theme(style='ticks', font_scale=1.8)
# load hydro graphs:
hgs = xr.open_dataset(hydro_path/'hydro_graphs.nc')
# select times:
dt_start = pd.to_datetime(date) - pd.Timedelta(days_prior, unit='d')
dt_end = pd.to_datetime(date) + pd.Timedelta(days_after, unit='d')
hs_id = hydro_pw_dict.get(station)
hg_da = hgs['HS_{}_flow'.format(hs_id)].sel(time=slice(dt_start, dt_end)).dropna('time')
# hg_da = hg_da.resample(time='15T').mean().interpolate_na('time', method='spline', max_gap='12H')
hg_da = hg_da.resample(time='15T').mean()
# load pwv:
pw = xr.open_dataset(work_path / 'GNSS_PW_thresh_0_for_hydro_analysis.nc')[station]
pw = pw.sel(time=slice(dt_start, dt_end))
df = pw.to_dataframe(name='pwv')
df['flow'] = hg_da.to_dataframe()
df['flow'] = df['flow'].fillna(0)
if smoothed:
df = smooth_df(df)
fig, ax = plt.subplots(figsize=(15, 4))
flow_label = r'Flow [m$^3\cdot$sec$^{-1}$]'
# df['time'] = df.index
# sns.lineplot(data=df, y='flow', x=df.index, ax=ax, label=48125, lw=2, color=colors[0])
# twin = ax.twinx()
# sns.lineplot(data=df, y='pwv', x=df.index, ax=twin, label='DRAG', lw=2, color=colors[1])
df.index.name=''
sns.lineplot(data = df, y='pwv', x=df.index, lw=2,color=colors[1], marker=None, sort = False, ax=ax)
twin = ax.twinx()
twin.bar(x=df.index,height=df['flow'].values, width=0.05, linewidth=0, color=colors[0], alpha=0.5)
# ax = df['flow'].plot(color=colors[0], ax=ax, lw=2)
# df['pwv'].plot(color=colors[1], ax=twin, lw=2)
twin.set_ylim(0, 100)
twin.set_ylabel(flow_label, color=colors[0])
ax.set_ylabel('PWV [mm]', color=colors[1])
twin.tick_params(axis='y', labelcolor=colors[0])
# ax.tick_params(axis='x', labelsize=fontsize, bottom=True, which='both')
ax.tick_params(axis='y', labelcolor=colors[1])
ax.yaxis.set_ticks(np.arange(10, 35, 5))
# twin.yaxis.set_major_locator(ticker.FixedLocator(locs=np.arange(0,35,5)))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
# align_yaxis_np(ax, twin)
# alignYaxes([ax, twin], [0, 10])
# lim = ax.get_ylim()
# l2 = twin.get_ylim()
# fun = lambda x: l2[0]+(x-lim[0])/(lim[1]-lim[0])*(l2[1]-l2[0])
# ticks = fun(ax.get_yticks())
sns.set(rc={"xtick.bottom" : True, "ytick.left" : True})
xticks=df.resample('12H').mean().index
ax.xaxis.set_ticks(xticks)
strDates = [x.strftime('%d-%H') for x in xticks]
ax.set_xticklabels(strDates)
xticks=df.resample('4H').mean().index
ax.xaxis.set_ticks(xticks, minor=True)
# locator = mdates.AutoDateLocator(minticks = 15,
# maxticks = 20)
# # formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
# loc = mdates.AutoDateLocator()
# ax.xaxis.set_major_locator(loc)
# ax.xaxis.set_major_formatter(mdates.ConciseDateFormatter(loc))
# ax.xaxis.set_minor_locator(mdates.HourLocator(interval=3))
# ax.xaxis.set_major_locator(mdates.DayLocator())
# minorLocator = ticker.AutoMinorLocator()
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%H'))
# ax.xaxis.set_major_locator(mdates.DayLocator())
# ax.xaxis.set_minor_locator(minorLocator)
# ax.xaxis.set_minor_locator(mdates.HourLocator(interval=3))
ax.grid(True, which='major', axis='y',color='k', ls='--')
# ax.set_xticklabels([x.strftime("%d-%H") for x in df.index], rotation=45)
# ax.grid(True, which='minor', axis='x')
ax.grid(True, which='major', axis='x',color='k', ls='--')
# twin.yaxis.set_major_locator(ticker.FixedLocator(ticks))
# twin.grid(True, axis='y',color='k', ls='--')
# twin.xaxis.set_major_locator(mdates.DayLocator())
# twin.xaxis.set_minor_locator(mdates.HourLocator())
# Fmt = mdates.AutoDateFormatter(mdates.DayLocator())
# twin.xaxis.set_major_formatter(Fmt)
# ax.set_ylim(0, 20)
fig.autofmt_xdate()
fig.tight_layout()
if save:
filename = 'typical_tide_event_with_pwv'
        plt.savefig(savefig_path / filename, orientation='portrait')
return df
def plot_hydro_pressure_anomalies(hydro_path=hydro_path, std=False,
fontsize=16, save=True):
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
sns.set_style('ticks')
if std:
feats = xr.load_dataset(hydro_path/'hydro_tides_hourly_features_with_positives_std.nc')
else:
feats = xr.load_dataset(hydro_path/'hydro_tides_hourly_features_with_positives.nc')
    dts = pd.DatetimeIndex(feats['X_pos']['positive_sample'].values)  # api: pandas.DatetimeIndex
import numpy as np
import pandas as pd
import math
from elopackage.elo import Elo
from elopackage.player import Player
class ResultsTable:
def __init__(self, df):
"""
df - pd DataFrame of tournamenent results
"""
# self.df = df.sort_values(by='match_date_dt', ascending=True)
self.df = df.copy(deep=True)
self.elo = Elo('test')
self.player_dict = {2000000: Player('dummy', 2000000)}
self.dummy_tsid = 2000001
self.cold_start_threshold = 0
def add_players_to_dict(self, row, df_results, kfactor=None, sd=None):
"""
Adds all players from a row in the results table to player_dict, unless they already exist in player_dict
Also, assigns temp tsid to players with missing tsid. This is a unique value starting at 2,000,001
:param row: pd.Series - row of results df
:param df_results: pd DataFrame - results df
:param kfactor: float - kfactor to assign to player object
:param sd: float - sd to assign to player object
:return: None
"""
row = row._asdict()
if row['Doubles']:
col_headings = [t + p + "_tsid" for t in ['winning_team_', 'losing_team_'] for p in ['p1', 'p2']]
else:
col_headings = [t + p + "_tsid" for t in ['winning_team_', 'losing_team_'] for p in ['p1']]
tsids = [row[c] for c in col_headings]
names = [row[c.split('_tsid')[0]] for c in col_headings]
for ch, t, n in zip(col_headings, tsids, names):
if np.isnan(t):
self.player_dict[self.dummy_tsid] = Player(n, self.dummy_tsid, kfactor=kfactor, sd=sd)
df_results.at[row['Index'], ch] = self.dummy_tsid
self.dummy_tsid += 1
else:
if t not in self.player_dict:
self.player_dict[t] = Player(n, t, kfactor=kfactor, sd=sd)
def get_unique_players_in_category(self, category):
"""
category - list (str) - tournament category e.g MS - <NAME>
"""
self.df = self.df[self.df['event_title'].isin(category)]
df_unique = pd.DataFrame()
for v in ['losing_team_p1', 'winning_team_p1']:
# Get unique players available from 2019 season
df_tmp = self.df[[f'{v}_tsid', v]].copy(deep=True)
df_tmp.drop_duplicates(subset=[f'{v}_tsid'], inplace=True)
df_tmp.columns = ['tsid', 'name']
df_unique = pd.concat([df_unique, df_tmp])
df_unique.drop_duplicates(inplace=True)
df_unique.reset_index(drop=True, inplace=True)
# Initiate all players with basic 1500 rating
df_unique['rating'] = 1500
self.df_unique = df_unique
def get_all_unique_players(self):
"""
Find all unique players in results tables
"""
df_unique = pd.DataFrame()
for v in ['losing_team_p1', 'winning_team_p1']:
# Get unique players available from 2019 season
df_tmp = self.df[[f'{v}_tsid', v]].copy(deep=True)
df_tmp.drop_duplicates(subset=[f'{v}_tsid'], inplace=True)
df_tmp.columns = ['tsid', 'name']
df_unique = pd.concat([df_unique, df_tmp])
# Add on doubles players
for v in ['losing_team_p2', 'winning_team_p2']:
# Get unique players available from 2019 season
df_tmp = self.df[self.df['Doubles'] == True][[f'{v}_tsid', v]].copy(deep=True)
df_tmp.drop_duplicates(subset=[f'{v}_tsid'], inplace=True)
df_tmp.columns = ['tsid', 'name']
            df_unique = pd.concat([df_unique, df_tmp])  # api: pandas.concat
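# Stand-alone sketch of the unique-player extraction pattern used by
# ResultsTable.get_all_unique_players above, on a tiny hand-made singles results frame
# (illustration only; the column names follow the conventions of the class above):
def _demo_unique_players():
    df = pd.DataFrame({
        'winning_team_p1_tsid': [1, 2, 1],
        'winning_team_p1': ['Alice', 'Bob', 'Alice'],
        'losing_team_p1_tsid': [3, 1, 2],
        'losing_team_p1': ['Cara', 'Alice', 'Bob'],
    })
    df_unique = pd.DataFrame()
    for v in ['losing_team_p1', 'winning_team_p1']:
        df_tmp = df[[f'{v}_tsid', v]].drop_duplicates(subset=[f'{v}_tsid'])
        df_tmp.columns = ['tsid', 'name']
        df_unique = pd.concat([df_unique, df_tmp])
    df_unique = df_unique.drop_duplicates().reset_index(drop=True)
    df_unique['rating'] = 1500
    return df_unique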
# coding: utf-8
# http://stackoverflow.com/questions/27889873/clustering-text-documents-using-scikit-learn-kmeans-in-python
# http://brandonrose.org/clustering
import os
import re
import sys
import frontmatter
import matplotlib.pyplot as plt
import pandas as pd
import pytoml as toml
import mpld3
import numpy as np
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.grid_search import GridSearchCV
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import Pipeline
reload(sys) # to re-enable sys.setdefaultencoding()
sys.setdefaultencoding('utf-8')
stop = stopwords.words('spanish')
stopE = stopwords.words('english')
stop = stop + stopE + ['com', 'más', 'si', 'está', 'puede', 'ejemplo', 'usar',
'aplicación', 'siguiente', 'cada', 'ser', 'vez',
'hacer', 'podemos' 'cómo', 'forma', 'así', 'asi', 'dos',
'tipo', 'nombre', 'ahora', 'también', 'solo', 'ver',
'qué', 'pueden', 'hace', 'tener', 'número', 'valor',
'artículo', 'parte', '»»', 'c', 'vamos', 'uso', 'debe',
'página', 'todas', 'decir', 'están', 'puedes', 'dentro',
'ello', 'blog', 'realizar', 'lugar', 'además', 'aquí',
'etc', 'aunque', 'nuevo', 'último', 'será', 'tema',
'bien', 'sólo', 'solo', 'hecho', 'cosas', 'poder',
'simplemente', 'simple', 'artículos', 'va', 'debemos',
'debería', 'hoy', 'algún', '–', 'sido', 'sí', 'éste',
'varios', 'aún', 'x', 'tan', 'podría', 'seguir', 'día',
'tres', 'cuatro', 'cinco', 'voy', 'ir', 'tal',
'mientras', 'saber', 'existe', 'sería', 'pasar',
'pueda', '¿qué', 'dejo', 'él', '»', 'ir', 'trabajar',
'Éste', 'n', 'mas', 'serán', 'ejempl', 'algun',
'aplicacion', 'aplic', 'bas', 'cas', 'cre', 'llam',
'numer', 'pod', 'referent', 'pas', 'tambi', u'ultim',
u'unic', u'usa', u'usand', u'usuari', u'utiliz',
u'variabl', u'version', u'visit', u'vist', u'web',
u'\xbb\xbb', 'import', 'podr', 'util', 'gran', 'siti',
'sol', 'solucion', 'aquell', 'pued', 'inform', 'deb',
'archiv', 'sistem', 'mism', 'permit', 'articul', 'ea',
'f', 'fc', 'non', 'bd', 'nuev', 'pdf', 'gui', 'notici',
'debi', 'mejor', 'misc', 'use', 'websit']
stop = set(stop)
def readPosts(path, english=False):
"""Read posts in path and return a pandas Data frame"""
df = pd.DataFrame()
# titleRegEx = r'title ?[:=] ?"?([^"\n]*)'
for file in os.listdir(path):
p = os.path.join(path, file)
if not os.path.isdir(p):
with open(p, 'r') as infile:
txt = infile.read()
if txt.startswith('+++'):
txt = re.search(r'^\s*\+{3}(.*)\+{3}\s*$', txt,
flags=re.DOTALL | re.MULTILINE
| re.UNICODE).group(1)
metadata = toml.loads(txt)
else:
metadata, c = frontmatter.parse(txt)
title = metadata.get('title')
# toRemove = ('author', 'image', 'lastmod', 'date',
# 'url', 'category', 'mainclass', 'color')
# for tag in toRemove:
# if tag in metadata:
# metadata.pop(tag)
tags = ('title', 'tags', 'introduction', 'description')
text = ''
for tag in tags:
if tag in metadata:
text += ' ' + str(metadata[tag])
data = [[os.path.basename(infile.name), text, title]]
isEnglish = re.search('\.en\.md|\.en\.markdown', infile.name)
if english and isEnglish:
df = df.append(data, ignore_index=True)
elif not english and not isEnglish:
df = df.append(data, ignore_index=True)
# Save for latter use
# df.to_csv('./post_data.csv', index=False)
return df
def preprocessor(text):
# TODO: Remove punctuation
# Remove frontmatter
text = re.sub(r"u'", '', text)
text = re.sub(r'^\s*---.*---\s*$', '', text,
flags=re.DOTALL | re.MULTILINE | re.UNICODE)
text = re.sub(r'^\s*\+{3}.*\+{3}\s*$', '', text,
flags=re.DOTALL | re.MULTILINE | re.UNICODE)
text = re.sub(r'^\s*```.*?```\s*$', '', text,
flags=re.DOTALL | re.MULTILINE)
text = re.sub(r'`[^`]*`', '', text)
text = re.sub(r'<[^>]*>', '', text, flags=re.UNICODE |
re.DOTALL | re.MULTILINE)
text = text.replace('<!--more--><!--ad-->', '')
text = re.sub(r'https?:\/\/.*[\r\n]*', '',
text, flags=re.MULTILINE | re.UNICODE)
text = re.sub(r'[#|*|\[\]:.,]', '', text, flags=re.UNICODE)
text = re.sub(r'[!"#$%&\'()*+,-./:;<=>?@\[\\\]^_`{|}~]', '', text)
text = re.sub(r'\d*', '', text)
text = text.lower()
text = re.sub(r'[\W]+', ' ', text.lower(), flags=re.UNICODE)
return text
def tokenizer_porter(text):
porter = PorterStemmer()
return [porter.stem(word) for word in text.split() if word not in stop]
# Switch to this stemmer, which has Spanish support
def tokenizer_snowball(text):
stemmer = SnowballStemmer("spanish")
return [stemmer.stem(word) for word in text.split() if word not in stop]
def stop_removal(text, stops_w):
return [w for w in text.split() if w not in stops_w]
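# Quick sanity-check sketch for the Spanish preprocessing/stemming helpers above (uses
# only NLTK, which is already imported at module level; the sample sentence is made up):
def _demo_spanish_tokenization():
    texto = 'Vamos a crear una aplicación web usando este ejemplo sencillo'
    return tokenizer_snowball(preprocessor(texto))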
def generateTfIdfVectorizer(data, stop='english', max_df=0.08, min_df=8):
tokenizer = tokenizer_snowball if stop != 'english' else tokenizer_porter
tfidf = TfidfVectorizer(strip_accents=None,
max_df=max_df,
min_df=min_df,
lowercase=True,
stop_words=stop,
sublinear_tf=True,
tokenizer=tokenizer,
analyzer='word',
max_features=16,
preprocessor=preprocessor)
X = tfidf.fit_transform(data)
print('%d Features: %s' %
(len(tfidf.get_feature_names()), tfidf.get_feature_names()))
return X
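# Hedged usage sketch: fit the TF-IDF vectorizer above on a toy Spanish corpus
# (max_df/min_df are loosened so the tiny corpus is not filtered away; assumes the
# legacy scikit-learn version this script already targets):
def _demo_tfidf():
    docs = ['crear una aplicación web con python',
            'ejemplo de aplicación web sencilla',
            'notas sobre python y datos']
    return generateTfIdfVectorizer(docs, stop=stop, max_df=1.0, min_df=1)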
def KmeansWrapper(true_k, data, load=False):
from sklearn.externals import joblib
modelName = 'doc_cluster.%s.plk' % true_k
if load:
km = joblib.load(modelName)
labels = km.labels_
else:
km = KMeans(n_clusters=true_k,
init='k-means++',
# max_iter=1000,
n_init=10,
n_jobs=-1,
random_state=0,
verbose=0)
km.fit_predict(data)
labels = km.labels_
joblib.dump(km, modelName)
return labels, km.cluster_centers_
def elbowMethod(X, k=21):
distortions = []
for i in range(1, k):
km2 = KMeans(n_clusters=i,
init='k-means++',
n_init=10,
random_state=0,
n_jobs=-1,
verbose=0)
km2.fit(X)
distortions.append(km2.inertia_)
print('k=%s, Distortion: %.2f' % (i, km2.inertia_))
plt.plot(range(1, k), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.show()
def plotPCA(df, true_k, clusters, X, english=False):
# Plot in 2d with PCA
dist = 1 - cosine_similarity(X)
MDS()
# convert two components as we're plotting points in a two-dimensional plane
# "precomputed" because we provide a distance matrix
# we will also specify `random_state` so the plot is reproducible.
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(dist) # shape (n_components, n_samples)
xs, ys = pos[:, 0], pos[:, 1]
import matplotlib.cm as cm
# set up colors per clusters using a dict
cluster_colors = cm.rainbow(np.linspace(0, 1, true_k))
# set up cluster names using a dict
# cluster_names = {i: 'i' for i in range(true_k)}
# create data frame that has the result of the MDS plus the cluster
# numbers and titles
df2 = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=df[0],
title2=df[2]))
# group by cluster
groups = df2.groupby('label')
pd.set_option('display.max_rows', len(df2))
# print(df2.sort_values(by='label')[['label', 'title', 'title2']])
filename = './labels.%s.csv' % ('en' if english else 'es')
df2.sort_values(by='label')[
['label', 'title', 'title2']].to_csv(filename)
    pd.reset_option('display.max_rows')  # api: pandas.reset_option
#!usr/bin/env python3
import os
from datetime import datetime
from typing import AnyStr
import pandas as pd
from jinja2 import Environment, FileSystemLoader
from termcolor import cprint
def svi_generator(excel_file: AnyStr) -> None:
"""Generates an SVI configuration template
Parameters
----------
excel_file : AnyStr
Name of an Excel file
"""
# Handle Jinja template
env = Environment(
loader=FileSystemLoader(searchpath="./"), trim_blocks=True, lstrip_blocks=True
)
template = env.get_template(
name=os.path.join("./", "svi.j2"),
globals={"now": datetime.now().replace(microsecond=0)},
)
# Read the Excel file
data = pd.read_excel(
io=os.path.join("./", excel_file), sheet_name=0, usecols="A:B,H:J"
)
    vlans = (
        pd.DataFrame(data=data)
    )  # api: pandas.DataFrame; the rest of this expression is truncated in the source
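# The rest of svi_generator is truncated above. A plausible continuation -- purely a
# sketch; the "vlans" template variable and the output file name are assumptions, not
# the original implementation -- would render the rows and write the config to disk:
def render_svi_config(template, vlans) -> str:
    """Render the SVI Jinja template from a DataFrame of VLAN rows (sketch only)."""
    config = template.render(vlans=vlans.to_dict(orient="records"))
    with open("svi_config.txt", "w") as config_file:
        config_file.write(config)
    return config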
"""
"""
__version__='172.16.58.3.dev1'
import os
import sys
import re
import pandas as pd
import numpy as np
import warnings
import tables
import h5py
import time
import base64
import struct
import logging
import glob
import math
import pyodbc
import argparse
import unittest
import doctest
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context:','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Dm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Dm - trying import Dm instead ... maybe pip install -e . is active ...'))
import Dm
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
class AmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Am():
"""SIR 3S AccessDB to pandas DataFrames.
Args:
* accFile (str): SIR 3S AccessDB
Attributes:
        * dataFrames: contains all tables that were read and all constructed views
        * viewSets: groups view and table names into categories; serves as an overview when needed
        * about the column names of the views:
            * in general the original names - but ...
            * for the _BVZ_ views:
                * :_BZ, if columns share the same name
                * data layers:
                    * _VMBZ,_VMVARIANTE,_VMBASIS, if columns share the same name
            * CONT:
                * always _CONT
            * VKNO:
                * always _VKNO
            * VBEL:
                * always _i and _k for the node data
* V3-Views i.e. dataFrames['V3_KNOT']
* V3_RSLW_SWVT
                * 1 row per RSLW that actively references an SWVT
            * V3_KNOT: nodes: "all" node data
            * V3_VBEL: edges: "all" connection-element data of the hydraulic process model
                * Multiindex:
                    * OBJTYPE
                    * OBJID (pk)
            * V3_DPKT: selected data of data points
            * V3_RKNOT: nodes: "all" node data of the signal model
                * Kn: node name
                * _rkRUES: RUES self-reference
                * OBJTYPE: RUES for RUES; otherwise the type name of the signal-model element, e.g. RADD
            * V3_RVBEL: edges: "all" connections of the signal model
* Multiindex:
* OBJTYPE_i
* OBJTYPE_k
* OBJID (pk)
* viewSets['pairViews_BZ']:
* ['V_BVZ_ALLG'
*, 'V_BVZ_BEVE', 'V_BVZ_BEWI', 'V_BVZ_BZAG'
*, 'V_BVZ_DPGR', 'V_BVZ_DPRG'
*, 'V_BVZ_EBES'
*, 'V_BVZ_FKNL', 'V_BVZ_FQPS', 'V_BVZ_FWEA', 'V_BVZ_FWES', 'V_BVZ_FWVB', 'V_BVZ_FWWU'
*, 'V_BVZ_GVWK'
*, 'V_BVZ_HYDR'
*, 'V_BVZ_KLAP', 'V_BVZ_KNOT', 'V_BVZ_KOMP'
*, 'V_BVZ_LFAL'
*, 'V_BVZ_MREG'
*, 'V_BVZ_NSCH'
*, 'V_BVZ_OBEH'
*, 'V_BVZ_PARI', 'V_BVZ_PARZ', 'V_BVZ_PGRP', 'V_BVZ_PGRP_PUMP', 'V_BVZ_PHTR', 'V_BVZ_PREG', 'V_BVZ_PUMP', 'V_BVZ_PZVR'
*, 'V_BVZ_RADD', 'V_BVZ_RART', 'V_BVZ_RDIV', 'V_BVZ_REGV', 'V_BVZ_RFKT', 'V_BVZ_RHYS', 'V_BVZ_RINT', 'V_BVZ_RLSR', 'V_BVZ_RLVG', 'V_BVZ_RMES', 'V_BVZ_RMMA', 'V_BVZ_RMUL'
*, 'V_BVZ_ROHR'
                * extended by the following DTRO data: ['NAME_DTRO','DN', 'DI', 'DA', 'S', 'KT', 'PN']
*, 'V_BVZ_RPID', 'V_BVZ_RPT1', 'V_BVZ_RSLW', 'V_BVZ_RSTE', 'V_BVZ_RSTN', 'V_BVZ_RTOT', 'V_BVZ_RUES'
*, 'V_BVZ_SIVE', 'V_BVZ_SLNK', 'V_BVZ_SNDE', 'V_BVZ_STRO'
*, 'V_BVZ_VENT'
*, 'V_BVZ_WIND']
* viewSets['pairViews_ROWS']:
* ['V_BVZ_ANTE', 'V_BVZ_ANTP', 'V_BVZ_AVOS', 'V_BVZ_DPGR', 'V_BVZ_ETAM', 'V_BVZ_ETAR', 'V_BVZ_ETAU', 'V_BVZ_KOMK', 'V_BVZ_MAPG'
*, 'V_BVZ_PHI2', 'V_BVZ_PHIV', 'V_BVZ_PUMK', 'V_BVZ_RPLAN', 'V_BVZ_SRAT', 'V_BVZ_STOF', 'V_BVZ_TFKT', 'V_BVZ_TRFT', 'V_BVZ_ZEP1', 'V_BVZ_ZEP2']
* viewSets['pairViews_ROWT']:
* ['V_BVZ_LFKT', 'V_BVZ_PHI1', 'V_BVZ_PUMD', 'V_BVZ_PVAR', 'V_BVZ_QVAR'
            *, 'V_BVZ_RCPL' # since RCPL_ROWT exists, RCPL "ends up" with the ROWTs; RCPL_ROWT, however, is not a time table at all
*, 'V_BVZ_SWVT', 'V_BVZ_TEVT', 'V_BVZ_WEVT', 'V_BVZ_WTTR']
            * contain all times
            * the column lfdNrZEIT starts at 1 for the chronologically first time
* viewSets['pairViews_ROWD']:
* ['V_BVZ_DTRO']
* viewSets['notPairViews']:
* ['V_AB_DEF', 'V_AGSN', 'V_ARRW', 'V_ATMO'
*, 'V_BENUTZER', 'V_BREF'
*, 'V_CIRC', 'V_CONT', 'V_CRGL'
*, 'V_DATENEBENE', 'V_DPGR_DPKT', 'V_DPKT', 'V_DRNP'
*, 'V_ELEMENTQUERY'
*, 'V_FSTF', 'V_FWBZ'
*, 'V_GKMP', 'V_GMIX', 'V_GRAV', 'V_GTXT'
*, 'V_HAUS'
*, 'V_LAYR', 'V_LTGR'
*, 'V_MODELL', 'V_MWKA'
*, 'V_NRCV'
*, 'V_OVAL'
*, 'V_PARV', 'V_PGPR', 'V_PLYG', 'V_POLY', 'V_PROZESSE', 'V_PZON'
*, 'V_RCON', 'V_RECT', 'V_REGP', 'V_RMES_DPTS', 'V_ROHR_VRTX', 'V_RPFL', 'V_RRCT'
*, 'V_SIRGRAF', 'V_SOKO', 'V_SPLZ', 'V_STRASSE', 'V_SYSTEMKONFIG'
*, 'V_TIMD', 'V_TRVA'
*, 'V_UTMP'
*, 'V_VARA', 'V_VARA_CSIT', 'V_VARA_WSIT', 'V_VERB', 'V_VKNO', 'V_VRCT'
*, 'V_WBLZ']
Raises:
AmError
"""
def __init__(self,accFile):
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if os.path.exists(accFile):
if os.access(accFile,os.W_OK):
pass
else:
logger.debug("{:s}accFile: {:s}: Not writable.".format(logStr,accFile))
if os.access(accFile,os.R_OK):
pass
else:
logStrFinal="{:s}accFile: {:s}: Not readable!".format(logStr,accFile)
raise AmError(logStrFinal)
else:
logStrFinal="{:s}accFile: {:s}: Not existing!".format(logStr,accFile)
raise AmError(logStrFinal)
            # the MDB exists and is readable
logger.debug("{:s}accFile (abspath): {:s}".format(logStr,os.path.abspath(accFile)))
Driver=[x for x in pyodbc.drivers() if x.startswith('Microsoft Access Driver')]
if Driver == []:
logStrFinal="{:s}{:s}: No Microsoft Access Driver!".format(logStr,accFile)
raise AmError(logStrFinal)
            # an Access driver is installed
conStr=(
r'DRIVER={'+Driver[0]+'};'
r'DBQ='+accFile+';'
)
logger.debug("{0:s}conStr: {1:s}".format(logStr,conStr))
            # connect ...
con = pyodbc.connect(conStr)
cur = con.cursor()
# all Tables in DB
tableNames=[table_info.table_name for table_info in cur.tables(tableType='TABLE')]
logger.debug("{0:s}tableNames: {1:s}".format(logStr,str(tableNames)))
allTables=set(tableNames)
# pandas DataFrames
self.dataFrames={}
            # sets of the different kinds of tables and views
pairTables=set()
pairViews=set()
pairViews_BZ=set()
pairViews_ROWS=set()
pairViews_ROWT=set()
pairViews_ROWD=set()
            # read the SIR 3S base tables and views
try:
dfViewModelle=pd.read_sql('select * from VIEW_MODELLE',con)
except pd.io.sql.DatabaseError as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise AmError(logStrFinal)
try:
dfCONT=pd.read_sql('select * from CONT',con)
except pd.io.sql.DatabaseError as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise AmError(logStrFinal)
try:
dfKNOT=pd.read_sql('select * from KNOT',con)
except pd.io.sql.DatabaseError as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise AmError(logStrFinal)
            # pairs
for pairType in ['_BZ','_ROWS','_ROWT','_ROWD']:
logger.debug("{0:s}pairType: {1:s}: ####".format(logStr,pairType))
tablePairsBVBZ=[(re.search('(?P<BV>[A-Z,1,2]+)('+pairType+')$',table_info.table_name).group('BV'),table_info.table_name) for table_info in cur.tables(tableType='TABLE') if re.search('(?P<BV>[A-Z,1,2]+)('+pairType+')$',table_info.table_name) != None]
for (BV,BZ) in tablePairsBVBZ:
if BV not in tableNames:
logger.debug("{0:s}BV: {1:s}: Tabelle gibt es nicht. Falsche Paar-Ermittlung? Weiter. ".format(logStr,BV))
continue
if BZ not in tableNames:
logger.debug("{0:s}BZ: {1:s}: Tabelle gibt es nicht. Falsche Paar-Ermittlung? Weiter. ".format(logStr,BZ))
continue
                    if BZ == 'PGRP_PUMP_BZ': # BV: PUMP BVZ: PGRP_PUMP_BZ V: V_PUMP - wrong!; handled separately below
continue
                    # store the table names in the corresponding sets
pairTables.add(BV)
pairTables.add(BZ)
# VName
VName='V_BVZ_'+BV
logger.debug("{0:s}BV: {1:s} BVZ: {2:s} V: {3:s} ...".format(logStr,BV,BZ,VName))
df,dfBV,dfBZ=Dm.f_HelperBVBZ(
con
,BV
,BZ
)
self.dataFrames[BV]=dfBV
self.dataFrames[BZ]=dfBZ
df=Dm.f_HelperDECONT(
df
,dfViewModelle
,dfCONT
)
if pairType=='_ROWT':
if 'ZEIT' in df.columns.to_list():
df['lfdNrZEIT']=df.sort_values(['pk','ZEIT'],ascending=True).groupby(['pk'])['ZEIT'].cumcount(ascending=True)+1
else:
logger.debug("{0:s}ROWT: {1:s} hat keine Spalte ZEIT?!".format(logStr,VName))
                    # store the view
self.dataFrames[VName]=df
                    # store the view name in the corresponding set
pairViews.add(VName)
if pairType=='_BZ':
pairViews_BZ.add(VName)
elif pairType=='_ROWS':
pairViews_ROWS.add(VName)
elif pairType=='_ROWT':
pairViews_ROWT.add(VName)
elif pairType=='_ROWD':
pairViews_ROWD.add(VName)
# BVZ-<NAME>
for (BV,BZ) in [('PGRP_PUMP','PGRP_PUMP_BZ')]:
df,dfBV,dfBZ=Dm.f_HelperBVBZ(
con
,BV
,BZ
)
self.dataFrames[BV]=dfBV
self.dataFrames[BZ]=dfBZ
df=Dm.f_HelperDECONT(
df
,dfViewModelle
,dfCONT
)
VName='V_BVZ_'+BV
self.dataFrames[VName]=df
logger.debug("{0:s}BV: {1:s} BVZ: {2:s} V: {3:s}".format(logStr,BV,BZ,VName))
pairTables.add(BV)
pairTables.add(BZ)
pairViews.add(VName)
pairViews_BZ.add(VName)
        # non-pairs
notInPairTables=sorted(allTables-pairTables)
        notInPairTablesW=[ # W: "Sollwert" (expected); expected SIR 3S tables that are not pairs
'AB_DEF', 'AGSN', 'ARRW', 'ATMO'
,'BENUTZER', 'BREF'
,'CIRC', 'CONT', 'CRGL'
,'DATENEBENE'
,'DPGR_DPKT'
            ,'DPKT' # a pair in 90-12
,'DRNP'
,'ELEMENTQUERY'
,'FSTF', 'FWBZ'
,'GEOMETRY_COLUMNS' # 90-12
,'GKMP', 'GMIX', 'GRAV', 'GTXT'
,'HAUS'
,'LAYR', 'LTGR'
,'MODELL'
            ,'MWKA' # not in 90-12
,'NRCV'
,'OVAL'
,'PARV', 'PGPR', 'PLYG', 'POLY', 'PROZESSE', 'PZON'
,'RCON', 'RECT', 'REGP'
,'RMES_DPTS'#, 'RMES_DPTS_BZ'
,'ROHR_VRTX', 'RPFL', 'RRCT'
,'SIRGRAF', 'SOKO', 'SPLZ', 'STRASSE', 'SYSTEMKONFIG'
,'TIMD', 'TRVA'
,'UTMP'
,'VARA', 'VARA_CSIT', 'VARA_WSIT', 'VERB', 'VKNO', 'VRCT'
,'WBLZ']
        # expected SIR 3S tables that are not pairs
notPairTables=set()
notPairViews=set()
for tableName in notInPairTablesW:
if tableName not in tableNames:
logger.debug("{0:s}tableName: {1:s}: Tabelle gibt es nicht - falsche Annahme in diesem Modul bzgl. der existierenden SIR 3S Tabellen? Weiter. ".format(logStr,tableName))
continue
sql='select * from '+tableName
try:
df=pd.read_sql(sql,con)
self.dataFrames[tableName]=df
notPairTables.add(tableName)
except pd.io.sql.DatabaseError as e:
                logger.info("{0:s}sql: {1:s}: Error?! Continuing. ".format(logStr,sql))
continue
df=Dm.f_HelperDECONT(
df
,dfViewModelle
,dfCONT
)
VName='V_'+tableName
logger.debug("{0:s}V: {1:s}".format(logStr,VName))
self.dataFrames[VName]=df
notPairViews.add(VName)
        # unexpected tables
notPairViewsProbablyNotSir3sTables=set()
notPairTablesProbablyNotSir3sTables=set()
for tableName in set(notInPairTables)-set(notInPairTablesW):
logger.debug("{0:s}tableName: {1:s}: Tabelle keine SIR 3S Tabelle aus Sicht dieses Moduls. Trotzdem lesen. ".format(logStr,tableName))
sql='select * from '+tableName
try:
df=
|
pd.read_sql(sql,con)
|
pandas.read_sql
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
from datetime import datetime, date, time, timedelta
from .data_feed_base import DataFeedBase
from .bar_event import BarEvent
class BacktestDataFeedLocalMultipleSymbols(DataFeedBase):
"""
    BacktestDataFeed retrieves historical data, which is pulled out by backtest_event_engine.
"""
def __init__(
self, hist_dir=None, start_date=None, end_date=None
):
"""
hist_dir: str
start_date, end_date: datetime.datetime
events_queue receives feed of tick/bar events
"""
self._hist_dir = hist_dir
if end_date is not None:
self._end_date = end_date
else:
self._end_date = datetime.today().date()
if start_date is not None:
self._start_date = start_date
else:
self._start_date = self._end_date-timedelta(days = 365)
self._data_stream = None
# ------------------------------------ private functions -----------------------------#
def _retrieve_historical_data(self, symbol):
"""
Retrieve historical data from web
"""
hist_file = os.path.join(self._hist_dir, "%s.csv" % symbol)
data =
|
pd.read_csv(hist_file, header=0, parse_dates=True, sep=',', index_col=0)
|
pandas.read_csv
|
"""
pyLDAvis Prepare
===============
Main transformation functions for preparing LDAdata to the visualization's data structures
"""
from __future__ import absolute_import
from past.builtins import basestring
from collections import namedtuple
import json
import logging
from joblib import Parallel, delayed, cpu_count
import numpy as np
import pandas as pd
from scipy.stats import entropy
from scipy.spatial.distance import pdist, squareform
from .utils import NumPyEncoder
try:
from sklearn.manifold import MDS, TSNE
sklearn_present = True
except ImportError:
sklearn_present = False
def __num_dist_rows__(array, ndigits=2):
return array.shape[0] - int((pd.DataFrame(array).sum(axis=1) < 0.999).sum())
class ValidationError(ValueError):
pass
def _input_check(topic_term_dists, doc_topic_dists, doc_lengths, vocab, term_frequency):
ttds = topic_term_dists.shape
dtds = doc_topic_dists.shape
errors = []
def err(msg):
errors.append(msg)
if dtds[1] != ttds[0]:
err_msg = ('Number of rows of topic_term_dists does not match number of columns of '
'doc_topic_dists; both should be equal to the number of topics in the model.')
err(err_msg)
if len(doc_lengths) != dtds[0]:
        err_msg = ('Length of doc_lengths not equal to the number of rows in doc_topic_dists; '
'both should be equal to the number of documents in the data.')
err(err_msg)
W = len(vocab)
if ttds[1] != W:
err_msg = ('Number of terms in vocabulary does not match the number of columns of '
'topic_term_dists (where each row of topic_term_dists is a probability '
'distribution of terms for a given topic)')
err(err_msg)
if len(term_frequency) != W:
err_msg = ('Length of term_frequency not equal to the number of terms in the '
                   'vocabulary (len of vocab)')
err(err_msg)
if __num_dist_rows__(topic_term_dists) != ttds[0]:
err('Not all rows (distributions) in topic_term_dists sum to 1.')
if __num_dist_rows__(doc_topic_dists) != dtds[0]:
err('Not all rows (distributions) in doc_topic_dists sum to 1.')
if len(errors) > 0:
return errors
def _input_validate(*args):
res = _input_check(*args)
if res:
raise ValidationError('\n' + '\n'.join([' * ' + s for s in res]))
def _jensen_shannon(_P, _Q):
_M = 0.5 * (_P + _Q)
return 0.5 * (entropy(_P, _M) + entropy(_Q, _M))
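# _jensen_shannon above implements the Jensen-Shannon divergence
#   JSD(P, Q) = 0.5 * KL(P || M) + 0.5 * KL(Q || M)  with  M = 0.5 * (P + Q),
# which is symmetric in P and Q and bounded by ln(2) when natural logs are used.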
def _pcoa(pair_dists, n_components=2):
"""Principal Coordinate Analysis,
aka Classical Multidimensional Scaling
"""
# code referenced from skbio.stats.ordination.pcoa
# https://github.com/biocore/scikit-bio/blob/0.5.0/skbio/stats/ordination/_principal_coordinate_analysis.py
# pairwise distance matrix is assumed symmetric
pair_dists = np.asarray(pair_dists, np.float64)
# perform SVD on double centred distance matrix
n = pair_dists.shape[0]
H = np.eye(n) - np.ones((n, n)) / n
B = - H.dot(pair_dists ** 2).dot(H) / 2
eigvals, eigvecs = np.linalg.eig(B)
# Take first n_components of eigenvalues and eigenvectors
# sorted in decreasing order
ix = eigvals.argsort()[::-1][:n_components]
eigvals = eigvals[ix]
eigvecs = eigvecs[:, ix]
# replace any remaining negative eigenvalues and associated eigenvectors with zeroes
# at least 1 eigenvalue must be zero
eigvals[np.isclose(eigvals, 0)] = 0
if np.any(eigvals < 0):
ix_neg = eigvals < 0
eigvals[ix_neg] = np.zeros(eigvals[ix_neg].shape)
eigvecs[:, ix_neg] = np.zeros(eigvecs[:, ix_neg].shape)
return np.sqrt(eigvals) * eigvecs
def js_PCoA(distributions):
"""Dimension reduction via Jensen-Shannon Divergence & Principal Coordinate Analysis
(aka Classical Multidimensional Scaling)
Parameters
----------
distributions : array-like, shape (`n_dists`, `k`)
        Matrix of distribution probabilities.
Returns
-------
pcoa : array, shape (`n_dists`, 2)
"""
dist_matrix = squareform(pdist(distributions, metric=_jensen_shannon))
return _pcoa(dist_matrix)
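# Usage sketch (hypothetical input, not taken from this module): for a topic model
# with a (n_topics, n_terms) matrix of topic-term probabilities,
#   coords = js_PCoA(topic_term_dists)   # -> array of shape (n_topics, 2)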
def js_MMDS(distributions, **kwargs):
"""Dimension reduction via Jensen-Shannon Divergence & Metric Multidimensional Scaling
Parameters
----------
distributions : array-like, shape (`n_dists`, `k`)
        Matrix of distribution probabilities.
**kwargs : Keyword argument to be passed to `sklearn.manifold.MDS()`
Returns
-------
mmds : array, shape (`n_dists`, 2)
"""
dist_matrix = squareform(pdist(distributions, metric=_jensen_shannon))
model = MDS(n_components=2, random_state=0, dissimilarity='precomputed', **kwargs)
return model.fit_transform(dist_matrix)
def js_TSNE(distributions, **kwargs):
"""Dimension reduction via Jensen-Shannon Divergence & t-distributed Stochastic Neighbor Embedding
Parameters
----------
distributions : array-like, shape (`n_dists`, `k`)
        Matrix of distribution probabilities.
**kwargs : Keyword argument to be passed to `sklearn.manifold.TSNE()`
Returns
-------
tsne : array, shape (`n_dists`, 2)
"""
dist_matrix = squareform(pdist(distributions, metric=_jensen_shannon))
model = TSNE(n_components=2, random_state=0, metric='precomputed', **kwargs)
return model.fit_transform(dist_matrix)
def _df_with_names(data, index_name, columns_name):
if type(data) == pd.DataFrame:
# we want our index to be numbered
df = pd.DataFrame(data.values)
else:
df = pd.DataFrame(data)
df.index.name = index_name
df.columns.name = columns_name
return df
def _series_with_name(data, name):
if type(data) == pd.Series:
data.name = name
# ensures a numeric index
return data.reset_index()[name]
else:
return pd.Series(data, name=name)
def _topic_coordinates(mds, topic_term_dists, topic_proportion, start_index=1):
K = topic_term_dists.shape[0]
mds_res = mds(topic_term_dists)
assert mds_res.shape == (K, 2)
mds_df = pd.DataFrame({'x': mds_res[:, 0], 'y': mds_res[:, 1],
'topics': range(start_index, K + start_index),
'cluster': 1, 'Freq': topic_proportion * 100})
# note: cluster (should?) be deprecated soon. See: https://github.com/cpsievert/LDAvis/issues/26
return mds_df
def _chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def _job_chunks(l, n_jobs):
n_chunks = n_jobs
if n_jobs < 0:
# so, have n chunks if we are using all n cores/cpus
n_chunks = cpu_count() + 1 - n_jobs
return _chunks(l, n_chunks)
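# Illustration: with the default lambda_step=0.01 the lambda grid used below has
# roughly 101 values; _job_chunks slices that sequence into successive fixed-size
# blocks, each of which is handed to one joblib worker inside _topic_info.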
def _find_relevance(log_ttd, log_lift, R, lambda_):
relevance = lambda_ * log_ttd + (1 - lambda_) * log_lift
return relevance.T.apply(lambda topic: topic.nlargest(R).index)
def _find_relevance_chunks(log_ttd, log_lift, R, lambda_seq):
return pd.concat([_find_relevance(log_ttd, log_lift, R, l) for l in lambda_seq])
def _topic_info(topic_term_dists, topic_proportion, term_frequency, term_topic_freq,
vocab, lambda_step, R, n_jobs, start_index=1):
# marginal distribution over terms (width of blue bars)
term_proportion = term_frequency / term_frequency.sum()
# compute the distinctiveness and saliency of the terms:
# this determines the R terms that are displayed when no topic is selected
tt_sum = topic_term_dists.sum()
topic_given_term = pd.eval("topic_term_dists / tt_sum")
log_1 = np.log(pd.eval("(topic_given_term.T / topic_proportion)"))
kernel = pd.eval("topic_given_term * log_1.T")
distinctiveness = kernel.sum()
saliency = term_proportion * distinctiveness
# Order the terms for the "default" view by decreasing saliency:
default_term_info = pd.DataFrame({
'saliency': saliency,
'Term': vocab,
'Freq': term_frequency,
'Total': term_frequency,
'Category': 'Default'})
default_term_info = default_term_info.sort_values(
        by='saliency', ascending=False).head(R).drop('saliency', axis=1)
# Rounding Freq and Total to integer values to match LDAvis code:
default_term_info['Freq'] = np.floor(default_term_info['Freq'])
default_term_info['Total'] = np.floor(default_term_info['Total'])
ranks = np.arange(R, 0, -1)
default_term_info['logprob'] = default_term_info['loglift'] = ranks
default_term_info = default_term_info.reindex(columns=[
"Term", "Freq", "Total", "Category", "logprob", "loglift"
])
# compute relevance and top terms for each topic
log_lift = np.log(pd.eval("topic_term_dists / term_proportion")).astype("float64")
log_ttd = np.log(topic_term_dists).astype("float64")
lambda_seq = np.arange(0, 1 + lambda_step, lambda_step)
def topic_top_term_df(tup):
new_topic_id, (original_topic_id, topic_terms) = tup
term_ix = topic_terms.unique()
df = pd.DataFrame({'Term': vocab[term_ix],
'Freq': term_topic_freq.loc[original_topic_id, term_ix],
'Total': term_frequency[term_ix],
'Category': 'Topic%d' % new_topic_id,
'logprob': log_ttd.loc[original_topic_id, term_ix].round(4),
'loglift': log_lift.loc[original_topic_id, term_ix].round(4),
})
return df.reindex(columns=[
"Term", "Freq", "Total", "Category", "logprob", "loglift"
])
top_terms = pd.concat(Parallel(n_jobs=n_jobs)
(delayed(_find_relevance_chunks)(log_ttd, log_lift, R, ls)
for ls in _job_chunks(lambda_seq, n_jobs)))
topic_dfs = map(topic_top_term_df, enumerate(top_terms.T.iterrows(), start_index))
return pd.concat([default_term_info] + list(topic_dfs))
def _token_table(topic_info, term_topic_freq, vocab, term_frequency, start_index=1):
# last, to compute the areas of the circles when a term is highlighted
# we must gather all unique terms that could show up (for every combination
# of topic and value of lambda) and compute its distribution over topics.
# term-topic frequency table of unique terms across all topics and all values of lambda
term_ix = topic_info.index.unique()
term_ix = np.sort(term_ix)
top_topic_terms_freq = term_topic_freq[term_ix]
# use the new ordering for the topics
K = len(term_topic_freq)
top_topic_terms_freq.index = range(start_index, K + start_index)
top_topic_terms_freq.index.name = 'Topic'
# we filter to Freq >= 0.5 to avoid sending too much data to the browser
token_table = pd.DataFrame({'Freq': top_topic_terms_freq.unstack()})\
.reset_index().set_index('term').query('Freq >= 0.5')
token_table['Freq'] = token_table['Freq'].round()
token_table['Term'] = vocab[token_table.index.values].values
# Normalize token frequencies:
token_table['Freq'] = token_table.Freq / term_frequency[token_table.index]
return token_table.sort_values(by=['Term', 'Topic'])
def prepare(topic_term_dists, doc_topic_dists, doc_lengths, vocab, term_frequency,
R=30, lambda_step=0.01, mds=js_PCoA, n_jobs=-1,
plot_opts=None, sort_topics=True, start_index=1):
"""Transforms the topic model distributions and related corpus data into
the data structures needed for the visualization.
Parameters
----------
topic_term_dists : array-like, shape (`n_topics`, `n_terms`)
Matrix of topic-term probabilities. Where `n_terms` is `len(vocab)`.
doc_topic_dists : array-like, shape (`n_docs`, `n_topics`)
Matrix of document-topic probabilities.
doc_lengths : array-like, shape `n_docs`
The length of each document, i.e. the number of words in each document.
The order of the numbers should be consistent with the ordering of the
docs in `doc_topic_dists`.
vocab : array-like, shape `n_terms`
List of all the words in the corpus used to train the model.
term_frequency : array-like, shape `n_terms`
The count of each particular term over the entire corpus. The ordering
of these counts should correspond with `vocab` and `topic_term_dists`.
R : int
The number of terms to display in the barcharts of the visualization.
Default is 30. Recommended to be roughly between 10 and 50.
lambda_step : float, between 0 and 1
Determines the interstep distance in the grid of lambda values over
which to iterate when computing relevance.
Default is 0.01. Recommended to be between 0.01 and 0.1.
mds : function or a string representation of function
A function that takes `topic_term_dists` as an input and outputs a
`n_topics` by `2` distance matrix. The output approximates the distance
between topics. See :func:`js_PCoA` for details on the default function.
A string representation currently accepts `pcoa` (or upper case variant),
`mmds` (or upper case variant) and `tsne` (or upper case variant),
if `sklearn` package is installed for the latter two.
n_jobs : int
The number of cores to be used to do the computations. The regular
joblib conventions are followed so `-1`, which is the default, will
use all cores.
    plot_opts : dict, with keys 'xlab' and 'ylab'
Dictionary of plotting options, right now only used for the axis labels.
sort_topics : sort topics by topic proportion (percentage of tokens covered). Set to false to
        keep the original topic order.
start_index: how to number topics for prepared data. Defaults to one-based indexing.
Set to 0 for zero-based indexing.
Returns
-------
prepared_data : PreparedData
A named tuple containing all the data structures required to create
the visualization. To be passed on to functions like :func:`display`.
This named tuple can be represented as json or a python dictionary.
There is a helper function 'sorted_terms' that can be used to get
the terms of a topic using lambda to rank their relevance.
Notes
-----
This implements the method of `<NAME>. and <NAME>. (2014):
LDAvis: A Method for Visualizing and Interpreting Topics, ACL Workshop on
Interactive Language Learning, Visualization, and Interfaces.`
http://nlp.stanford.edu/events/illvi2014/papers/sievert-illvi2014.pdf
See Also
--------
:func:`save_json`: save json representation of a figure to file
:func:`save_html` : save html representation of a figure to file
:func:`show` : launch a local server and show a figure in a browser
:func:`display` : embed figure within the IPython notebook
:func:`enable_notebook` : automatically embed visualizations in IPython notebook
"""
if plot_opts is None:
plot_opts = {'xlab': 'PC1', 'ylab': 'PC2'}
# parse mds
if isinstance(mds, basestring):
mds = mds.lower()
if mds == 'pcoa':
mds = js_PCoA
elif mds in ('mmds', 'tsne'):
if sklearn_present:
mds_opts = {'mmds': js_MMDS, 'tsne': js_TSNE}
mds = mds_opts[mds]
else:
logging.warning('sklearn not present, switch to PCoA')
mds = js_PCoA
else:
logging.warning('Unknown mds `%s`, switch to PCoA' % mds)
mds = js_PCoA
# Conceptually, the items in `topic_term_dists` end up as individual rows in the
# DataFrame, but we can speed up ingestion by treating them as columns and
# transposing at the end. (This is especially true when the number of terms far
# exceeds the number of topics.)
topic_term_dist_cols = [
|
pd.Series(topic_term_dist, dtype="float64")
|
pandas.Series
|
import pandas as pd
import streamlit as st
import plotly.graph_objects as go
from pathlib import Path
import base64
full_path = Path(__file__).parent.absolute().parent.absolute()
directory = f'{Path(full_path).parent}/data/processed'
cda_fundos = pd.read_csv(f'{directory}/cda_fundos.csv', sep = ';', encoding = 'latin1', decimal=',')
admin = pd.read_parquet(f'{directory}/dim_administrador.parquet')
fundos = pd.read_parquet(f'{directory}/dim_fundos.parquet')
gestor = pd.read_parquet(f'{directory}/dim_gestor.parquet')
tipo = pd.read_parquet(f'{directory}/dim_tipo_fundo.parquet')
mvt =
|
pd.read_parquet(f'{directory}/fato_movimento_detalhado.parquet')
|
pandas.read_parquet
|
import pandas as pd
from pandas import ExcelWriter
import os
import numpy as np
import sys
log_base_path = sys.argv[1]
main_categories = ['airplane', 'cabinet', 'car', 'chair', 'lamp', 'sofa', 'table', 'vessel', 'main_mean']
novel_categories = ['bed', 'bench', 'bookshelf', 'bus', 'guitar', 'motorbike', 'pistol', 'skateboard', 'novel_mean']
model_names = ['pcn', 'topnet', 'msn', 'cascade']
train_modes = ['cd', 'emd']
loss_cols = ['emd', 'cd_p', 'cd_p_f1', 'cd_t', 'cd_t_f1']
sheet_names = ['cd_train_main_category', 'cd_train_novel_category', 'cd_train_overview','emd_train_main_category',
'emd_train_novel_category', 'emd_train_overview', ]
def save_xls(list_dfs, xls_path):
assert len(list_dfs) == len(sheet_names)
with ExcelWriter(xls_path, engine='xlsxwriter') as writer:
for n, df in enumerate(list_dfs):
df.to_excel(writer, sheet_names[n])
if n != 2 and n != 5:
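                # hide spreadsheet row index 2 on every per-category sheet
                # (sheets 2 and 5 are the overview sheets and keep all rows visible)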
writer.sheets[sheet_names[n]].set_row(2, None, None, {'hidden': True})
writer.save()
def generate_cat_results_row(best_emd_cat, best_cd_p_cat, best_cd_t_cat):
main_cat_r = []
novel_cat_r = []
emd = [float(line.split(' ')[5]) for line in best_emd_cat]
cd_p = [float(line.split(' ')[1][:-1]) for line in best_cd_p_cat]
cd_p_f1 = [float(line.split(' ')[-1]) for line in best_cd_p_cat]
cd_t = [float(line.split(' ')[3][:-1]) for line in best_cd_t_cat]
cd_t_f1 = [float(line.split(' ')[-1]) for line in best_cd_t_cat]
for i in range(8):
main_cat_r.extend([emd[i], cd_p[i], cd_p_f1[i], cd_t[i], cd_t_f1[i]])
novel_cat_r.extend([emd[i+8], cd_p[i+8], cd_p_f1[i+8], cd_t[i+8], cd_t_f1[i+8]])
main_cat_r.extend([np.mean(emd[:8]), np.mean(cd_p[:8]), np.mean(cd_p_f1[:8]), np.mean(cd_t[:8]), np.mean(cd_t_f1[:8])])
novel_cat_r.extend([np.mean(emd[8:]), np.mean(cd_p[8:]), np.mean(cd_p_f1[8:]), np.mean(cd_t[8:]), np.mean(cd_t_f1[8:])])
return main_cat_r, novel_cat_r
def generate_overview_row(best_emd_overview, best_cd_p_overview, best_cd_t_overview):
best_emd = float(best_emd_overview.split(' ')[5])
best_cd_p = float(best_cd_p_overview.split(' ')[1][:-1])
best_cd_p_f1 = float(best_cd_p_overview.split(' ')[-1])
best_cd_t = float(best_cd_t_overview.split(' ')[3][:-1])
best_cd_t_f1 = float(best_cd_t_overview.split(' ')[-1])
return [best_emd*(10**4), best_cd_p*(10**4), best_cd_p_f1, best_cd_t*(10**4), best_cd_t_f1]
sheets = []
for mode in train_modes:
main_cat_col = pd.MultiIndex.from_product([main_categories, loss_cols])
main_cat_df = pd.DataFrame(columns=main_cat_col, index=model_names)
novel_cat_col =
|
pd.MultiIndex.from_product([novel_categories, loss_cols])
|
pandas.MultiIndex.from_product
|
"""
This creates Figure 4, fitting of multivalent binding model to Gc Data.
"""
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from scipy.optimize import minimize
from copy import copy
from .figureCommon import subplotLabel, getSetup
from ..PCA import nllsq_EC50
from ..MBmodel import runFullModel, cytBindingModel
path_here = os.path.dirname(os.path.dirname(__file__))
def makeFigure():
"""Get a list of the axis objects and create a figure"""
ax, f = getSetup((10, 7), (3, 4), multz={9: 1})
axlabel = copy(ax)
del axlabel[1]
del axlabel[1]
del axlabel[1]
del axlabel[2]
subplotLabel(axlabel)
ax[0].axis("off")
ax[1].axis("off")
ax[2].axis("off")
ax[3].axis("off")
ax[5].axis("off")
minSolved = minimize(runFullModel, x0=-12.0, args=([0.5, 1], False, True))
print(minSolved)
modelDF = runFullModel(time=[0.5, 1.0], saveDict=False, singleCell=True) # Change to save
print(r2_score(modelDF.Experimental.values, modelDF.Predicted.values))
Pred_Exp_plot(ax[4], modelDF)
legend = ax[4].get_legend()
labels = (x.get_text() for x in legend.get_texts())
ax[5].legend(legend.legendHandles, labels, loc="upper left", prop={"size": 8.5}) # use this to place universal legend later
ax[4].get_legend().remove()
R2_Plot_Cells(ax[6], modelDF)
R2_Plot_Ligs(ax[7], modelDF)
MonVsBivalent(ax[8], modelDF, ligs=True)
R2_Plot_Conc(ax[9], modelDF)
timePlot(ax[10])
return f
def Pred_Exp_plot(ax, df):
"""Plots all experimental vs. Predicted Values"""
sns.scatterplot(x="Experimental", y="Predicted", hue="Cell", style="Valency", data=df, ax=ax, alpha=0.35)
ax.set(xlim=(0, 60000), ylim=(0, 60000))
def R2_Plot_Cells(ax, df):
"""Plots all accuracies per cell"""
accDF =
|
pd.DataFrame(columns={"Cell Type", "Valency", "Accuracy"})
|
pandas.DataFrame
|
import os
import datetime
from flask import Flask, render_template, request, redirect
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
import json
import pandas as pd,re
database_uri = 'postgresql+psycopg2://{dbuser}:{dbpass}@{dbhost}/{dbname}'.format(
dbuser=os.environ['DBUSER'],
dbpass=os.environ['DBPASS'],
dbhost=os.environ['DBHOST'],
dbname=os.environ['DBNAME']
)
app = Flask(__name__)
app.config.update(
SQLALCHEMY_DATABASE_URI=database_uri,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
# initialize the database connection
db = SQLAlchemy(app)
with app.app_context():
db.create_all()
# initialize database migration management
migrate = Migrate(app, db)
def getJsonList():
from models import Mapping
db.create_all()
geoCords=Mapping.query.all()
#json_output = json.dumps(geoCords)
return geoCords
@app.route('/')
def home_page():
jdata=getJsonList()
return render_template('index.html',maps=jdata,total_locations=len(jdata))
@app.route('/populate')
def populate_table():
df = pd.read_csv('finalExtracted.csv')
df = df[df['predicted_class1']=='Informative']
print(df.columns)
from models import Request,Mapping
for index, row in df.iterrows():
if row['predicted_class2']=='Caution and advice':
if pd.isna(row['predicted_where_caution_x'])==False:
s=row['predicted_where_caution_x']
locations=s.split(', ')
for place in locations:
if len(place)>1:
place=re.sub(r"[^a-zA-Z0-9]",' ', place)
place=re.sub(' +',' ',place)
mapping=Mapping(place)
db.session.add(mapping)
db.session.commit()
if row['predicted_class2']=='Casualities and damage':
if
|
pd.isna(row['predicted_where_casuality'])
|
pandas.isna
|
import os
# os.environ["OMP_NUM_THREADS"] = "16"
import logging
logging.basicConfig(filename=snakemake.log[0], level=logging.INFO)
import pandas as pd
import numpy as np
# seak imports
from seak.data_loaders import intersect_ids, EnsemblVEPLoader, VariantLoaderSnpReader, CovariatesLoaderCSV
from seak.scoretest import ScoretestNoK
from seak.lrt import LRTnoK, pv_chi2mixture, fit_chi2mixture
from pysnptools.snpreader import Bed
import pickle
import sys
from util.association import BurdenLoaderHDF5
from util import Timer
class GotNone(Exception):
pass
# set up the covariatesloader
covariatesloader = CovariatesLoaderCSV(snakemake.params.phenotype,
snakemake.input.covariates_tsv,
snakemake.params.covariate_column_names,
sep='\t',
path_to_phenotypes=snakemake.input.phenotypes_tsv)
# initialize the null models
Y, X = covariatesloader.get_one_hot_covariates_and_phenotype('noK')
null_model_score = ScoretestNoK(Y, X)
null_model_lrt = LRTnoK(X, Y)
# set up function to filter variants:
def maf_filter(mac_report):
# load the MAC report, keep only observed variants with MAF below threshold
mac_report = pd.read_csv(mac_report, sep='\t', usecols=['SNP', 'MAF', 'Minor', 'alt_greater_ref'])
if snakemake.params.filter_highconfidence:
vids = mac_report.SNP[(mac_report.MAF < snakemake.params.max_maf) & (mac_report.Minor > 0) & ~(mac_report.alt_greater_ref.astype(bool)) & (mac_report.hiconf_reg.astype(bool))]
else:
vids = mac_report.SNP[(mac_report.MAF < snakemake.params.max_maf) & (mac_report.Minor > 0) & ~(mac_report.alt_greater_ref.astype(bool))]
# this has already been done in filter_variants.py
# load the variant annotation, keep only variants in high-confidece regions
# anno = pd.read_csv(anno_tsv, sep='\t', usecols=['Name', 'hiconf_reg'])
# vids_highconf = anno.Name[anno.hiconf_reg.astype(bool).values]
# vids = np.intersect1d(vids, vids_highconf)
return mac_report.set_index('SNP').loc[vids]
def get_regions():
# load the results, keep those below a certain p-value
results = pd.read_csv(snakemake.input.results_tsv, sep='\t')
kern = snakemake.params.kernels
if isinstance(kern, str):
kern = [kern]
pvcols_score = ['pv_score_' + k for k in kern ]
pvcols_lrt = ['pv_lrt_' + k for k in kern]
statcols = ['lrtstat_' + k for k in kern]
results = results[['gene', 'n_snp', 'cumMAC', 'nCarrier'] + statcols + pvcols_score + pvcols_lrt]
# get genes below threshold
genes = [results.gene[results[k] < 1e-7].values for k in pvcols_score + pvcols_lrt ]
genes = np.unique(np.concatenate(genes))
if len(genes) == 0:
return None
# set up the regions to loop over for the chromosome
regions =
|
pd.read_csv(snakemake.input.regions_bed, sep='\t', header=None, usecols=[0 ,1 ,2 ,3, 5], dtype={0 :str, 1: np.int32, 2 :np.int32, 3 :str, 5:str})
|
pandas.read_csv
|
import yfinance as yf
from yahooquery import Ticker
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
from tabulate import tabulate
import warnings
import streamlit as st
import src.tools.functions as f0
warnings.filterwarnings("ignore")
mpl.use("Agg")
plt.style.use("ggplot")
sm, med, lg = 10, 15, 20
plt.rc("font", size=sm) # controls default text sizes
plt.rc("axes", titlesize=med) # fontsize of the axes title
plt.rc("axes", labelsize=med) # fontsize of the x & y labels
plt.rc("xtick", labelsize=sm) # fontsize of the tick labels
plt.rc("ytick", labelsize=sm) # fontsize of the tick labels
plt.rc("legend", fontsize=sm) # legend fontsize
plt.rc("figure", titlesize=lg) # fontsize of the figure title
plt.rc("axes", linewidth=2) # linewidth of plot lines
plt.rcParams["figure.figsize"] = [10, 7]
plt.rcParams["figure.dpi"] = 100
def MovingAverageCrossStrategy(symbol, short_window, long_window, end_date, moving_avg, cc=0.0, ccc=0.0, inter='1d'):
"""
The function takes the stock symbol, time-duration of analysis,
look-back periods and the moving-average type(SMA or EMA) as input
and returns the respective MA Crossover chart along with the buy/sell signals for the given period.
>> symbol - (str)stock ticker as on Yahoo finance. Eg: 'ULTRACEMCO.NS'
>> start_date - (str)start analysis from this date (format: 'YYYY-MM-DD') Eg: '2018-01-01'
>> end_date - (str)end analysis on this date (format: 'YYYY-MM-DD') Eg: '2021-01-01'
>> short_window - (int)lookback period for short-term moving average. Eg: 5, 10, 20
>> long_window - (int)lookback period for long-term moving average. Eg: 50, 100, 200
>> moving_avg - (str)the type of moving average to use ('SMA' or 'EMA')
>> display_table - (bool)whether to display the date and price table at buy/sell positions(True/False)
>> import the closing price data of the stock for the aforementioned period of time in Pandas dataframe
"""
data = yf.download(symbol, start='2020-01-03', end=str(end_date)[:10], interval=inter)
data.index = pd.to_datetime(data.index)
stock_df =
|
pd.DataFrame(data['Adj Close'])
|
pandas.DataFrame
|
import argparse
import os
import time
import pandas as pd
from kalasanty.data import DataWrapper
from kalasanty.net import UNet, dice_loss, dice, ovl
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
def input_path(path):
"""Check if input exists."""
path = os.path.abspath(path)
if not os.path.exists(path):
raise IOError('%s does not exist.' % path)
return path
def output_path(path):
path = os.path.abspath(path)
return path
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', required=True, type=input_path,
help='path to the .hdf file with prepared data (can be '
'created with prepare_dataset.py)')
parser.add_argument('--model', '-m', type=input_path,
help='path to the .hdf file with pretrained model. '
'If not specified, a new model will be trained from scratch.')
parser.add_argument('--batch_size', default=10, type=int)
parser.add_argument('--steps_per_epoch', default=150, type=int)
parser.add_argument('--epochs', default=10000, type=int)
parser.add_argument('--train_ids', type=input_path,
help='text file with IDs to use for training (each in separate line). '
'If not specified, all proteins in the database '
'(except those listed with --test_ids) will be used. '
'Note that --test_ids has higher priority (i.e. if '
'ID is specified with both --train_ids and '
'--test_ids, it will be in the test set)')
parser.add_argument('--test_ids', type=input_path,
                        help='text file with IDs to use for testing (each in separate line). '
'If not specified, all proteins will be used for training. '
'This option has higher priority than --train_ids (i.e. if '
'ID is specified with both --train_ids and '
'--test_ids, it will be in the test set)')
parser.add_argument('--load', '-l', action='store_true',
help='whether to load all data into memory')
parser.add_argument('--output', '-o', type=output_path,
help='name for the output directory. If not specified, '
'"output_<YYYY>-<MM>-<DD>" will be used')
parser.add_argument('--verbose', '-v', default=2, type=int,
help='verbosity level for keras')
return parser.parse_args()
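# Example invocation sketch (file and directory names are placeholders, not from
# the original project):
#   python train.py --input prepared_data.hdf --output results_dir --batch_size 10 --load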
def main():
args = parse_args()
if args.output is None:
args.output = 'output_' + time.strftime('%Y-%m-%d')
if not os.path.exists(args.output):
os.makedirs(args.output)
if not os.access(args.output, os.W_OK):
raise IOError('Cannot create files inside %s (check your permissions).' % args.output)
if args.train_ids:
with open(args.train_ids) as f:
train_ids = list(filter(None, f.read().split('\n')))
else:
train_ids = None
if args.test_ids:
with open(args.test_ids) as f:
test_ids = list(filter(None, f.read().split('\n')))
else:
test_ids = None
if train_ids:
if test_ids:
all_ids = sorted(set(train_ids) | set(test_ids))
else:
all_ids = train_ids
else:
all_ids = None
data = DataWrapper(args.input, test_set=test_ids, pdbids=all_ids,
load_data=args.load)
if args.model:
model = UNet.load_model(args.model, data_handle=data)
else:
model = UNet(data_handle=data)
model.compile(optimizer=Adam(lr=1e-6), loss=dice_loss,
metrics=[dice, ovl, 'binary_crossentropy'])
train_batch_generator = data.batch_generator(batch_size=args.batch_size)
callbacks = [ModelCheckpoint(os.path.join(args.output, 'checkpoint.hdf'),
save_best_only=False)]
if test_ids:
val_batch_generator = data.batch_generator(batch_size=args.batch_size, subset='test')
num_val_steps = max(args.steps_per_epoch // 5, 1)
callbacks.append(ModelCheckpoint(os.path.join(args.output, 'best_weights.hdf'),
save_best_only=True))
else:
val_batch_generator = None
num_val_steps = None
model.fit_generator(train_batch_generator, steps_per_epoch=args.steps_per_epoch,
epochs=args.epochs, verbose=args.verbose, callbacks=callbacks,
validation_data=val_batch_generator, validation_steps=num_val_steps)
history =
|
pd.DataFrame(model.history.history)
|
pandas.DataFrame
|
"""Training GCMC model on the MovieLens data set.
The script loads the full graph to the training device.
"""
import os, time
import argparse
import logging
import random
import string
import numpy as np
import pandas as pd
import torch as th
import torch.nn as nn
import torch.nn.functional as F
#import bottleneck as bn
from data import DataSetLoader
#from data_custom import DataSetLoader
from model import BiDecoder, GCMCLayer, MLPDecoder
from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger
from utils import to_etype_name
from sklearn.metrics import ndcg_score
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self._act = get_activation(args.model_activation)
self.encoder = nn.ModuleList()
self.encoder.append(GCMCLayer(args.rating_vals,
args.src_in_units,
args.dst_in_units,
args.gcn_agg_units,
args.gcn_out_units,
args.gcn_dropout,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
device=args.device))
self.gcn_agg_accum = args.gcn_agg_accum
self.rating_vals = args.rating_vals
self.device = args.device
self.gcn_agg_units = args.gcn_agg_units
self.src_in_units = args.src_in_units
for i in range(1, args.layers):
if args.gcn_agg_accum == 'stack':
gcn_out_units = args.gcn_out_units * len(args.rating_vals)
else:
gcn_out_units = args.gcn_out_units
self.encoder.append(GCMCLayer(args.rating_vals,
args.gcn_out_units,
args.gcn_out_units,
gcn_out_units,
args.gcn_out_units,
args.gcn_dropout - i*0.1,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
ini = False,
device=args.device))
if args.decoder == "Bi":
self.decoder = BiDecoder(in_units= args.gcn_out_units, #* args.layers,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
self.decoder2 = MLPDecoder(in_units= args.gcn_out_units * 2,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
elif args.decoder == "MLP":
if args.loss_func == "CE":
num_classes = len(args.rating_vals)
else:
num_classes = 1
self.decoder = MLPDecoder(in_units= args.gcn_out_units * args.layers,
num_classes=num_classes,
num_basis=args.gen_r_num_basis_func)
self.rating_vals = args.rating_vals
def forward(self, enc_graph, dec_graph, ufeat, ifeat, Two_Stage = False):
#user_out = []
#movie_out = []
for i in range(0, args.layers):
user_o, movie_o = self.encoder[i](
enc_graph,
ufeat,
ifeat,
Two_Stage)
if i == 0:
user_out = user_o
movie_out = movie_o
else:
user_out += user_o / float(i + 1)
movie_out += movie_o /float(i + 1)
#user_out.append(user_o)
#movie_out.append(movie_o)
ufeat = user_o
ifeat = movie_o
#print("user_out:", user_o[0])
#print("movie_out:", movie_o[0])
#pred_ratings = self.decoder2(dec_graph, th.cat([user_out[0], user_out[1]], 1), th.cat([movie_out[1], movie_out[0]], 1))
#user_out = th.cat(user_out, 1)
#movie_out = th.cat(movie_out, 1)
pred_ratings = self.decoder(dec_graph, user_out, movie_out)
W_r_last = None
reg_loss = 0.0
'''
for rating in self.rating_vals:
rating = to_etype_name(rating)
if W_r_last is not None:
reg_loss += th.sum((self.encoder[0].W_r[rating] - W_r_last)**2)
W_r_last = self.encoder[0].W_r[rating]
#W_r_last_2 = self.encoder_2.W_r[rating]
'''
W = th.matmul(self.encoder[0].att, self.encoder[0].basis.view(self.encoder[0].basis_units, -1))
#print("forward W:", W.shape)
W = W.view(len(self.rating_vals), self.src_in_units, -1)
for i, rating in enumerate(self.rating_vals):
rating = to_etype_name(rating)
if i != 0:
reg_loss += -th.sum(th.cosine_similarity(W[i,:,:], W[i-1,:,:], dim=1))
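        # the term above accumulates the negative cosine similarity between the
        # weight matrices of neighbouring rating levels; assuming reg_loss enters
        # the training loss with a positive weight, this nudges adjacent rating
        # transforms towards each other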
return pred_ratings, reg_loss, user_out, movie_out, W
def evaluate(args, net, dataset, segment='valid',debug = False, idx = 0):
possible_rating_values = dataset.possible_rating_values
nd_possible_rating_values = th.FloatTensor(possible_rating_values).to(args.device)
if segment == "valid":
rating_values = dataset.valid_truths
user_index = dataset.test_rating_info["user_id"]
enc_graph = dataset.valid_enc_graph
dec_graph = dataset.valid_dec_graph
elif segment == "test":
rating_values = dataset.test_truths
user_index = dataset.test_rating_info["user_id"]
enc_graph = dataset.test_enc_graph
dec_graph = dataset.test_dec_graph
user_map = dataset.global_user_id_map
movie_map = dataset.global_movie_id_map
else:
raise NotImplementedError
# Evaluate RMSE
net.eval()
with th.no_grad():
pred_ratings, reg_loss, user_out, movie_out, W = net(enc_graph, dec_graph,
dataset.user_feature, dataset.movie_feature)
#print(dataset.user_feature)
if args.loss_func == "CE":
max_rating, max_indices = th.max(pred_ratings, dim=1)
pred = nd_possible_rating_values[max_indices]
real_pred_ratings = (th.softmax(pred_ratings, dim=1) *
nd_possible_rating_values.view(1, -1)).sum(dim=1)
num_correct = th.eq(pred, rating_values).sum().float().item()
print("correct rate:",float(num_correct) / float(rating_values.shape[0]))
elif args.loss_func == "MLP":
real_pred_ratings = pred_ratings[:, 0]
rmse = ((real_pred_ratings - rating_values) ** 2.).mean().item()
rmse = np.sqrt(rmse)
#print("self.valid_labels:\n", dataset.valid_labels[0:10])
#print("self.valid_truths:\n", dataset.valid_truths[0:10])
'''
if debug and segment == "test":
fp = open("./debug/"+str(idx), "w")
mse = (real_pred_ratings - rating_values) ** 2.
edges = dec_graph.edges()
for i in range(rating_values.shape[0]):
print(str(user_map.inverse[edges[0][i].item()]) + " " + str(movie_map.inverse[edges[1][i].item()]) + " " + str(rating_values[i].item()) + " " + str(real_pred_ratings[i].item()) , file = fp)
fp.close()
'''
return rmse
def evaluate_metric(args, net, dataset, segment='valid', debug = False):
possible_rating_values = dataset.possible_rating_values
nd_possible_rating_values = th.FloatTensor(possible_rating_values).to(args.device)
if segment == "test":
rating_matrix = dataset.test_rating_matrix
enc_graph = dataset.test_enc_graph
dec_graph = dataset.test_recall_dec_graph
user_len = len(list(
|
pd.unique(dataset.test_rating_info["user_id"])
|
pandas.unique
|
import os
import pandas as pd
import numpy as np
from src.tools import Config
from dotenv import find_dotenv, load_dotenv
## config file
load_dotenv(find_dotenv())
cfg = Config(project_dir = os.getenv('PROJECT_DIR'), mode = os.getenv('MODE'))
## global variables
CHEMIN_DATA = os.path.join(cfg.get('directory')['project_dir'], 'data', 'raw')
CHEMIN_TRAIN_EVENTS = os.path.join(CHEMIN_DATA, 'games_train_events.csv')
CHEMIN_TEST_EVENTS = os.path.join(CHEMIN_DATA, 'games_test_events.csv')
CHEMIN_PLAYERS = os.path.join(CHEMIN_DATA, 'players.csv')
CHEMIN_DATA_INTERIM = os.path.join(cfg.get('directory')['project_dir'], 'data', 'interim')
## load the data
games_train = pd.read_csv(CHEMIN_TRAIN_EVENTS)
games_test =
|
pd.read_csv(CHEMIN_TEST_EVENTS)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""User modeling functions and developer utilities for populating Time
x Channel grids with OLS and LMER model fits. The API exposes the
primary user interface functions for general use with ``Epochs``
objects as ``fitgrid.lm`` and ``fitgrid.lmer``.
"""
from math import ceil
from functools import partial
from multiprocessing import Pool
from contextlib import redirect_stdout
from io import StringIO
import numpy as np
import pandas as pd
from statsmodels.formula.api import ols
from pymer4 import Lmer # moved up from lmer_single() for Multiprocessing
from tqdm import tqdm
from .errors import FitGridError
from . import tools
from .fitgrid import FitGrid, LMFitGrid, LMERFitGrid
def validate_LHS(epochs, LHS):
# must be a list of strings
if not (
isinstance(LHS, list) and all(isinstance(item, str) for item in LHS)
):
raise FitGridError('LHS must be a list of strings.')
# all LHS items must be present in the epochs_table
missing = set(LHS) - set(epochs.table.columns)
if missing:
raise FitGridError(
'Items in LHS should all be present in the epochs table, '
f'the following are missing: {missing}'
)
def validate_RHS(RHS):
# validate RHS
if RHS is None:
raise FitGridError('Specify the RHS argument.')
if not isinstance(RHS, str):
raise FitGridError('RHS has to be a string.')
def process_key_and_group(key_and_group, function, channels):
key, group = key_and_group
results = {channel: function(group, channel) for channel in channels}
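    # one fit result per channel, packed into a Series indexed by channel and
    # named after the group key (presumably the time point of the Time x Channel grid)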
return
|
pd.Series(results, name=key)
|
pandas.Series
|
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
def buildFeatureFrame(filename,timepoint):
temp = np.asarray(np.load(filename,allow_pickle=True)).item()
image_props = measure.regionprops_table(temp['masks'],
intensity_image=temp['img'],
properties=('label','area','filled_area','bbox', 'centroid',
'eccentricity','solidity','convex_area',
'mean_intensity','min_intensity','max_intensity',
'orientation','major_axis_length','minor_axis_length',
'perimeter','extent','intensity_image'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
def buildOffsetFrame(filename_tminus1,filename_tplus1):
temp1 = np.asarray(np.load(filename_tminus1,allow_pickle=True)).item()
temp2 = np.asarray(np.load(filename_tplus1,allow_pickle=True)).item()
image_props = measure.regionprops_table(temp1['masks'],
intensity_image=temp2['img'],
properties=('label','centroid','area',"mean_intensity"))
im_df = pd.DataFrame(image_props)
im_df['time'] = None
return(im_df)
def linkEnergy(image1, image2, im1_select, im2_select):
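    # link cost = Euclidean centroid displacement between the two detections
    #             + sqrt(|area difference|)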
deltaX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'][im2_select])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'][im2_select])**2)
deltaA = np.absolute(image1['area'][im1_select] - image2['area'][im2_select])
score = deltaX + np.sqrt(deltaA)
return(score)
def generateLinks(image1, image2, im1_select, dist_multiplier=2):
delX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'])**2)
max_dist = dist_multiplier*min(delX)
candidates = np.array(delX[delX < max_dist].index)
return(candidates)
def ScoreMotherArea(motherArea, ProgArea_1, Prog_Area_2, threshold=0.95):
if (motherArea/(ProgArea_1 + Prog_Area_2)) > threshold :
return(0)
else:
return(10)
def ScoreMotherInt(motherCurrentInt, motherNextInt):
normInt = (motherCurrentInt/(motherNextInt + 10**-4))
if (normInt > 2):
return(-1)
elif (normInt <= 2)&(normInt > 1.4):
return(0)
else:
return(1)
def ScoreProjInt(projCurrentInt, projPrevInt):
normInt = (projCurrentInt/(projPrevInt + 10**-4))
if (normInt > 2):
return(-1)
elif (normInt <= 2)&(normInt > 1):
return(0)
else:
return(1)
def ScoreProjDiff(proj1Int, proj2Int):
return(np.abs(proj1Int - proj2Int)/2)
def ScoreDivisionTotal(motherFrameCurr, motherFrameNext,
projFrameCurr, projFramePrev,
motherCell,projCell_1, projCell_2):
motherArea = ScoreMotherArea(motherFrameCurr["area"][motherCell],
projFrameCurr["area"][projCell_1],
projFrameCurr["area"][projCell_2])
motherInt = ScoreMotherInt(motherFrameCurr["mean_intensity"][motherCell],
motherFrameNext["mean_intensity"][motherCell])
projInt = -1 + ScoreProjInt(projFrameCurr["mean_intensity"][projCell_1], projFramePrev["mean_intensity"][projCell_1]) +ScoreProjInt(projFrameCurr["mean_intensity"][projCell_2], projFramePrev["mean_intensity"][projCell_2])
projIntDiff = ScoreProjDiff(projFrameCurr["mean_intensity"][projCell_1],
projFrameCurr["mean_intensity"][projCell_2])
projAreaDiff = ScoreProjDiff(projFrameCurr["area"][projCell_1],
projFrameCurr["area"][projCell_2])
return(motherArea + motherInt + projInt + projIntDiff + projAreaDiff)
def DivisionCandidate(motherFrameCurr, motherFrameNext,
projFrameCurr, projFramePrev,
motherCell, projCell_1, projCell_2_candidates, threshold=3):
tru_vec=[]
for i in projCell_2_candidates:
if(ScoreDivisionTotal(motherFrameCurr,motherFrameNext,
projFrameCurr,projFramePrev,
motherCell,projCell_1,i) < threshold):
tru_vec=np.append(tru_vec,True)
else:
tru_vec=np.append(tru_vec,False)
return(np.any(tru_vec))
def buildConnections(filename_t0,greedy=False,openingCost=2, nnDist=3, DivScoreThreshold=12):
    time0 = filename_t0.split("/")[-1].split("_")[0]
    time1 = str(int(time0)+1)
    tmp_filename_t1 = time1+"_"+filename_t0.split("/")[-1].split("_")[1]
    dirs = filename_t0.split("/")[:-1]
    filename_t1 = "/".join(dirs)+"/"+tmp_filename_t1
ip0 = buildFeatureFrame(filename_t0,time0)
ip1 = buildFeatureFrame(filename_t1,time1)
fx0 = buildOffsetFrame(filename_t0,filename_t1)
fx1 = buildOffsetFrame(filename_t1,filename_t0)
num=0
arr =
|
pd.DataFrame([])
|
pandas.DataFrame
|
import structlog
from strategies.strategy_utils import Utils
import pandas as pd
class MovingAvgConvDiv():
def __init__(self):
self.logger = structlog.get_logger()
self.utils = Utils()
def get_12_day_EMA(self, frame):
twelve_day_EMA = frame.ewm(span=12)
return list(twelve_day_EMA.mean()["Prices"])
def get_26_day_EMA(self, frame):
twenty_six_day_EMA = frame.ewm(span=26)
return list(twenty_six_day_EMA.mean()["Prices"])
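    # conventional MACD: the MACD line is the 12-period EMA minus the 26-period EMA
    # of the closing prices; the two helpers above supply those series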
def calculate_MACD_line(self, historical_data):
closing_prices = self.utils.get_closing_prices(historical_data)
emadict = {"Prices": closing_prices}
frame =
|
pd.DataFrame(emadict)
|
pandas.DataFrame
|
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T')
@pytest.fixture()
def pv_15(times_15):
return pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times_15)
@pytest.fixture()
def expected_15(times_15):
return pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times_15)
@pytest.fixture()
def irradiance_15(times_15):
return pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times_15)
@pytest.fixture()
def pv_30(times_30):
return pd.Series([1.0, 3.0, 2.1], index=times_30)
@pytest.fixture()
def expected_30(times_30):
return pd.Series([1.2, 2.8, 2.0], index=times_30)
@pytest.fixture()
def irradiance_30(times_30):
return pd.Series([1000.0, 950.0, 890.0], index=times_30)
def test_normalize_with_expected_power_uniform_frequency(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15)
expected_norm = pv_15.iloc[1:]/expected_15.iloc[1:]
expected_norm.name = 'energy_Wh'
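    # 15-minute data -> 0.25 h per interval, so the expected insolation per sample
    # is simply irradiance * 0.25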
expected_insol = irradiance_15.iloc[1:]*0.25
expected_insol.name = 'energy_Wh'
pd.testing.assert_series_equal(norm, expected_norm)
|
pd.testing.assert_series_equal(insol, expected_insol)
|
pandas.testing.assert_series_equal
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import autokeras as ak
os.chdir("/home/pchabets/Dropbox/STRESS_INDEX/")
# In[2]:
#proteomics
prtm = pd.read_csv("data/blood_and_saliva_variables/W1/proteomics/output/proteomics_replaced_outliers.csv")
prtm = prtm.drop('applate', axis=1)
# In[3]:
#label data
y = pd.read_spss("data/outcome_groups/DSM_groups.sav", convert_categoricals=True)
y = y.drop('Remitted_comorbid', axis=1)
y = y.rename(columns={'pident':'Pident'})
y['Pident'] = y['Pident'].astype(int)
# In[4]:
whole_set = pd.merge(y, prtm, how='inner', on='Pident')
# In[5]:
whole_set
# In[6]:
#Turn labels into 0's and 1's: non-remitted = 0, remitted = 1
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
whole_set['Remitted_depression'] = lbl.fit_transform(whole_set['Remitted_depression'])
# In[7]:
X = whole_set.drop(['Pident','Remitted_depression'], axis=1)
y = whole_set.pop('Remitted_depression')
# ### log10-transform the data; this can be done before the train/test split because it is a pointwise operation
# In[8]:
X = np.log10(X)
# ### Train-test split (80-20)
# In[9]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101, shuffle=True)
# ### Preprocessing: remove zero-variance variables and impute NaN's with median
# In[10]:
from sklearn.feature_selection import VarianceThreshold
from sklearn.impute import SimpleImputer
# #### Select ZV columns from training data and remove them from train + test X
# In[11]:
selector = VarianceThreshold()
selector.fit(X_train)
# In[12]:
X_names = list(X_train.columns[selector.get_support()])
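# keep the names of the columns that survive the variance filter so the scaled
# arrays can be turned back into a labelled DataFrame later on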
# In[13]:
X_train = selector.transform(X_train)
X_test = selector.transform(X_test)
# In[14]:
print(str(X.shape[1]-X_train.shape[1]) + " analytes with zero variance")
# #### Calculate median for each column in train X, and replace NaNs with that value in both train and test X
# In[15]:
imputer = SimpleImputer(strategy="median")
# In[16]:
imputer.fit(X_train)
# In[17]:
X_train = imputer.transform(X_train)
X_test = imputer.transform(X_test)
# #### Scale data with MinMax scaling
# In[18]:
from sklearn.preprocessing import MinMaxScaler
# In[19]:
scaler = MinMaxScaler()
# In[20]:
scaler.fit(X_train)
# In[21]:
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# In[22]:
# Convert back to dataframe
X_train =
|
pd.DataFrame(data=X_train,columns=X_names)
|
pandas.DataFrame
|
import pandas as pd
import subprocess
if __name__ == "__main__":
candidate_gdas = pd.read_csv("n_out/candidate_GDAs.csv", names=['pmid', 'diseaseId', 'geneId'], sep="\t")
predict =
|
pd.read_csv("n_out/DTMiner_predict", names=['predict'])
|
pandas.read_csv
|
import argparse
import os
import sys
import warnings
import numpy as np
import pandas as pd
import tqdm
warnings.simplefilter(action='ignore', category=FutureWarning)
sys.path.append(os.path.dirname(os.getcwd()))
sys.path.append(os.getcwd())
from commons import files_tools, consts, utils
from variance import bulk_get_valid_cpgs_dataset
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument('--methylation_folder', help='Path to methylation files', required=True)
parser.add_argument('--orig_meth_cells', required=True,
help='Path to file containing the list of cell to calculate original cells')
parser.add_argument('--output_folder', help='Path of the output folder', required=False)
parser.add_argument('--percentage_of_extreme_cells', required=False, type=int, default=15,
help='Percentage of extreme cells to take from top and bottom, default is 15% from '
'each side')
parser.add_argument('--coverage_perc_cpgs_to_remove', type=int, default=5, required=False,
help='Percentage of cells to remove based on extreme coverage(high and low)')
parser.add_argument('--min_cells_threshold', help='minimum samples per cell group to be a valid '
'cpg', required=False, default=5, type=int)
args = parser.parse_args()
return args
def get_cells_to_calculate_variance(file_path):
cells_info_data = pd.read_csv(file_path)
nc_cells = list(cells_info_data[cells_info_data["variance_cells"] == 1]["sample"])
return [i.strip() for i in nc_cells]
def main():
args = parse_input()
all_files = files_tools.get_files_to_work(args.methylation_folder, consts.BULK_FILE_FORMAT % "*")
orig_meth_cells = bulk_get_valid_cpgs_dataset.get_cells_to_calculate_original_meth(args.orig_meth_cells)
variance_cells = get_cells_to_calculate_variance(args.orig_meth_cells)
chromosomes_list = []
for chromosome_file in tqdm.tqdm(all_files):
chromosome, pmd_df = bulk_get_valid_cpgs_dataset.get_pmd_df(chromosome_file)
orig_meth_values = bulk_get_valid_cpgs_dataset.get_cpgs_orig_methylated(pmd_df, orig_meth_cells)
df = pmd_df.filter(items=variance_cells, axis=0).astype(np.float64)
# Remove extreme cells and cells with not enough coverage
df = utils.remove_extreme_cpgs_by_coverage(df, args.coverage_perc_cpgs_to_remove)
threshold_filter = df.notnull().sum(axis=0) > args.min_cells_threshold
df = df.filter(items=threshold_filter[threshold_filter].index, axis=1)
pmd_sample_mean = df.mean(axis=1) # Mean of each sample in pmd
chromosome_df = pd.DataFrame(columns=["chromosome", "location"])
chromosome_df["location"] = df.columns
chromosome_df["chromosome"] = chromosome
chromosome_df = chromosome_df.set_index("location")
# chromosome_df["pmd_index"] = df["pmd_index"]
chromosome_df["meth"] = df.mean()
chromosome_df["var"] = df.var()
chromosome_df["pearson_corr"] = df.corrwith(pmd_sample_mean)
covariance = [df.iloc[:, i].cov(pmd_sample_mean) for i in range(df.shape[1])]
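        # covariance of each CpG (column) with the per-sample mean methylation,
        # computed across the retained samples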
chromosome_df["covariance"] = covariance
chromosome_df["orig_meth"] = orig_meth_values[chromosome_df.index]
# chromosome_df["sequence"] = sequence_tools.get_sequences_for_cpgs(df.columns, str(chromosome))
chromosomes_list.append(chromosome_df.reset_index())
all_chromosome_df =
|
pd.concat(chromosomes_list)
|
pandas.concat
|
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
# see gh-13323
data = 'a,b,c\n1,9,258'
# sanity check
expected = DataFrame({
'a': np.array([1], dtype=np.int64),
'b': np.array([9], dtype=np.int64),
'c': np.array([258], dtype=np.int64),
})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.int8),
'b': np.array([9], dtype=np.int8),
'c': np.array([258], dtype=np.int16),
})
# default behaviour for 'use_unsigned'
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True)
tm.assert_frame_equal(out, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=False)
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.uint8),
'b': np.array([9], dtype=np.uint8),
'c': np.array([258], dtype=np.uint16),
})
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=True)
tm.assert_frame_equal(out, expected)
def test_compact_ints_as_recarray(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
assert result.dtype == ex_dtype
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
assert result.dtype == ex_dtype
def test_as_recarray(self):
# basic test
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
|
tm.assert_numpy_array_equal(out, expected)
|
pandas.util.testing.assert_numpy_array_equal
|
import json
import os
import tweepy
from datetime import date
import calendar
import pandas as pd
from dotenv import load_dotenv
load_dotenv()
TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)
# Helper functions
def todays_date():
"""Returns today's date in UTC"""
today = date.today()
day = today.strftime("%b-%d-%Y").replace('-', ' ')
my_date = date.today()
return calendar.day_name[my_date.weekday()][:3] + ' ' + day
def min_bin(mins):
    """Bins the minutes at which our followers engage into '00' and '30' buckets"""
l = []
for _min in mins:
if _min < 30:
l.append('00')
else:
l.append('30')
return l
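# --- Illustrative sketch (added; not part of the original script) ---
# Shows how min_bin buckets minutes into half-hour bins; the values below are
# made up for demonstration.
_example_bins = min_bin([5, 29, 30, 59])  # -> ['00', '00', '30', '30']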
class data_wrangling:
    # Defining api as a class variable (shared among all instances of the class)
api = tweepy.API(auth, wait_on_rate_limit=True)
def __init__(self, user_id, follower_count=10):
# instance variables (unique for each instance of the class)
self.user_id = user_id
self.follower_count = follower_count
def followers_ids(self):
"""Returns the first 10 ids of the user's followers"""
followers_ids = api.followers_ids(self.user_id)
return followers_ids
def get_follower_data(self, followers_ids):
times = []
text = []
# Loop through the follower_count (int) defined in instance
for followers in followers_ids[:self.follower_count]:
            # Try/except to bypass the error raised when we request a protected user's information
try:
favorited_tweets = api.favorites(id=f'{followers}')
# Add each tweet that the current follower engaged with to the list
for tweet in range(len(favorited_tweets)):
status = favorited_tweets[tweet]
#convert to string
json_str = json.dumps(status._json)
#deserialise string into python object
parsed = json.loads(json_str)
# gets the created_at (time) and the text from the tweets the followers engaged with
times.append(parsed.get('created_at'))
text.append(parsed.get('text'))
except tweepy.TweepError:
pass
        # Separates hours and mins into lists to be put into a df (hours stay as str to keep 24-hour/military time for the PUT request to the backend)
hours, mins = [i[11:13] for i in times], [int(i[14:16]) for i in times]
_min_bin = min_bin(mins)
# Creates df with times and text
df =
|
pd.DataFrame(data={'hours':hours, 'mins':mins, 'min_bin':_min_bin, 'text':text})
|
pandas.DataFrame
|
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
# Add Bayesian-and-novelty directory to the PYTHONPATH
import sys
import os
sys.path.append(os.path.realpath('../../..'))
# Autoreload changes in utils, etc.
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
from novelty.utils.metrics import plot_roc, plot_prc
from novelty.utils.metrics import get_summary_statistics
from novelty.utils.metrics import html_summary_table
# In[2]:
# Training settings
BATCH_SIZE = 128
EPOCHS = 100
LR = 0.1
MOMENTUM = 0.9
NO_CUDA = False
SEED = 1
CLASSES = 80
MODEL_PATH_ROOT = './weights/wrn-28-10-cifar80'
MODEL_PATH = MODEL_PATH_ROOT + '.pth'
# MNIST mean and stdevs of training data by channel
CHANNEL_MEANS = (129.38732832670212/255, 124.35894414782524/255, 113.09937313199043/255)
CHANNEL_STDS = (67.87980079650879/255, 65.10988622903824/255, 70.04801765084267/255)
# Plot ROC and PR curves
PLOT_CHARTS = False
# ## Training and Testing functions
# In[3]:
from novelty.utils import Progbar
def train(model, device, train_loader, optimizer, epoch):
progbar = Progbar(target=len(train_loader.dataset))
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
progbar.add(len(data), [("loss", loss.item())])
# In[4]:
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = F.log_softmax(model(data), dim=1)
# sum up batch loss
test_loss += F.nll_loss(output, target, size_average=False).item()
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_acc = 100. * correct / len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset), test_acc))
return test_loss, test_acc
# ## Load CIFAR80
# In[5]:
from novelty.utils import DATA_DIR
from src.wide_resnet import Wide_ResNet
torch.manual_seed(SEED)
use_cuda = not NO_CUDA and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Dataset transformation
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
# Load training and test sets
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(os.path.join(DATA_DIR, 'cifar80/train'), transform=transform),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(os.path.join(DATA_DIR, 'cifar80/test'), transform=transform),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
# ## Initialize model and optimizer
# In[6]:
# Create model instance
model = Wide_ResNet(28, 10, 0., CLASSES)
# Move model to available GPUs
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
print("Using", torch.cuda.device_count(), "GPUs")
model = model.to(device)
# Initialize optimizer
optimizer = optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(EPOCHS*0.5), int(EPOCHS*0.75)], gamma=0.1)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=EPOCHS)
# ## Optimization loop
# In[7]:
if os.path.exists(MODEL_PATH):
# load previously trained model:
model.load_state_dict(torch.load(MODEL_PATH))
else:
best_loss = float("inf")
# Training loop
for epoch in range(EPOCHS):
print("Epoch:", epoch)
scheduler.step()
# Print the learning rate
for param_group in optimizer.param_groups:
print('Learning rate:', param_group['lr'])
train(model, device, train_loader, optimizer, epoch)
loss, acc = test(model, device, test_loader)
# Checkpoint the model parameters
if loss < best_loss:
torch.save(model.state_dict(), "{}_epoch{}.pth".format(MODEL_PATH_ROOT, epoch))
best_loss = loss
# save the model
torch.save(model.state_dict(), MODEL_PATH)
# ## ODIN prediction functions
# In[8]:
from torch.autograd import Variable
def predict(model, data, device):
model.eval()
data = data.to(device)
outputs = model(data)
outputs = outputs - outputs.max(1)[0].unsqueeze(1) # For stability
return F.softmax(outputs, dim=1)
def predict_temp(model, data, device, temp=1000.):
model.eval()
data = data.to(device)
outputs = model(data)
outputs /= temp
outputs = outputs - outputs.max(1)[0].unsqueeze(1) # For stability
return F.softmax(outputs, dim=1)
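# --- Illustrative sketch (added; not part of the original notebook) ---
# Temperature scaling divides the logits before the softmax: a large temp
# flattens the distribution, which ODIN uses to separate in-distribution from
# out-of-distribution inputs. The logits below are made up.
_logits = torch.tensor([[2.0, 1.0, 0.1]])
_sharp = F.softmax(_logits, dim=1)         # temp = 1: peaked distribution
_flat = F.softmax(_logits / 1000., dim=1)  # temp = 1000: nearly uniform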
def predict_novelty(model, data, device, temp=1000., noiseMagnitude=0.0012):
model.eval()
# Create a variable so we can get the gradients on the input
inputs = Variable(data.to(device), requires_grad=True)
# Get the predicted labels
outputs = model(inputs)
outputs = outputs / temp
outputs = F.log_softmax(outputs, dim=1)
# Calculate the perturbation to add to the input
maxIndexTemp = torch.argmax(outputs, dim=1)
labels = Variable(maxIndexTemp).to(device)
loss = F.nll_loss(outputs, labels)
loss.backward()
# Normalizing the gradient to binary in {0, 1}
gradient = torch.ge(inputs.grad.data, 0)
gradient = (gradient.float() - 0.5) * 2
    # Normalize the gradient to the same space as the image
for channel, (mean, std) in enumerate(zip(CHANNEL_MEANS, CHANNEL_STDS)):
gradient[0][channel] = (gradient[0][channel] - mean) / std
# Add small perturbations to image
# TODO, this is from the released code, but disagrees with paper I think
tempInputs = torch.add(inputs.data, -noiseMagnitude, gradient)
# Get new outputs after perturbations
outputs = model(Variable(tempInputs))
outputs = outputs / temp
outputs = outputs - outputs.max(1)[0].unsqueeze(1) # For stability
outputs = F.softmax(outputs, dim=1)
return outputs
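# --- Illustrative sketch (added; not part of the original notebook) ---
# The perturbation above uses only the sign of the input gradient: torch.ge
# maps each gradient entry to {0, 1}, and (g - 0.5) * 2 rescales that to
# {-1, +1}. A standalone example with a made-up gradient:
_g = torch.tensor([[-0.3, 0.0, 0.7]])
_g_sign = (torch.ge(_g, 0).float() - 0.5) * 2  # tensor([[-1., 1., 1.]])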
# ## Evaluate method on outlier datasets
# In[9]:
def get_max_model_outputs(data_loader, device):
"""Get the max softmax output from the model in a Python array.
data_loader: object
A pytorch dataloader with the data you want to calculate values for.
device: object
The CUDA device handle.
"""
result = []
for data, target in data_loader:
# Using regular model
p = predict(model, data, device)
max_val, label = torch.max(p, dim=1)
# Convert torch tensors to python list
max_val = list(max_val.cpu().detach().numpy())
result += max_val
return result
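# --- Illustrative sketch (added; not part of the original notebook) ---
# torch.max(p, dim=1) returns a (values, indices) pair; only the maximum
# softmax probability per sample is kept as the confidence score.
_p = torch.tensor([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])
_conf, _pred = torch.max(_p, dim=1)  # _conf = tensor([0.7000, 0.5000]), _pred = tensor([1, 0])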
def get_max_odin_outputs(data_loader, device, temp=1000., noiseMagnitude=0.0012):
"""Convenience function to get the max softmax values from the ODIN model in a Python array.
data_loader: object
A pytorch dataloader with the data you want to calculate values for.
device: object
The CUDA device handle.
temp: float, optional (default=1000.)
The temp the model should use to do temperature scaling on the softmax outputs.
noiseMagnitude: float, optional (default=0.0012)
The epsilon value used to scale the input images according to the ODIN paper.
"""
result = []
for data, target in data_loader:
# Using ODIN model
p = predict_novelty(model, data, device, temp=temp, noiseMagnitude=noiseMagnitude)
max_val, label = torch.max(p, dim=1)
# Convert torch tensors to python list
max_val = list(max_val.cpu().detach().numpy())
result += max_val
return result
# In[10]:
import pandas as pd
df = pd.DataFrame(columns=['auroc', 'aupr_in', 'aupr_out', 'fpr_at_95_tpr', 'detection_error'],
index=['imagenet_crop', 'imagenet_resize', 'lsun_crop', 'lsun_resize',
'isun_resize', 'gaussian', 'uniform', 'cifar20'])
df_odin = pd.DataFrame(columns=['auroc', 'aupr_in', 'aupr_out', 'fpr_at_95_tpr', 'detection_error'],
index=['imagenet_crop', 'imagenet_resize', 'lsun_crop', 'lsun_resize',
'isun_resize', 'gaussian', 'uniform', 'cifar20'])
# ### Process Inliers
# In[11]:
num_inliers = len(test_loader.dataset)
# Get predictions on in-distribution images
cifar_model_maximums = get_max_model_outputs(test_loader, device)
# ### Tiny Imagenet (Crop)
# In[12]:
directory = os.path.join(DATA_DIR, 'tiny-imagenet-200/test')
# Dataset transformation
transform_crop = transforms.Compose([
transforms.RandomCrop([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
imagenet_crop_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(directory, transform=transform_crop),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_imagenet_crop = len(imagenet_crop_loader.dataset.imgs)
# Get predictions on in-distribution images
imagenet_crop_model_maximums = get_max_model_outputs(imagenet_crop_loader, device)
temp = 1000
eps = 0.0026
cifar_odin_maximums = get_max_odin_outputs(test_loader, device, temp=temp, noiseMagnitude=eps)
imagenet_crop_odin_maximums = get_max_odin_outputs(imagenet_crop_loader, device, temp=temp, noiseMagnitude=eps)
# In[13]:
labels = [1] * num_inliers + [0] * num_imagenet_crop
predictions = cifar_model_maximums + imagenet_crop_model_maximums
predictions_odin = cifar_odin_maximums + imagenet_crop_odin_maximums
stats = get_summary_statistics(predictions, labels)
df.loc['imagenet_crop'] = pd.Series(stats)
stats_odin = get_summary_statistics(predictions_odin, labels)
df_odin.loc['imagenet_crop'] = pd.Series(stats_odin)
if PLOT_CHARTS:
plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
# plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
# plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# ### Tiny Imagenet (Resize)
# In[14]:
directory = os.path.join(DATA_DIR, 'tiny-imagenet-200/test')
# Dataset transformation
transform_resize = transforms.Compose([
transforms.Resize([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
imagenet_resize_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(directory, transform=transform_resize),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_imagenet_resize = len(imagenet_resize_loader.dataset.imgs)
# Get predictions on in-distribution images
imagenet_resize_model_maximums = get_max_model_outputs(imagenet_resize_loader, device)
temp = 1000
eps = 0.0024
cifar_odin_maximums = get_max_odin_outputs(test_loader, device, temp=temp, noiseMagnitude=eps)
imagenet_resize_odin_maximums = get_max_odin_outputs(imagenet_resize_loader, device, temp=temp, noiseMagnitude=eps)
# In[15]:
labels = [1] * num_inliers + [0] * num_imagenet_resize
predictions = cifar_model_maximums + imagenet_resize_model_maximums
predictions_odin = cifar_odin_maximums + imagenet_resize_odin_maximums
stats = get_summary_statistics(predictions, labels)
df.loc['imagenet_resize'] = pd.Series(stats)
stats_odin = get_summary_statistics(predictions_odin, labels)
df_odin.loc['imagenet_resize'] = pd.Series(stats_odin)
if PLOT_CHARTS:
plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
# plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
# plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# ### LSUN (Crop)
# In[16]:
lsun_directory = '/media/tadenoud/DATADisk/datasets/lsun'
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
lsun_crop_loader = torch.utils.data.DataLoader(
datasets.LSUN(lsun_directory, classes='test', transform=transform_crop),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_lsun_crop = len(lsun_crop_loader.dataset)
# Get predictions on in-distribution images
lsun_crop_model_maximums = get_max_model_outputs(lsun_crop_loader, device)
temp = 1000
eps = 0.0038
cifar_odin_maximums = get_max_odin_outputs(test_loader, device, temp=temp, noiseMagnitude=eps)
lsun_crop_odin_maximums = get_max_odin_outputs(lsun_crop_loader, device, temp=temp, noiseMagnitude=eps)
# In[17]:
labels = [1] * num_inliers + [0] * num_lsun_crop
predictions = cifar_model_maximums + lsun_crop_model_maximums
predictions_odin = cifar_odin_maximums + lsun_crop_odin_maximums
stats = get_summary_statistics(predictions, labels)
df.loc['lsun_crop'] = pd.Series(stats)
stats_odin = get_summary_statistics(predictions_odin, labels)
df_odin.loc['lsun_crop'] = pd.Series(stats_odin)
if PLOT_CHARTS:
plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
# plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
# plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# ### LSUN (Resize)
# In[18]:
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
lsun_resize_loader = torch.utils.data.DataLoader(
datasets.LSUN(lsun_directory, classes='test', transform=transform_resize),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_lsun_resize = len(lsun_resize_loader.dataset)
# Get predictions on in-distribution images
lsun_resize_model_maximums = get_max_model_outputs(lsun_resize_loader, device)
temp = 1000
eps = 0.0026
cifar_odin_maximums = get_max_odin_outputs(test_loader, device, temp=temp, noiseMagnitude=eps)
lsun_resize_odin_maximums = get_max_odin_outputs(lsun_resize_loader, device, temp=temp, noiseMagnitude=eps)
# In[19]:
labels = [1] * num_inliers + [0] * num_lsun_resize
predictions = cifar_model_maximums + lsun_resize_model_maximums
predictions_odin = cifar_odin_maximums + lsun_resize_odin_maximums
stats = get_summary_statistics(predictions, labels)
df.loc['lsun_resize'] = pd.Series(stats)
stats_odin = get_summary_statistics(predictions_odin, labels)
df_odin.loc['lsun_resize'] =
|
pd.Series(stats_odin)
|
pandas.Series
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_read_data.ipynb (unless otherwise specified).
__all__ = ['AttrDict', 'cData', 'cCLS_CNES', 'cESA_SWE', 'cESA_SOLMAG', 'cNOAA45', 'fget_external_forecasts']
# Cell
import os
import re
import io
import sys
import numpy as np
import pandas as pd
import datetime as dt
from pathlib import Path
import shutil
import urllib.request as request
from urllib.error import URLError
from contextlib import closing
import requests
from requests.exceptions import HTTPError
from functools import reduce
# Cell
from fastai import *
from fastai.vision.all import *
# Cell
from .stats_utils import cStationary
# Cell
class AttrDict(dict):
"""
access dictionary keys as attributes
obj = AttrDict()
obj.update(dict)
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
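# --- Illustrative sketch (added; not part of the original module) ---
# Keys added after construction are also reachable as attributes, because the
# instance __dict__ is the dict itself. The key names below are made up.
_cfg = AttrDict()
_cfg.update({"overwrite": False, "verb": True})
assert _cfg.overwrite == _cfg["overwrite"]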
# Cell
class cData:
"""
parent data class
download functions
"""
def __init__(self):
pass
def fdownload_url_ftp(self, url, dest, overwrite=False):
"""
download url from ftp server
cannot use fastai as this uses requests library
        requests cannot handle ftp (need to use urllib)
url = (ftp) url
dest = destination filename (pathlib object)
"""
if dest.exists() and not overwrite:
if self.verb: print("{} already downloaded".format(dest))
return
else:
if self.verb: print("Downloading data to {}...".format(dest))
with closing(request.urlopen(url)) as r:
with open(dest, 'wb') as f:
shutil.copyfileobj(r, f)
def fdownload_url_http(self, url, dest, overwrite=False):
"""
download url from http server
Throw exception if response not 200
r.status_code == 200: success
r.status_code == 404: not found
"""
try:
response = requests.get(url)
response.raise_for_status()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}') # Python 3.6
except Exception as err:
print(f'Other error occurred: {err}') # Python 3.6
# if file exists pass, else download
if dest.exists() and not overwrite:
if self.verb: print("{} already downloaded".format(dest))
return
else:
if self.verb: print("Downloading data to {}...".format(dest))
with open(dest, "wb") as f:
f.write(response.content)
def fdownload_url(self, url, dest, overwrite=False):
"""
Fastai download data: https://github.com/fastai/fastai2/blob/master/fastai2/data/external.py
"""
try:
# use standard fastai download or custom http function
download_data(url, fname=dest, force_download=overwrite)
#self.fdownload_url_http(url, dest, overwrite)
except requests.exceptions.InvalidSchema:
# custom for ftp
self.fdownload_url_ftp(url, dest, overwrite)
#return os.listdir()
def fget_fnames_url_ftp(self, url, ext="txt"):
"""
return list of filenames displayed in ftp webpage
"""
# ftp: cannot use requests .get
resource = request.urlopen(url)
content = resource.read().decode('utf-8')
fnames = [i for i in content.split() if ext in i]
# print out if no new data
#if self.verb: if (path/fnames[-1]).exists():
# print("No New Data")
return fnames
def fget_fnames_dir(self, datadir, ext="txt"):
"""
obtain list of filenames from local directory
"""
filenames = []
for filename in os.listdir(datadir):
if filename.endswith(ext):
filenames.append("{}/{}".format(datadir,filename))
else:
pass
return filenames
def fcopy_local_files(self, src, dst):
"""
copy data files from local server (e.g. those that have been downloaded with cron)
"""
#from shutil import copyfile #or use copy, copy2 (check different properties)
#copyfile(src, dst)
pass
def fget_forecast_comp(self, df_forecast, df_archive, cname="y"):
"""
generate dataframe containing forecasts alongside truth values for metric calculations
assumes df_forecast and df_archive have same interpolation
https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
df_forecast = dataframe with columns [index, gendate, ds, cname]
df_archive = dataframe with columns [ds, cname] (ds as index)
        cname = name of the column of interest in the archive df
"""
# merge on matching ds
df_unsorted = pd.merge(df_forecast, df_archive, on=['ds'])
# sort by gendate then by ds
df = df_unsorted.sort_values(by=['gendate', 'ds'])
# rename columns
df.columns = ['gendate' , 'ds', cname, '{}_true'.format(cname)]
return df.reset_index(drop=True)
def fget_persistence(self, df, ind, colname="persistence"):
"""
        take the truth value from the day before gendate and use it as the forecast
        if the "yesterday" truth cannot be accessed, persistence is set to NaN
ind = column name of truth value to be used as persistence
"""
# get indices where gendate is the same as ds -> these are truth values for a given gendate==ds
truth = df[df.gendate == df.ds]
x1 = np.array(truth.index)
        # need to also append the final index, as x2 takes differences and so would be one index short
x1 = np.append(x1, len(df))
# get length of each forecast for each gendate (for which persistence has to be repeated)
x2 = np.diff(x1)
# for a given gendate, take the truth value from the day before and repeat it x2 times
x3 = []
for i in range(len(x2)):
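            # NOTE: for i == 0, x1[i-1] wraps around to the appended len(df) entry, so df.iloc raises
            # IndexError and persistence is set to NaN, as documented in the docstring above.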
try:
yesterday_idx = x1[i-1]
yesterday_truth = df.iloc[yesterday_idx][ind]
except IndexError:
yesterday_truth = np.nan
x3.append([yesterday_truth]*x2[i])
# set column in df as persistence
df[colname] = [item for sublist in x3 for item in sublist]
return df
def fget_average(self, df):
"""
take average of truth values over last x days and use as forecast
"""
# get indices where gendate is the same as ds -> these are truth values for a given gendate==ds
#truth = df[df.gendate == df.ds]
pass
def finterpolate(self, df, timestep, method="time"):
"""
interpolate dataframe to regular time spacing
timestep = interpolation frequency (hours)
"""
statsobj = cStationary()
df_interp = statsobj.fmake_regular_freq(df, timestep=dt.timedelta(hours=timestep),
method=method)
df_interp = df_interp.dropna()
return df_interp
def fget_daily(self, df, method="20:00"):
"""
get single data value per day
(until 1996 only one value per day, subsequently 3 per day)
options:
- "20:00" : take value at 20:00
- "interp" : interpolate to midnight
- False : take no action
(may have issues accessing single date string from index despite converting to DatetimeIndex)
(have to access as df4["1996-03-15":"1996-03-22"] or df4.loc["1996-03-17"] but not df4["1996-03-17"])
(use dff.sample(1) for manual checking against original dataset)
"""
if method == "20:00":
if self.verb: print("Take daily value as at 20:00...")
# keep observations at midnight and 20:00, drop times
# drop duplicates (some dates in 1996 have both, in all of these cases they are the same)
df1 = df.at_time("00:00").reset_index()
df2 = df.at_time("20:00").reset_index()
df1["ds"] = df1["ds"].dt.date
df2["ds"] = df2["ds"].dt.date
            dff = pd.concat([df1, df2], axis=0)  # api: pandas.concat
# This file contains custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these actions:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
import re
import io
import ast
import requests
import numpy as np
import pandas as pd
import random
from typing import Any, Text, Dict, List, Union, Optional
from rasa_sdk import Action, Tracker
from rasa_sdk import FormValidationAction
from rasa_sdk.events import SlotSet, FollowupAction
from rasa_sdk.types import DomainDict
from rasa_sdk.executor import CollectingDispatcher
import warnings
from statistics import mean
from os import path, getenv
from datetime import datetime
import matplotlib.pyplot as plt
from botocore.exceptions import ClientError
from boto3.exceptions import S3UploadFailedError
import boto3
DB_AWS_ACCESS_KEY_ID = getenv('DB_AWS_ACCESS_KEY_ID')
DB_AWS_SECRET_ACCESS_KEY = getenv('DB_AWS_SECRET_ACCESS_KEY')
DB_AWS_BUCKET = 'journeypic'
# ------------------------------------------------------------------
def upload_file_to_s3(local_file, s3_folder, s3_file, aws_access_key_id, aws_secret_access_key, aws_bucket,
debug_en=False):
""" upload a given file to given location on Amazon-S3 """
success = True
HTTP_OK = 200
# Connect to Amazon-S3 client:
s3_client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
# Make a new directory on S3 (if not already exists):
if s3_folder + '/' in [x['Key'] for x in s3_client.list_objects(Bucket=aws_bucket)['Contents']]:
pass
elif not debug_en:
res = s3_client.put_object(Bucket=aws_bucket, Key='%s/' % s3_folder)
success = res['ResponseMetadata']['HTTPStatusCode'] == HTTP_OK
if not success:
return success, ""
# Upload local_file to S3:
if not debug_en:
try:
if path.exists(local_file):
s3_client.upload_file(local_file, aws_bucket, path.join(s3_folder, s3_file))
s3_client.put_object_acl(ACL='public-read', Bucket=aws_bucket, Key=path.join(s3_folder, s3_file))
except (ClientError, S3UploadFailedError) as e:
            success = False
return success, "https://%s.s3.eu-central-1.amazonaws.com/%s/%s" % (aws_bucket, s3_folder, s3_file)
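# Usage sketch (hypothetical filenames; assumes the AWS credentials below are set in the environment):
# ok, url = upload_file_to_s3("donut_image1.png", "auto_generated", "donut_image1.png",
#                             DB_AWS_ACCESS_KEY_ID, DB_AWS_SECRET_ACCESS_KEY, DB_AWS_BUCKET)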
# ------------------------------------------------------------------
def donut_generator(names, sizes, radius=0.7, textstr_title='',
colors=None, figname="image.png"):
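    # Hebrew strings are passed in pre-reversed ([::-1]); matplotlib has no bidi shaping,
    # so reversing makes the RTL text display correctly.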
if colors is None:
colors = []
my_circle = plt.Circle((0, 0), radius, color='white')
fig, ax = plt.subplots()
labels = [':%s\nתוירולק %d' % (k1, k2) for k1, k2 in zip(names, sizes)]
if colors:
ax.pie(sizes, colors=colors)
else:
ax.pie(sizes)
plt.legend(bbox_to_anchor=(1.0, 0.88), fontsize=18, labels=labels)
p = plt.gcf()
p.gca().add_artist(my_circle)
if textstr_title:
ax.text(0.34, 1.05, textstr_title, transform=ax.transAxes, weight='bold',
fontsize=30, verticalalignment='center_baseline')
textstr_center1 = str(sum(sizes))
textstr_center2 = 'קלוריות'[::-1]
ax.text(0.39, 0.56, textstr_center1, transform=ax.transAxes, weight='bold',
fontsize=24, verticalalignment='center_baseline')
ax.text(0.37, 0.44, textstr_center2, transform=ax.transAxes,
fontsize=18, verticalalignment='center_baseline')
if figname:
fig.patch.set_facecolor('white')
fig.savefig(figname, bbox_inches='tight', facecolor='white')
else:
plt.show()
# ------------------------------------------------------------------
def donut_generator_wrapper(title, data):
names = [x[::-1] for x in list(data.keys())]
sizes = list(data.values())
colors = ['darkorange', 'lightgreen', 'blue']
textstr_title = title[::-1]
figname = "donut_image1.png"
donut_generator(names=names,
sizes=sizes,
radius=0.7,
textstr_title=textstr_title,
colors=colors,
figname=figname)
return figname
# ------------------------------------------------------------------
def iniliatize_Diagram(title, data):
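    # Build a donut chart for the given data, upload it to S3 under a timestamped filename,
    # and return its public URL.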
unique_filename = lambda fname: "%s_%s%s" % (path.splitext(fname)[0],
datetime.now().strftime("%m%d%Y_%H%M%S"),
path.splitext(fname)[1])
figname = donut_generator_wrapper(title, data)
res, figure_url = upload_file_to_s3(local_file=figname,
s3_folder="auto_generated",
s3_file=unique_filename(figname),
aws_access_key_id=DB_AWS_ACCESS_KEY_ID,
aws_secret_access_key=DB_AWS_SECRET_ACCESS_KEY,
aws_bucket=DB_AWS_BUCKET)
return figure_url
# ------------------------------------------------------------------
def load_db(db_bitmap):
db_dict = {}
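    # db_bitmap is a bitmask: each bit enables loading one Google-Sheets tab
    # (e.g. 0x1 = Tzameret food list, 0x2 = entity aliases, 0x80 = food units, 0x400 = food unit features).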
# "Zameret food list 22_JAN_2020"
if (db_bitmap & 0x1) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=84892416"
s = requests.get(url).content
db_dict['tzameret'] = pd.read_csv(io.StringIO(s.decode('utf-8'))).fillna(0)
# "Zameret_hebrew_features" - entities aliases
if (db_bitmap & 0x2) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1805881936"
s = requests.get(url).content
db_dict['lut'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity Alias"],
usecols=["Entity Alias", "Entity", "Units",
"Entity name", "RDA name",
"action_simple_question",
"action_nutrition_howmanyxiny_x",
"action_nutrition_howmanyxiny_y",
"action_nutrition_is_food_healthy",
"action_nutrition_is_food_recommended",
"action_nutrition_what_is_healthier_x",
"action_nutrition_what_is_healthier_y",
"action_nutrition_get_rda",
"action_nutrition_bloodtest_generic",
"action_nutrition_bloodtest_value",
"action_nutrition_food_substitute",
"action_nutrition_compare_foods",
"action_nutrition_howmanyxyinz"]).fillna(0)
# "Zameret_hebrew_features" - nutrients_questions
if (db_bitmap & 0x4) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1706335378"
s = requests.get(url).content
db_dict['nutrients_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity"]).fillna(0)
# "Zameret_hebrew_features" - Food questions
if (db_bitmap & 0x8) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1099284657"
s = requests.get(url).content
db_dict['food_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["nutrition_density"],
usecols=["nutrition_density", "energy_density",
"description_density"]).fillna(0)
# "Zameret_hebrew_features" - List of common foods
if (db_bitmap & 0x10) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=495295419"
s = requests.get(url).content
db_dict['common_food'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["common_name"],
usecols=["common_name", "shmmitzrach", "smlmitzrach"]).fillna(0)
# "Newt Machine Readable" - FoodItemRanges
if (db_bitmap & 0x20) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=885087351"
s = requests.get(url).content
db_dict['food_ranges'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Nutrient"],
usecols=["Nutrient", "Medium - threshold per 100gr",
"High - threshold per 100gr",
"good_or_bad", "tzameret_name", "hebrew_name"]).fillna(0)
# "Newt Machine Readable" - MicroNutrients
if (db_bitmap & 0x40) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=222801095"
s = requests.get(url).content
micro_nutrients_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0).fillna(0)
db_dict['micro_nutrients'] = micro_nutrients_df
# "Newt Machine Readable" - MicroNutrients
if (db_bitmap & 0x80) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1373096469"
s = requests.get(url).content
food_units_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0).fillna(0)
db_dict['food_units'] = food_units_df
# "Newt Machine Readable" - BloodTestValues
if (db_bitmap & 0x100) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=1011022304"
s = requests.get(url).content
bloodtest_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0, nrows=19, usecols=range(11)).fillna(0)
db_dict['bloodtest_vals'] = bloodtest_df
# "Zameret_hebrew_features" - Weight aliases
if (db_bitmap & 0x200) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=623521836"
s = requests.get(url).content
food_units_aliases_df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=0)
db_dict['food_units_aliases'] = food_units_aliases_df
# "Zameret_hebrew_features" - For Noa
if (db_bitmap & 0x400) > 0:
url = "https://docs.google.com/spreadsheets/d/19rYDpki0jgGeNlKLPnINiDGye8QEfQ4IEEWSkLFo83Y/export?format=csv&gid=82221888"
s = requests.get(url).content
food_units_features_df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=1)
db_dict['food_units_features'] = food_units_features_df.dropna(axis=0, how='all')
db_dict['food_units_features'] = db_dict['food_units_features'].rename({'Primary_SN': 'smlmitzrach'},
axis=1)
# "Zameret_hebrew_features" - subs_tags_alias
if (db_bitmap & 0x800) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=458428667"
s = requests.get(url).content
db_dict['subs_tags_alias'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
usecols=["Entity Alias", "Entity", "Show_stopers"]).set_index(
'Entity Alias')
return db_dict
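# Usage sketch: load only the Tzameret food list together with the food-units tab.
# db = load_db(0x1 | 0x80)
# db['tzameret'].head(); db['food_units'].head()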
# ------------------------------------------------------------------
def import_sheets(debug=False):
'''Import the df noa and tzameret food group tabs from the suggested meal planning sheet as a DataFrame. Import weights and measures, and tzameret food list from Tzameret DB as a DataFrame'''
sheet_id = '19rYDpki0jgGeNlKLPnINiDGye8QEfQ4IEEWSkLFo83Y'
    # import separately
gid_2 = '428717261'
df_tzameret_food_group = pd.read_csv(
f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv&gid={gid_2}")
df = load_db(0x481)
df_nutrition = df['tzameret']
df_nutrition.fillna(0, inplace=True)
df_nutrition.rename(columns={'carbohydrates': 'carbs'}, inplace=True)
df_weights = df['food_units']
df_weights.head()
df_noa_pre_1 = df['food_units_features']
df_noa = df['food_units_features']
header = list(df_noa_pre_1.columns.values)
df_noa.loc[-1] = header # adding a row
df_noa.index = df_noa.index + 1 # shifting index
df_noa = df_noa.sort_index() # sorting by index
df_noa.head()
df_noa.columns = df_noa.columns.str.lower()
    df_noa = df_noa.iloc[1:]  # drop the first row (the one with the numbers) so the data is easier to filter
df_noa['lactose_free'] = df_noa['lactose_free'].replace({'Low Lactose': 'Yes', 'Lactose Free': 'Yes'})
df_noa['food_category'] = df_noa['food_category'].replace({'N/A': 'Savoury_Snacks'})
df_noa.dropna(subset=["food_name"],
inplace=True) # dropping all meals that don't have a meal name to get complete list of actual meals
df_noa = df_noa.rename(columns={'smlmitzrach': 'primary_sn'})
df_noa['sn_1'] = df_noa['primary_sn'].astype(str).str[:1]
df_noa['sn_2'] = df_noa['primary_sn'].astype(str).str[1:2]
return df_noa, df_tzameret_food_group, df_weights, df_nutrition
# ------------------------------------------------------------------
def get_rda(name, tracker, intent_upper=False):
db_dict = load_db(0x46)
lut_df = db_dict['lut']
micro_nutrients_df = db_dict['micro_nutrients']
if intent_upper:
micro_nutrients_df = micro_nutrients_df[micro_nutrients_df['Type'] == "Upper Limit"]
else:
micro_nutrients_df = micro_nutrients_df[micro_nutrients_df['Type'] == "RDA"]
status = "match"
if not (tracker.get_slot('gender') and tracker.get_slot('age') and tracker.get_slot(
'weight') and tracker.get_slot(
'height')):
status = "default"
nutrient = None
x = tracker.get_slot('x') if tracker.get_slot('x') else None
    if x is not None and x != "":
nutrient = x
else:
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[name].values:
nutrient = ent['value']
break
try:
feature = lut_df['Entity'][nutrient]
feature_rda = lut_df['RDA name'][lut_df['Entity name'] == feature][0]
gender = "Male"
if tracker.get_slot('gender') == "זכר":
gender = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender = "Female"
user_vars = {}
user_vars['age'] = tracker.get_slot('age') if tracker.get_slot('age') else "40"
user_vars['weight'] = tracker.get_slot('weight') if tracker.get_slot('weight') else "80"
user_vars['height'] = tracker.get_slot('height') if tracker.get_slot('height') else "180"
rda_row = micro_nutrients_df[(micro_nutrients_df['Micronutrient'] == feature_rda) & \
((micro_nutrients_df['Gender'] == "ANY") | (
micro_nutrients_df['Gender'] == gender)) & \
((micro_nutrients_df['Pregnancy'] == "ANY") | (
micro_nutrients_df['Pregnancy'] == "No")) & \
((micro_nutrients_df['Lactating'] == "ANY") | (
micro_nutrients_df['Lactating'] == "No")) & \
((micro_nutrients_df['Age Min'] == "ANY") | (
micro_nutrients_df['Age Min'].astype(float) <= int(
user_vars['age']))) & \
((micro_nutrients_df['Age Max'] == "ANY") | (
micro_nutrients_df['Age Max'].astype(float) > int(user_vars['age'])))]
rda_text = str(rda_row['Free Text'].values[0])
rda_value = str(rda_row['Value'].values[0])
rda_units = rda_row['Units'].values[0]
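        # RDA values/texts may embed placeholders such as "slot#weight"; replace them with the
        # corresponding user values and evaluate the resulting expression.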
if 'slot#' in rda_value:
rda_value_list = rda_value.split(' ')
for k, el in enumerate(rda_value_list):
if 'slot#' in el and el.split('#')[1] in user_vars:
rda_value_list[k] = user_vars[el.split('#')[1]]
rda_value = eval(' '.join(rda_value_list))
rda_value = float(rda_value)
if 'slot#' in rda_text:
rda_text_list = rda_text.split(' ')
for k, el in enumerate(rda_text_list):
if 'slot#' in el:
rda_text_list[k] = tracker.get_slot(el.split('#')[1])
rda_text = ' '.join(rda_text_list)
        rda_text_list = re.findall(r'\{.*?\}', rda_text)
for match in rda_text_list:
rda_text = rda_text.replace(match, str(eval(match[1:-1])))
if rda_text == "0":
rda_text = ""
return rda_value, rda_units, rda_text, status, nutrient
except:
return -1, -1, "", "missmatch", nutrient
# ------------------------------------------------------------------
def get_personal_str(rda_status, tracker):
age = tracker.get_slot('age') if tracker.get_slot('age') and rda_status == "match" else '40'
gender = tracker.get_slot('gender') if tracker.get_slot('gender') and rda_status == "match" else 'זכר'
weight = tracker.get_slot('weight') if tracker.get_slot('weight') and rda_status == "match" else '80'
height = tracker.get_slot('height') if tracker.get_slot('height') and rda_status == "match" else '180'
if rda_status == "default":
personal_str = "עבור %s בגיל %s במשקל %s ובגובה %s" % (gender, age, weight, height)
else:
personal_str = "עבורך (%s בגיל %s במשקל %s ובגובה %s)" % (gender, age, weight, height)
return personal_str
# ------------------------------------------------------------------
def get_food_nutrition_density(food, food_ranges_db):
# Nutrition Density is defined in Tzameret:
density_normalized = float(food["Nutrition density normalized"])
# Thresholds are defined in Machine-Readable:
density = food_ranges_db[food_ranges_db.index == "Nutrition density"]
density_med = float(density["Medium - threshold per 100gr"])
density_high = float(density["High - threshold per 100gr"])
# Binning:
res = "high"
if density_normalized < density_med:
res = "low"
elif density_normalized < density_high:
res = "med"
return density, res
# ------------------------------------------------------------------
def get_food_energy_density(food, food_ranges_db):
# Energy Density is defined in Tzameret:
density_normalized = float(food["Energy density"])
# Thresholds are defined in Machine-Readable:
density = food_ranges_db[food_ranges_db.index == "Energy density"]
density_med = float(density["Medium - threshold per 100gr"])
density_high = float(density["High - threshold per 100gr"])
# Binning:
res = "high"
if density_normalized < density_med:
res = "low"
elif density_normalized < density_high:
res = "med"
return density, res
# ------------------------------------------------------------------
def how_many_x_in_y_core(x, y, food_units, name, tracker):
db_dict = load_db(0x293)
y_common = y
if y in db_dict['common_food'].index:
y_common = db_dict['common_food'][db_dict['common_food'].index == y]['shmmitzrach'][0]
else:
y_food = ' '.join(y.split(' ')[1:])
food_units = db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == y.split(' ')[0]][
'Zameret unit']
if food_units.empty:
food_units = y.split(' ')[0]
else:
food_units = food_units.values[0]
if y_food in db_dict['common_food'].index:
y_common = db_dict['common_food'][db_dict['common_food'].index == y_food]['shmmitzrach'][0]
else:
y_common = y_food
food = db_dict['tzameret'][db_dict['tzameret']['shmmitzrach'].str.contains(y_common)].iloc[0, :]
feature = db_dict['lut'][db_dict['lut'].index == x]["Entity"][0]
units = db_dict['lut'][db_dict['lut'].index == x]["Units"][0]
food_units_row = pd.Series()
if food_units:
food_units_row = db_dict['food_units'][(db_dict['food_units']['smlmitzrach'] == int(food['smlmitzrach'])) &
(db_dict['food_units']['shmmida'] == food_units)]
is_food_units_match = not food_units_row.empty or food_units == "100 גרם"
food_units_factor = 1.0
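    # Tzameret nutrient values are per 100 g; 'mishkal' is the unit weight in grams, so scale by mishkal/100.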
if not food_units_row.empty:
food_units_factor = food_units_row['mishkal'].values[0] / 100
val = food[feature] * food_units_factor
if units == 0:
res = "ב-%s של %s יש %.2f %s" % (food_units, food['shmmitzrach'], float(val), x)
else:
res = ""
if not is_food_units_match:
res = "לא הצלחתי למצוא נתונים במאגר על היחידה %s עליה שאלת\n" % food_units
res += "היחידות הבאות קיימות במאגר, עבור %s:\n" % food['shmmitzrach']
res += ', '.join(db_dict['food_units'][db_dict['food_units']['smlmitzrach'] == int(food['smlmitzrach'])][
'shmmida'].to_list())
res += "\n"
food_units = "100 גרם"
res += "ב-%s של %s יש %.2f %s %s" % (food_units, food['shmmitzrach'], float(val), units, x)
rda_val, rda_units, rda_text, rda_status, nutrient = get_rda(name, tracker)
if rda_val > 0 and units not in ['יחב"ל']: # FIXME: unsupported units
rda = 100 * float(val) / rda_val
res += "\n"
res += "שהם כ-%d אחוז מהקצובה היומית המומלצת %s" % (int(rda), get_personal_str(rda_status, tracker))
if rda_text and rda_text != '0':
res += '\n' + rda_text
return val, res
# ------------------------------------------------------------------
# ____ _ _ _ __ __ _
# | __ ) _ _(_) | __| | | \/ | ___ __ _| |
# | _ \| | | | | |/ _` | | |\/| |/ _ \/ _` | |
# | |_) | |_| | | | (_| | | | | | __/ (_| | |
# |____/ \__,_|_|_|\__,_| |_| |_|\___|\__,_|_|
# Dictionary that is equivalent to user inputs and filters the df_noa Database based on the inputs
def arrayToString(s):
return ' '.join([str(elem) for elem in s])
def checkDoublePattern(sentence, pattern):
temp = sentence.count(pattern)
if temp == 2:
return sentence[:sentence.find(pattern) + len(pattern)]
return sentence
def update_budgets(daily_budget, meals_num, snacks_num, weights):
    '''Takes total budget, number of meals and snacks, and weights as parameters. Returns budget for each category for every meal'''
# change 0.3 to a user params
budgets = {}
div = (meals_num + inputs.get(
'budget_var') * snacks_num) # Is this supposed to be budget_var(0.3) times snacks num or budget_var times meals_num
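    # e.g. (assuming inputs['budget_var'] == 0.3) daily_budget=1800, meals_num=3, snacks_num=2
    # -> div = 3 + 0.3*2 = 3.6, meal budget = 500.0, snack budget = 150.0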
if div > 0:
budgets['meal'] = round(daily_budget / div, 1)
budgets['snack'] = round(inputs.get('budget_var') * daily_budget / div, 1)
budgets['Carbs'] = round(weights[0] * budgets['meal'], 1)
budgets['Protein'] = round(weights[1] * budgets['meal'], 1)
budgets['Vegetables'] = round(weights[2] * budgets['meal'], 1)
budgets['Fruits'] = round(weights[3] * budgets['snack'], 1)
budgets['Fat'] = round(weights[4] * budgets['snack'], 1)
budgets['Fat_meal'] = round(weights[4] * budgets['meal'], 1)
budgets['Savoury_Snacks'] = round(weights[5] * budgets['snack'], 1)
budgets['Sweets'] = round(weights[6] * budgets['snack'], 1)
budgets['all'] = round(daily_budget, 1)
return budgets
def filter_meals_by_features(user_params, df_feature):
'''Takes user inputs and a Dataframe as parameters and returns a DataFrame filtered by the user inputs'''
for k, v in user_params.items():
if (v == 'Yes') and (debug['debug_en']):
df_feature = df_feature.loc[df_feature[k] == v]
return df_feature
def filter_meals_by_meal_type(df, meal_type):
'''Filters the DataFrame by the meal type to be used in making a scoreboard for each meal like breakfast, lunch etc.'''
if debug:
return df.loc[(df['il_' + meal_type] == 'Yes')]
def candidate_units_amounts(item, sn, items_type):
'''Returns the different options for mida amount and servings for each amount'''
sn_1 = int(item['sn_1'].values[0])
df_max_meal = df_tzameret_food_group.loc[df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_1]
units_intersection = []
amounts_intersection = []
if items_type != 'snack':
df_max_meal = df_tzameret_food_group.loc[df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_1]
max_amount_meal = df_max_meal['mida_maxAmount_meal'].values[0].replace(' ', '').split(',')
df_weights_list = df_weights[df_weights['smlmitzrach'] == sn]
weights_list = df_weights_list['mida'].tolist()
max_amount_meal_units = [int(value.split('_')[0]) for value in max_amount_meal]
max_amount_meal_amounts = [list(range(1, int(value.split('_')[1]) + 1)) for value in max_amount_meal]
for k, value in enumerate(max_amount_meal_units):
if value in weights_list:
units_intersection.append(value)
amounts_intersection.append(max_amount_meal_amounts[k])
else:
max_amount_snack = df_max_meal['mida_maxAmount_snack'].values[0].replace(' ', '').split(',')
df_weights_list = df_weights[df_weights['smlmitzrach'] == sn]
weights_list = df_weights_list['mida'].tolist()
max_amount_snack_units = [int(value.split('_')[0]) for value in max_amount_snack]
max_amount_snack_amounts = [list(range(1, int(value.split('_')[1]) + 1)) for value in max_amount_snack]
for k, value in enumerate(max_amount_snack_units):
if value in weights_list:
units_intersection.append(value)
amounts_intersection.append(max_amount_snack_amounts[k])
return units_intersection, amounts_intersection
def get_item_property(sn, grams, serving):
'''Returns the total item calories for each item'''
# if the mida is 700 then multiply by 100, if any other number divide by 100
weights = df_weights[(df_weights['smlmitzrach'] == sn) & (df_weights['mida'] == grams)]
mishkal = weights.iloc[0]['mishkal']
if mishkal == 700:
mishkal = mishkal * 100
else:
mishkal = mishkal / 100
attribute = df_nutrition.loc[df_nutrition['smlmitzrach'] == str(int(sn))]
attribute_total = attribute.iloc[0]['food_energy']
total = attribute_total * mishkal * serving
return total, weights.iloc[0]['shmmida'], weights.iloc[0]['mishkal'], weights, serving
def update_calorie_budgets(candidate_calories, item_type, bud):
'''Updates the calories budget based on how many calories were already used'''
bud[item_type] = bud[item_type] - candidate_calories
return bud
def build_meal(meals_bank, meal_type, budget):
    # make a histogram without the penalty score by running the simulator 50 times and picking the winners; run it again with the penalty score
'''Builds a meal taking a DataFrame, meal type and budget as parameters. Meal takes item from each category (Carbs, Protein etc.) and returns the meal, weighted average score and total meal calories'''
budget_weights = {**budget_weights_meals, **budget_weights_snacks_fruits_fat, **budget_weights_savoury_snacks,
**budget_weights_sweets}
bud = {}
meal_similarity_list = []
df_health = df_nutrition.iloc[1:]
max_meal_items = inputs.get('max_items_snack') if meal_type == 'snack' else inputs.get('max_items_meal')
nutrition_density_list = []
energy_density_list = []
meal_score = 0
score_list = []
uti_score = []
ind_score = []
score = 0
meals = []
meal_cals = 0
types = []
total_budget = budget.copy()
item_types = {'breakfast': ['Carbs', 'Protein', 'Vegetables'],
'lunch': ['Carbs', 'Protein', 'Vegetables'],
'dinner': ['Carbs', 'Protein', 'Vegetables'],
'snack': ['Fat']}
if (snacks.get('sweets') == 'Yes') & (len(meals_bank.loc[meals_bank['food_category'] == 'Sweets']) > 0):
item_types['snack'].append('Sweets')
if (snacks.get('Savoury_Snacks') == 'Yes') & (
len(meals_bank.loc[meals_bank['food_category'] == 'Savoury_Snacks']) > 0):
item_types['snack'].append('Savoury_Snacks')
if (user_params.get('fruits') == 'No') & (len(meals_bank.loc[meals_bank['food_category'] == 'Fruits']) > 0):
item_types['snack'].append('Fruits')
for k in range(max_meal_items):
for item_type in item_types[meal_type]:
success = False
if (len(meals_bank.loc[meals_bank['food_category'] == item_type]) > 0):
df = meals_bank.loc[meals_bank['food_category'] == item_type].sample()
candidate_units = candidate_units_amounts(df, int(df['primary_sn'].values[0]), item_type)
candidate_grams = candidate_units[0]
for can_grams in candidate_grams:
sn = float(df['primary_sn'].values[0])
for candidate_amount in candidate_units[1]:
for amount in reversed(candidate_amount):
calories, weight, grams, x, y = get_item_property(sn, can_grams, amount)
can_cals = getattr(calories, "tolist", lambda: candidate_calories)()
if can_cals < budget[item_type]:
success = True
if success:
if success:
sn_int = int(df['primary_sn'].astype(str).str[:1])
sn1 = float(df['primary_sn'].values[0])
calories1, weight, grams, x, y = get_item_property(sn1, can_grams, amount)
bud[item_type] = getattr(calories1, "tolist", lambda: candidate_calories)()
units_priority = candidate_grams.index(can_grams) + 1
meal_score += 1 / units_priority
df_sn1 = df_tzameret_food_group.loc[
df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_int]
df_fish = df_noa.loc[df_noa['primary_sn'] == sn1]
food_group = df_sn1['קבוצת המזון']
if sn_int == 2:
if df_fish['fish_free'].iloc[0] == 'Yes':
meal_similarity_list.append(2.1)
else:
meal_similarity_list.append(2.2)
else:
meal_similarity_list.append(sn_int)
item_score = (bud[item_type]) / (budget[item_type])
df['score'] = item_score
score_list.append(item_score)
types.append(df['food_category'])
nutrition_density_normalized = df_nutrition.loc[
df_nutrition['smlmitzrach'] == str(
int(sn1)), 'Nutrition density normalized']
energy_density = df_health.loc[
df_health['smlmitzrach'] == str(int(sn1)), 'Energy density']
nutrition_density_normalized = nutrition_density_normalized.astype(float)
energy_density = energy_density.astype(float)
dataframe = df[['food_name', 'primary_sn']]
dataframe.insert(2, 'Weight', [grams])
dataframe.insert(3, 'Unit', [weight])
dataframe.insert(4, 'Amount', [amount])
meals.append(dataframe)
nutrition_density_list.append(nutrition_density_normalized.values.tolist())
energy_density_list.append(energy_density.values.tolist())
meal_cals = meal_cals + calories1
budget = update_calorie_budgets(can_cals, item_type, budget)
break
if success or budget[item_type] < units_thr[item_type] or len(meals) >= max_meal_items:
break
if success or budget[item_type] < type_thr[item_type] or len(meals) >= max_meal_items:
break
if budget['all'] < inputs['item_thr'] or len(meals) >= max_meal_items:
break
if len(meals) >= max_meal_items:
break
types_list_no_duplicates = np.unique([x.values[0] for x in types]).tolist()
for each_type in reversed(types_list_no_duplicates):
each_score = (float(total_budget.get(each_type)) - float(budget.get(each_type))) / float(
total_budget.get(each_type))
ind_score.append(each_score)
uti_score.append(budget_weights.get(each_type))
if (len(ind_score) < len(item_types[meal_type])):
ind_score.append(0.000001)
uti_score.append(.35)
if (min(ind_score) < 0.7) and (meal_type != 'snack'):
extra_penalty = inputs.get('extra_penalty')
else:
extra_penalty = 0
if (len(meals)) > 4:
meal_penalty_length = (len(meals) - 4) * inputs.get('meal_penalty_length')
else:
meal_penalty_length = 0
total_utilization = sum(x * y for x, y in zip(ind_score, uti_score)) / sum(uti_score)
if len(meal_similarity_list) != len(set(meal_similarity_list)):
meal_similarity_penalty = inputs.get('meal_similarity_penalty')
else:
meal_similarity_penalty = 0
nutrition_density_list = [float(x) for [x] in nutrition_density_list]
try:
avg_nutrition = round(mean(nutrition_density_list), 4)
except:
avg_nutrition = nutrition_density_list
energy_density_list = [float(x) for [x] in energy_density_list]
avg_energy = round(mean(energy_density_list), 4)
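    # meal_score accumulates 1/units_priority per chosen item, so penalty_score is 0 when every item
    # used its top-priority unit and grows as lower-priority unit sizes were used.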
penalty_score = 1 - meal_score / len(meals)
nutrition_boost = avg_nutrition * inputs.get('nutrition_bonus')
energy_boost = avg_energy * inputs.get('energy_bonus')
if scoring.get('legacy'):
score = total_utilization - (
penalty_score * inputs.get('penalty_weight')) - extra_penalty - meal_penalty_length
elif scoring.get('legacy_nut'):
score = total_utilization - (penalty_score * inputs.get(
'penalty_weight')) - extra_penalty - meal_penalty_length + nutrition_boost
elif scoring.get('legacy_ene'):
        score = total_utilization - (
                penalty_score * inputs.get('penalty_weight')) - extra_penalty - meal_penalty_length + energy_boost
else:
score = total_utilization - (penalty_score * inputs.get(
'penalty_weight')) - extra_penalty - meal_penalty_length + energy_boost + nutrition_boost
return meals, score, meal_cals, ind_score, meal_penalty_length, avg_nutrition, avg_energy, meal_similarity_penalty, meal_similarity_list
def build_meal_wrapper():
energy_density_listy = 0.0
meal_similarity_listy = []
nutrition_density_listy = []
meal_similarity_penaltyy = []
nutrition_density_listx = []
energy_density_listx = 0.0
meal_similarity_penaltyx = []
meal_similarity_listx = []
penalty_lengthy = []
# Builds and populates a scoreboard that sorts the meals based on their score
x = -3
pd.set_option('precision', 2)
max_iterations = inputs.get('max_iter')
budget_weights = {**budget_weights_meals, **budget_weights_snacks_fruits_fat, **budget_weights_savoury_snacks,
**budget_weights_sweets}
budget_weights_list = []
for k, v in budget_weights.items():
budget_weights_list.append(v)
score_tracker = -2
total_cals = 0
meals = {}
user_meals_num = inputs.get('meals_num')
user_snacks_num = inputs.get('snacks_num')
filler = []
meal_types = ['breakfast', 'lunch', 'dinner']
for k in range(inputs.get('snacks_num')):
meal_types.append('snack')
features = filter_meals_by_features(user_params, df_noa)
for meal_type in meal_types:
bank = filter_meals_by_meal_type(features, meal_type)
x += 1
scoreboard = {}
for k in range(inputs.get('max_iter')):
budgets_dynamic = update_budgets(inputs.get('total_cals'), inputs.get('meals_num'),
inputs.get('snacks_num'), budget_weights_list)
meal_budget = update_budgets(inputs.get('total_cals'), inputs.get('meals_num'),
inputs.get('snacks_num'),
budget_weights_list)
if meal_type != 'snack':
mealy, scorey, calsy, ut_scorey, penalty_lengthy, nutrition_density_listy, energy_density_listy, meal_similarity_penaltyy, meal_similarity_listy = build_meal(
bank, meal_type, budgets_dynamic)
if mealy and scorey and min(ut_scorey) > 0:
scoreboard[meal_type] = mealy, scorey, calsy
if scoreboard[meal_type][1] > score_tracker:
score_tracker = scoreboard[meal_type][1]
total_cals = scoreboard[meal_type][2]
else:
mealx, scorex, calsx, ut_scorex, penalty_lengthx, nutrition_density_listx, energy_density_listx, meal_similarity_penaltyx, meal_similarity_listx = build_meal(
bank, meal_type, meal_budget)
if mealx:
scoreboard[
meal_type] = mealx, scorex, calsx, nutrition_density_listx, energy_density_listx, meal_similarity_penaltyx, meal_similarity_listx
if scoreboard:
meals[meal_type] = scoreboard[meal_type]
for meal_name, whole_meal in scoreboard.items():
df = pd.concat(whole_meal[0])
df = pd.DataFrame(df.values.reshape(1, -1))
df['score'] = float(scoreboard[meal_type][1])
df['meal_cals'] = scoreboard[meal_type][2]
if meal_name != 'snack':
df['name'] = meal_name
df['budget per meal'] = meal_budget.get('meal')
df['meal budget utilization'] = (df['meal_cals'] / df['budget per meal'])
df['average nutrtition'] = nutrition_density_listy
df['average energy'] = energy_density_listy
df['meal_similarity_penalty'] = meal_similarity_penaltyy
df['meal_similarity_list'] = pd.Series([meal_similarity_listy])
df.set_index('name', drop=True, inplace=True)
else:
df['name'] = meal_name + " " + str(x)
df['budget per snack'] = budgets_dynamic.get('snack')
df['snack budget utilization'] = (df['meal_cals'] / df['budget per snack'])
df['average nutrtition'] = nutrition_density_listx
df['average energy'] = energy_density_listx
df['meal_similarity_penalty'] = meal_similarity_penaltyx
df['meal_similarity_list'] = pd.Series([meal_similarity_listx])
df.set_index('name', drop=True, inplace=True)
if meal_name != 'snack':
# rename all the budget as budget leftover so its carbs budget leftover etc.
df['meal penalty length'] = penalty_lengthy
df['carb budget per meal'] = int(meal_budget.get('Carbs'))
df['carbs budget remaining'] = budgets_dynamic.get('Carbs')
df['carb budget utilization'] = (meal_budget.get('Carbs') - budgets_dynamic.get(
'Carbs')) / meal_budget.get('Carbs')
df['protein budget per meal'] = meal_budget.get('Protein')
df['protein budget remaining'] = budgets_dynamic.get('Protein')
df['protein budget utilization'] = (meal_budget.get('Protein') - budgets_dynamic.get(
'Protein')) / meal_budget.get('Protein')
df['vegetable budget per meal'] = meal_budget.get('Vegetables')
df['vegetable budget remaining'] = budgets_dynamic.get('Vegetables')
df['vegetable budget utilization'] = (meal_budget.get('Vegetables') - budgets_dynamic.get(
'Vegetables')) / meal_budget.get('Vegetables')
df['fat budget per meal'] = meal_budget.get('Fat_meal')
df['fat budget remaining'] = budgets_dynamic.get('Fat_meal')
df['fat budget utilization'] = (meal_budget.get('Fat_meal') - budgets_dynamic.get(
'Fat_meal')) / meal_budget.get('Fat_meal')
else:
if snacks.get('sweets') == "Yes":
df['sweets budget per snack'] = meal_budget.get('Sweets')
df['sweets budget remaining'] = budgets_dynamic.get('Sweets')
df['sweets budget utilization'] = (meal_budget.get('Sweets') - budgets_dynamic.get(
'Sweets')) / meal_budget.get('Sweets')
if snacks.get('Savoury_Snacks') == 'Yes':
df['savoury budget per snack'] = meal_budget.get('Savoury_Snacks')
df['savoury budget remaining'] = budgets_dynamic.get('Savoury_Snacks')
df['savoury budget utilization'] = (meal_budget.get('Savoury_Snacks') - budgets_dynamic.get(
'Savoury_Snacks')) / meal_budget.get('Savoury_Snacks')
if user_params.get('fruits') == 'No':
df['fruits budget per snack'] = meal_budget.get('Fruits')
df['fruits budget remaining'] = budgets_dynamic.get('Fruits')
df['fruits budget utilization'] = (meal_budget.get('Fruits') - budgets_dynamic.get(
'Fruits')) / meal_budget.get('Fruits')
df['fat budget per snack'] = meal_budget.get('Fat')
df['fat budget remaining'] = budgets_dynamic.get('Fat')
df['fat budget utilization'] = (meal_budget.get('Fat') - budgets_dynamic.get(
'Fat')) / meal_budget.get('Fat')
filler.append(df)
if meal_type == 'snack':
user_snacks_num -= 1
else:
user_meals_num -= 1
budgets_dynamic = update_budgets(float(inputs.get('total_cals') - total_cals), user_meals_num, user_snacks_num,
budget_weights_list)
    df_meals = pd.concat(filler)  # api: pandas.concat
from mpl_toolkits import mplot3d
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from plotnine import *
import copy, math
dist = 10
def find_min_discm_each_hyperparam(df):
x = df.sort_values(by=['Discm_percent', 'Points-Removed']).groupby("Model-count", as_index=False).first()
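    # 240 models = 20 data permutations x 3 H1 sizes x 2 H2 sizes x 2 batch sizes (the grid enumerated in process_rows below)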
assert len(x) == 240
return x
def process_rows(row, batches):
# global batches
model_count = 0
for perm in range(20):
for h1units in [16, 24, 32]:
for h2units in [8, 12]:
for batch in batches: # different batch sizes for this dataset
if perm == row['Dataperm'] and h1units == row['H1Units'] and h2units == row['H2Units'] and batch == row['Batch']:
return model_count
else:
model_count += 1
def process_dfs(name, batches, df):
# import ipdb; ipdb.set_trace()
if 'Model-count' in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
# df = df[['Model-count','Discm_percent','Test_acc']]
df = df[['Model-count','Discm_percent','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos']) * 100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x * 100)
df['Techniques'] = name
if len(name.split()) > 1:
words = name.split()
letters = [word[0] for word in words]
x = "".join(letters)
df['Baseline'] = x
else:
df['Baseline'] = name[:2]
return df
def boxplots_datasets(dataset, plot):
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos'])*100
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x*100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
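    # Adversarial debiasing appears to have one model per data permutation; Dataperm*12 places it on
    # process_rows' counter (each permutation spans 12 = 3 H1 x 2 H2 x 2 batch models).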
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff']*100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
df_nosensitive['Discm_percent'] = 0.0
df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
# df_nosensitive = process_dfs("Sensitive Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv"))
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our])
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", '0.0' ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, 'min-discm.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, 'max-test-accuracy.tex', dataset)
if test_accuracy_for_min_discm:
# for sensitive there is always 0 discrimination.
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, 'test_accuracy_for_min_discm.tex', dataset)
if median_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
"0.0",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, 'discm_for_max_accuracy.tex', dataset)
return df_main
def boxplots_datasets_dist(dataset, plot):
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count', 'Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos']) * 100 # Statistical parity diff
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x * 100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
df_adver['Model-count'] = df_adver['Dataperm'] * 12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff'] * 100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
# df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = process_dfs("Sens. Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv"))
# df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
# df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
# df_nosensitive['Discm_percent'] = 0.0
# df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
# df_nosensitive['Techniques'] = "Sens. Removed"
# df_nosensitive['Baseline'] = "SR"
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_dist{dist}.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our], sort=True)
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", f"{df_nosensitive['Discm_percent'].min():.{precision}e}" ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, f'min-discm_dist{dist}.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, f'max-test-accuracy_dist{dist}.tex', dataset)
if test_accuracy_for_min_discm:
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Discm_percent'] == df_nosensitive['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, f'test_accuracy_for_min_discm_dist{dist}.tex', dataset)
if median_discm:
raise NotImplementedError
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
raise NotImplementedError
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
raise NotImplementedError
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, f'discm_for_max_accuracy_dist{dist}.tex', dataset)
return df_main
def print_to_tex(string, file, dataset, mode=None):
    if mode is None:
if dataset == "adult":
mode = "w"
else:
mode = "a"
# with open(f"../../neurips_fairness_paper/tables/{file}", mode) as f:
with open(f"tables/{file}", mode) as f:
if dataset == "salary":
string += " \\\ \midrule"
else:
string += " \\\\ "
print(string, file=f)
# print(dataset)
# print("Min discm: ", df_DIR['Discm_percent'].min())
# print("Min discm: ", df_ps['Discm_percent'].min())
# print("Min discm: ", df_massaging['Discm_percent'].min())
# print("Min discm: ", df_lfr['Discm_percent'].min())
# print("Min discm: ", df_adver['Discm_percent'].min())
# print("Min discm: ", df_our['Discm_percent'].min())
def main(plot):
df_main = None
benchmarks = ["adult", "adult_race", "german", "student", "compas-ground", "compas-score", "default", "salary"]
for dataset in benchmarks:
# df_onedataset = boxplots_datasets(dataset, plot)
df_onedataset = boxplots_datasets_dist(dataset, plot)
        if df_main is not None:
df_main = pd.concat([df_main, df_onedataset])
else:
df_main = copy.deepcopy(df_onedataset)
print(f"Done {dataset}")
if plot == 0:
return
labels = ['FU', 'SR', 'DIR', 'PS', 'MA', 'LFR', 'AD', 'Our']
tech_cat = pd.Categorical(df_main['Baseline'], categories=labels)
df_main = df_main.assign(Technique_x = tech_cat)
dataset_order = ["D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8"]
data_cat = pd.Categorical(df_main['Dataset'], categories=dataset_order)
df_main = df_main.assign(Dataset_x = data_cat)
# x = (ggplot(aes(x='Technique_x', y='Discm_percent', color='Techniques'), data=df_main) +\
# geom_boxplot() +\
# facet_wrap(['Dataset'], scales = 'free', nrow=2, labeller='label_both', shrink=False) + \
# ylab("Remaining Individual Discrimination") + \
# xlab("Discrimination reducing techniques") + \
# # ylim(0, 20) + \
# # ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
# theme(axis_text_x = element_text(size=6), dpi=151) + \
# theme_seaborn()
# )
# color='Techniques' (used in the commented-out block above) is what produces the legend; it is omitted below so no legend is drawn
x = (ggplot(aes(x='Technique_x', y='Discm_percent'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Remaining Individual Discrimination") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 20) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
x = x.draw()
x.set_figwidth(20)
x.set_figheight(12)
for ax in range(len(benchmarks)):
low_limit = -0.05
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Discm_percent'].max()
if top_limit > 20:
top_limit = 20
if top_limit > 13: # These hacks are for aligning the 0 at the bottom of the plots.
low_limit = -0.3
x.axes[ax].set_ylim(low_limit, top_limit)
# x.tight_layout() # This didn't work
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
# x.save(f"boxplot_discm_freeaxis_matplotlib.png", height=8, width=18)
# x.save(f"boxplot_discm_freeaxis_withoutfull.png", height=12, width=15)
# x.save(f"boxplot_discm_fixedaxis.png", height=5, width=12)
y = (ggplot(aes(x='Technique_x', y='Test_acc'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Test Accuracy") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 100) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
# y.save(f"boxplot_accuracy_freeaxis.png", height=8, width=18)
y = y.draw()
y.set_figwidth(20)
y.set_figheight(12)
for ax in range(len(benchmarks)):
bot_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].min()
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].max()
y.axes[ax].set_ylim(bot_limit - 1, top_limit + 2)
# y.tight_layout()
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
def real_accuracy_tables(debiased):
dataset = "compas-score"
if debiased:
deb = "debiased"
else:
deb = "full"
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique()))
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent']]
df_our_2 = pd.read_csv(f"{dataset}/results_our_real_accuracy_{deb}.csv")
df_our_final = pd.merge(df_our, df_our_2, on=['Model-count'])
df_our_final['Test_acc'] = df_our_final['Test_acc'].apply(lambda x: x*100)
df_our_final['Techniques'] = "Our Technique"
df_our_final['Baseline'] = "Our"
# import ipdb; ipdb.set_trace()
# Massaging
df_massaging = process_dfs("Massaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
df_massaging.drop(columns=['Test_acc'], inplace=True)
df_massaging_2 = pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_real_accuracy_{deb}.csv")
df_massaging_final = pd.merge(df_massaging, df_massaging_2, on=['Model-count'])
df_massaging_final['Test_acc'] = df_massaging_final['Test_acc'].apply(lambda x: x*100)
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
df_ps.drop(columns=['Test_acc'], inplace=True)
df_ps_2 = pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_real_accuracy_{deb}.csv")
df_ps_final = pd.merge(df_ps, df_ps_2, on=['Model-count'])
df_ps_final['Test_acc'] = df_ps_final['Test_acc'].apply(lambda x: x*100)
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
df_lfr.drop(columns=['Test_acc'], inplace=True)
df_lfr_2 = pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_real_accuracy_{deb}.csv")
df_lfr_final = pd.merge(df_lfr, df_lfr_2, on=['Model-count'])
df_lfr_final['Test_acc'] = df_lfr_final['Test_acc'].apply(lambda x: x*100)
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
df_DIR.drop(columns=['Test_acc'], inplace=True)
df_DIR_2 = pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_real_accuracy_{deb}.csv")
df_DIR_final =
|
pd.merge(df_DIR, df_DIR_2, on=['Model-count'])
|
pandas.merge
|
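# Illustrative sketch (added for clarity, not part of the original row): pd.merge joins the
# model-selection results with the separately computed real-accuracy file on the shared
# 'Model-count' key, exactly as in the snippet above. The tiny frames are invented.
import pandas as pd
discm = pd.DataFrame({'Model-count': [1, 2, 3], 'Discm_percent': [4.2, 3.1, 5.0]})
real_acc = pd.DataFrame({'Model-count': [1, 2, 3], 'Test_acc': [0.81, 0.79, 0.83]})
merged = pd.merge(discm, real_acc, on=['Model-count'])   # inner join by default
merged['Test_acc'] = merged['Test_acc'] * 100            # same rescaling as above
print(merged)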
import typing
import dataclasses
import pandas as pd
import datetime
import tqdm
from ._fetch_keywords import _fetch_keywords
from lib.twitter.auth import TwitterAuth, GetTwitterAuth
from lib.twitter.tweets.tweet_counts import (
MakeRequest,
Params,
ConvertTweetCount,
TweetCount,
)
from lib.twitter import SendRequest
from lib.aws_util.s3.download import download_from_s3
from lib.aws_util.s3.upload import upload_to_s3
@dataclasses.dataclass
class Result():
word: str
tweet_count: TweetCount
def get_tweet_counts() -> None:
SECRET_NAME = 'adam-twitter'
auth = GetTwitterAuth.from_secrets_manager(SECRET_NAME)
send = SendRequest(auth)
dt = datetime.datetime.now()
end = dt - datetime.timedelta(seconds=10)
start = end - datetime.timedelta(days=1)
results: typing.List[Result] = []
for word in tqdm.tqdm(_fetch_keywords()):
params = Params(query=word)
params.start_time, params.end_time = start, end
res = send(MakeRequest()(params)).json()
data = res.get('data', None)
if data is None: continue
for tw in data:
results.append(Result(word, ConvertTweetCount()(tw)))
ls = [
{
'search_word': res.word,
**dataclasses.asdict(res.tweet_count),
}
for res in results
]
df = pd.DataFrame(ls)
__store(df)
def __store(df: pd.DataFrame) -> None:
bucket = 'av-adam-store'
save_path = '/tmp/tweet_counts.csv'
obj = 'twitter/tweet_counts.csv'
date = str(datetime.datetime.now().date())
df['updated_at'] = date
download_from_s3(bucket, obj, save_path)
old_df = pd.read_csv(save_path)
df =
|
pd.concat((old_df, df), ignore_index=True)
|
pandas.concat
|
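# Illustrative sketch (an assumption, not from the original script): appending the freshly
# fetched tweet counts to the previously stored CSV with pd.concat; ignore_index=True rebuilds
# a clean RangeIndex. Column names are invented for the example.
import pandas as pd
old_df = pd.DataFrame({'search_word': ['a'], 'tweet_count': [10], 'updated_at': ['2021-01-01']})
new_df = pd.DataFrame({'search_word': ['b'], 'tweet_count': [7], 'updated_at': ['2021-01-02']})
combined = pd.concat((old_df, new_df), ignore_index=True)
print(combined)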
import pandas as pd
import requests
import json
from lxml import html
from dateutil.parser import parse
def scrap_page_and_fetch_news_data(category, news_page_url):
news_df = pd.DataFrame(
columns=[
"category",
"headline",
"description",
"url",
"image_url",
"posted_date",
]
)
# finding the last entry in the pagination to find the total pages present for the particular category
last_page_xpath = "//div[contains(@class,'listng_pagntn clear')]/a[contains(@class,'btnLnk arrowBtn next')]/preceding-sibling::a[position()=1]"
page = requests.get(news_page_url)
tree = html.fromstring(page.content)
try:
total_pages = tree.xpath(last_page_xpath + "/text()")[0]
except:
total_pages = 1
headline_list = []
description_list = []
image_url_list = []
url_list = []
posted_date_list = []
for page in range(1, int(total_pages) + 1):
page_url = f"{news_page_url}/page-{page}"
page = requests.get(page_url)
tree = html.fromstring(page.content)
news_header_xpath = "//h2[contains(@class,'newsHdng')]/a"
headline_elements = tree.xpath(news_header_xpath)
for i in range(1, int(len(headline_elements)) + 1):
try:
news_headline = tree.xpath(f"({news_header_xpath})[{i}]/text()")[
0
] # *headline
except IndexError:
news_headline = None
try:
news_url = headline_elements[i - 1].get("href") # *url
except:
news_url = None
description_xpath = (
f"({news_header_xpath})[{i}]/parent::h2/following-sibling::p/text()"
)
try:
description = tree.xpath(description_xpath)[0] # *description
except IndexError:
description = None
img_xpath = f"({news_header_xpath})[{i}]/parent::h2/parent::div/preceding-sibling::div/a/img"
try:
img_url = tree.xpath(img_xpath)[0].get("src") # *image_url
except IndexError:
img_url = None
posted_date_xpath = (
f"({news_header_xpath})[{i}]/parent::h2/following-sibling::span/text()"
)
try:
posted_date_span = tree.xpath(posted_date_xpath) # *posted date
posted_date = None
for text in posted_date_span:
try:
posted_date = parse(text, fuzzy=True).date()
except:
pass
except IndexError:
posted_date = None
headline_list.append(news_headline)
description_list.append(description)
image_url_list.append(img_url)
url_list.append(news_url)
posted_date_list.append(posted_date)
news_df["headline"] = headline_list
news_df["description"] = description_list
news_df["url"] = url_list
news_df["image_url"] = image_url_list
news_df["posted_date"] = posted_date_list
news_df = news_df.assign(category=category)
return news_df
# main_news_dataframe = pd.DataFrame(
# columns=["category", "headline", "description", "url", "image_url", "posted_date"]
# )
# available_categories = {"latest": "https://www.ndtv.com/latest"}
available_categories = {
"latest": "https://www.ndtv.com/latest",
"india": "https://www.ndtv.com/india",
"world": "https://www.ndtv.com/world-news",
"science": "https://www.ndtv.com/science",
"business": "https://www.ndtv.com/business/latest",
"entertainment": "https://www.ndtv.com/entertainment/latest",
"offbeat": "https://www.ndtv.com/offbeat",
"crypto": "https://www.ndtv.com/business/cryptocurrency/news",
}
finalData =
|
pd.DataFrame()
|
pandas.DataFrame
|
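# Hedged sketch of one plausible continuation (the original script is cut off here): start from
# the empty frame and concatenate the per-category results returned by
# scrap_page_and_fetch_news_data. This is an assumption about the author's intent, and it
# performs live HTTP requests, so it is illustrative only.
import pandas as pd
finalData = pd.DataFrame()
for category, url in available_categories.items():
    category_df = scrap_page_and_fetch_news_data(category, url)
    finalData = pd.concat([finalData, category_df], ignore_index=True)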
"""
BSD 3-Clause License
Copyright (c) 2019, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
from pandas.api.types import is_object_dtype, is_unsigned_integer_dtype
from snps import SNPs
from snps.utils import gzip_file, zip_file, Singleton
class BaseSNPsTestCase(TestCase):
def setUp(self):
Singleton._instances = {}
self.del_output_dir_helper()
# def tearDown(self):
# self.del_output_dir_helper()
@staticmethod
def del_output_dir_helper():
if os.path.exists("output"):
shutil.rmtree("output")
def simulate_snps(
self,
chrom="1",
pos_start=1,
pos_max=248140902,
pos_step=100,
genotype="AA",
insert_nulls=True,
null_snp_step=101,
complement_genotype_one_chrom=False,
complement_genotype_two_chroms=False,
complement_snp_step=50,
):
s = SNPs()
s._build = 37
positions = np.arange(pos_start, pos_max, pos_step, dtype=np.uint32)
snps = pd.DataFrame(
{"chrom": chrom},
index=pd.Index(
["rs" + str(x + 1) for x in range(len(positions))], name="rsid"
),
)
snps["pos"] = positions
snps["genotype"] = genotype
if insert_nulls:
snps.loc[snps.iloc[0::null_snp_step, :].index, "genotype"] = np.nan
indices = snps.iloc[0::complement_snp_step, :].index
if complement_genotype_two_chroms:
snps.loc[indices, "genotype"] = snps.loc[indices, "genotype"].apply(
self.complement_two_chroms
)
elif complement_genotype_one_chrom:
snps.loc[indices, "genotype"] = snps.loc[indices, "genotype"].apply(
self.complement_one_chrom
)
s._snps = snps
return s
@property
def downloads_enabled(self):
""" Property indicating if downloads are enabled.
Only download from external resources when an environment variable named
"DOWNLOADS_ENABLED" is set to "true".
Returns
-------
bool
"""
return os.getenv("DOWNLOADS_ENABLED") == "true"
@staticmethod
def get_complement(base):
if base == "A":
return "T"
elif base == "G":
return "C"
elif base == "C":
return "G"
elif base == "T":
return "A"
else:
return base
def complement_one_chrom(self, genotype):
if pd.isnull(genotype):
return np.nan
complement = ""
for base in list(genotype):
complement += self.get_complement(base)
complement += genotype[1]
return complement
def complement_two_chroms(self, genotype):
if pd.isnull(genotype):
return np.nan
complement = ""
for base in list(genotype):
complement += self.get_complement(base)
return complement
@staticmethod
def create_snp_df(rsid, chrom, pos, genotype):
df = pd.DataFrame(
{"rsid": rsid, "chrom": chrom, "pos": pos, "genotype": genotype},
columns=["rsid", "chrom", "pos", "genotype"],
)
df.rsid = df.rsid.astype(object)
df.chrom = df.chrom.astype(object)
df.pos = df.pos.astype(np.uint32)
df.genotype = df.genotype.astype(object)
df = df.set_index("rsid")
return df
def load_assign_PAR_SNPs(self, path):
""" Load and assign PAR SNPs.
If downloads are not enabled, use a minimal subset of the real responses.
Parameters
----------
path : str
Returns
-------
SNPs
References
----------
1. National Center for Biotechnology Information, Variation Services, RefSNP,
https://api.ncbi.nlm.nih.gov/variation/v0/
2. Yates et. al. (doi:10.1093/bioinformatics/btu613),
`<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_
3. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
4. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;
29(1):308-11.
5. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession:
rs28736870, rs113313554, rs758419898, and rs113378274 (dbSNP Build ID: 151).
Available from: http://www.ncbi.nlm.nih.gov/SNP/
"""
effects = [
{
"refsnp_id": "758419898",
"create_date": "2015-04-1T22:25Z",
"last_update_date": "2019-07-14T04:19Z",
"last_update_build_id": "153",
"primary_snapshot_data": {
"placements_with_allele": [
{
"seq_id": "NC_000024.9",
"placement_annot": {
"seq_id_traits_by_assembly": [
{"assembly_name": "GRCh37.p13"}
]
},
"alleles": [
{
"allele": {
"spdi": {
"seq_id": "NC_000024.9",
"position": 7364103,
}
}
}
],
}
]
},
},
{
"refsnp_id": "28736870",
"create_date": "2005-05-24T14:43Z",
"last_update_date": "2019-07-14T04:18Z",
"last_update_build_id": "153",
"primary_snapshot_data": {
"placements_with_allele": [
{
"seq_id": "NC_000023.10",
"placement_annot": {
"seq_id_traits_by_assembly": [
{"assembly_name": "GRCh37.p13"}
]
},
"alleles": [
{
"allele": {
"spdi": {
"seq_id": "NC_000023.10",
"position": 220769,
}
}
}
],
}
]
},
},
{
"refsnp_id": "113313554",
"create_date": "2010-07-4T18:13Z",
"last_update_date": "2019-07-14T04:18Z",
"last_update_build_id": "153",
"primary_snapshot_data": {
"placements_with_allele": [
{
"seq_id": "NC_000024.9",
"placement_annot": {
"seq_id_traits_by_assembly": [
{"assembly_name": "GRCh37.p13"}
]
},
"alleles": [
{
"allele": {
"spdi": {
"seq_id": "NC_000024.9",
"position": 535257,
}
}
}
],
}
]
},
},
{
"refsnp_id": "113378274",
"create_date": "2010-07-4T18:14Z",
"last_update_date": "2016-03-3T10:51Z",
"last_update_build_id": "147",
"merged_snapshot_data": {"merged_into": ["72608386"]},
},
{
"refsnp_id": "72608386",
"create_date": "2009-02-14T01:08Z",
"last_update_date": "2019-07-14T04:05Z",
"last_update_build_id": "153",
"primary_snapshot_data": {
"placements_with_allele": [
{
"seq_id": "NC_000023.10",
"placement_annot": {
"seq_id_traits_by_assembly": [
{"assembly_name": "GRCh37.p13"}
]
},
"alleles": [
{
"allele": {
"spdi": {
"seq_id": "NC_000023.10",
"position": 91941055,
}
}
}
],
}
]
},
},
]
if self.downloads_enabled:
return SNPs(path, assign_par_snps=True, deduplicate_XY_chrom=False)
else:
mock = Mock(side_effect=effects)
with patch("snps.ensembl.EnsemblRestClient.perform_rest_action", mock):
return SNPs(path, assign_par_snps=True, deduplicate_XY_chrom=False)
def _get_test_assembly_mapping_data(self, source, target, strands, mappings):
return {
"1": {
"mappings": [
{
"original": {
"seq_region_name": "1",
"strand": strands[0],
"start": mappings[0],
"end": mappings[0],
"assembly": f"{source}",
},
"mapped": {
"seq_region_name": "1",
"strand": strands[1],
"start": mappings[1],
"end": mappings[1],
"assembly": f"{target}",
},
},
{
"original": {
"seq_region_name": "1",
"strand": strands[2],
"start": mappings[2],
"end": mappings[2],
"assembly": f"{source}",
},
"mapped": {
"seq_region_name": "1",
"strand": strands[3],
"start": mappings[3],
"end": mappings[3],
"assembly": f"{target}",
},
},
{
"original": {
"seq_region_name": "1",
"strand": strands[4],
"start": mappings[4],
"end": mappings[4],
"assembly": f"{source}",
},
"mapped": {
"seq_region_name": "1",
"strand": strands[5],
"start": mappings[5],
"end": mappings[5],
"assembly": f"{target}",
},
},
]
},
"3": {
"mappings": [
{
"original": {
"seq_region_name": "3",
"strand": strands[6],
"start": mappings[6],
"end": mappings[6],
"assembly": f"{source}",
},
"mapped": {
"seq_region_name": "3",
"strand": strands[7],
"start": mappings[7],
"end": mappings[7],
"assembly": f"{target}",
},
}
]
},
}
def NCBI36_GRCh37(self):
return self._get_test_assembly_mapping_data(
"NCBI36",
"GRCh37",
[1, 1, 1, 1, 1, 1, 1, -1],
[
742429,
752566,
143649677,
144938320,
143649678,
144938321,
50908372,
50927009,
],
)
def GRCh37_NCBI36(self):
return self._get_test_assembly_mapping_data(
"GRCh37",
"NCBI36",
[1, 1, 1, 1, 1, 1, 1, -1],
[
752566,
742429,
144938320,
143649677,
144938321,
143649678,
50927009,
50908372,
],
)
def GRCh37_GRCh38(self):
return self._get_test_assembly_mapping_data(
"GRCh37",
"GRCh38",
[1, 1, 1, -1, 1, -1, 1, 1],
[
752566,
817186,
144938320,
148946169,
144938321,
148946168,
50927009,
50889578,
],
)
def GRCh37_GRCh38_PAR(self):
return {
"X": {
"mappings": [
{
"original": {
"seq_region_name": "X",
"strand": 1,
"start": 220770,
"end": 220770,
"assembly": "GRCh37",
},
"mapped": {
"seq_region_name": "X",
"strand": 1,
"start": 304103,
"end": 304103,
"assembly": "GRCh38",
},
},
{
"original": {
"seq_region_name": "X",
"strand": 1,
"start": 91941056,
"end": 91941056,
"assembly": "GRCh37",
},
"mapped": {
"seq_region_name": "X",
"strand": 1,
"start": 92686057,
"end": 92686057,
"assembly": "GRCh38",
},
},
]
},
"Y": {
"mappings": [
{
"original": {
"seq_region_name": "Y",
"strand": 1,
"start": 535258,
"end": 535258,
"assembly": "GRCh37",
},
"mapped": {
"seq_region_name": "Y",
"strand": 1,
"start": 624523,
"end": 624523,
"assembly": "GRCh38",
},
}
]
},
}
def snps_NCBI36(self):
return self.create_snp_df(
rsid=["rs3094315", "rs2500347", "rsIndelTest", "rs11928389"],
chrom=["1", "1", "1", "3"],
pos=[742429, 143649677, 143649678, 50908372],
genotype=["AA", np.nan, "ID", "AG"],
)
def snps_GRCh37(self):
return self.create_snp_df(
rsid=["rs3094315", "rs2500347", "rsIndelTest", "rs11928389"],
chrom=["1", "1", "1", "3"],
pos=[752566, 144938320, 144938321, 50927009],
genotype=["AA", np.nan, "ID", "TC"],
)
def snps_GRCh38(self):
return self.create_snp_df(
rsid=["rs3094315", "rsIndelTest", "rs2500347", "rs11928389"],
chrom=["1", "1", "1", "3"],
pos=[817186, 148946168, 148946169, 50889578],
genotype=["AA", "ID", np.nan, "TC"],
)
def snps_GRCh37_PAR(self):
return self.create_snp_df(
rsid=["rs28736870", "rs113378274", "rs113313554", "rs758419898"],
chrom=["X", "X", "Y", "PAR"],
pos=[220770, 91941056, 535258, 1],
genotype=["AA", "AA", "AA", "AA"],
)
def snps_GRCh38_PAR(self):
return self.create_snp_df(
rsid=["rs28736870", "rs113378274", "rs113313554"],
chrom=["X", "X", "Y"],
pos=[304103, 92686057, 624523],
genotype=["AA", "AA", "AA"],
)
def generic_snps(self):
return self.create_snp_df(
rsid=["rs" + str(i) for i in range(1, 9)],
chrom=["1"] * 8,
pos=list(range(101, 109)),
genotype=["AA", "CC", "GG", "TT", np.nan, "GC", "TC", "AT"],
)
def generic_snps_vcf(self):
df = self.generic_snps()
return df.append(
self.create_snp_df(
rsid=["rs" + str(i) for i in range(12, 18)],
chrom=["1"] * 6,
pos=list(range(112, 118)),
genotype=[np.nan] * 6,
)
)
def run_parsing_tests(
self, file, source, phased=False, build=37, build_detected=False, snps_df=None
):
self.make_parsing_assertions(
self.parse_file(file), source, phased, build, build_detected, snps_df
)
self.make_parsing_assertions(
self.parse_bytes(file), source, phased, build, build_detected, snps_df
)
with tempfile.TemporaryDirectory() as tmpdir:
base = os.path.basename(file)
dest = os.path.join(tmpdir, f"{base}.gz")
gzip_file(file, dest)
self.make_parsing_assertions(
self.parse_file(dest), source, phased, build, build_detected, snps_df
)
self.make_parsing_assertions(
self.parse_bytes(dest), source, phased, build, build_detected, snps_df
)
# remove .gz extension
shutil.move(dest, dest[:-3])
self.make_parsing_assertions(
self.parse_file(dest[:-3]),
source,
phased,
build,
build_detected,
snps_df,
)
dest = os.path.join(tmpdir, f"{base}.zip")
zip_file(file, dest, base)
self.make_parsing_assertions(
self.parse_file(dest), source, phased, build, build_detected, snps_df
)
self.make_parsing_assertions(
self.parse_bytes(dest), source, phased, build, build_detected, snps_df
)
# remove .zip extension
shutil.move(dest, dest[:-4])
self.make_parsing_assertions(
self.parse_file(dest[:-4]),
source,
phased,
build,
build_detected,
snps_df,
)
def run_parsing_tests_vcf(
self,
file,
source="vcf",
phased=False,
unannotated=False,
rsids=(),
build=37,
build_detected=False,
snps_df=None,
):
# https://samtools.github.io/hts-specs/VCFv4.2.pdf
# this tests for homozygous snps, heterozygous snps, multiallelic snps,
# phased snps, and snps with missing rsID
self.make_parsing_assertions_vcf(
self.parse_file(file, rsids),
source,
phased,
unannotated,
rsids,
build,
build_detected,
snps_df,
)
self.make_parsing_assertions_vcf(
self.parse_bytes(file, rsids),
source,
phased,
unannotated,
rsids,
build,
build_detected,
snps_df,
)
with tempfile.TemporaryDirectory() as tmpdir:
base = os.path.basename(file)
dest = os.path.join(tmpdir, f"{base}.gz")
gzip_file(file, dest)
self.make_parsing_assertions_vcf(
self.parse_file(dest, rsids),
source,
phased,
unannotated,
rsids,
build,
build_detected,
snps_df,
)
self.make_parsing_assertions_vcf(
self.parse_bytes(dest, rsids),
source,
phased,
unannotated,
rsids,
build,
build_detected,
snps_df,
)
# remove .gz extension
shutil.move(dest, dest[:-3])
self.make_parsing_assertions_vcf(
self.parse_file(dest[:-3], rsids),
source,
phased,
unannotated,
rsids,
build,
build_detected,
snps_df,
)
def make_normalized_dataframe_assertions(self, df):
self.assertEqual(df.index.name, "rsid")
self.assertTrue(is_object_dtype(df.index.dtype))
self.assertTrue(is_object_dtype(df.chrom.dtype))
self.assertTrue(is_unsigned_integer_dtype(df.pos.dtype))
self.assertTrue(
|
is_object_dtype(df.genotype.dtype)
|
pandas.api.types.is_object_dtype
|
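# Standalone sketch of the dtype checks used above: is_object_dtype / is_unsigned_integer_dtype
# from pandas.api.types applied to a toy frame shaped like the normalized SNP table
# (rsid index, chrom/pos/genotype columns). Values are invented.
import numpy as np
import pandas as pd
from pandas.api.types import is_object_dtype, is_unsigned_integer_dtype
toy = pd.DataFrame(
    {"chrom": ["1"], "pos": np.array([752566], dtype=np.uint32), "genotype": ["AA"]},
    index=pd.Index(["rs3094315"], name="rsid"),
)
assert is_object_dtype(toy.index.dtype)        # rsid stored as object
assert is_object_dtype(toy["chrom"].dtype)     # chromosome labels are strings
assert is_unsigned_integer_dtype(toy["pos"].dtype)
assert is_object_dtype(toy["genotype"].dtype)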
# coding=utf-8
import pandas as pd
from sqlalchemy import create_engine
from tabulate import tabulate
# pd.set_option('display.max_columns', None)
|
pd.set_option('display.width', 5000)
|
pandas.set_option
|
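# Minimal illustration of pd.set_option for console display, covering the two options touched
# above; the width value is arbitrary.
import pandas as pd
pd.set_option('display.max_columns', None)   # show every column
pd.set_option('display.width', 5000)         # avoid wrapping wide frames
print(pd.get_option('display.width'))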
# -*- coding: utf-8 -*-
"""
Created on Sun May 16 18:08:28 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
"""
from database import postSQL2gpd,gpd2postSQL
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def idx_clustering(idxes_df,field,n_clusters=10):
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn import cluster
from shapely.geometry import Point
import geopandas as gpd
import pyproj
pts_geometry=idxes_df[['geometry']].copy() # explicit copy avoids SettingWithCopyWarning when adding x/y below
pts_geometry[['x','y']]=pts_geometry.geometry.apply(lambda row:pd.Series([row.x,row.y]))
# print(pts_geometry)
pts_coordis=pts_geometry[['x','y']].to_numpy()
# print(pts_coordis)
nbrs=NearestNeighbors(n_neighbors=9, algorithm='ball_tree').fit(pts_coordis)
connectivity=nbrs.kneighbors_graph(pts_coordis)
# print(connectivity.toarray())
X=np.expand_dims(idxes_df[field].to_numpy(),axis=1)
# print(X.shape)
clustering=cluster.AgglomerativeClustering(connectivity=connectivity,n_clusters=n_clusters).fit(X)
# print(clustering.labels_.shape)
idxes_df['clustering_'+field]=clustering.labels_
mean=idxes_df.groupby(['clustering_'+field])[field].mean() #.reset_index()
idxes_df['clustering_'+field+'_mean']=idxes_df['clustering_'+field].map(mean.to_dict())
wgs84=pyproj.CRS('EPSG:4326')
idxes_df_gdf=gpd.GeoDataFrame(idxes_df,geometry=idxes_df.geometry,crs=wgs84)
return idxes_df_gdf
def idxes_clustering(idxes_df,fields,n_clusters=10):
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn import cluster
from shapely.geometry import Point
import geopandas as gpd
import pyproj
from sklearn.preprocessing import normalize
pts_geometry=idxes_df[['geometry']].copy()
pts_geometry[['x','y']]=pts_geometry.geometry.apply(lambda row:pd.Series([row.x,row.y]))
# print(pts_geometry)
pts_coordis=pts_geometry[['x','y']].to_numpy()
# print(pts_coordis)
nbrs=NearestNeighbors(n_neighbors=9, algorithm='ball_tree').fit(pts_coordis)
connectivity=nbrs.kneighbors_graph(pts_coordis)
# print(connectivity.toarray())
X_=idxes_df[fields].to_numpy()
X=normalize(X_,axis=0, norm='max')
# print(X.shape)
# print(idxes_df[fields].to_numpy().shape)
clustering=cluster.AgglomerativeClustering(connectivity=connectivity,n_clusters=n_clusters).fit(X)
# print(clustering.labels_.shape)
idxes_df['clustering']=clustering.labels_
idxes_df['clustering_']=idxes_df.clustering.apply(lambda row:row+1)
wgs84=pyproj.CRS('EPSG:4326')
xian_epsg=pyproj.CRS('EPSG:32649') #Xi'an WGS84 / UTM zone 49N
idxes_df_gdf=gpd.GeoDataFrame(idxes_df,geometry=idxes_df.geometry,crs=wgs84)
idxes_df_gdf=idxes_df_gdf.to_crs(xian_epsg)
return idxes_df_gdf
def Nclusters_sihouette_analysis(idxes_df,fields,range_n_clusters=[2,3,4,5,6]):
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn import cluster
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
for n_clusters in tqdm(range_n_clusters):
pts_geometry=idxes_df[['geometry']].copy()
pts_geometry[['x','y']]=pts_geometry.geometry.apply(lambda row:pd.Series([row.x,row.y]))
# print(pts_geometry)
pts_coordis=pts_geometry[['x','y']].to_numpy()
# print(pts_coordis)
nbrs=NearestNeighbors(n_neighbors=9, algorithm='ball_tree').fit(pts_coordis)
connectivity=nbrs.kneighbors_graph(pts_coordis)
# print(connectivity.toarray())
X_=idxes_df[fields].to_numpy()
X=normalize(X_,axis=0, norm='max')
# X=PCA(n_components=2).fit_transform(X_)
# print(X.shape)
# print(idxes_df[fields].to_numpy().shape)
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
ax1.set_xlim([-1, 1])
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
clusterer=cluster.AgglomerativeClustering(connectivity=connectivity,n_clusters=n_clusters,compute_distances=True).fit(X)
print('distance:{}'.format(clusterer.distances_))
# clusterer = cluster.KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels=clusterer.fit_predict(X)
silhouette_avg=silhouette_score(X, cluster_labels) # The silhouette_score gives the average value for all the samples.
print('\n')
print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg) # Compute the silhouette scores for each sample
sample_silhouette_values=silhouette_samples(X, cluster_labels)
y_lower=10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# # Labeling the clusters
# centers = clusterer.cluster_centers_
# # Draw white circles at cluster centers
# ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
# plt.show()
# Z = linkage(X, method='ward')
# plt.figure()
# dendrogram(Z)
# plt.show()
# break
plt.show()
def idxes_clustering_contribution(idxes_df,fields,n_clusters=10):
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn import cluster
from shapely.geometry import Point
import geopandas as gpd
import pyproj
from sklearn.feature_selection import chi2, SelectKBest, f_classif
from sklearn import preprocessing
from sklearn.preprocessing import normalize
from yellowbrick.cluster import KElbowVisualizer
from yellowbrick.features import Manifold
# import matplotlib
# font = {
# # 'family' : 'normal',
# # 'weight' : 'bold',
# 'size' : 28}
# matplotlib.rc('font', **font)
pts_geometry=idxes_df[['geometry']].copy()
pts_geometry[['x','y']]=pts_geometry.geometry.apply(lambda row:pd.Series([row.x,row.y]))
# print(pts_geometry)
pts_coordis=pts_geometry[['x','y']].to_numpy()
# print(pts_coordis)
nbrs=NearestNeighbors(n_neighbors=9, algorithm='ball_tree').fit(pts_coordis)
connectivity=nbrs.kneighbors_graph(pts_coordis)
# print(connectivity.toarray())
X_=idxes_df[fields].to_numpy()
X=normalize(X_,axis=0, norm='max')
# print(X.shape)
# print(idxes_df[fields].to_numpy().shape)
clustering=cluster.AgglomerativeClustering(connectivity=connectivity,n_clusters=n_clusters).fit(X)
# print(clustering.labels_.shape)
# idxes_df['clustering']=clustering.labels_
# wgs84=pyproj.CRS('EPSG:4326')
# idxes_df_gdf=gpd.GeoDataFrame(idxes_df,geometry=idxes_df.geometry,crs=wgs84)
y=clustering.labels_
selector=SelectKBest(score_func=f_classif, k=len(fields)) #score_func=chi2
selector.fit(X,y)
# scores=-np.log10(selector.pvalues_)
# scores /= scores.max()
# X_indices = np.arange(X.shape[-1])
# print(scores)
# plt.bar(X_indices - .45, scores, width=.2,label=r'Univariate score ($-Log(p_{value})$)')
dfscores =
|
pd.DataFrame(selector.scores_)
|
pandas.DataFrame
|
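# Hedged sketch of how the SelectKBest scores above are commonly tabulated: wrap
# selector.scores_ and the field names in DataFrames and join them so every index gets a named
# ANOVA F-score. The ranking step is an assumption about how dfscores is used afterwards;
# the random data is purely illustrative.
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectKBest, f_classif
X = np.random.rand(100, 3)
y = np.random.randint(0, 4, size=100)
fields = ['idx_a', 'idx_b', 'idx_c']
selector = SelectKBest(score_func=f_classif, k=len(fields)).fit(X, y)
dfscores = pd.DataFrame(selector.scores_)
dfcolumns = pd.DataFrame(fields)
feature_scores = pd.concat([dfcolumns, dfscores], axis=1)
feature_scores.columns = ['field', 'score']
print(feature_scores.sort_values('score', ascending=False))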
"""
manage creating new PipelineII stream
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/pipeline/stream.py,v 1.6 2018/01/27 15:38:26 burnett Exp $
"""
import os, datetime, glob, argparse
import numpy as np
import pandas as pd
class PipelineStream(object):
""" manage starting streams
Assume that POINTLIKE_DIR is defined, and that current directory is a skymodel
"""
def __init__(self, fn='summary_log.txt'):
self.pointlike_dir=os.path.expandvars('$POINTLIKE_DIR')
self.summary= os.path.join(self.pointlike_dir,fn)
self.fullskymodel = os.getcwd()
assert os.path.exists(self.summary), 'File "%s" not found' % self.summary
# now may have config.yaml; but that checked by uwpipeline
# assert os.path.exists('config.txt') or os.path.exists('../config.txt'), \
# 'File config.txt not found in %s or its parent' %self.fullskymodel
t =self.fullskymodel.split('/')
self.model = '/'.join(t[t.index('skymodels')+1:])
with open(self.summary, 'r') as slog:
lines = slog.read().split('\n')
last = lines[-1] if lines[-1] != '' else lines[-2]
try:
self.stream_number = int(last.split()[0])
except:
print ('failed to interpret stream number from file %s: last=%s' % (self.summary, last))
raise
lastsp = last.split()
self.last_job=dict(stream=self.stream_number, skymodel=lastsp[3],
stage=lastsp[4], model=lastsp[3], job_list=lastsp[5])
def __call__(self, stage, job_list, test=True):
"""Submit a command to the Pipeline to create a stream
"""
pipeline='/afs/slac/g/glast/ground/bin/pipeline -m PROD createStream --stream %d -D "%s" UWpipeline '
self.stream_number += 1
time = str(datetime.datetime.today())[:16]
line = '%5d %s %-15s %-10s %s' % (self.stream_number, time, self.model, stage, job_list)
print (line)
if not test:
with open(self.summary, 'a') as slog:
slog.write('\n'+line)
os.environ['SKYMODEL_SUBDIR']=self.fullskymodel
cmd=pipeline % (self.stream_number,
"stage=%s, POINTLIKE_DIR=%s, SKYMODEL_SUBDIR=%s, job_list=%s"
% (stage, self.pointlike_dir, self.fullskymodel, os.path.expandvars(job_list))
)
if not test:
print ('-->' , cmd)
os.system(cmd)
else:
print ('Test mode: would have submitted \n\t%s'%cmd)
self.stream_number -=1
def restart(self, jobs=None, rois=True, test=True, ):
"""Restart the presumably hung jobs, by starting a new stream, the same stage as the current one,
but with only the specified (hung) substreamss
Do this by creating a file with the substream info, with a file name the same as the hung stream
Its presence should cause any jobs in the current stream to abort when starting.
jobs: list of int | None
the substream ids.
rois : bool
if True, create a substream for each unfinished ROI
"""
ss = SubStreamStats()
if jobs is None:
jobs = ss.todo_list()
print ('Found {} jobs to restart'.format(len(jobs)))
if len(jobs)==0:
print ('no jobs to restart')
return
assert self.model==self.last_job['skymodel'], 'Must run in same folder as last job'
# get the current job_list
# substreams=t = list(substreamids(self.last_job['job_list']))+[1728]
# assert np.all([job in substreams for job in jobs]), 'job problem'
last_stream = str(self.last_job['stream'])
if test:
last_stream= 'test_{}'.format(last_stream)
with open(last_stream, 'w') as out:
# for job in jobs:
# i = t.index(job)
# j,k = t[i:i+2]
# out.write('{:5d}{:5d}\n'.format( j,k) )
for i in ss.todo_list():
out.write('{:5d}\n'.format(i))
if test:
print ('Testing: Created file "{}" to rerun substreams: {}'.format(last_stream, jobs))
print ('Now starting test of restart')
self(self.last_job['stage'], '$SKYMODEL_SUBDIR/'+last_stream, test)
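# Hedged usage sketch for the class above (assumes $POINTLIKE_DIR is set, the current directory
# is a skymodel, and summary_log.txt exists; the stage name and job list are placeholders):
# with test=True the pipeline command is only printed, not submitted.
ps = PipelineStream()
ps('update', 'joblist.txt', test=True)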
def substreamids(job_list):
# check the substream ids
jl = os.path.expandvars(job_list)
if not os.path.exists(jl):
jl = os.path.expandvars('$POINTLIKE_DIR/infrastructure/'+ job_list)
assert os.path.exists(jl), 'Did not find file {}'.format(jl)
lines = open(jl).read().split('\n')
sslist= []
assert len(lines)>1, 'Bad file {}'.format(jl)
for line in lines:
if not line or line[0] == '#': continue
sslist.append(line.split()[0])
return np.array(sslist,int)
class StreamInfo(dict):
"""A dictionary of the stream info
"""
def __init__(self, model=None, fn='summary_log.txt'):
"""
model : string, default None
If set, used as a filter: the name of a model, e.g. "P301_6years/uw972", or a group ending with a wildcard;
if '.', filter using the current directory.
"""
self.pointlike_dir=os.path.expandvars('$POINTLIKE_DIR')
self.summary= os.path.join(self.pointlike_dir,fn)
t = open(self.summary,'r').read().split('\n')
if model=='.':
model = '/'.join(os.getcwd().split('/')[-2:])
for line in t[2:]:
if len(line)==0:
continue
tokens = line.split()
if len(tokens)<6:
print ('bad line, %s: %d tokens' % (line, len(tokens)))
continue
if model is None or model == tokens[3] or (model[-1] == '*' and tokens[3].startswith(model[:-1])):
self[int(tokens[0])] = dict(stage=tokens[4], date=tokens[1]+' '+tokens[2], model=tokens[3],
job_list=tokens[5],)
def __call__(self, stream ):
return self[stream]
def recent_stream(model_name=None, filter=None):
""" return a dict, key the model name of the most recent stream, with stage and date
model_name : str | None
the full name of the model, e.g. P302_8years/uw8000. If None, use the current folder path
"""
if model_name is None: model_name='/'.join(os.getcwd().split('/')[-2:])
sinfo = StreamInfo(model_name)
sdf =
|
pd.DataFrame(sinfo)
|
pandas.DataFrame
|
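# Hedged sketch of what pd.DataFrame does with a StreamInfo-style mapping of stream number ->
# {stage, date, model, job_list}: each stream becomes a column, so a transpose (.T) yields one
# row per stream. The sample stream numbers and stages are invented.
import pandas as pd
sinfo = {
    101: dict(stage='create', date='2018-01-01 10:00', model='P302_8years/uw8000', job_list='joblist.txt'),
    102: dict(stage='update', date='2018-01-02 11:30', model='P302_8years/uw8000', job_list='joblist.txt'),
}
sdf = pd.DataFrame(sinfo).T        # rows indexed by stream number
latest = sdf.iloc[-1]              # most recent stream
print(latest['stage'], latest['date'])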
import torch
import pandas as pd
import numpy as np
import torch.nn as nn
import torchvision.datasets as datasets
from pathlib import Path
import libs.dirs as dirs
import libs.commons as commons
import libs.utils as utils
import libs.dataset_utils as dutils
import models.utils as mutils
from models.trainer_class import TrainModel
from libs.vis_functions import plot_outputs_histogram
from libs.iteration_manager import SampleImages
if __name__ == "__main__":
iteration = int(input("Enter iteration number.\n"))
seed = np.random.randint(0, 100)
rede = 3
epochs = 150
inferBatchSize = 64
upperThreshPercent = 0.99
if rede == 3:
target_class = dutils.get_input_target_class(commons.rede3_classes)
datasetName = "full_dataset_rede_{}_{}".format(rede, target_class.lower())
else:
target_class = None
datasetName = "full_dataset_rede_{}".format(rede)
def get_iter_folder(iteration):
return Path(dirs.iter_folder) / "{}/iteration_{}/".format(datasetName, iteration)
remoteDatasetFolder = Path(dirs.dataset) / "all_datasets_1s"
previousIterFolder = get_iter_folder(iteration-1)
iterFolder = get_iter_folder(iteration)
nextIterFolder = get_iter_folder(iteration+1)
sampledImageFolder = iterFolder / "sampled_images"
savedModelsFolder = Path(dirs.saved_models) / \
"{}/iteration_{}".format(datasetName, iteration)
valSetFolder = sampledImageFolder / "val/"
imageResultsFolder = Path(dirs.results) / \
"{}/iteration_{}".format(datasetName, iteration)
modelPath = savedModelsFolder / \
"{}_{}_epochs_iteration_{}.pt".format(datasetName, epochs, iteration)
historyPath = savedModelsFolder / \
"history_{}_{}_epochs_iteration_{}.pickle".format(datasetName, epochs, iteration)
valOutputPath = savedModelsFolder / \
"outputs_{}_validation_iteration_{}.pickle".format(datasetName, iteration)
fullOutputPath = savedModelsFolder / \
"outputs_{}_{}_epochs_iteration_{}.pickle".format(datasetName, epochs, iteration)
# originalUnlabeledIndexPath = get_iter_folder(0) / "unlabeled_images_iteration_0.csv"
originalUnlabeledIndexPath = get_iter_folder(0) / "reference_images.csv"
unlabeledIndexPath = previousIterFolder / "unlabeled_images_iteration_{}.csv".format(iteration-1)
sampledIndexPath = iterFolder / "sampled_images_iteration_{}.csv".format(iteration)
manualIndexPath = iterFolder / "manual_annotated_images_iteration_{}.csv".format(iteration)
splitIndexPath = iterFolder / (manualIndexPath.stem + "_train_val_split.csv")
autoLabelIndexPath = iterFolder / "automatic_labeled_images_iteration_{}.csv".format(iteration)
mergedIndexPath = iterFolder / "final_annotated_images_iteration_{}.csv".format(iteration)
previousMergedIndexPath = previousIterFolder / "final_annotated_images_iteration_{}.csv".format(iteration-1)
unlabelNoManualPath = iterFolder / "unlabeled_no_manual_iteration_{}.csv".format(iteration)
newUnlabeledIndexPath = iterFolder / "unlabeled_images_iteration_{}.csv".format(iteration)
unlabelHistogramPath = imageResultsFolder / "histogram_unlabeled_outputs_iteration_{}.pdf".format(iteration)
valHistogramPath = imageResultsFolder / "histogram_validation_outputs_iteration_{}.pdf".format(iteration)
reportPath = iterFolder/"report_iteration_{}.txt".format(iteration)
seedLogPath = iterFolder / "seeds.txt"
## Dataset Inference on Validation set to find thresholds
print("\nSTEP: Perform inference on val set.")
# ImageNet statistics
mean = commons.IMAGENET_MEAN
std = commons.IMAGENET_STD
# Set transforms
dataTransforms = mutils.resnet_transforms(mean, std)
# Perform inference on validation set and save outputs to file
outputDf = mutils.dataset_inference_val(valSetFolder, dataTransforms['val'], modelPath,
valOutputPath, batch_size=inferBatchSize, seed=seed)
# Compute decision thresholds
print("\nSTEP: Compute decision thresholds.")
valOutputs, imgHashes, labels = dutils.load_outputs_df(valOutputPath)
upperThresh, lowerThresh = dutils.compute_thresholds(valOutputs,
labels,
upper_ratio=upperThreshPercent,
lower_ratio=0.01,
resolution=0.0001,#resolution='max',
val_indexes=imgHashes)
# Plot validation outputs histogram
plot_outputs_histogram(valOutputs[:, 0], labels, lowerThresh, upperThresh, show=False,
save_path = valHistogramPath, log=True)
## Perform inference on entire unlabeled dataset
# Get unlabeled set without manual_annotated_images
originalUnlabeledIndex =
|
pd.read_csv(originalUnlabeledIndexPath)
|
pandas.read_csv
|
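# Hedged sketch of the read-and-filter step implied above, using in-memory CSVs so it runs
# standalone; the 'FrameHash' column name and the file contents are assumptions made purely
# for illustration.
import io
import pandas as pd
reference_csv = io.StringIO("FrameHash,label\nh1,\nh2,\nh3,\n")
manual_csv = io.StringIO("FrameHash,label\nh2,Confirmed\n")
originalUnlabeledIndex = pd.read_csv(reference_csv)
manualIndex = pd.read_csv(manual_csv)
unlabelNoManual = originalUnlabeledIndex[
    ~originalUnlabeledIndex["FrameHash"].isin(manualIndex["FrameHash"])
]
print(len(unlabelNoManual), "images remain unlabeled")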
import numpy as np
import pandas as pd
import pytest
from lookback import models
class TestChangeDatesGeneralCase:
@pytest.fixture
def shape_data(self):
shape_df = pd.DataFrame(
data={
'shape_key': ['uts_co_S1', 'uts_co_S2', 'uts_co_S3', 'uts_co_S4'],
'START_DATE': ['2020-02-05', '2020-02-20', '2020-02-25', '2020-03-01'],
'END_DATE': ['2020-02-19', '2020-02-24', '2020-02-28', '2021-01-01'],
},
)
shape_df['END_DATE'] = pd.to_datetime(shape_df['END_DATE'])
shape_df['START_DATE'] = pd.to_datetime(shape_df['START_DATE'])
shape_df.set_index('START_DATE', drop=False, inplace=True)
return shape_df
@pytest.fixture
def district_data(self):
district_df = pd.DataFrame(
data={
'NewDistrict': ['1', '2', '3', '4'],
'district_key': ['co_D1', 'co_D2', 'co_D3', 'co_D4'],
'StartDate': ['2020-02-10', '2020-02-15', '2020-02-25', '2020-03-01'],
'EndDate': ['2020-02-14', '2020-02-24', '2020-02-28', None],
},
)
district_df['StartDate'] = pd.to_datetime(district_df['StartDate'])
district_df['EndDate'] = pd.to_datetime(district_df['EndDate'])
district_df.set_index('StartDate', drop=False, inplace=True)
return district_df
def test_change_dates_shape_before_district(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[0, test_columns].values.tolist() == [
np.datetime64('2020-02-05'), 'co', 'uts_co_S1', 'n/a', 'n/a'
]
def test_change_dates_new_district_existing_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[1, test_columns].values.tolist() == [
np.datetime64('2020-02-10'), 'co', 'uts_co_S1', '1', 'co_D1'
]
def test_change_dates_change_district_same_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[2, test_columns].values.tolist() == [
np.datetime64('2020-02-15'), 'co', 'uts_co_S1', '2', 'co_D2'
]
def test_change_dates_same_district_change_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[3, test_columns].values.tolist() == [
np.datetime64('2020-02-20'), 'co', 'uts_co_S2', '2', 'co_D2'
]
def test_change_dates_change_district_change_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[4, test_columns].values.tolist() == [
np.datetime64('2020-02-25'), 'co', 'uts_co_S3', '3', 'co_D3'
]
def test_change_dates_change_district_change_shape_no_district_end_date(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[5, test_columns].values.tolist() == [
np.datetime64('2020-03-01'), 'co', 'uts_co_S4', '4', 'co_D4'
]
class TestChangeDatesRichlandCase:
@pytest.fixture
def richland_shape_data(self):
shape_df = pd.DataFrame(
data={
'shape_key': ['uts_richland_S1', 'uts_rich_S1', 'uts_rich_S2'],
'START_DATE': ['2020-02-01', '2020-02-10', '2020-02-15'],
'END_DATE': ['2020-02-09', '2020-02-14', '2020-02-24'],
},
)
shape_df['END_DATE'] = pd.to_datetime(shape_df['END_DATE'])
shape_df['START_DATE'] = pd.to_datetime(shape_df['START_DATE'])
shape_df.set_index('START_DATE', drop=False, inplace=True)
return shape_df
@pytest.fixture
def richland_district_data(self):
district_df = pd.DataFrame(
data={
'NewDistrict': ['1', '1', '2'],
'district_key': ['richland_D1', 'rich_D1', 'rich_D2'],
'StartDate': ['2020-02-05', '2020-02-10', '2020-02-20'],
'EndDate': [None, '2020-02-19', None],
},
)
district_df['StartDate'] = pd.to_datetime(district_df['StartDate'])
district_df['EndDate'] = pd.to_datetime(district_df['EndDate'])
district_df.set_index('StartDate', drop=False, inplace=True)
return district_df
def test_change_dates_richland_shape_no_district(self, mocker, richland_shape_data, richland_district_data):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[0, test_columns].values.tolist() == [
np.datetime64('2020-02-01'), 'rich', 'uts_richland_S1', 'n/a', 'n/a'
]
def test_change_dates_richland_shape_richland_district(self, mocker, richland_shape_data, richland_district_data):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[1, test_columns].values.tolist() == [
np.datetime64('2020-02-05'), 'rich', 'uts_richland_S1', '1', 'richland_D1'
]
def test_change_dates_rich_shape_rich_district(self, mocker, richland_shape_data, richland_district_data):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[2, test_columns].values.tolist() == [
np.datetime64('2020-02-10'), 'rich', 'uts_rich_S1', '1', 'rich_D1'
]
def test_change_dates_rich_shape_rich_district_no_district_end_date(
self, mocker, richland_shape_data, richland_district_data
):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[4, test_columns].values.tolist() == [
np.datetime64('2020-02-20'), 'rich', 'uts_rich_S2', '2', 'rich_D2'
]
class TestAddingExtraFields:
def test_unique_district_key_creation(self, mocker):
county_mock = mocker.Mock()
county_mock.joined_df = pd.DataFrame(
data={
'change_date': ['2020-01-01', '2030-01-01'],
'district_number': ['1', '1S'],
'EndDate': ['2029-12-31', '2039-12-31'],
'county_version': ['uts_foo_S1', 'uts_foo_S1'],
'END_DATE': ['2029-12-31', '2039-12-31'],
}
)
county_mock.joined_df['change_date'] = pd.to_datetime(county_mock.joined_df['change_date'])
models.County.add_extra_fields(county_mock)
test_columns = ['COMBINED_DST_KEY']
assert county_mock.joined_df.loc[0, test_columns].values.tolist() == ['1_2020-01-01']
assert county_mock.joined_df.loc[1, test_columns].values.tolist() == ['1S_2030-01-01']
def test_end_date_normal_county_mid_cycle_district_changes(self, mocker):
county_mock = mocker.Mock()
county_mock.joined_df = pd.DataFrame(
data={
'change_date': ['2020-01-01', '2030-01-01', '2040-01-01'], #: 3 so that there's a +1 for end date calc
'district_number': ['1', '1S', '2S'],
'EndDate': ['2029-12-31', '2039-12-31', '2049-12-31'], #: District end date
'county_version': ['uts_foo_S1', 'uts_foo_S1', 'uts_foo_S1'], #: uts_something_Sx
'END_DATE': ['2049-12-31', '2049-12-31', '2049-12-31'], #: shape end date
}
)
county_mock.joined_df['change_date'] = pd.to_datetime(county_mock.joined_df['change_date'])
county_mock.joined_df['END_DATE'] = pd.to_datetime(county_mock.joined_df['END_DATE'])
county_mock.joined_df['EndDate'] = pd.to_datetime(county_mock.joined_df['EndDate'])
models.County.add_extra_fields(county_mock)
test_columns = ['change_end_date']
assert county_mock.joined_df.loc[0, test_columns].values.tolist() == [np.datetime64('2029-12-31')]
assert county_mock.joined_df.loc[1, test_columns].values.tolist() == [np.datetime64('2039-12-31')]
def test_end_date_normal_county_mid_cycle_shape_changes(self, mocker):
county_mock = mocker.Mock()
county_mock.joined_df = pd.DataFrame(
data={
'change_date': ['2020-01-01', '2030-01-01', '2040-01-01'], #: 3 so that there's a +1 for end date calc
'district_number': ['1', '1', '1'],
'EndDate': ['2049-12-31', '2049-12-31', '2049-12-31'], #: District end date
'county_version': ['uts_foo_S1', 'uts_foo_S2', 'uts_foo_S3'], #: uts_something_Sx
'END_DATE': ['2029-12-31', '2039-12-31', '2049-12-31'], #: shape end date
}
)
county_mock.joined_df['change_date'] = pd.to_datetime(county_mock.joined_df['change_date'])
county_mock.joined_df['END_DATE'] = pd.to_datetime(county_mock.joined_df['END_DATE'])
county_mock.joined_df['EndDate'] = pd.to_datetime(county_mock.joined_df['EndDate'])
models.County.add_extra_fields(county_mock)
test_columns = ['change_end_date']
assert county_mock.joined_df.loc[0, test_columns].values.tolist() == [np.datetime64('2029-12-31')]
assert county_mock.joined_df.loc[1, test_columns].values.tolist() == [np.datetime64('2039-12-31')]
def test_end_date_normal_county_mid_cycle_both_change(self, mocker):
county_mock = mocker.Mock()
county_mock.joined_df = pd.DataFrame(
data={
'change_date': ['2020-01-01', '2030-01-01', '2040-01-01'], #: 3 so that there's a +1 for end date calc
'district_number': ['1', '1S', '5'],
'EndDate': ['2029-12-31', '2039-12-31', '2049-12-31'], #: District end date
'county_version': ['uts_foo_S1', 'uts_foo_S2', 'uts_foo_S3'], #: uts_something_Sx
'END_DATE': ['2029-12-31', '2039-12-31', '2049-12-31'], #: shape end date
}
)
county_mock.joined_df['change_date'] = pd.to_datetime(county_mock.joined_df['change_date'])
county_mock.joined_df['END_DATE'] = pd.to_datetime(county_mock.joined_df['END_DATE'])
county_mock.joined_df['EndDate'] = pd.to_datetime(county_mock.joined_df['EndDate'])
models.County.add_extra_fields(county_mock)
test_columns = ['change_end_date']
assert county_mock.joined_df.loc[0, test_columns].values.tolist() == [np.datetime64('2029-12-31')]
assert county_mock.joined_df.loc[1, test_columns].values.tolist() == [np.datetime64('2039-12-31')]
def test_end_date_normal_county_end_district_end_notatime(self, mocker):
county_mock = mocker.Mock()
county_mock.joined_df = pd.DataFrame(
data={
'change_date': ['2020-01-01', '2030-01-01'], #: only two rows in this case
'district_number': ['1', '1S'],
'EndDate': ['2029-12-31', None], #: District end date
'county_version': ['uts_foo_S1', 'uts_foo_S1'], #: uts_something_Sx
'END_DATE': ['2029-12-31', '2037-07-07'], #: shape end date (shape ends before current time)
}
)
county_mock.joined_df['change_date'] = pd.to_datetime(county_mock.joined_df['change_date'])
county_mock.joined_df['END_DATE'] = pd.to_datetime(county_mock.joined_df['END_DATE'])
county_mock.joined_df['EndDate'] = pd.to_datetime(county_mock.joined_df['EndDate'])
models.County.add_extra_fields(county_mock)
test_columns = ['change_end_date']
assert county_mock.joined_df.loc[0, test_columns].values.tolist() == [np.datetime64('2029-12-31')]
assert pd.isnull(county_mock.joined_df.loc[1, test_columns].values.tolist()[0])
def test_end_date_extinct_county_end_both_same_date(self, mocker):
county_mock = mocker.Mock()
county_mock.joined_df = pd.DataFrame(
data={
'change_date': ['2020-01-01', '2030-01-01'], #: only two rows in this case
'district_number': ['1', '1S'],
'EndDate': ['2029-12-31', '2037-07-07'], #: District end date
'county_version': ['utt_extinct_S1', 'utt_extinct_S1'], #: utt for territory, all utt except richland
#: are extinct
'END_DATE': ['2029-12-31', '2037-07-07'], #: shape end date
}
)
county_mock.joined_df['change_date'] = pd.to_datetime(county_mock.joined_df['change_date'])
county_mock.joined_df['END_DATE'] = pd.to_datetime(county_mock.joined_df['END_DATE'])
county_mock.joined_df['EndDate'] = pd.to_datetime(county_mock.joined_df['EndDate'])
models.County.add_extra_fields(county_mock)
test_columns = ['change_end_date']
assert county_mock.joined_df.loc[0, test_columns].values.tolist() == [np.datetime64('2029-12-31')]
assert county_mock.joined_df.loc[1, test_columns].values.tolist() == [np.datetime64('2037-07-07')]
class TestCountyKeyGeneration:
def test_shapefile_uts_to_county_key(self):
assert models.create_county_key('uts_saltlake') == 'saltlake'
def test_shapefile_utt_to_county_key(self):
assert models.create_county_key('utt_shambip') == 'shambip'
def test_shapefile_ter_to_county_key(self):
assert models.create_county_key('utt') == 'utt'
def test_district_complex_to_county_key(self):
assert models.create_county_key('St. Marys') == 'stmarys'
def test_richland_rename_county_key(self):
assert models.create_county_key('Richland') == 'rich'
def test_nulls_to_none():
test_df = pd.DataFrame(data={'number': [None, None], 'date': [None, None]})
test_df['number'] = test_df['number'].astype('float64')
test_df['date'] = pd.to_datetime(test_df['date'])
none_rows = []
for row in test_df.values.tolist():
none_rows.append(models.nulls_to_nones(row))
for row in none_rows:
assert row == [None, None]
def test_nulls_to_none_normals_ok():
test_df = pd.DataFrame(data={'number': [1.0, None], 'date': ['2021-03-01', None]})
test_df['number'] = test_df['number'].astype('float64')
test_df['date'] = pd.to_datetime(test_df['date'])
none_rows = []
# for row in test_df.values.tolist():
# none_rows.append(models.nulls_to_nones(row))
none_rows.append(models.nulls_to_nones(test_df.loc[0, :].values.tolist()))
none_rows.append(models.nulls_to_nones(test_df.loc[1, :].values.tolist()))
assert none_rows[0] == [1.0, np.datetime64('2021-03-01')]
assert none_rows[1] == [None, None]
class TestSetups:
@pytest.fixture
def all_districts_df(self):
district_df = pd.DataFrame(
data={
'CountyName': ['grand', 'richland', 'rich', 'shambip'],
'StartDate': ['2020-02-10', '2020-02-15', '2020-02-25', '2020-03-01'],
'EndDate': ['2020-02-14', '2020-02-24', '2020-02-28', None],
'NewDistrict': ['1', '1', '2', '4'],
'OldDistrict': ['1', '1', '1', '4'],
'Version': ['1', '1', '2', '1'],
},
)
return district_df
def test_load_districts_simple(self, mocker, all_districts_df):
pd.read_csv = mocker.Mock(return_value=all_districts_df)
test_state = models.State()
test_state.load_districts('foo')
assert test_state.all_districts_df.loc[0, :].values.tolist() == [
'grand',
np.datetime64('2020-02-10'),
np.datetime64('2020-02-14'),
'1',
'1',
'1',
'grand',
'grand_D1',
]
def test_load_districts_rich(self, mocker, all_districts_df):
pd.read_csv = mocker.Mock(return_value=all_districts_df)
test_state = models.State()
test_state.load_districts('foo')
assert test_state.all_districts_df.loc[1, :].values.tolist() == [
'richland',
np.datetime64('2020-02-15'),
np.datetime64('2020-02-24'),
'1',
'1',
'1',
'rich',
'richland_D1',
]
assert test_state.all_districts_df.loc[2, :].values.tolist() == [
'rich',
np.datetime64('2020-02-25'),
np.datetime64('2020-02-28'),
'2',
'1',
'2',
'rich',
'rich_D2',
]
def test_state_setup_richland_into_rich(self, mocker):
all_districts_df = pd.DataFrame(
data={
'CountyName': ['grand', 'richland', 'rich', 'shambip'],
'StartDate': ['2020-02-10', '2020-02-15', '2020-02-25', '2020-03-01'],
'EndDate': ['2020-02-14', '2020-02-24', '2020-02-28', None],
'NewDistrict': ['1', '1', '2', '4'],
'OldDistrict': ['1', '1', '1', '4'],
'Version': ['1', '1', '2', '1'],
'county_key': ['grand', 'rich', 'rich', 'shambip'],
'district_key': ['grand_D1', 'richland_D1', 'rich_D2', 'shambip_D1']
},
)
mocker.patch('lookback.models.County')
test_state = mocker.Mock()
test_state.all_districts_df = all_districts_df
test_state.counties = []
models.State.setup_counties(test_state)
assert len(test_state.counties) == 3
def test_county_setup_richland_into_rich(self, mocker):
all_districts_df = pd.DataFrame(
data={
'CountyName': ['grand', 'richland', 'rich', 'shambip'],
'StartDate': ['2020-02-10', '2020-02-15', '2020-02-25', '2020-03-01'],
'EndDate': ['2020-02-14', '2020-02-24', '2020-02-28', None],
'NewDistrict': ['1', '1', '2', '4'],
'OldDistrict': ['1', '1', '1', '4'],
'Version': ['1', '1', '2', '1'],
'county_key': ['grand', 'rich', 'rich', 'shambip'],
'district_key': ['grand_D1', 'richland_D1', 'rich_D2', 'shambip_D1']
},
)
all_shapes_df = pd.DataFrame(
data={
'NAME': ['RICHLAND', 'RICH', 'SHAMBIP'],
'ID': ['uts_rich', 'uts_rich', 'utt_shambip'],
'START_DATE': ['2020-02-15', '2020-02-25', '2020-03-01'],
'county_key': ['rich', 'rich', 'shambip']
}
)
rich = models.County('rich')
rich.setup(all_shapes_df, all_districts_df)
assert 'RICH' in rich.shape_df['NAME'].unique()
assert 'RICHLAND' in rich.shape_df['NAME'].unique()
assert 'SHAMBIP' not in rich.shape_df['NAME'].unique()
assert 'rich' in rich.district_df['CountyName'].unique()
assert 'richland' in rich.district_df['CountyName'].unique()
assert 'grand' not in rich.district_df['CountyName'].unique()
        assert 'shambip' not in rich.district_df['CountyName'].unique()
class TestFinalJoins:
@pytest.fixture
def shape_data(self):
shape_df = pd.DataFrame(
data={
'shape_key': ['uts_co_S1', 'uts_co_S2', 'uts_co_S3', 'uts_co_S4'],
'START_DATE': ['2020-02-05', '2020-02-20', '2020-02-25', '2020-03-01'],
'END_DATE': ['2020-02-19', '2020-02-24', '2020-02-28', '2021-01-01'],
},
)
shape_df['END_DATE'] = pd.to_datetime(shape_df['END_DATE'])
shape_df['START_DATE'] = pd.to_datetime(shape_df['START_DATE'])
shape_df.set_index('START_DATE', drop=False, inplace=True)
return shape_df
@pytest.fixture
def district_data(self):
district_df = pd.DataFrame(
data={
'NewDistrict': ['1', '2', '3', '4'],
'district_key': ['co_D1', 'co_D2', 'co_D3', 'co_D4'],
'StartDate': ['2020-02-10', '2020-02-15', '2020-02-25', '2020-03-01'],
'EndDate': ['2020-02-14', '2020-02-24', '2020-02-28', None],
},
)
district_df['StartDate'] = pd.to_datetime(district_df['StartDate'])
district_df['EndDate'] = pd.to_datetime(district_df['EndDate'])
district_df.set_index('StartDate', drop=False, inplace=True)
return district_df
@pytest.fixture
def change_dates_df(self):
change_dates_df = pd.DataFrame(
data={
'change_date': ['2020-02-05', '2020-02-10', '2020-02-15', '2020-02-20', '2020-02-25', '2020-03-01'],
'county_name': ['co', 'co', 'co', 'co', 'co', 'co'],
'county_version': ['uts_co_S1', 'uts_co_S1', 'uts_co_S1', 'uts_co_S2', 'uts_co_S3', 'uts_co_S4'],
'district_number': ['n/a', '1', '2', '2', '3', '4'],
'district_version': ['n/a', 'co_D1', 'co_D2', 'co_D2', 'co_D3', 'co_D4'],
'change_end_date': ['2020-02-09', '2020-02-14', '2020-02-19', '2020-02-24', '2020-02-29', None],
}
)
change_dates_df['change_date'] = pd.to_datetime(change_dates_df['change_date'])
change_dates_df['change_end_date'] = pd.to_datetime(change_dates_df['change_end_date'])
return change_dates_df
def test_join_shapes_and_districts_basic_check(self, mocker, change_dates_df, shape_data, district_data):
county_mock = mocker.Mock(spec=models.County)
county_mock.change_dates_df = change_dates_df
county_mock.shape_df = shape_data
county_mock.district_df = district_data
models.County.join_shapes_and_districts(county_mock)
assert county_mock.joined_df.loc[1, :].values.tolist() == [
#: change_dates_df
np.datetime64('2020-02-10'),
'co',
'uts_co_S1',
'1',
'co_D1',
np.datetime64('2020-02-14'),
#: shape_df
'uts_co_S1',
np.datetime64('2020-02-05'),
np.datetime64('2020-02-19'),
#: district_df
'1',
'co_D1',
np.datetime64('2020-02-10'),
np.datetime64('2020-02-14'),
]
def test_join_shapes_and_districts_no_district(self, mocker, change_dates_df, shape_data, district_data):
county_mock = mocker.Mock(spec=models.County)
county_mock.change_dates_df = change_dates_df
county_mock.shape_df = shape_data
county_mock.district_df = district_data
models.County.join_shapes_and_districts(county_mock)
assert models.nulls_to_nones(county_mock.joined_df.loc[0, :].values.tolist()) == [
#: change_dates_df
np.datetime64('2020-02-05'),
'co',
'uts_co_S1',
'n/a',
'n/a',
np.datetime64('2020-02-09'),
#: shape_df
'uts_co_S1',
np.datetime64('2020-02-05'),
np.datetime64('2020-02-19'),
#: district_df should be all Nones thanks to nulls_to_nones()
None,
None,
None,
None,
]
def test_join_shapes_and_districts_no_shape(self, mocker):
county_mock = mocker.Mock(spec=models.County)
#: Set up shape data
county_mock.shape_df = pd.DataFrame(
data={
'shape_key': ['uts_co_S1', 'uts_co_S2'],
'START_DATE': ['2020-02-05', '2020-02-20'],
'END_DATE': ['2020-02-19', '2020-02-24'],
},
)
county_mock.shape_df['END_DATE'] = pd.to_datetime(county_mock.shape_df['END_DATE'])
county_mock.shape_df['START_DATE'] = pd.to_datetime(county_mock.shape_df['START_DATE'])
county_mock.shape_df.set_index('START_DATE', drop=False, inplace=True)
#: Set up district data
county_mock.district_df = pd.DataFrame(
data={
'NewDistrict': ['1', '2', '3'],
'district_key': ['co_D1', 'co_D2', 'co_D3'],
'StartDate': ['2020-02-01', '2020-02-05', '2020-02-20'],
'EndDate': ['2020-02-04', '2020-02-19', None],
},
)
county_mock.district_df['StartDate'] = pd.to_datetime(county_mock.district_df['StartDate'])
county_mock.district_df['EndDate'] = pd.to_datetime(county_mock.district_df['EndDate'])
county_mock.district_df.set_index('StartDate', drop=False, inplace=True)
#: Set up change dates data
county_mock.change_dates_df = pd.DataFrame(
data={
'change_date': ['2020-02-01', '2020-02-05', '2020-02-20'],
'county_name': ['co', 'co', 'co'],
'county_version': ['n/a', 'uts_co_S1', 'uts_co_S2'],
'district_number': ['1', '2', '3'],
'district_version': ['co_D1', 'co_D2', 'co_D3'],
'change_end_date': ['2020-02-04', '2020-02-19', None],
}
)
county_mock.change_dates_df['change_date'] = pd.to_datetime(county_mock.change_dates_df['change_date'])
county_mock.change_dates_df['change_end_date'] = pd.to_datetime(county_mock.change_dates_df['change_end_date'])
models.County.join_shapes_and_districts(county_mock)
assert models.nulls_to_nones(county_mock.joined_df.loc[0, :].values.tolist()) == [
#: change_dates_df
np.datetime64('2020-02-01'),
'co',
'n/a',
'1',
'co_D1',
np.datetime64('2020-02-04'),
#: shape_df should be all Nones thanks to nulls_to_nones()
None,
None,
None,
#: district_df
'1',
'co_D1',
np.datetime64('2020-02-01'),
np.datetime64('2020-02-04'),
]
class TestDistricts:
@pytest.fixture
def joined_df(self):
joined_df = pd.DataFrame(
data={
'COUNTY_KEY': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'],
'DST_NUMBER': ['1', '1', '2', '1', '2', '3'],
'COUNTY_EFFECTIVE_DATE': ['date1', 'date2', 'date3', 'date4', 'date5', 'date6'],
}
)
return joined_df
def test_setup_gets_just_the_districts_data(self, mocker, joined_df):
        district_mock = mocker.Mock(spec=models.District)
models.District.__init__(district_mock, '1', joined_df)
assert district_mock.district_records['DST_NUMBER'].unique().tolist() == ['1']
assert district_mock.district_records['COUNTY_KEY'].unique().tolist() == ['foo', 'bar']
assert district_mock.district_records['COUNTY_EFFECTIVE_DATE'].unique().tolist() == ['date1', 'date2', 'date4']
def test_build_versions_dataframes(self, mocker):
district_mock = mocker.Mock()
district_mock.label = '1'
district_mock.row_key_and_versions = {
's1_d1': [
np.datetime64('2020-01-01'),
np.datetime64('2021-01-01'),
],
's1_d2': [
np.datetime64('2022-01-01'),
np.datetime64('2023-01-01'),
],
}
models.District.build_versions_dataframe(district_mock)
assert district_mock.versions_df['UNIQUE_ROW_KEY'].tolist() == ['s1_d1', 's1_d1', 's1_d2', 's1_d2']
assert district_mock.versions_df['DST_VERSION_KEY'].tolist() == [
'1_2020-01-01',
'1_2021-01-01',
'1_2022-01-01',
'1_2023-01-01',
]
def test_join_version_information_record_occurs_twice(self, mocker):
district_mock = mocker.Mock()
district_mock.versions_df = pd.DataFrame(
data={
'UNIQUE_ROW_KEY': ['s1_d1', 's2_d1', 's1_d1'],
'DST_KEY': ['1_2020-01-01', '1_2020-01-01', '1_2021-01-01']
}
)
district_mock.district_records = pd.DataFrame(
data={
'UNIQUE_ROW_KEY': ['s1_d1', 's2_d1'],
'geometry': ['geometry1', 'geometry2']
}
)
models.District.join_version_information(district_mock)
assert district_mock.versions_full_info_df['UNIQUE_ROW_KEY'].tolist() == ['s1_d1', 's2_d1', 's1_d1']
assert district_mock.versions_full_info_df['DST_KEY'].tolist() == [
'1_2020-01-01', '1_2020-01-01', '1_2021-01-01'
]
assert district_mock.versions_full_info_df['geometry'].tolist() == ['geometry1', 'geometry2', 'geometry1']
def test_join_version_information_extra_geometries_ignored(self, mocker):
district_mock = mocker.Mock()
district_mock.versions_df = pd.DataFrame(
data={
'UNIQUE_ROW_KEY': ['s1_d1', 's2_d1', 's1_d1'],
'DST_KEY': ['1_2020-01-01', '1_2020-01-01', '1_2021-01-01']
}
)
district_mock.district_records = pd.DataFrame(
data={
'UNIQUE_ROW_KEY': ['s1_d1', 's2_d1', 's5_d6'],
'geometry': ['geometry1', 'geometry2', 'geometry5']
}
)
models.District.join_version_information(district_mock)
assert district_mock.versions_full_info_df['UNIQUE_ROW_KEY'].tolist() == ['s1_d1', 's2_d1', 's1_d1']
assert district_mock.versions_full_info_df['DST_KEY'].tolist() == [
'1_2020-01-01', '1_2020-01-01', '1_2021-01-01'
]
assert district_mock.versions_full_info_df['geometry'].tolist() == ['geometry1', 'geometry2', 'geometry1']
def test_removes_county_that_changes_after_last_district_change_date(self, mocker):
test_state_df = pd.DataFrame(
data={
'COUNTY_EFFECTIVE_DATE': ['2020-01-01', '2020-01-01', '2030-01-01'],
'COUNTY_EFFECTIVE_END_DATE': ['2039-12-31', '2029-12-31', '2039-12-31'],
'DST_NUMBER': ['1', '1', '42'],
'SHP_KEY': ['shape1', 'shape2', 'shape2'],
'DST_KEY': ['dst1', 'dst1', 'dst42'],
}
)
test_state_df['COUNTY_EFFECTIVE_DATE'] = pd.to_datetime(test_state_df['COUNTY_EFFECTIVE_DATE'])
test_state_df['COUNTY_EFFECTIVE_END_DATE'] = pd.to_datetime(test_state_df['COUNTY_EFFECTIVE_END_DATE'])
test_district = models.District('1', test_state_df)
test_district.find_records_versions()
assert test_district.row_key_and_versions == {
'shape1__dst1': [np.datetime64('2020-01-01'), np.datetime64('2030-01-01')],
'shape2__dst1': [np.datetime64('2020-01-01')],
}
def test_remove_duplicate_version_rows_one_changes_at_end(self, mocker):
versions_full_info_df = pd.DataFrame(
columns=['COUNTY_KEY', 'UNIQUE_ROW_KEY', 'DST_VERSION_KEY', 'DST_EFFECTIVE_DATE', 'DST_END_DATE'],
data=[
['co1', 'co1_s1__d1', 'foo_2020-01-01', '2020-01-01', '2049-12-31'],
['co1', 'co1_s1__d1', 'foo_2030-01-01', '2030-01-01', '2049-12-31'],
['co1', 'co1_s1__d1', 'foo_2040-01-01', '2040-01-01', '2049-12-31'],
['co2', 'co2_s1__d1', 'foo_2020-01-01', '2020-01-01', '2029-12-31'],
['co2', 'co2_s2__d1', 'foo_2030-01-01', '2030-01-01', '2049-12-31'],
['co2', 'co2_s2__d1', 'foo_2040-01-01', '2040-01-01', '2049-12-31'],
]
)
versions_full_info_df['DST_EFFECTIVE_DATE'] = pd.to_datetime(versions_full_info_df['DST_EFFECTIVE_DATE'])
versions_full_info_df['DST_END_DATE'] = pd.to_datetime(versions_full_info_df['DST_END_DATE'])
district_mock = mocker.Mock()
district_mock.versions_full_info_df = versions_full_info_df
models.District.remove_duplicate_version_rows(district_mock)
assert district_mock.deduped_versions_df.to_numpy().tolist() == [
['co1', 'co1_s1__d1', 'foo_2020-01-01', np.datetime64('2020-01-01'), np.datetime64('2049-12-31')],
['co1', 'co1_s1__d1', 'foo_2030-01-01', np.datetime64('2030-01-01'), np.datetime64('2049-12-31')],
['co2', 'co2_s1__d1', 'foo_2020-01-01', np.datetime64('2020-01-01'), np.datetime64('2029-12-31')],
['co2', 'co2_s2__d1', 'foo_2030-01-01', np.datetime64('2030-01-01'), np.datetime64('2049-12-31')]
] # yapf: disable
def test_remove_duplicate_version_rows_new_one_after_multiple_change_dates(self, mocker):
versions_full_info_df = pd.DataFrame(
columns=['COUNTY_KEY', 'UNIQUE_ROW_KEY', 'DST_VERSION_KEY', 'DST_EFFECTIVE_DATE', 'DST_END_DATE'],
data=[
['co1', 'co1_s1__d1', 'foo_2020-01-01', '2020-01-01', '2049-12-31'],
['co1', 'co1_s1__d1', 'foo_2030-01-01', '2030-01-01', '2049-12-31'],
['co1', 'co1_s1__d1', 'foo_2040-01-01', '2040-01-01', '2049-12-31'],
['co2', 'co2_s1__d1', 'foo_2040-01-01', '2040-01-01', '2049-12-31'],
]
)
versions_full_info_df['DST_EFFECTIVE_DATE'] = pd.to_datetime(versions_full_info_df['DST_EFFECTIVE_DATE'])
versions_full_info_df['DST_END_DATE'] = pd.to_datetime(versions_full_info_df['DST_END_DATE'])
district_mock = mocker.Mock()
district_mock.versions_full_info_df = versions_full_info_df
models.District.remove_duplicate_version_rows(district_mock)
assert district_mock.deduped_versions_df.to_numpy().tolist() == [
['co1', 'co1_s1__d1', 'foo_2020-01-01', np.datetime64('2020-01-01'), np.datetime64('2049-12-31')],
['co1', 'co1_s1__d1', 'foo_2040-01-01', np.datetime64('2040-01-01'), np.datetime64('2049-12-31')],
['co2', 'co2_s1__d1', 'foo_2040-01-01', np.datetime64('2040-01-01'), np.datetime64('2049-12-31')],
] # yapf: disable
def test_remove_duplicate_version_rows_many_of_same(self, mocker):
versions_full_info_df = pd.DataFrame(
columns=['COUNTY_KEY', 'UNIQUE_ROW_KEY', 'DST_VERSION_KEY', 'DST_EFFECTIVE_DATE', 'DST_END_DATE'],
data=[
['co1', 'co1_s1__d1', 'foo_2020-01-01', '2020-01-01', '2049-12-31'],
['co1', 'co1_s1__d1', 'foo_2030-01-01', '2030-01-01', '2049-12-31'],
['co1', 'co1_s1__d1', 'foo_2040-01-01', '2040-01-01', '2049-12-31'],
]
)
versions_full_info_df['DST_EFFECTIVE_DATE'] = pd.to_datetime(versions_full_info_df['DST_EFFECTIVE_DATE'])
versions_full_info_df['DST_END_DATE'] = pd.to_datetime(versions_full_info_df['DST_END_DATE'])
district_mock = mocker.Mock()
district_mock.versions_full_info_df = versions_full_info_df
models.District.remove_duplicate_version_rows(district_mock)
assert district_mock.deduped_versions_df.to_numpy().tolist() == [
['co1', 'co1_s1__d1', 'foo_2020-01-01', np.datetime64('2020-01-01'), np.datetime64('2049-12-31')],
] # yapf: disable
def test_remove_duplicate_version_rows_shape_changes_midway(self, mocker):
versions_full_info_df = pd.DataFrame(
columns=['COUNTY_KEY', 'UNIQUE_ROW_KEY', 'DST_VERSION_KEY', 'DST_EFFECTIVE_DATE', 'DST_END_DATE'],
data=[
['co1', 'co1_s1__d1', 'foo_2020-01-01', '2020-01-01', '2039-12-31'],
['co1', 'co1_s1__d1', 'foo_2030-01-01', '2030-01-01', '2039-12-31'],
['co1', 'co1_s2__d1', 'foo_2040-01-01', '2040-01-01', '2059-12-31'],
['co1', 'co1_s2__d1', 'foo_2040-01-01', '2050-01-01', '2059-12-31'],
]
)
versions_full_info_df['DST_EFFECTIVE_DATE'] = pd.to_datetime(versions_full_info_df['DST_EFFECTIVE_DATE'])
versions_full_info_df['DST_END_DATE'] = pd.to_datetime(versions_full_info_df['DST_END_DATE'])
district_mock = mocker.Mock()
district_mock.versions_full_info_df = versions_full_info_df
models.District.remove_duplicate_version_rows(district_mock)
assert district_mock.deduped_versions_df.to_numpy().tolist() == [
['co1', 'co1_s1__d1', 'foo_2020-01-01', np.datetime64('2020-01-01'), np.datetime64('2039-12-31')],
['co1', 'co1_s2__d1', 'foo_2040-01-01', np.datetime64('2040-01-01'), np.datetime64('2059-12-31')],
] # yapf: disable
def test_remove_duplicate_version_rows_first_one_leaves_still_includes_next_version_after(self, mocker):
#: 2020: co1, co2
#: 2030: dupes
#: 2040: co2
#: 2050: dupes
versions_full_info_df = pd.DataFrame(
columns=['COUNTY_KEY', 'UNIQUE_ROW_KEY', 'DST_VERSION_KEY', 'DST_EFFECTIVE_DATE', 'DST_END_DATE'],
data=[
['co1', 'co1_s1__d1', 'foo_2020-01-01', '2020-01-01', '2039-12-31'],
['co1', 'co1_s1__d1', 'foo_2030-01-01', '2030-01-01', '2039-12-31'],
#: We assume co1 went to a different district at the 2040 change date
['co2', 'co2_s1__d1', 'foo_2020-01-01', '2020-01-01', '2059-12-31'],
['co2', 'co2_s1__d1', 'foo_2030-01-01', '2030-01-01', '2059-12-31'],
['co2', 'co2_s1__d1', 'foo_2040-01-01', '2040-01-01', '2059-12-31'],
['co2', 'co2_s1__d1', 'foo_2050-01-01', '2050-01-01', '2059-12-31'],
]
)
versions_full_info_df['DST_EFFECTIVE_DATE'] =
|
pd.to_datetime(versions_full_info_df['DST_EFFECTIVE_DATE'])
|
pandas.to_datetime
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:.conda-bandit_nhgf]
# language: python
# name: conda-env-.conda-bandit_nhgf-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
from collections import OrderedDict
import geopandas as gpd
import numpy as np
import pandas as pd
# %%
calibrations = ['byHRU', 'byHRU_musk', 'byHRU_musk_obs', 'byHRU_noroute', 'byHRU_noroute_obs']
output_suffix = {'Ref': 'ref', 'Non-ref': 'nonref'}
workdir = '/Volumes/USGS_NHM1/calibrations/NHMv10/DAYMET_releases/model_stats'
falc_cls = 'Non-ref' # either: Ref or Non-ref
out_filename = f'{workdir}/ns_curves_{output_suffix[falc_cls]}.csv'
# %%
def get_exceedance_curve(ts, cal):
stat_name = f'{ts.name}_{cal}'
ranked_stat = sorted(ts[ts.notnull()])
prob = np.arange(len(ranked_stat), dtype=float) + 1.0
prob = (prob / (len(ranked_stat) + 1.0))
    # Return dataframe of exceedance curve values
df = pd.DataFrame({'exceedance': prob, stat_name: ranked_stat}, columns=['exceedance', stat_name])
df.set_index('exceedance', inplace=True)
return df
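# %%
# Hedged usage sketch (not part of the original notebook): the statistic name 'NS'
# and the calibration label below are assumptions chosen only to show the call shape.
_demo_stat = pd.Series([0.2, 0.5, np.nan, 0.8, 0.65], name='NS')
_demo_curve = get_exceedance_curve(_demo_stat, 'byHRU')
# _demo_curve is indexed by 'exceedance' and has a single column named 'NS_byHRU'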
# %% [markdown]
# ## Read the gage statistics files
# %%
df_dict = OrderedDict()
for cc in calibrations:
df_dict[cc] =
|
pd.read_csv(f'{workdir}/gage_stats_{cc}.csv', sep=',')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import collections
from functools import partial
import numpy as np
import pytest
from pandas import Series, Timestamp
from pandas.core import (
common as com,
ops,
)
def test_get_callable_name():
getname = com.get_callable_name
def fn(x):
return x
lambda_ = lambda x: x # noqa: E731
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_random_state():
import numpy.random as npr
# Check with seed
state =
|
com.random_state(5)
|
pandas.core.common.random_state
|
import pandas as pd
import numpy as np
from sklearn import preprocessing, cluster
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
import seaborn as sns
def plot_elbow(sse, ks):
fig, axis = plt.subplots(figsize=(9, 6))
axis.set_title('Elbow method for optimal k')
axis.set_xlabel('k')
axis.set_ylabel('SSE')
plt.plot(ks, sse, marker='o')
plt.tight_layout()
plt.show()
def plot_silhouette(sils, ks):
fig, axis = plt.subplots(figsize=(9, 6))
axis.set_title('Silhouette method')
axis.set_xlabel('k')
axis.set_ylabel('Silhouette')
plt.plot(ks, sils, marker='o')
plt.tight_layout()
plt.show()
def elbow_method(data):
sse = []
ks = range(2, 10)
for k in ks:
k_means_model = cluster.KMeans(n_clusters=k, random_state=55)
k_means_model.fit(data)
sse.append(k_means_model.inertia_)
plot_elbow(sse, ks)
def silhouette_method(data):
ks = range(2, 10)
sils = []
for k in ks:
clusterer = KMeans(n_clusters=k, random_state=55)
cluster_labels = clusterer.fit_predict(data)
silhouette_avg = silhouette_score(data, cluster_labels)
sils.append(silhouette_avg)
print("For n_clusters =", k, "The average silhouette_score is :",
silhouette_avg)
plot_silhouette(sils, ks)
def apply_kmeans(df: pd.DataFrame) -> pd.DataFrame:
# 1 Filter rows from 2015 and 2018 to calculate differences
dataset = df.copy()
df = df[df['anyo'].isin([2015, 2018])]
# set 2015 values as negative and sum to calculate differences
kpis = ['num_incidentes', 'inmigracion_mil_hab',
'tasa_natalidad_mil_habitantes', 'num_personas_por_domicilio',
'precio_alquiler_mes_m2', 'precio_compra_venta_m2', 'renta']
for kpi in kpis:
df[kpi] = np.where(df['anyo'] == 2015, -1 * df[kpi], df[kpi])
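    # Worked example of the sign trick above (values are illustrative): if renta is 25
    # in 2015 and 30 in 2018, the two rows become -25 and 30, and the groupby sum below
    # yields 5, i.e. the change from 2015 to 2018.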
df = (
df
.groupby(['id_barrio', 'nom_barrio'])
.agg(num_incidentes=('num_incidentes', sum),
inmigracion_mil_hab=('inmigracion_mil_hab', sum),
tasa_natalidad_mil_habitantes=(
'tasa_natalidad_mil_habitantes',
sum),
num_personas_por_domicilio=(
'num_personas_por_domicilio', sum),
precio_alquiler_mes_m2=('precio_alquiler_mes_m2', sum),
precio_compra_venta_m2=('precio_compra_venta_m2', sum),
renta=('renta', sum))
.reset_index()
)
for kpi in kpis:
df[kpi] = np.round(df[kpi], 2)
# 2 Normalize
df2 = df.copy()
df2 = df2.drop(columns=['id_barrio', 'nom_barrio'])
x = df2.values # returns a numpy array
scaler = preprocessing.StandardScaler()
x_scaled = scaler.fit_transform(x)
df2 = pd.DataFrame(x_scaled)
# 3 Correlations
corr_matrix = df2.corr()
corr_matrix = np.round(corr_matrix, 2)
fig, ax = plt.subplots(figsize=(8, 6))
sns.heatmap(corr_matrix, annot=True, fmt="g", cmap='viridis', ax=ax,
xticklabels=kpis, yticklabels=kpis)
plt.tight_layout()
fig.show()
# Remove highly correlated variables
df2 = df2.drop(columns=[1])
# try PCA
pca = PCA(n_components=6)
pca.fit(df2)
    print(f'PCA explained variance: {pca.explained_variance_ratio_}')
# Optimal k
silhouette_method(df2)
elbow_method(df2)
# elbow 4 - silhouette 2 -> 3 -> 4
# kmeans with k=3
clusterer = KMeans(n_clusters=3, random_state=55)
cluster_labels = clusterer.fit_predict(df2)
df['cluster_k3'] = cluster_labels
k3 = df[['id_barrio', 'nom_barrio', 'cluster_k3']]
dataset_k3 = pd.merge(dataset, k3, on=['id_barrio', 'nom_barrio'])
dataset_k3 = (
dataset_k3
.groupby(['anyo', 'cluster_k3'])
.agg(num_incidentes=('num_incidentes', 'mean'),
inmigracion_mil_hab=('inmigracion_mil_hab', 'mean'),
tasa_natalidad_mil_habitantes=(
'tasa_natalidad_mil_habitantes', 'mean'),
num_personas_por_domicilio=(
'num_personas_por_domicilio', 'mean'),
precio_alquiler_mes_m2=('precio_alquiler_mes_m2', 'mean'),
precio_compra_venta_m2=('precio_compra_venta_m2', 'mean'),
renta=('renta', 'mean'))
.reset_index()
)
dataset_k3.to_csv('data/dataset/dataset_clusters_3.csv', index=False)
# kmeans with k=4
clusterer = KMeans(n_clusters=4, random_state=55)
cluster_labels = clusterer.fit_predict(df2)
df['cluster_k4'] = cluster_labels
df['cluster_k4'] = df['cluster_k4'].replace({2: 0, 0: 2, 3: 1, 1: 3})
df_cluster = df[['id_barrio', 'nom_barrio', 'cluster_k3', 'cluster_k4']]
df_cluster.to_csv('data/dataset/kmeans_clusters.csv', index=False)
k4 = df[['id_barrio', 'nom_barrio', 'cluster_k4']]
dataset_k4 =
|
pd.merge(dataset, k4, on=['id_barrio', 'nom_barrio'])
|
pandas.merge
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 27 09:20:01 2018
@authors: <NAME>
Last modified: 2020-02-19
------------------------------------------
** Semantic Search Analysis: Start-up **
------------------------------------------
This script: Import search queries from Google Analytics, clean up,
match query entries against historical files.
Okay to run all at once, but see the script for instructions for manual operations.
INPUTS:
- data/raw/SearchConsoleNew.csv - log of google.com search results (GA calls "Queries") where person landed on your site
- data/raw/SiteSearchNew.csv - log from your site search (GA calls "Search Terms")
- data/matchFiles/SiteSpecificMatches.xlsx - From YOUR custom clustering of terms that won't be in UMLS
- data/matchFiles/PastMatches.xlsx - Historical file of vetted successful matches
- data/matchFiles/UmlsMesh.csv - Free-to-use controlled vocabulary - MeSH - with UMLS Semantic Types
OUTPUTS:
- data/interim/01_CombinedSearchFullLog.xlsx - Lightly modified full log before changes
- data/interim/ForeignUnresolved.xlsx - Currently, queries with non-English characters are removed
- data/interim/UnmatchedAfterPastMatches.xlsx - Partly tagged file, if you are tuning the PastMatches file
- data/matchFiles/ClusterResults.xlsx - Unmatched terms, top CLUSTERS - update matchFiles in batches
- data/interim/ManualMatch.xlsx - Unmatched terms, top FREQUENCY COUNTS - update matchFiles one at a time
- data/interim/LogAfterJournals.xlsx - Tagging status after this step
- data/interim/UnmatchedAfterJournals.xlsx - What still needs to be tagged after this step.
-------------------------------
HOW TO EXPORT YOUR SOURCE DATA
-------------------------------
Script assumes Google Analytics where search logging has been configured. Can
be adapted for other tools. This method AVOIDS personally identifiable
information ENTIRELY.
1. Set date parameters (Consider 1 month)
2. Go to Acquisition > Search Console > Queries
3. Select Export > Unsampled Report as SearchConsoleNew.csv
4. Copy the result to data/raw folder
5. Do the same from Behavior > Site Search > Search Terms with file name
SiteSearchNew.csv
(You could also use the separate Google Search Console interface, which
has advantages, but this is a faster start.)
----------------
SCRIPT CONTENTS
----------------
1. Start-up / What to put into place, where
2. Create dataframe from query log; globally update columns and rows
3. Assign terms with non-English characters to ForeignUnresolved
4. Make special-case assignments with F&R, RegEx: Bibliographic, Numeric, Named entities
5. Ignore everything except one program/product/service term
6. Exact-match to site-specific and vetted past matches
7. Eyeball results; manually classify remaining "brands" into SiteSpecificMatches
* PROJECT STARTUP - OPTIONAL: UPDATE SITE-SPECIFIC MATCHES AND RE-RUN TO THIS POINT *
8. Exact-match to UmlsMesh
9. Exact match to journal file (necessary for pilot site, replace with your site-specific need)
10. MANUAL PROCESS: Re-cluster, update SiteSpecificMatches.xlsx, re-run
11. MANUALLY add matches from ManualMatch.xlsx for high-frequency unclassified
12. Write out LogAfterJournals and UnmatchedAfterJournals
13. Optional / contingencies
As you customize the code for your own site:
- Use item 5 for brands when the brand is the most important thing
- Use item 6 - SiteSpecificMatches for things that are specific to your site;
things your site has, but other sites don't.
- Use item 6 - PastMatches, for generic terms that would be relevant
to any health-medical site.
"""
#%%
# ============================================
# 1. Start-up / What to put into place, where
# ============================================
'''
File locations, etc.
'''
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import pie, axis, show
import matplotlib.ticker as mtick # used for example in 100-percent bars chart
import numpy as np
import os
import re
import string
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import collections
import copy
from pathlib import *
# To be used with str(Path.home())
# Set working directory and directories for read/write
home_folder = str(Path.home()) # os.path.expanduser('~')
os.chdir(home_folder + '/Projects/classifysearches')
dataRaw = 'data/raw/' # Put log here before running script
dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required
dataInterim = 'data/interim/' # Save to disk as desired, to re-start easily
reports = 'reports/'
SearchConsoleRaw = dataRaw + 'SearchConsoleNew.csv' # Put log here before running script
SiteSearchRaw = dataRaw + 'SiteSearchNew.csv' # Put log here before running script
#%%
# ======================================================================
# 2. Create dataframe from query log; globally update columns and rows
# ======================================================================
'''
If you need to concat multiple files, one option is
searchLog = pd.concat([x1, x2, x3], ignore_index=True)
File will have junk rows at top and bottom that this code removes.
'''
# --------------
# SearchConsole
# --------------
SearchConsole = pd.read_csv(SearchConsoleRaw, sep=',', index_col=False) # skiprows=7,
SearchConsole.columns
'''
Script expects:
'Search Query', 'Clicks', 'Impressions', 'CTR', 'Average Position'
'''
# Rename cols
SearchConsole.rename(columns={'Search Query': 'Query',
'Average Position': 'AveragePosition'}, inplace=True)
SearchConsole.columns
'''
'Query', 'Clicks', 'Impressions', 'CTR', 'AveragePosition'
'''
'''
Remove zero-click searches; these are (apparently) searches at Google where the
search result page answers the question, yet the term has a landing page on our
site. Unclear what's going on.
For example, https://www.similarweb.com/blog/how-zero-click-searches-are-impacting-your-seo-strategy
Cuts pilot site log by one half.
'''
SearchConsole = SearchConsole.loc[(SearchConsole['Clicks'] > 0)]
# SearchConsole.shape
# -----------
# SiteSearch
# -----------
SiteSearch = pd.read_csv(SiteSearchRaw, sep=',', index_col=False) # skiprows=7,
SiteSearch.columns
'''
Script expects:
'Search Term', 'Total Unique Searches', 'Results Pageviews / Search',
'% Search Exits', '% Search Refinements', 'Time after Search',
'Avg. Search Depth'
'''
# Rename cols
SiteSearch.rename(columns={'Search Term': 'Query',
'Total Unique Searches': 'TotalUniqueSearches',
'Results Pageviews / Search': 'ResultsPVSearch',
'% Search Exits': 'PercentSearchExits',
'% Search Refinements': 'PercentSearchRefinements',
'Time after Search': 'TimeAfterSearch',
'Avg. Search Depth': 'AvgSearchDepth'}, inplace=True)
SiteSearch.columns
'''
'Query', 'TotalUniqueSearches', 'ResultsPVSearch', 'PercentSearchExits',
'PercentSearchRefinements', 'TimeAfterSearch', 'AvgSearchDepth'
'''
# Join the two df's, keeping all rows and putting terms in common into one row
CombinedLog = pd.merge(SearchConsole, SiteSearch, on = 'Query', how = 'outer')
# New col for total times people searched for term, regardless of location searched from
CombinedLog['TotalSearchFreq'] = CombinedLog.fillna(0)['Clicks'] + CombinedLog.fillna(0)['TotalUniqueSearches']
CombinedLog = CombinedLog.sort_values(by='TotalSearchFreq', ascending=False).reset_index(drop=True)
# Queries longer than 255 char generate an error in Excel. Shouldn't be that
# long anyway; let's cut off at 100 char (still too long but stops the error)
# ?? df.apply(lambda x: x.str.slice(0, 20))
CombinedLog['Query'] = CombinedLog['Query'].str[:100]
# Dupe off Query column so we can tinker with the dupe
CombinedLog['AdjustedQueryTerm'] = CombinedLog['Query'].str.lower()
# -------------------------
# Remove punctuation, etc.
# -------------------------
# Replace hyphen with space because the below would replace with nothing
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace('-', ' ')
# Remove https:// if used
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace('http://', '')
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace('https://', '')
'''
Regular expressions info from https://docs.python.org/3/library/re.html
^ (Caret.) Matches the start of the string, and in MULTILINE mode also
matches immediately after each newline.
\w For Unicode (str) patterns: Matches Unicode word characters; this
   includes most characters that can be part of a word in any language,
   as well as numbers and the underscore. If the ASCII flag is used, only
   [a-zA-Z0-9_] is matched.
\s For Unicode (str) patterns: Matches Unicode whitespace characters
   (which includes [ \t\n\r\f\v], and also many other characters, for
   example the non-breaking spaces mandated by typography rules in many
   languages). If the ASCII flag is used, only [ \t\n\r\f\v] is matched.
+ Causes the resulting RE to match 1 or more repetitions of the preceding
RE. ab+ will match ‘a’ followed by any non-zero number of ‘b’s; it will
not match just ‘a’.
Spyder editor can somehow lose the regex, such as when it is copied and pasted
inside the editor; an attempt to preserve inside this comment: (r'[^\w\s]+','')
'''
# Remove all chars except a-zA-Z0-9 and leave foreign chars alone
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace(r'[^\w\s]+', '')
# Remove modified entries that are now dupes or blank entries
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace(' ', ' ') # two spaces to one
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.strip() # remove leading and trailing spaces
CombinedLog = CombinedLog.loc[(CombinedLog['AdjustedQueryTerm'] != "")]
# Write out this version; won't need most columns until later
writer = pd.ExcelWriter(dataInterim + '01_CombinedSearchFullLog.xlsx')
CombinedLog.to_excel(writer,'CombinedLogFull', index=False)
# df2.to_excel(writer,'Sheet2')
writer.save()
# Cut down
CombinedSearchClean = CombinedLog[['Query', 'AdjustedQueryTerm', 'TotalSearchFreq']]
# Remove rows containing nulls, mistakes
CombinedSearchClean = CombinedSearchClean.dropna()
# Add match cols
CombinedSearchClean['PreferredTerm'] = ''
CombinedSearchClean['SemanticType'] = ''
# Free up memory
del [[SearchConsole, SiteSearch, CombinedLog]]
# CombinedSearchClean.head()
CombinedSearchClean.columns
'''
'Query', 'AdjustedQueryTerm', 'TotalSearchFreq', 'PreferredTerm',
'SemanticType'
'''
#%%
# =================================================================
# 3. Assign terms with non-English characters to ForeignUnresolved
# =================================================================
'''
UMLS MetaMap should not be given anything other than flat ASCII - no foreign
characters, no high-ASCII apostrophes or quotes, etc., at least as of October
2019. Flag these so later you can remove them from processing. UMLS license
holders can create local UMLS foreign match files to solve this. The current
implementation runs without need for a UMLS license (i.e., many vocabularies
have been left out).
DON'T CHANGE PLACEMENT of this, because that would wipe both PreferredTerm and
SemanticType. Future procedures can replace this content with the correct
translation.
FIXME - Some of these are not foreign; R&D how to avoid assigning as foreign;
start by seeing whether orig term had non-ascii characters.
Mistaken assignments that are 1-4-word single-concept searches will be
overwritten with the correct data. And a smaller number of other types will
be reclaimed as well.
- valuation of fluorescence in situ hybridization as an ancillary tool to
urine cytology in diagnosing urothelial carcinoma
- comparison of a light‐emitting diode with conventional light sources for
providing phototherapy to jaundiced newborn infants
- crystal structure of ovalbumin
- diet exercise or diet with exercise 18–65 years old
'''
# Other unrecognized characters, flag as foreign. Eyeball these once in a while and update the above.
def checkForeign(row):
# print(row)
foreignYes = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'Foreign unresolved', 'SemanticType':'Foreign unresolved'}
foreignNo = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'','SemanticType':''} # Wipes out previous content!!
try:
row.AdjustedQueryTerm.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return pd.Series(foreignYes)
else:
return pd.Series(foreignNo)
CombinedSearchClean[['AdjustedQueryTerm', 'PreferredTerm','SemanticType']] = CombinedSearchClean.apply(checkForeign, axis=1)
# Write out foreign
ForeignUnresolved = CombinedSearchClean[CombinedSearchClean.SemanticType.str.contains("Foreign unresolved") == True]
writer = pd.ExcelWriter(dataInterim + 'ForeignUnresolved.xlsx')
ForeignUnresolved.to_excel(writer,'ForeignUnresolved', index=False)
# df2.to_excel(writer,'Sheet2')
writer.save()
# Remove from consideration
LogAfterForeign = CombinedSearchClean[CombinedSearchClean.SemanticType.str.contains("Foreign unresolved") == False]
# Free memory
del [[ForeignUnresolved, CombinedSearchClean]]
#%%
# =========================================================================================
# 4. Make special-case assignments with F&R, RegEx: Bibliographic, Numeric, Named entities
# =========================================================================================
'''
Later procedures won't be able to match the below very well, so match them here.
NOTE: Doing this will ignore additional concepts when the search query was a
complex one. Coverage of hard-to-match queries, at the expense of accuracy/completeness,
which may require a good deal of manual work.
'''
# --- Bibliographic Entity: Usually people searching for a document title ---
# Assign ALL queries over 40 characters to 'Bibliographic Entity' (often citations, search strategies, publication titles...)
LogAfterForeign.loc[(LogAfterForeign['AdjustedQueryTerm'].str.len() > 40), 'PreferredTerm'] = 'Bibliographic Entity'
LogAfterForeign.loc[LogAfterForeign['AdjustedQueryTerm'].str.contains('page number 1 page size', na=False), 'PreferredTerm'] = 'Bibliographic Entity'
LogAfterForeign.loc[LogAfterForeign['PreferredTerm'].str.contains('Bibliographic Entity', na=False), 'SemanticType'] = 'Bibliographic Entity'
# --- Numeric ID: Usually people searching for database ID ---
# Assign entries starting with 3 digits
# FIXME - Clarify and grab the below, PMID, ISSN, ISBN 0-8016-5253-7), etc.
# LogAfterForeign.loc[LogAfterForeign['AdjustedQueryTerm'].str.contains('^[0-9]{3,}', na=False), 'PreferredTerm'] = 'Numeric ID'
LogAfterForeign.loc[LogAfterForeign['AdjustedQueryTerm'].str.contains('[0-9]{5,}', na=False), 'PreferredTerm'] = 'Numeric ID'
LogAfterForeign.loc[LogAfterForeign['AdjustedQueryTerm'].str.contains('[0-9]{4,} [0-9]{4,}', na=False), 'PreferredTerm'] = 'Numeric ID'
LogAfterForeign.loc[LogAfterForeign['PreferredTerm'].str.contains('Numeric ID', na=False), 'SemanticType'] = 'Numeric ID'
# Unresolved - Mysterious / mistakes / unknown
LogAfterForeign.loc[LogAfterForeign['AdjustedQueryTerm'].str.contains('^xxxx$', na=False), 'PreferredTerm'] = 'Unresolved'
LogAfterForeign.loc[LogAfterForeign['AdjustedQueryTerm'].str.contains('^xxxxx$', na=False), 'PreferredTerm'] = 'Unresolved'
LogAfterForeign.loc[LogAfterForeign['AdjustedQueryTerm'].str.contains('^xxxxxx$', na=False), 'PreferredTerm'] = 'Unresolved'
LogAfterForeign.loc[LogAfterForeign['PreferredTerm'].str.contains('Unresolved', na=False), 'SemanticType'] = 'Unresolved'
# -------------
# How we doin?
# -------------
'''
Multiple sections have these "How we doin?" counts, BUT they are not the
same. Sometimes SemanticType nulls are counted, and sometimes SemanticType
empty strings are counted.
'''
# Total queries in log
SearchesRepresentedTot = LogAfterForeign['TotalSearchFreq'].sum().astype(int)
SearchesAssignedTot = LogAfterForeign.loc[LogAfterForeign['SemanticType'] != '']
SearchesAssignedTot = SearchesAssignedTot['TotalSearchFreq'].sum().astype(int)
SearchesAssignedPercent = (SearchesAssignedTot / SearchesRepresentedTot * 100).astype(int)
# PercentOfSearchesUnAssigned = 100 - PercentOfSearchesAssigned
RowsTot = len(LogAfterForeign)
RowsAssignedCnt = (LogAfterForeign['SemanticType'].values != '').sum() # .isnull().sum()
# RowsUnassignedCnt = TotRows - RowsAssigned
RowsAssignedPercent = (RowsAssignedCnt / RowsTot * 100).astype(int)
# print("\nTop Semantic Types\n{}".format(LogAfterForeign['SemanticType'].value_counts().head(10)))
print("\n==========================================================\n ** LogAfterForeign: {}% of total search volume tagged **\n==========================================================\n{:,} of {:,} searches ({}%) assigned;\n{:,} of {:,} rows ({}%) assigned".format(SearchesAssignedPercent, SearchesAssignedTot, SearchesRepresentedTot, SearchesAssignedPercent, RowsAssignedCnt, RowsTot, RowsAssignedPercent))
#%%
# =============================================================
# 5. Ignore everything except one program/product/service term
# =============================================================
'''
USE CAREFULLY, when part of a search term is IMPORTANT ENOUGH TO OVERRIDE
EVERYTHING ELSE in the query. Often this is a "brand," but it can also be
terminology that is only associated with a specific program/product/service.
Example uses:
- You want to view all the ways customers are trying to get to the home
page of a program, product or service.
- You are noticing that perhaps a specific entity should be more
visible within your site navigation and you want to understand the
issue better.
TODO - Update this to a function that is fed by a file.
'''
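# Hedged sketch of the TODO above (the file name, column names, and function below are
# assumptions for illustration only; nothing here is called by this script):
def apply_override_file(log_df, override_file_path):
    """Tag rows whose AdjustedQueryTerm contains a pattern listed in an Excel file
    with hypothetical columns: Pattern, PreferredTerm, SemanticType."""
    overrides = pd.read_excel(override_file_path)
    for _, override in overrides.iterrows():
        matches = log_df['AdjustedQueryTerm'].str.contains(override['Pattern'], na=False)
        log_df.loc[matches, 'PreferredTerm'] = override['PreferredTerm']
        log_df.loc[matches, 'SemanticType'] = override['SemanticType']
    return log_df
# Example call (not executed): LogAfterOverride = apply_override_file(LogAfterForeign, dataMatchFiles + 'Overrides.xlsx')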
LogAfterOverride = LogAfterForeign
# --------------------------
# Product-NLM (multi-group)
# --------------------------
# pubmed / pmc / medline / journals
LogAfterOverride.loc[LogAfterOverride['AdjustedQueryTerm'].str.contains('pubmed', na=False), 'PreferredTerm'] = 'PubMed/PMC/MEDLINE'
LogAfterOverride.loc[LogAfterOverride['AdjustedQueryTerm'].str.contains('pub med', na=False), 'PreferredTerm'] = 'PubMed/PMC/MEDLINE'
LogAfterOverride.loc[LogAfterOverride['AdjustedQueryTerm'].str.contains('medline', na=False), 'PreferredTerm'] = 'PubMed/PMC/MEDLINE'
LogAfterOverride.loc[LogAfterOverride['AdjustedQueryTerm'].str.contains('journal abbreviation', na=False), 'PreferredTerm'] = 'PubMed/PMC/MEDLINE'
LogAfterOverride.loc[LogAfterOverride['PreferredTerm'].str.contains('PubMed/PMC/MEDLINE', na=False), 'SemanticType'] = 'Product-NLM'
#%%
# ======================================================================
# 6. Exact-match to site-specific and vetted past matches
# ======================================================================
'''
Build a file of terms your site visitors are most commonly searching for,
which might be handled poorly by the resources in Phase 2, to include:
1. Your product and service names, as people search for them
2. Person names, whether staff, authors, etc.
3. Organizational names specific to your organization
    4. Any homonyms, etc., that you review after Phase 2 and want to
       control tagging for, to PREVENT the Phase 2 tools from tagging them.
Focus on queries that are correct as typed and can be exact-matched to terms
that Phase 2 might handle incorrectly. Over time this will lighten the manual
work in later steps.
DO use correct spellings, because later we fuzzy match off of the terms here.
Okay to add previously matched foreign terms here.
** TO BUILD A NEW FILE - RECOMMENDATION **
Export the top 1,000 queries from the past 12 months and
cluster them using the code at x. Then process similar BRANDED PRODUCTS, etc.
(ONLY the categories above!) in a spreadsheet, building the additional column
information as you go, following what's in the model PastMatches file.
'''
# -------------------------
# SiteSpecificMatches.xlsx
# -------------------------
# Bring in
SiteSpecificMatches = pd.read_excel(dataMatchFiles + 'SiteSpecificMatches.xlsx')
SiteSpecificMatches.columns
'''
'AdjustedQueryTerm', 'PreferredTerm', 'SemanticType'
'''
# Combine
LogAfterSiteSpecific = pd.merge(LogAfterOverride, SiteSpecificMatches, how='left', on=['AdjustedQueryTerm'])
LogAfterSiteSpecific.columns
'''
'Query_x', 'AdjustedQueryTerm', 'TotalSearchFreq', 'PreferredTerm_x',
'SemanticType_x', 'PreferredTerm_y', 'SemanticType_y', 'Query_y'
'''
# TODO - Look for a better way to do the above - MERGE WITH CONDITIONAL OVERWRITE.
# This code stops the merge from wiping out some data. Temporary fix:
# For the next operation, set _x cols to nan instead of empty string ''
LogAfterSiteSpecific['PreferredTerm_x'] = LogAfterSiteSpecific['PreferredTerm_x'].replace(r'^\s*$', np.nan, regex=True)
LogAfterSiteSpecific['SemanticType_x'] = LogAfterSiteSpecific['SemanticType_x'].replace(r'^\s*$', np.nan, regex=True)
# LogAfterSiteSpecific['Query2'] = LogAfterSiteSpecific['Query_x'].where(LogAfterSiteSpecific['Query_x'].notnull(), LogAfterSiteSpecific['Query_y'])
# LogAfterSiteSpecific['Query2'] = LogAfterSiteSpecific['Query_y'].where(LogAfterSiteSpecific['Query_y'].notnull(), LogAfterSiteSpecific['Query_x'])
LogAfterSiteSpecific['PreferredTerm2'] = LogAfterSiteSpecific['PreferredTerm_x'].where(LogAfterSiteSpecific['PreferredTerm_x'].notnull(), LogAfterSiteSpecific['PreferredTerm_y'])
LogAfterSiteSpecific['PreferredTerm2'] = LogAfterSiteSpecific['PreferredTerm_y'].where(LogAfterSiteSpecific['PreferredTerm_y'].notnull(), LogAfterSiteSpecific['PreferredTerm_x'])
LogAfterSiteSpecific['SemanticType2'] = LogAfterSiteSpecific['SemanticType_x'].where(LogAfterSiteSpecific['SemanticType_x'].notnull(), LogAfterSiteSpecific['SemanticType_y'])
LogAfterSiteSpecific['SemanticType2'] = LogAfterSiteSpecific['SemanticType_y'].where(LogAfterSiteSpecific['SemanticType_y'].notnull(), LogAfterSiteSpecific['SemanticType_x'])
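# Hedged note on the TODO above: the net effect of each pair of .where() assignments
# (the second assignment overwrites the first) is "keep the _y value where present,
# else fall back to _x", which Series.combine_first expresses in one step. Shown only
# as an equivalent alternative; it recomputes the same values as the lines above:
LogAfterSiteSpecific['PreferredTerm2'] = LogAfterSiteSpecific['PreferredTerm_y'].combine_first(LogAfterSiteSpecific['PreferredTerm_x'])
LogAfterSiteSpecific['SemanticType2'] = LogAfterSiteSpecific['SemanticType_y'].combine_first(LogAfterSiteSpecific['SemanticType_x'])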
LogAfterSiteSpecific.drop(['PreferredTerm_x', 'PreferredTerm_y', 'SemanticType_x', 'SemanticType_y'], axis=1, inplace=True)
LogAfterSiteSpecific.rename(columns={'PreferredTerm2': 'PreferredTerm',
'SemanticType2': 'SemanticType'}, inplace=True)
# 'Query_x', 'Query_y', 'Query2': 'Query',
LogAfterSiteSpecific.columns
'''
Early in your project we recommend that you cycle through the clustering several
times at this point.
'''
# SemanticType empties alternate between null and empty string
UnassignedAfterSS = LogAfterSiteSpecific[LogAfterSiteSpecific['SemanticType'].isnull()]
# UnassignedAfterSS = LogAfterSiteSpecific.loc[LogAfterSiteSpecific['SemanticType'] == '']
# Set a limit, say, frequency of 5 or more
UnassignedAfterSS = UnassignedAfterSS.loc[(UnassignedAfterSS['TotalSearchFreq'] >= 5)]
# We updated AdjustedQueryTerm so put that in the place of Query
UnassignedAfterSS = UnassignedAfterSS[['AdjustedQueryTerm']].reset_index(drop=True)
UnassignedAfterSS.rename(columns={'AdjustedQueryTerm': 'Query'}, inplace=True)
# -------------
# How we doin?
# -------------
# Total queries in log
SearchesRepresentedTot = LogAfterSiteSpecific['TotalSearchFreq'].sum().astype(int)
SearchesAssignedTot = LogAfterSiteSpecific[LogAfterSiteSpecific['SemanticType'].notnull()]
# SearchesAssignedTot = LogAfterSiteSpecific.loc[LogAfterSiteSpecific['SemanticType'] != '']
SearchesAssignedTot = SearchesAssignedTot['TotalSearchFreq'].sum().astype(int)
SearchesAssignedPercent = (SearchesAssignedTot / SearchesRepresentedTot * 100).astype(int)
# PercentOfSearchesUnAssigned = 100 - PercentOfSearchesAssigned
RowsTot = len(LogAfterSiteSpecific)
RowsAssignedCnt = (LogAfterSiteSpecific['SemanticType'].notnull().sum())
# RowsAssignedCnt = (LogAfterSiteSpecific['SemanticType'].values != '').sum() # .isnull().sum()
# RowsUnassignedCnt = TotRows - RowsAssigned
RowsAssignedPercent = (RowsAssignedCnt / RowsTot * 100).astype(int)
# print("\nTop Semantic Types\n{}".format(LogAfterSiteSpecific['SemanticType'].value_counts().head(10)))
print("\n===============================================================\n ** LogAfterSiteSpecific: {}% of total search volume tagged **\n===============================================================\n{:,} of {:,} searches ({}%) assigned;\n{:,} of {:,} rows ({}%) assigned".format(SearchesAssignedPercent, SearchesAssignedTot, SearchesRepresentedTot, SearchesAssignedPercent, RowsAssignedCnt, RowsTot, RowsAssignedPercent))
# -----------------
# PastMatches.xlsx
# -----------------
# Bring in file containing this site's historical matches
PastMatches = pd.read_excel(dataMatchFiles + 'PastMatches.xlsx')
PastMatches.columns
'''
'SemanticType', 'AdjustedQueryTerm', 'PreferredTerm', 'ui'
'''
# Second, focus on AdjustedQueryTerm in PastMatches; higher success rate.
LogAfterPastMatches = pd.merge(LogAfterSiteSpecific, PastMatches, how='left', left_on=['AdjustedQueryTerm'], right_on=['AdjustedQueryTerm'])
LogAfterPastMatches.columns
'''
'Query', 'AdjustedQueryTerm', 'TotalSearchFreq', 'PreferredTerm_x',
'SemanticType_x', 'PreferredTerm_y', 'SemanticType_y', 'ui'
'''
# TODO - Look for a better way to do the above - MERGE WITH CONDITIONAL OVERWRITE.
# This code stops the merge from wiping out some data. Temporary fix:
# Move _y into _x if _x is empty; or here: where _x has content, use _x, otherwise use _y
LogAfterPastMatches['PreferredTerm2'] = LogAfterPastMatches['PreferredTerm_x'].where(LogAfterPastMatches['PreferredTerm_x'].notnull(), LogAfterPastMatches['PreferredTerm_y'])
LogAfterPastMatches['PreferredTerm2'] = LogAfterPastMatches['PreferredTerm_y'].where(LogAfterPastMatches['PreferredTerm_y'].notnull(), LogAfterPastMatches['PreferredTerm_x'])
LogAfterPastMatches['SemanticType2'] = LogAfterPastMatches['SemanticType_x'].where(LogAfterPastMatches['SemanticType_x'].notnull(), LogAfterPastMatches['SemanticType_y'])
LogAfterPastMatches['SemanticType2'] = LogAfterPastMatches['SemanticType_y'].where(LogAfterPastMatches['SemanticType_y'].notnull(), LogAfterPastMatches['SemanticType_x'])
LogAfterPastMatches.drop(['PreferredTerm_x', 'PreferredTerm_y',
'SemanticType_x', 'SemanticType_y'], axis=1, inplace=True)
LogAfterPastMatches.rename(columns={'PreferredTerm2': 'PreferredTerm',
'SemanticType2': 'SemanticType'}, inplace=True)
LogAfterPastMatches.columns
"""
TODO - Clean up the journal match file before going further with this -
incorrect column separators for some rows. Re-create source and separate with |
pipe.
# -------------------
# JournalMatches.csv
# -------------------
'''
Example of custom list matching
'''
JournalMatches = pd.read_csv(dataMatchFiles + 'JournalMatches.csv', sep = ',') # , skiprows=2
JournalMatches.columns
'''
'AdjustedQueryTerm', 'PreferredTerm', 'SemanticType', 'ui'
'''
"""
# -------------
# How we doin?
# -------------
# Total queries in log
SearchesRepresentedTot = LogAfterPastMatches['TotalSearchFreq'].sum().astype(int)
SearchesAssignedTot = LogAfterPastMatches[LogAfterPastMatches['SemanticType'].notnull()]
# SearchesAssignedTot = LogAfterPastMatches.loc[LogAfterPastMatches['SemanticType'] != '']
SearchesAssignedTot = SearchesAssignedTot['TotalSearchFreq'].sum().astype(int)
SearchesAssignedPercent = (SearchesAssignedTot / SearchesRepresentedTot * 100).astype(int)
# PercentOfSearchesUnAssigned = 100 - PercentOfSearchesAssigned
RowsTot = len(LogAfterPastMatches)
RowsAssignedCnt = (LogAfterPastMatches['SemanticType'].notnull().sum())
# RowsAssignedCnt = (LogAfterPastMatches['SemanticType'].values != '').sum() # .isnull().sum()
# RowsUnassignedCnt = TotRows - RowsAssigned
RowsAssignedPercent = (RowsAssignedCnt / RowsTot * 100).astype(int)
# print("\nTop Semantic Types\n{}".format(LogAfterPastMatches['SemanticType'].value_counts().head(10)))
print("\n==============================================================\n ** LogAfterPastMatches: {}% of total search volume tagged **\n==============================================================\n{:,} of {:,} searches ({}%) assigned;\n{:,} of {:,} rows ({}%) assigned".format(SearchesAssignedPercent, SearchesAssignedTot, SearchesRepresentedTot, SearchesAssignedPercent, RowsAssignedCnt, RowsTot, RowsAssignedPercent))
# Separate next operations so previous matches won't be overwritten
# UnmatchedAfterPastMatches = LogAfterPastMatches.loc[LogAfterPastMatches['SemanticType'] == '']
UnmatchedAfterPastMatches = LogAfterPastMatches[LogAfterPastMatches['SemanticType'].isnull()]
UnmatchedAfterPastMatches = UnmatchedAfterPastMatches[['AdjustedQueryTerm', 'TotalSearchFreq']].reset_index(drop=True)
UnmatchedAfterPastMatches.rename(columns={'AdjustedQueryTerm': 'Search Query'}, inplace=True)
# Remove from memory. PastMatches is used below, so leave that.
del [[LogAfterForeign, LogAfterSiteSpecific, SiteSpecificMatches,
UnassignedAfterSS]]
#%%
# =========================================================================================
# 7. Eyeball results; manually classify remaining "brands," etc., into SiteSpecificMatches
# =========================================================================================
'''
RUNNING THIS SCRIPT FOR THE FIRST TIME OR FIRST FEW TIMES?
The MeSH procedure (follows this one) can generate incorrect matches for your site-specific
terms. For example, BLAST is a "branded" genetics resource that MeSH will
classify as an explosion. This is the time to make sure it's classified as a brand.
Create this spreadsheet of remaining unmatched terms; open it, manually move ONLY
site-specific terms into SiteSpecificMatches, and re-run this script.
Later in this script you will have the chance to run fuzzy-match clustering
multiple times until you have captured what you want / what you have time for.
But if you have program, product, or service names THAT ARE GENERIC TERMS, you
should classify them before running MeSH matching.
We developed a Django-NoSQL interface that could be used for long-term projects,
with some additional work required;
https://github.com/NCBI-Hackathons/Semantic-search-log-analysis-pipeline.
If you're using Google Search Console data, you could use site-area data,
where the person landed, to help you classify the terms. Some terms could use
this extra context.
'''
# write out
writer = pd.ExcelWriter(dataInterim + 'UnmatchedAfterPastMatches.xlsx')
UnmatchedAfterPastMatches.to_excel(writer,'UnmatchedAfterPastMatches', index=False)
# df2.to_excel(writer,'Sheet2')
writer.save()
#%%
# ====================================================================
# 8. Exact-match to UmlsMesh
# ====================================================================
'''
UmlsMesh is a custom-created file, with NLM's free MeSH vocabulary,
plus UMLS Semantic Types.
The script to create the file is in src/data/; however, a UMLS license
is required to create the file.
'''
# Reviewing from above
UnmatchedAfterPastMatches.columns
'''
'Search Query'
'''
# Custom MeSH vocabulary from UMLS, with semantic types
UmlsMesh = pd.read_csv(dataMatchFiles + 'UmlsMesh.csv', sep='|') # , index_col=False
UmlsMesh.drop('wordCount', axis=1, inplace=True)
UmlsMesh.columns
'''
'AdjustedQueryTerm', 'PreferredTerm', 'SemanticType', 'ui', 'LAT',
'SAB'
'''
# Combine with the subset, unmatched only
UmlsMeshMatches = pd.merge(UnmatchedAfterPastMatches, UmlsMesh, how='inner', left_on=['Search Query'], right_on=['AdjustedQueryTerm'])
UmlsMeshMatches.columns
'''
'Search Query', 'TotalSearchFreq', 'AdjustedQueryTerm', 'PreferredTerm',
'SemanticType', 'ui', 'LAT', 'SAB'
'''
# Join to full log
LogAfterUmlsMesh =
|
pd.merge(LogAfterPastMatches, UmlsMeshMatches, how='left', left_on=['AdjustedQueryTerm'], right_on=['Search Query'])
|
pandas.merge
|
#App Heroku
from PIL import Image
import pandas as pd
import numpy as np
import plotly.express as px
import streamlit as st
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
import geopandas
import folium
# ------------------------------------------
# settings
# ------------------------------------------
st.set_page_config( layout='wide' )
# ------------------------------------------
# Welcome
# ------------------------------------------
st.title('House Rocket Dashboard')
st.markdown('---')
st.markdown('## About the dataset')
st.markdown('''The data analyzed here was extracted from Kaggle. House Rocket is a real company, a digital platform
whose business model is buying and selling properties using technology. The data in this dataset does not represent
any link with the company; the whole situation discussed here (from the CEO to the business questions) is for study purposes only.
''')
st.markdown('\n''''*Data dictionary*:''')
st.markdown("- id: Identifier of each property"
'\n'"- date: Date the property was sold"
'\n'"- price: Sale price of the property"
'\n'"- bedrooms: Number of bedrooms"
'\n'"- bathrooms: Number of bathrooms"
'\n'"- sqft_living: Square footage of the living area"
'\n'"- sqrt_log: Square footage of the lot"
'\n'"- floors: Number of floors"
'\n'"- waterfront: Waterfront view"
'\n'"- view: Whether the property has been viewed"
'\n'"- condition: Condition of the property"
'\n'"- grade: Overall grade given to the housing unit"
'\n'"- sqft_above: Square footage of the property above ground"
'\n'"- sqft_basement: Square footage of the basement"
'\n'"- yr_built: Year the property was built"
'\n'"- yr_renovated: Year the property was renovated"
'\n'"- zipcode: Zip code"
'\n'"- lat: Latitude"
'\n'"- long: Longitude"
'\n'"- sqft_living15: Square footage of the living area in 2015 (implies some renovations)"
'\n'"- sqrt_lot15: Square footage of the lot in 2015 (implies some renovations)"
'\n'"- dormitory_type: Property classification based on the number of bedrooms"
'\n'"- condition_type: Classification of the property's state of conservation"
'\n'"- size: Property size classification based on the living area"
'\n'"- is_renovated: Whether the property has been renovated"
'\n'"- is_waterfront: Whether the property has a waterfront view"
'\n'"- house_age: Classification of the property as old or new"
'\n'"- yr_date: Year of the sale date"
'\n'"- month_date: Month of the sale date"
)
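# The derived columns listed above (dormitory_type, condition_type, size,
# is_renovated, house_age, ...) are presumably created upstream of this
# dashboard. A rough sketch of that feature engineering, with cut-offs chosen
# purely for illustration (assumptions, not the project's actual rules):
def add_derived_columns(df):
    df = df.copy()
    df['dormitory_type'] = df['bedrooms'].apply(
        lambda b: 'studio' if b <= 1 else 'apartment' if b == 2 else 'house')
    df['is_renovated'] = df['yr_renovated'].apply(lambda y: 'yes' if y > 0 else 'no')
    df['house_age'] = df['yr_built'].apply(lambda y: 'old' if y < 1990 else 'new')
    return df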
st.markdown('---')
# ------------------------------------------
# helper functions
# ------------------------------------------
@st.cache(allow_output_mutation=True)
def get_geofile(url):
geofile = geopandas.read_file(url)
return geofile
@st.cache(allow_output_mutation=True)
def get_data(path):
data =
|
pd.read_csv(path)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf =
|
StringIO()
|
pandas.compat.StringIO
|
"""
Extracts information from Visual Field Test files in XML format.
"""
import xml.etree.ElementTree as et
import os.path as osp
import numpy as np
import pandas as pd
from tqdm import tqdm
from constants import *
import matplotlib
#matplotlib.use('TkAgg')
# matplotlib.use('Qt5Agg')
# matplotlib.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
from glob import glob
from datetime import datetime
BASEPATH = "/Users/gyasmeen/Desktop/Results/nyu_vft_xml/*.xml"
DISPLAY_NAME = 'PATIENT/STUDY/SERIES/DISPLAY_NAME' # e.g. SS-24-2 Thr
VISIT_DATE = 'PATIENT/STUDY/VISIT_DATE'
SERIES_DATE_TIME = 'PATIENT/STUDY/SERIES/SERIES_DATE_TIME'
TEST_NODE = 'PATIENT/STUDY/SERIES/FIELD_EXAM/'
STATIC_TEST = TEST_NODE+'STATIC_TEST/'
STATPAC = STATIC_TEST + 'THRESHOLD_TEST/STATPAC/'
GLOBAL_INDICES = STATPAC + 'GLOBAL_INDICES/'
params = ['TEST_PATTERN', 'TEST_STRATEGY', 'STIMULUS_COLOR', 'STIMULUS_SIZE', 'BACKGROUND_COLOR', 'EXAM_DURATION',
'FIXATION_TARGET' , 'FIXATION_MONITOR', 'BLIND_SPOT_X', 'BLIND_SPOT_Y', 'BLIND_SPOT_STIMULUS_SIZE', 'FALSE_NEGATIVE_METHOD',
'FALSE_NEGATIVE_PERCENT' , 'FALSE_POSITIVE_METHOD', 'FALSE_POSITIVE_PERCENT', 'FIXATION_CHECK/TRIALS', 'FIXATION_CHECK/ERRORS',
'FOVEAL_RESULT' , 'FOVEAL_THRESHOLD', 'CENTRAL_REF_LEVEL', 'THROWN_OUT_POINTS', 'MINIMUM_STIMULUS', 'FIELD_SIZE','LANGUAGE',
'THRESHOLD_TEST/SF_STATUS', 'THRESHOLD_TEST/NUM_THRESHOLD_POINTS']
class VFT(object):
def __init__(self, fname, mat, gi):
self.fname = fname
self.mat = mat # VFT as OS oriented matrix 8x9
self.gi = gi # global index, e.g. VFI
self.dt = fname2dt(fname) # visit time
self.x = None # embedding x-pos
self.y = None # embedding y-pos
def fname2dt(fname):
"""001797-2001-07-17-15-03-30-OD.xml -> 200107171503 """
elems = [int(e) for e in fname.split('-')[1:7]]
return datetime(*elems)
def extract_pattern_deviation(root):
"""Returns pattern deviation as sorted list of form [(x,y,value), ...]"""
xpath = STATPAC + 'PATTERN_DEVIATION_VALUE_LIST/PATTERN_DEV_XY_LOCATION'
vs = [[int(e.text) for e in elem] for elem in root.findall(xpath)]
vs.sort()
return vs
def extract_total_deviation(root):
"""Returns total deviation as sorted list of form [(x,y,value), ...]"""
xpath = STATPAC + 'TOTAL_DEVIATION_VALUE_LIST/TOTAL_DEV_XY_LOCATION'
vs = [[int(e.text) for e in elem] for elem in root.findall(xpath)]
vs.sort()
return vs
def extract_thresholds(root):
"""Returns test thresholds as sorted list of form [(x,y,value), ...]"""
xpath = STATIC_TEST + 'THRESHOLD_TEST/THRESHOLD_SITE_LIST/THRESHOLD_XY_LOCATION'
vs = [[int(e.text) for e in elem] for elem in root.findall(xpath)]
#vs = [(x, y, v) for x, y, r, v in vs] # filter out RESULT_1
vs = [(row[0], row[1], row[3]) for row in vs] # filter out RESULT_1
vs.sort()
return vs
def extract_vft_values(root, kind='THRESHOLD'):
"""VFT values: PATTERN, TOTAL, THRESHOLD"""
if kind == 'PATTERN':
return extract_pattern_deviation(root)
if kind == 'TOTAL':
return extract_total_deviation(root)
if kind == 'THRESHOLD':
return extract_thresholds(root)
raise ValueError('Unknown VFT value kind: ' + kind)
def extract_global_index(root, kind='MD'):
"""Global VFT indices: MD, VFI, PSD"""
xpath = GLOBAL_INDICES + kind
elems = root.findall(xpath)
if not elems: return None
gi = float(elems[0].text)
return gi
def extract_test_params(root):
"""VFT parameters, e.g. TEST_PATTERN, TEST_STRATEGY, ..."""
res = {}
'''
xpath = STATIC_TEST + '*'
elems = root.findall(xpath) + root.findall(xpath+'/FIXATION_CHECK*')
#return {e.tag:int(e.text) for e in elems if e.text.isdigit()}
print(xpath)
for e in elems:
print(e.tag)
if e.text.isdigit():
res[e.tag] = int(e.text)
elif len(e.text) > 1:
#print(e.tag, e.text,type(e.text),'$'*100)
res[e.tag] =e.text
else:
for ee in e:
if ee.tag not in ['QUESTIONS_ASKED','SF']:
if ee.text.isdigit():
res[ee.tag] = int(ee.text)
elif len(ee.text) > 1:
res[ee.tag] = ee.text
'''
for p in params:
xpath = STATIC_TEST + p
el = root.findall(xpath)
if not el:
res[p.split('/')[-1]] =''
elif el[0].text.isdigit():
res[el[0].tag] = int(el[0].text)
else:
res[el[0].tag] = el[0].text
for pth in [DISPLAY_NAME,VISIT_DATE,SERIES_DATE_TIME,TEST_NODE+'PUPIL_DIAMETER',TEST_NODE+'PUPIL_DIAMETER_AUTO',TEST_NODE+'EXAM_TIME']:
e=root.find(pth)
if e.text is None:
res[e.tag] = e.text
else:
if e.text.isdigit():
res[e.tag] = int(e.text)
else:
res[e.tag] = e.text
'''
vkind = ['THRESHOLD', 'TOTAL', 'PATTERN']
for vk in vkind:
vs = extract_vft_values(root, vk)
mat = vf2matrix(vs)
res[vk+'_MATRIX'] = [mat]
'''
return res
def extract_display_name(root):
"""VFT display name, e.g. SS-24-2 Thr"""
    elems = root.findall(DISPLAY_NAME)
return elems[0].text if elems else None
def vf_dimensions(vs):
"""Min and max of VFT test point coordiantes"""
xcoord = lambda v: v[0]
ycoord = lambda v: v[1]
xmin, xmax = min(vs, key=xcoord), max(vs, key=xcoord)
ymin, ymax = min(vs, key=ycoord), max(vs, key=ycoord)
return xmin[0], xmax[0], ymin[1], ymax[1]
def vf2matrix(vs, bg_val=0):
    """Convert VFT values to a matrix; positions without a test point are filled with bg_val (default 0)"""
c = 3 * 2
vs = [(x // c, y // c, v) for x, y, v in vs]
xmin, xmax, ymin, ymax = vf_dimensions(vs)
mat = np.zeros((ymax - ymin + 1, xmax - xmin + 1)) + bg_val
for x, y, v in vs:
mat[y - ymin, x - xmin] = v
return mat
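# Example with hypothetical input values: three test points on the 6-degree grid
# collapse to a single row of three columns.
#   >>> vf2matrix([(-9, 3, 28), (-3, 3, 30), (3, 3, 27)], bg_val=0)
#   array([[28., 30., 27.]])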
def read_vft(filepath):
"""Read Visual Field Tests from XML file"""
root = et.parse(filepath).getroot()
return root
def is_25_4(root):
"""Return true if VFT is of type 25-4"""
tp = extract_test_params(root)
p, s = tp['TEST_PATTERN'], tp['TEST_STRATEGY']
return (p, s) == (25, 4)
def normalize(mat,vrange):
"""Normalize to range [0...1]"""
return (np.clip(mat, vrange[0], vrange[1]) - vrange[0])/(vrange[1]-vrange[0])
def normalize_vft(mat):
"""Normalize to range [0...1]"""
return (np.clip(mat, -34, 1)-1)/-35.0
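# Note: normalize_vft also inverts the scale: an input of 1 (the clipped maximum)
# maps to 0 and an input of -34 maps to 1, so larger outputs correspond to more
# negative (worse) input values.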
def read_vfts(n, gkind='MD', vkind='THRESHOLD', basepath=BASEPATH):
"""Read n Visual Field Tests from XML files"""
vfts = []
for fpath in sorted(glob(basepath)):#[:n]):
fname = osp.basename(fpath).split('.')[0]
try:
root = read_vft(fpath)
except:
print("can't parse:" + fpath)
continue
gi = extract_global_index(root, gkind)
# fpr = tp.get('FALSE_POSITIVE_PERCENT', 0)
# fnr = tp.get('FALSE_NEGATIVE_PERCENT', 0)
if not is_25_4(root) or gi is None:
continue
vs = extract_vft_values(root, vkind) # PATTERN TOTAL THRESHOLD
mat = vf2matrix(vs)
print(mat)
####mat = mat if fpath.endswith('-OS.xml') else np.fliplr(mat)
assert mat.shape == (8, 9) # (8,9)
vfts.append(VFT(fname, mat, gi))
return vfts, fname, mat, gi
def view_vf(mat,tit):
# fig = plt.figure(figsize=(1.5, 1.5), frameon=False)
# #fig.canvas.window().statusBar().setVisible(False)
# ax = plt.subplot()
# ax.set_axis_off()
# ax.imshow(mat, interpolation='nearest', cmap='gray', vmax=0, vmin=-30)
plt.imshow(mat, interpolation='nearest', cmap='gray', vmax=mat.max(), vmin=mat.min())
plt.title(tit)
plt.show()
def xml_stats(basepath = BASEPATH):
dfObj = None
count = 0
for fpath in tqdm(sorted(glob(basepath))): # [:n]):
fname = osp.basename(fpath).split('.')[0]
try:
root = read_vft(fpath)
except:
print("can't parse:" + fpath)
continue
tp = extract_test_params(root)
#p, s = tp['TEST_PATTERN'], tp['TEST_STRATEGY']
#print('Testing Algorithm', tp, p, s)
tp['Name']=fname
'''
gkind = ['VFI', 'MD', 'PSD']
for gk in gkind:
gi = extract_global_index(root, gk)
tp[gk] = gi
'''
xpath = GLOBAL_INDICES
types = ['MD','MD_PROBABILITY' ,'PSD','PSD_PROBABILITY','VFI','CPSD','CPSD_PROBABILITY','SF_PROBABILITY']
for kind in types:
e = root.findall(xpath+kind)
if not e:
tp[kind] = ''
else:
tp[e[0].tag] = float(e[0].text)
xpath = STATPAC +'GHT'
e = root.findall(xpath)
tp[e[0].tag] = float(e[0].text)
if dfObj is None:
dfObj =
|
pd.DataFrame(tp,index=[0])
|
pandas.DataFrame
|
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
import numpy as np
import pandas as pd
import datetime
def auto_truncate_description(val):
return val[:1024]
def auto_truncate_title(val):
return val[:255]
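# These helpers are presumably intended as pandas read_csv converters so that
# long text values fit fixed-length database columns; a sketch with hypothetical
# column names:
# df_reviews = pd.read_csv("data/reviews.csv",
#                          converters={"title": auto_truncate_title,
#                                      "description": auto_truncate_description})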
df_ratings =
|
pd.read_csv("data/reviews_amazon_musical_instruments_small.csv")
|
pandas.read_csv
|
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import tarfile
from tempfile import NamedTemporaryFile, TemporaryDirectory
from unittest import mock
from nesta.packages.crunchbase.crunchbase_collect import rename_uuid_columns
from nesta.packages.crunchbase.crunchbase_collect import process_orgs
from nesta.packages.crunchbase.crunchbase_collect import bool_convert
from nesta.packages.crunchbase.crunchbase_collect import process_non_orgs
from nesta.packages.crunchbase.crunchbase_collect import crunchbase_tar
from nesta.packages.crunchbase.crunchbase_collect import get_csv_list
from nesta.packages.crunchbase.crunchbase_collect import get_files_from_tar
@pytest.fixture
def crunchbase_tarfile():
tar = NamedTemporaryFile(suffix='.tar.gz')
print(tar.name)
temp_tar = tarfile.open(tar.name, mode='w:gz')
with TemporaryDirectory() as temp_dir:
# add 3 test csv files
for i in range(3):
with open(f'{temp_dir}/test_{i}.csv', mode='w') as f:
f.write("id,data\n111,aaa\n222,bbb")
temp_tar.add(f.name, f'test_{i}.csv') # rename to remove temp folder structure from filename
temp_tar.close()
yield tar
tar.close()
@mock.patch('nesta.packages.crunchbase.crunchbase_collect.NamedTemporaryFile')
@mock.patch('nesta.packages.crunchbase.crunchbase_collect.requests.get')
def test_crunchbase_tar(mocked_requests, mocked_temp_file, crunchbase_tarfile):
mocked_temp_file().__enter__.return_value = crunchbase_tarfile
mocked_temp_file().__exit__.side_effect = lambda *args: crunchbase_tarfile.close()
crunchbase_tarfile.write = lambda x: None # patch write method to do nothing
with crunchbase_tar() as test_tar:
assert type(test_tar) == tarfile.TarFile
assert test_tar.getnames() == ['test_0.csv', 'test_1.csv', 'test_2.csv']
@mock.patch('nesta.packages.crunchbase.crunchbase_collect.crunchbase_tar')
def test_get_csv_list(mocked_crunchbase_tar, crunchbase_tarfile):
mocked_crunchbase_tar.return_value = tarfile.open(crunchbase_tarfile.name)
expected_result = ['test_0', 'test_1', 'test_2']
assert get_csv_list() == expected_result
@mock.patch('nesta.packages.crunchbase.crunchbase_collect.crunchbase_tar')
def test_get_files_from_tar(mocked_crunchbase_tar, crunchbase_tarfile):
mocked_crunchbase_tar.return_value = tarfile.open(crunchbase_tarfile.name)
expected_result = pd.DataFrame({'id': [111, 222], 'data': ['aaa', 'bbb']})
dfs = get_files_from_tar(['test_0'])
assert type(dfs) == list
assert_frame_equal(dfs[0], expected_result, check_like=True)
@mock.patch('nesta.packages.crunchbase.crunchbase_collect.crunchbase_tar')
def test_get_files_from_tar_limits_rows(mocked_crunchbase_tar, crunchbase_tarfile):
mocked_crunchbase_tar.return_value = tarfile.open(crunchbase_tarfile.name)
expected_result = pd.DataFrame({'id': [111], 'data': ['aaa']})
dfs = get_files_from_tar(['test_0'], nrows=1) # only return 1 row
assert_frame_equal(dfs[0], expected_result, check_like=True)
def test_rename_uuid_columns():
test_df = pd.DataFrame({'uuid': [1, 2, 3],
'org_uuid': [11, 22, 33],
'other_id': [111, 222, 333]
})
expected_result = pd.DataFrame({'id': [1, 2, 3],
'other_id': [111, 222, 333],
'org_id': [11, 22, 33]
})
assert_frame_equal(rename_uuid_columns(test_df), expected_result, check_like=True)
def test_bool_convert():
assert bool_convert('t') is True
assert bool_convert('f') is False
assert bool_convert('aaa') is None
assert bool_convert(None) is None
@pytest.fixture
def generate_test_data():
def _generate_test_data(n):
return [{'data': 'foo', 'other': 'bar'} for i in range(n)]
return _generate_test_data
class TestProcessOrgs():
@staticmethod
@pytest.fixture
def valid_org_data():
return pd.DataFrame({'uuid': ['1-1', '2-2', '3-3'],
'country_code': ['FRA', 'DEU', 'GBR'],
'category_list': ['Data,Digital,Cats', 'Science,Cats', 'Data'],
'category_groups_list': ['Groups', 'More groups', 'extra group'],
'city': ['Paris', 'Berlin', 'London']
})
@staticmethod
@pytest.fixture
def invalid_org_data():
return pd.DataFrame({'uuid': ['1-1', '2-2', '3-3'],
'country_code': ['FRI', 'DEU', 'GBR'],
'category_list': ['Data,Digital,Dogs', 'Science,Cats,Goats', pd.np.nan],
'category_groups_list': ['Groups', 'More groups', 'extra group'],
'city': [None, 'Berlin', 'London']
})
@staticmethod
@pytest.fixture
def existing_orgs():
return {'2-2', '3-3'}
@staticmethod
@pytest.fixture
def no_existing_orgs():
return set()
@staticmethod
@pytest.fixture
def valid_cat_groups():
return pd.DataFrame({'id': ['A', 'B', 'C', 'D'],
'name': ['data', 'digital', 'cats', 'science'],
'category_groups_list': ['Group', 'Groups', 'Grep', 'Grow']
})
@staticmethod
@pytest.fixture
def valid_org_descs():
return pd.DataFrame({'uuid': ['3-3', '2-2', '1-1'],
'description': ['org three', 'org two', 'org one']
})
@staticmethod
@pytest.fixture
def invalid_org_descs():
return pd.DataFrame({'uuid': ['3-3', '2-2'],
'description': ['org three', 'org two']
})
def test_process_orgs_renames_uuid_column(self, valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs):
processed_orgs, _, _ = process_orgs(valid_org_data, no_existing_orgs, valid_cat_groups, valid_org_descs)
processed_orgs = pd.DataFrame(processed_orgs)
assert 'id' in processed_orgs
assert 'uuid' not in processed_orgs
def test_process_orgs_correctly_applies_country_name(self, valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs):
processed_orgs, _, _ = process_orgs(valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs)
processed_orgs = pd.DataFrame(processed_orgs)
expected_result = pd.Series(['France', 'Germany', 'United Kingdom'])
assert_series_equal(processed_orgs['country'], expected_result, check_names=False)
def test_process_orgs_generates_location_id_composite_keys(self, valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs):
processed_orgs, _, _ = process_orgs(valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs)
processed_orgs = pd.DataFrame(processed_orgs)
expected_result = pd.Series(['paris_france', 'berlin_germany', 'london_united-kingdom'])
assert_series_equal(processed_orgs.location_id, expected_result, check_names=False)
def test_process_orgs_inserts_none_if_composite_key_fails(self, invalid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs):
processed_orgs, _, _ = process_orgs(invalid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs)
processed_orgs = pd.DataFrame(processed_orgs)
expected_result = pd.Series([None, 'berlin_germany', 'london_united-kingdom'])
assert_series_equal(processed_orgs.location_id, expected_result, check_names=False)
def test_process_orgs_generates_org_cats_link_table(self, valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs):
_, org_cats, _ = process_orgs(valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs)
expected_result = [{'organization_id': '1-1', 'category_name': 'data'},
{'organization_id': '1-1', 'category_name': 'digital'},
{'organization_id': '1-1', 'category_name': 'cats'},
{'organization_id': '2-2', 'category_name': 'science'},
{'organization_id': '2-2', 'category_name': 'cats'},
{'organization_id': '3-3', 'category_name': 'data'}
]
assert org_cats == expected_result
def test_process_orgs_returns_missing_cat_groups(self, invalid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs):
_, org_cats, missing_cat_groups = process_orgs(invalid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs)
expected_org_cats = [{'organization_id': '1-1', 'category_name': 'data'},
{'organization_id': '1-1', 'category_name': 'digital'},
{'organization_id': '1-1', 'category_name': 'dogs'},
{'organization_id': '2-2', 'category_name': 'science'},
{'organization_id': '2-2', 'category_name': 'cats'},
{'organization_id': '2-2', 'category_name': 'goats'}
]
missing_cats = {c['name'] for c in missing_cat_groups}
expected_missing_cat_groups = {'dogs', 'goats'}
assert org_cats == expected_org_cats
assert missing_cats == expected_missing_cat_groups
def test_process_orgs_appends_long_descriptions(self, valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs):
processed_orgs, _, _ = process_orgs(valid_org_data, no_existing_orgs,
valid_cat_groups, valid_org_descs)
processed_orgs = pd.DataFrame(processed_orgs)
expected_result = pd.DataFrame({'id': ['1-1', '2-2', '3-3'],
'long_description': ['org one', 'org two', 'org three']})
|
assert_frame_equal(processed_orgs[['id', 'long_description']], expected_result, check_like=True)
|
pandas.testing.assert_frame_equal
|
from Calculatefunction import *
import pandas as pd
import numpy as np  # np.append is used below
df=pd.read_excel("/Users/dingding/Desktop/sample5.9.xlsx")
grbname=df['grb']
z=df['z']
ep=149.804392
s=df['s']
#GRBname z ep[kev] s[erg/cm^-2] luminositydistance[cm] egamma[erg] eiso[erg] kcorrection seita[degree]
alpha=-1
beta=-2.5
grbname1=[]
z1=[]
alpha1=[]
beta1=[]
s1=[]
luminositydistance1=[]
egamma1=[]
eiso1=[]
k1=[]
seita1=[]
for i in range(296):
seita1=np.append(seita1,seita(float(z[i]),float(ep),float(s[i]),float(alpha),float(beta),15,350))
grbname1=np.append(grbname1,grbname[i])
z1=np.append(z1,z[i])
alpha1=np.append(alpha1,alpha)
beta1=np.append(beta1,beta)
s1=np.append(s1,s[i])
luminositydistance1=np.append(luminositydistance1,dl(z[i]))
egamma1=np.append(egamma1,egamma(z[i],ep))
eiso1=np.append(eiso1,eiso(float(z[i]),float(ep),float(s[i]),float(alpha),float(beta),15,350))
k1=np.append(k1,k(ep,z[i],alpha,beta,15,350))
print(seita1)
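# A more compact alternative to the per-column CSV writes below (a sketch; the
# column names and output path here are illustrative only):
results = pd.DataFrame({'grb': grbname1, 'z': z1, 'alpha': alpha1, 'beta': beta1,
                        's': s1, 'dl': luminositydistance1, 'egamma': egamma1,
                        'eiso': eiso1, 'k': k1, 'seita': seita1})
# results.to_csv('/users/dingding/desktop/all_results.csv', sep=',', index=False)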
dataframename=pd.DataFrame(grbname1)
dataframename.to_csv('/users/dingding/desktop/grbname.csv',sep=',')
dataframename=pd.DataFrame(z1)
dataframename.to_csv('/users/dingding/desktop/z.csv',sep=',')
dataframename=pd.DataFrame(s1)
dataframename.to_csv('/users/dingding/desktop/s.csv',sep=',')
dataframename=pd.DataFrame(luminositydistance1)
dataframename.to_csv('/users/dingding/desktop/ld.csv',sep=',')
dataframename=
|
pd.DataFrame(egamma1)
|
pandas.DataFrame
|
from ib_insync import *
import numpy as np
import pandas as pd
import logging
import pickle
from config import *
ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)
logger = logging.getLogger(__name__)
class Portfolio:
def __init__(self,end_date='',duration='1 Y', sample_freq='W-MON'):
self.stocks = PORTFOLIO.keys()
self.failed = []
self.end_date = end_date
self.duration = duration
self.sample_freq = sample_freq
def import_data_from_IB(self, code):
contract = Stock(code, 'SMART', 'USD')
contract.primaryExchange = "NASDAQ"
bars = ib.reqHistoricalData(
contract, endDateTime='', durationStr=self.duration,
barSizeSetting='1 day', whatToShow='MIDPOINT', useRTH=True)
# convert to pandas dataframe:
df = util.df(bars)
if df is not None:
df = df.rename(columns={"close": code}) #close price is used
df.index=df['date']
return df[[code]]
else:
self.failed.append(code)
logger.info('-------------------\n "%s" not found in NASDAQ'%(code))
return pd.DataFrame()
def import_data_from_yahoo(self):
pass
def run(self, save_to=None):
df = [self.import_data_from_IB(stock) for stock in self.stocks]
df =
|
pd.concat(df,axis=1)
|
pandas.concat
|
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import codecademylib3_seaborn
import matplotlib.pyplot as plt
import pandas as pd
# Extract data from CSV file
streeteasy =
|
pd.read_csv("https://raw.githubusercontent.com/sonnynomnom/Codecademy-Machine-Learning-Fundamentals/master/StreetEasy/manhattan.csv")
|
pandas.read_csv
|
import os
import pandas as pd
from simulator import Simulator
def generate_data_csv(
path, days=365 * 3, n_wells=50, n_technicians=5, seed=17, datadrift_example=False,
):
print("generating data")
sim = Simulator(n_wells=n_wells, n_technicians=n_technicians, seed=seed,)
# get one year's worth:
times = []
wells = [[] for _ in range(n_wells)]
simdays = 28 if datadrift_example else days
for d in range(simdays):
print("simulating data for day", d, end="\r")
for h in range(24):
sim.step_hour()
state = sim.get_state()
times.append(state["time"])
for well in state["wells"]:
wells[well["id"]].append(well)
# for each well, get time to failure etc.
print()
first = True
for well in wells:
print("processing well", well[0]["id"], end="\r")
df =
|
pd.DataFrame(well)
|
pandas.DataFrame
|
import thunderbolt
import unittest
from os import path
import pandas as pd
import pickle
"""
requires:
python sample.py test.TestCaseTask --param=sample --number=1 --workspace-directory=./test_case --local-scheduler
running:
python -m unittest discover -s ./
"""
class SimpleLocalTest(unittest.TestCase):
def setUp(self):
self.tb = thunderbolt.Thunderbolt(self.get_test_case_path())
def test_init(self):
self.assertEqual(self.tb.client.workspace_directory, self.get_test_case_path())
task = self.tb.tasks[0]
self.assertEqual(task['task_name'], 'TestCaseTask')
self.assertEqual(task['task_hash'], 'c5b4a28a606228ac23477557c774a3a0')
self.assertListEqual(task['task_log']['file_path'], ['./test_case/sample/test_case_c5b4a28a606228ac23477557c774a3a0.pkl'])
self.assertDictEqual(task['task_params'], {'param': 'sample', 'number': '1'})
def get_test_case_path(self, file_name: str = ''):
p = path.abspath(path.join(path.dirname(__file__), 'test_case'))
if file_name:
return path.join(p, file_name)
return p
def test_get_task_df(self):
df = self.tb.get_task_df(all_data=True)
df = df.drop('last_modified', axis=1)
target_df = pd.DataFrame([{
'task_id': 0,
'task_name': 'TestCaseTask',
'task_params': {
'param': 'sample',
'number': '1'
},
'task_hash': 'c5b4a28a606228ac23477557c774a3a0',
'task_log': {
'file_path': ['./test_case/sample/test_case_c5b4a28a606228ac23477557c774a3a0.pkl']
}
}])
|
pd.testing.assert_frame_equal(df, target_df)
|
pandas.testing.assert_frame_equal
|
import json
import re
from datetime import datetime
from typing import Dict, List, Tuple, Union
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
import StocksMA.utils as utils
def get_tickers() -> None:
"""Show available tickers with the full
name of the company
"""
for ticker, name in utils.COMPANIES.items():
print(ticker, "/", name[0])
def get_isin(company: str) -> Tuple:
"""Get International Securities Identification Number(ISIN)
of a given Moroccan company
Args:
company (str): Company name or ticker symbol(e.g. 'maroc telecom', 'MNG')
Raises:
ValueError: Company must be defined not empty
Exception: Company cannot be found
Exception: Found several companies with the same name
Returns:
Tuple: (Company full name, Company ISIN)
"""
if not company:
raise ValueError("Company must be defined not empty")
if company.upper() in utils.COMPANIES.keys():
company = utils.COMPANIES[company.upper()][1]
url = (
"https://www.leboursier.ma/api?method=searchStock&format=json&search=" + company
)
request_data = utils.get_request(url)
# r.encoding='utf-8-sig'
result = json.loads(request_data.content)["result"]
len_result = len(result)
if len_result == 0 or (len_result == 1 and len(result[0]["isin"]) == 0):
raise Exception(
f"Company {company} cannot be found, use get_tickers() to get a list of available tickers"
)
elif len_result > 1:
names = [
n["name"]
for n in result
if n["name"] in [t[1] for t in utils.COMPANIES.values()]
]
if company.upper() in map(str.upper, names) or len(names) == 1:
return result[0]["name"], result[0]["isin"]
else:
raise Exception(
f"Found several companies with the same name {company} \n {names}"
)
else:
return result[0]["name"], result[0]["isin"]
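# Example usage (a sketch; the result depends on the live leboursier.ma API):
#   name, isin = get_isin("maroc telecom")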
T_ed = Union[str, None]
def get_data_stock(company: str, start_date: str, end_date: T_ed) -> pd.DataFrame:
"""Get historical OHLCV data for a given symbol
Args:
company (str): Company name or ticker symbol(e.g. 'maroc telecom', 'MNG')
        start_date (str): (YYYY-MM-DD) Starting date to pull data from, limited to a maximum of six years
end_date (T_ed): (YYYY-MM-DD) Ending date
Returns:
pd.DataFrame: Dataframe of historical OHLCV data
"""
name, isin = get_isin(company)
url = (
"https://www.leboursier.ma/api?method=getStockOHLC&ISIN="
+ isin
+ "&format=json"
)
request_data = utils.get_request(url)
data = json.loads(request_data.content)
data = pd.DataFrame(
data["result"], columns=["Date", "Open", "High", "Low", "Close", "Volume"]
)
data.index = pd.to_datetime(
data.Date.apply(lambda x: datetime.fromtimestamp(x / 1000.0).date())
)
data = data.loc[lambda x: (start_date <= x.index) & (x.index <= end_date)]
data.set_index(
|
pd.MultiIndex.from_product([[name], data.index], names=["Company", "Date"])
|
pandas.MultiIndex.from_product
|
#importing libs.
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
from scipy.stats import multivariate_normal
from scipy import stats
sns.set_style("white")
# The data was loaded from the Excel file provided by the SOA; columns not needed
# for this analysis were removed. You can download the data from GitHub.
os.chdir('C:\\Users\\M\\Desktop\\dataproj')
#our data
data1=
|
pd.read_excel('final_data1.xlsx')
|
pandas.read_excel
|
"""
sess_pupil_util.py
This module contains functions for extracting pupil diameter information from
the pupil tracking data generated by the Allen Institute OpenScope experiments
for the Credit Assignment Project.
Authors: <NAME>
Date: July, 2019
Note: this code uses python 3.7.
"""
import copy
import logging
from pathlib import Path
import numpy as np
import pandas as pd
from util import logger_util
from sess_util import sess_file_util
logger = logging.getLogger(__name__)
#############################################
def get_center_dist_diff(center_x, center_y):
"""
get_center_dist_diff(center_x, center_y)
Returns the change in pupil center between each pupil frame. All in pixels.
Required args:
- center_x (1D array): pupil center position in x at each pupil frame
- center_y (1D array): pupil center position in y at each pupil frame
Returns:
- center_dist_diff (1D array): change in pupil center between each
pupil frame
"""
center = np.stack([center_x, center_y])
center_diff = np.diff(center, axis=1)
center_dist_diff = np.sqrt(center_diff[0]**2 + center_diff[1]**2)
return center_dist_diff
#############################################
def _diam_no_blink(diam, thr=5):
"""
_diam_no_blink(diam)
Returns the diameter without large deviations likely caused by blinks
Required args:
- diam (1D array): array of diameter values
Optional args:
- thr (num): threshold diameter to identify blinks
default: 5
Returns:
- nan_diam (1D array): array of diameter values with aberrant values
removed
"""
nan_diam = copy.deepcopy(diam)
# Find aberrant blocks
diam_diff = np.append(0, np.diff(diam))
diam_thr = np.where(np.abs(diam_diff) > thr)[0]
diam_thr_diff = np.append(1, np.diff(diam_thr))
if len(diam_thr) == 0:
return nan_diam
diff_thr = 10 # how many consecutive frames should be non-aberrant
searching = True
i = 0
    while(searching):
        left = diam_thr[i]
        w = np.where(diam_thr_diff[i + 1:] > diff_thr)[0]
        if w.size: # i.e., non-empty array
            right_i = np.min(w + i + 1) - 1
            right = diam_thr[right_i]
            i = right_i + 1  # continue from the block after this one
        else:
            # last aberrant block reached; right_i may never have been set,
            # so do not advance i here
            right = diam_thr[-1]
            searching = False
        nan_diam[left:right + 1] = np.nan
return nan_diam
#############################################
def _eye_diam_center(df, thr=5):
"""
_eye_diam_center(df)
Returns the approximated pupil diameter, center, and frame-by-frame
center differences (approximate derivative). All in pixels.
Required args:
    - df (pd DataFrame): dataframe with the following columns
("coords", "bodyparts", ordered by frame numbers)
Optional args:
- thr (num): threshold diameter to identify blinks
default: 5
Returns:
- nan_diam (1D array) : array of diameter values with aberrant
values removed
- center (2D array) : pupil center position at each pupil
frame, structured as
frame x coord (x, y)
- center_dist_diff (1D array): change in pupil center between each
pupil frame
"""
pupil = "--pupil"
pup_df = df.loc[(df["bodyparts"].str.contains(pupil))]
ds = [None, None]
all_vals = [None, None]
coords = ["x", "y"]
for c, coord in enumerate(coords):
coord_df = pup_df.loc[(pup_df["coords"].str.match(coord))]
col = [col_name.replace(pupil, "")
for col_name in coord_df["bodyparts"].tolist()]
# Remove "bodyparts" and "coords" columns
coord_df.pop("bodyparts")
coord_df.pop("coords")
vals = coord_df.to_numpy("float")
diffs = [["left", "right"], ["top", "bottom"],
["lower-left", "upper-right"], ["upper-left", "lower-right"]]
diff_vals = np.empty([vals.shape[1], len(diffs)])
# pairwise distances between points furthest apart (see diffs for pairs)
for d, diff in enumerate(diffs):
diff_vals[:, d] = np.abs(
vals[col.index(diff[0]), :] - vals[col.index(diff[1]), :])
ds[c] = diff_vals
all_vals[c] = vals
[dx, dy] = ds
[x, y] = all_vals
# find diameters (frames x diams (4))
diams = np.sqrt(dx**2 + dy**2)
median_diam = np.median(diams, axis=1)
nan_diam = _diam_no_blink(median_diam, thr)
# find centers and frame-to-frame differences
center = np.transpose([np.mean(x, axis=0), np.mean(y, axis=0)])
center_dist_diff = get_center_dist_diff(center[:, 0], center[:, 1])
nan_idx = np.where(np.isnan(nan_diam))[0]
center[nan_idx] = np.nan
nan_idx = np.where(np.isnan(np.diff(nan_diam)))[0]
center_dist_diff[nan_idx] = np.nan
return nan_diam, center, center_dist_diff
#############################################
def create_pup_h5(source_df_path, sessid, mouseid, date, savedir=".", thr=5,
log=True):
"""
create_pup_h5(source_df_path, sessid, mouseid, date)
    Creates a pupil dataframe from a source dataframe and saves it as an h5 file.
Required args:
- source_df_path (Path): path to source dataframe, saved as a csv file,
and containing the following columns
("coords", "bodyparts", ordered by frame
numbers)
- sessid (int or str) : session ID to generate pup_h5_name from.
- mouseid (int or str) : mouse ID to generate pup_h5_name from.
- date (int or str) : date (YYYYMMDD) to generate pup_h5_name from.
Optional args:
- savedir (Path): main directory in which to save pupil data file.
default: "."
- thr (num) : threshold diameter to identify blinks
default: 5
- log (bool) : if True, target save path is logged to the console
default: True
"""
if log:
logger.info(f"Creating pupil data dataframe...")
df = pd.read_csv(source_df_path, index_col=0, dtype=object)
if "bodyparts" in df.index:
df = df.transpose()
nan_diam, center, _ = _eye_diam_center(df, thr=thr)
new_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Postprocessing module for the SQL database of the inland navigation tool BIVAS.
It helps in getting more specific results out of this huge database.
<NAME>, Deltares
"""
import pandas as pd
import networkx as nx
import sqlite3
import numpy as np
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
from shapely.geometry import Point, LineString
import geopandas
except ImportError:
logger.warning('Loading of shapely/geopandas failed. Geometric functions will not work')
class pyBIVAS:
"""
This class helps in postprocessing BIVAS data
"""
NSTR_shortnames = {
-2: 'Onbekend',
-1: 'Leeg (geen lading)',
0: '0 - Landbouw',
1: '1 - Voeding en vee',
2: '2 - Mineralen',
3: '3 - Aardolie',
4: '4 - Ertsen',
5: '5 - Staal',
6: '6 - Bouwmaterialen',
7: '7 - Meststoffen',
8: '8 - Chemische prod.',
9: '9 - Goederen'
}
appeareance_rename = {
'Overig': 'Leeg'
}
compute_route_statistics = """
SUM(trips.NumberOfTrips) AS "Aantal Vaarbewegingen (-)",
SUM(trips.TotalWeight__t * trips.NumberOfTrips) AS "Totale Vracht (ton)",
SUM(trips.TwentyFeetEquivalentUnits * trips.NumberOfTrips) AS "Totale TEU (-)",
SUM(route_statistics.TravelTime__min * trips.NumberOfTrips) AS "Totale Reistijd (min)",
SUM(route_statistics.VariableTimeCosts__Eur * trips.NumberOfTrips) + SUM(route_statistics.VariableDistanceCosts__Eur * trips.NumberOfTrips) + SUM(route_statistics.FixedCosts__Eur * trips.NumberOfTrips) AS "Totale Vaarkosten (EUR)",
SUM(route_statistics.VariableTimeCosts__Eur * trips.NumberOfTrips) + SUM(route_statistics.VariableDistanceCosts__Eur * trips.NumberOfTrips) AS "Totale Variabele Vaarkosten (EUR)",
SUM(route_statistics.VariableTimeCosts__Eur * trips.NumberOfTrips) AS "Totale Variabele-Tijd Vaarkosten (EUR)",
SUM(route_statistics.VariableDistanceCosts__Eur * trips.NumberOfTrips) AS "Totale Variabele-Afstand Vaarkosten (EUR)",
SUM(route_statistics.FixedCosts__Eur * trips.NumberOfTrips) AS "Totale Vaste Vaarkosten (EUR)",
SUM(route_statistics.Distance__km * trips.NumberOfTrips) AS "Totale Afstand (km)",
SUM((trips.TotalWeight__t * route_statistics.Distance__km) * trips.NumberOfTrips) AS "Totale TonKM (TONKM)"
"""
directions_dutch = {
'Downstream': 'Benedenstrooms',
'South-East': 'Zuid-Oost',
'South-West': 'Zuid-West',
'West': 'West',
'South': 'Zuid',
'North': 'Noord',
'East': 'Oost',
'North-East': 'Noord-Oost',
'North-West': 'Noord-West',
'Upstream': 'Bovenstrooms'
}
def __init__(self, databasefile=None):
"""
Initialise class
"""
if databasefile:
self.connectToSQLiteDatabase(databasefile)
def __repr__(self):
return f'pyBIVAS connection to: {self.databasefile}'
def connectToMySQLDatabase(self, host='localhost', user='root', password='', db='bivas'):
"""
Connect to MySQL database
By default connects to localhost
"""
import pymysql
self.connection = pymysql.connect(host=host,
user=user,
password=password,
db=db,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
return self.connection
def connectToSQLiteDatabase(self, databasefile):
"""
Connect to sqlite3 databasefile (.db)
"""
logger.info('Loading database: {}'.format(databasefile))
self.databasefile = Path(databasefile)
assert self.databasefile.exists(), 'Database does not exist'
self.connection = sqlite3.connect(self.databasefile)
return self.connection
def set_scenario(self, scenario=None):
"""
Set scenario to perform analysis
An int will be assumed to be the id
A string will be assumed to be the name
"""
scenarioOverview = self.scenario_parameters()
if isinstance(scenario, int):
self.scenarioID = scenario
self.scenarioName = scenarioOverview.loc[self.scenarioID, 'Name']
elif isinstance(scenario, str):
self.scenarioName = scenario
self.scenarioID = scenarioOverview[scenarioOverview['Name'] == self.scenarioName].index[0]
else:
finishedruns = scenarioOverview['Locked'] == 1
if finishedruns.any():
self.scenarioID = scenarioOverview[finishedruns].index[0]
self.scenarioName = scenarioOverview.loc[self.scenarioID, 'Name']
logger.info(f'ScenarioID not given. Assuming scenario with output: {self.scenarioID} - {self.scenarioName}')
else:
self.scenarioID = scenarioOverview.index[0]
self.scenarioName = scenarioOverview.loc[self.scenarioID, 'Name']
logger.info(f'ScenarioID not given. Assuming scenario without output: {self.scenarioID} - {self.scenarioName}')
self.trafficScenario = scenarioOverview.loc[self.scenarioID,
'TrafficScenarioID']
self.WaterScenarioID = scenarioOverview.loc[self.scenarioID,
'WaterScenarioID']
self.ReferenceTripSetID = scenarioOverview.loc[self.scenarioID,
'ReferenceTripSetID']
# Basic lists
def scenario_parameters(self):
"""Overview of all scenarios with parameters"""
sql = """
SELECT *
FROM scenarios
JOIN branching$branch_sets ON scenarios.ID = branching$branch_sets.BranchID
JOIN parameters ON branching$branch_sets.ID = parameters.BranchSetID
ORDER BY scenarios.ID
"""
df = self.sql(sql)
df = df.set_index('ID')
return df
def appearancetypes(self, rename_to_Leeg=True):
sql = """SELECT * FROM appearance_types ORDER BY Id"""
appearance_types = self.sql(sql).set_index('ID')
if rename_to_Leeg:
appearance_types.replace({'Description': self.appeareance_rename}, inplace=True)
return appearance_types
def CEMTclass(self):
sql = """SELECT * FROM cemt_class ORDER BY Id"""
cemt_class = self.sql(sql).set_index('Id')
return cemt_class
def shiptypes(self):
sql = """
SELECT ship_types.*, cemt_class.Description
FROM ship_types
LEFT JOIN cemt_class ON CEMTTypeID=cemt_class.ID
ORDER BY CEMTTypeID, Id"""
ship_types = self.sql(sql).set_index('ID')
return ship_types
def trafficscenario_numberoftrips(self):
"""Count trips per traffic scenario"""
sql = """
SELECT traffic_scenarios.ID, traffic_scenarios.Description, count(*)
FROM trips
LEFT JOIN traffic_scenarios ON TrafficScenarioID = traffic_scenarios.ID
GROUP BY TrafficScenarioID"""
df = self.sql(sql)
df.set_index('ID', inplace=True)
return df
def trafficscenario_timeseries(self):
"""Trips in trafficScenario per date"""
sql = """
SELECT DATE(trips.DateTime) AS date,
COUNT(*) AS nTrips,
AVG(TotalWeight__t) as AvgTotalWeight__t,
AVG(NumberOfTrips) as AvgNumberOfTrips
FROM trips
LEFT JOIN traffic_scenarios ON TrafficScenarioID = traffic_scenarios.ID
WHERE traffic_scenarios.ID = '{0}'
GROUP BY DATE(trips.DateTime)
""".format(self.trafficScenario)
df = self.sql(sql)
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
return df
def trips_timeseries(self):
"""Trips in scenario per date"""
sql = """
SELECT DATE(DateTime) AS date,
count(*) AS nTrips,
AVG(TotalWeight__t) as AvgTotalWeight__t,
SUM(TotalWeight__t * NumberOfTrips) as SumTotalWeight__t,
SUM(NumberOfTrips) as SumNumberOfTrips
FROM trips_{0}
GROUP BY DATE(DateTime)
""".format(self.scenarioID)
df = self.sql(sql)
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
return df
def trips_details(self):
"""Get all trip properties"""
sql = """
SELECT trips.*,
ship_types.Label,
ship_types.Description,
nstr_mapping.Description AS nstr_description,
appearance_types.Description AS appear_description,
dangerous_goods_levels.Description AS dangerous_description
FROM trips_{0} AS trips
LEFT JOIN ship_types ON trips.ShipTypeID = ship_types.ID
LEFT JOIN nstr_mapping ON trips.NstrGoodsClassification = nstr_mapping.GroupCode
LEFT JOIN appearance_types ON trips.AppearanceTypeID = appearance_types.ID
LEFT JOIN dangerous_goods_levels ON trips.DangerousGoodsLevelID = dangerous_goods_levels.ID
""".format(self.scenarioID)
df = self.sql(sql)
df = df.set_index('ID')
df['DateTime'] = pd.to_datetime(df['DateTime'])
df = df.drop(['SeasonID', 'ShipTypeID', 'DangerousGoodsLevelID', 'LoadTypeID'], axis=1)
return df
def trips_statistics(self, tripsArray: list):
"""
Get route statistics for an array of given trips
"""
listOfTrips = ",".join(str(t) for t in tripsArray)
sql = f"""
SELECT trips.ID,
{self.compute_route_statistics}
FROM trips_{self.scenarioID} AS trips
LEFT JOIN route_statistics_{self.scenarioID} AS route_statistics ON route_statistics.TripID = trips.ID
WHERE trips.ID IN ({listOfTrips})
GROUP BY trips.ID
"""
df2 = self.sql(sql)
df2 = df2.set_index('ID')
return df2
def routestatistics_advanced(self, group_by=['Vorm', 'NSTR']):
"""
Advanced query for requesting all results in combinations of groupable parameters. The following groupings are possible:
- TripsID: Generate statistics for all individual trips
- Days: Generate output on daily frequency
- NSTR: Classification of loads
- Vorm: Classification of appearance type
- Origin_Node: Group by node of origin port
- Destination_Node: Group by node of destination port
- Origin_Zone: Group by Zone area of origins
- Destination_Zone: Group by Zone area of destinations
"""
sql_select = ''
sql_groupby = ''
sql_leftjoin = ''
sql_where = ''
include_all_columns = False
if not group_by:
sql_groupby = 'Null'
else:
if 'TripsID' in group_by:
sql_select += 'trips.ID AS "TripsID",'
sql_groupby += 'trips.ID, '
include_all_columns = True
if 'Days' in group_by or include_all_columns:
sql_select += 'DATE(trips.DateTime) AS "Days",'
sql_groupby += 'Days, '
if 'NSTR' in group_by or include_all_columns:
sql_select += 'nstr_mapping.GroupCode AS "NSTR",'
sql_groupby += 'NstrGoodsClassification, '
sql_leftjoin += 'LEFT JOIN nstr_mapping ON trips.NstrGoodsClassification = nstr_mapping.GroupCode '
if 'NST2007' in group_by or include_all_columns:
sql_select += 'nst2007_mapping.Id || "] " || nst2007_mapping.GroupCode || " - " || nst2007_mapping.Description AS "NST2007",'
sql_groupby += 'Nst2007GoodsClassification, '
sql_leftjoin += 'LEFT JOIN nst2007_mapping ON trips.Nst2007GoodsClassification = nst2007_mapping.Id '
if 'Vorm' in group_by or include_all_columns:
sql_select += 'appearance_types.Description AS "Vorm",'
sql_groupby += 'trips.AppearanceTypeID, '
sql_leftjoin += 'LEFT JOIN appearance_types ON trips.AppearanceTypeID = appearance_types.ID '
if 'Origin_Node' in group_by or include_all_columns:
sql_select += 'trips.OriginTripEndPointNodeID AS "Origin_Node",'
sql_select += 'nodes_origin.XCoordinate AS "Origin_X",'
sql_select += 'nodes_origin.YCoordinate AS "Origin_Y",'
sql_groupby += 'trips.OriginTripEndPointNodeID, '
sql_leftjoin += 'LEFT JOIN nodes AS nodes_origin ON trips.OriginTripEndPointNodeID = nodes_origin.ID '
if 'Destination_Node' in group_by or include_all_columns:
sql_select += 'trips.DestinationTripEndPointNodeID AS "Destination_Node",'
sql_select += 'nodes_destination.XCoordinate AS "Destination_X",'
sql_select += 'nodes_destination.YCoordinate AS "Destination_Y",'
sql_groupby += 'trips.DestinationTripEndPointNodeID, '
sql_leftjoin += 'LEFT JOIN nodes AS nodes_destination ON trips.DestinationTripEndPointNodeID = nodes_destination.ID '
if 'Origin_Zone' in group_by or include_all_columns:
zone_definition_id = 9
sql_leftjoin += 'LEFT JOIN zone_node_mapping AS znm_Origin ON trips.OriginTripEndPointNodeID = znm_Origin.NodeID '
sql_leftjoin += 'LEFT JOIN zones AS zones_origin ON znm_Origin.ZoneID = zones_origin.ID '
sql_groupby += 'zones_origin.ID, '
sql_select += 'zones_origin.Name AS Origin_Zone, '
sql_where += f' zones_origin.ZoneDefinitionID = {zone_definition_id} AND znm_Origin.ZoneDefinitionID = {zone_definition_id} AND '
if 'Destination_Zone' in group_by or include_all_columns:
zone_definition_id = 9
sql_leftjoin += 'LEFT JOIN zone_node_mapping AS znm_Destination ON trips.DestinationTripEndPointNodeID = znm_Destination.NodeID '
sql_leftjoin += 'LEFT JOIN zones AS zones_destination ON znm_Destination.ZoneID = zones_destination.ID '
sql_groupby += 'zones_destination.ID, '
sql_select += 'zones_destination.Name AS Destination_Zone, '
sql_where += f' zones_destination.ZoneDefinitionID = {zone_definition_id} AND znm_Destination.ZoneDefinitionID = {zone_definition_id} AND '
sql_groupby = sql_groupby[:-2]
sql_where = sql_where[:-5]
if include_all_columns:
sql_select += f"""
trips.*,
route_statistics.*,
ship_types.Label AS ship_types_Label,
ship_types.Description AS ship_types_Description,
cemt_class.ID AS cemt_class_ID,
cemt_class.Description AS cemt_class_Description,
nst2007_mapping.*,
dangerous_goods_levels.Description AS dangerous_goods_levels_Description,"""
sql_leftjoin += f"""
LEFT JOIN ship_types ON trips.ShipTypeID = ship_types.ID
LEFT JOIN cemt_class ON ship_types.CEMTTypeID = cemt_class.Id
LEFT JOIN dangerous_goods_levels ON trips.DangerousGoodsLevelID = dangerous_goods_levels.ID
LEFT JOIN load_types ON trips.LoadTypeID = load_types.ID"""
sql_groupby = 'Trips.ID'
if not sql_where:
sql_where = '1'
sql = f"""
SELECT {sql_select}
{self.compute_route_statistics}
FROM route_statistics_{self.scenarioID} AS route_statistics
LEFT JOIN trips_{self.scenarioID} AS trips ON route_statistics.TripID = trips.ID
{sql_leftjoin}
WHERE {sql_where} AND trips.NumberOfTrips > 0
GROUP BY {sql_groupby}
"""
df = self.sql(sql)
# Use short strings for NSTR classes
df = df.replace({'NSTR': self.NSTR_shortnames})
df = df.replace({'Vorm': self.appeareance_rename})
# Extra columns:
if include_all_columns:
df['Beladingsgraad'] = df['TotalWeight__t'] / df['LoadCapacity__t']
C_w = 0.9  # could also be retrieved from the database, but it is constant anyway
df['TPCMI'] = 0.01 * df['Length__m'] * df['Width__m'] * C_w
df['Ledige_diepgang'] = df['Depth__m'] - df['TotalWeight__t'] / (df['TPCMI']*100)
df['Maximale_diepgang'] = df['Depth__m'] + (df['LoadCapacity__t'] - df['TotalWeight__t']) / (df['TPCMI']*100)
df['Totale Vaarkosten per TonKM'] = df['Totale Vaarkosten (EUR)'] / df['Totale TonKM (TONKM)']
# Format dates
if group_by and 'Days' in group_by:
df['Days'] = pd.to_datetime(df['Days'])
# TODO: still set up proper indexing (and ordering)
if group_by:
df = df.set_index(group_by).sort_index()
return df
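# Hedged numeric sketch of the derived columns above (values are illustrative, not from the
# database): for a vessel with Length__m = 110, Width__m = 11.4 and C_w = 0.9,
# TPCMI = 0.01 * 110 * 11.4 * 0.9 ≈ 11.3 t per cm immersion, so a cargo of 2000 t changes the
# draught by 2000 / (11.3 * 100) ≈ 1.8 m; Ledige_diepgang and Maximale_diepgang apply exactly
# this correction relative to Depth__m and the remaining LoadCapacity__t.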
def routestatistics_timeseries(self):
"""Routes in scenario per date"""
if self.sql_tableexists(f'route_statistics_{self.scenarioID}'):
sql = f"""
SELECT DATE(trips.DateTime) AS date,
COUNT(*) AS count,
{self.compute_route_statistics}
FROM route_statistics_{self.scenarioID} AS route_statistics
LEFT JOIN trips_{self.scenarioID} AS trips ON route_statistics.TripID = trips.ID
GROUP BY DATE(trips.DateTime)
"""
else:
sql = """
SELECT DATE(trips_{0}.DateTime) AS date,
COUNT(*) AS count
FROM routes_{0}
LEFT JOIN trips_{0} ON routes_{0}.TripID = trips_{0}.ID
WHERE RouteIndex=0
GROUP BY DATE(trips_{0}.DateTime)
""".format(self.scenarioID)
df = self.sql(sql)
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
return df
def route_arcs(self, routeID):
"""
Load route on arcs for a specified RouteID
requires the run of sqlArcs()
"""
sql = """
SELECT ArcID, OriginalArcDirection
FROM routes_{0}
WHERE TripID = {1}
ORDER BY RouteIndex
""".format(self.scenarioID, routeID)
route = self.sql(sql)
route = route.join(self.arcs, on='ArcID')
route = geopandas.GeoDataFrame(route)
return route
def route_stats(self, routeID):
"""
Load advanced route and shipstats for specified RouteID
"""
sql = """
SELECT trips.*,
route.*,
nstr_mapping.Description AS nstr_description,
nst2007_mapping.Description AS nst2007_description,
appearance_types.Description AS appear_description,
ship_types.Label AS ship_label,
ship_types.Description as ship_description,
cemt_class.Description as cemt_class,
dangerous_goods_levels.Description AS dangerous_description
FROM route_statistics_{0} AS route
LEFT JOIN trips_{0} AS trips ON route.TripID = trips.ID
LEFT JOIN nstr_mapping ON trips.NstrGoodsClassification = nstr_mapping.GroupCode
LEFT JOIN nst2007_mapping ON trips.Nst2007GoodsClassification = nst2007_mapping.Id
LEFT JOIN appearance_types ON trips.AppearanceTypeID = appearance_types.ID
LEFT JOIN ship_types ON trips.ShipTypeID = ship_types.ID
LEFT JOIN cemt_class ON ship_types.CEMTTypeID = cemt_class.Id
LEFT JOIN dangerous_goods_levels ON trips.DangerousGoodsLevelID = dangerous_goods_levels.ID
WHERE TripID = {1}
""".format(self.scenarioID, routeID)
routestats = self.sql(sql)
routestats['Beladingsgraad'] = routestats['TotalWeight__t'] / routestats['LoadCapacity__t']
# C_w = 0.9 # could also be received from database, but it's constant anyway
# routestats['TPCMI'] = 0.01 * routestats['Length__m'] * routestats['Width__m'] * C_w
# routestats['Ledige_diepgang'] = routestats['Depth__m'] - routestats['TotalWeight__t'] / (routestats['TPCMI']*100)
# routestats['Maximale_diepgang'] = routestats['Depth__m'] + (routestats['LoadCapacity__t'] - routestats['TotalWeight__t']) / (routestats['TPCMI']*100)
# routestats['Totale Vaarkosten per TonKM'] = routestats['Totale Vaarkosten (EUR)'] / routestats['Totale TonKM (TONKM)']
return routestats
def route_countingpoints(self, routeID, validateroute=None):
"""
Validate the route of a routeID versus the reference tripset
routeID = int
validateroute =
None: do not validate
route: input the route (output of BIVAS.route_arcs(arcID))
'route': run route_arcs first
ReferenceSetID = int (3 = IVS90_2014)
requires the run of sqlArcs()
"""
sql = """
SELECT DateTime, Name, ArcID
FROM reference_trip_set
LEFT JOIN counting_points ON reference_trip_set.CountingPointID = counting_points.ID
LEFT JOIN counting_point_arcs ON reference_trip_set.CountingPointID = counting_point_arcs.CountingPointID
WHERE ReferenceSetID = {0}
AND TripID = {1}
ORDER BY DateTime
""".format(self.ReferenceTripSetID, routeID)
referencestrips = self.sql(sql)
referencestrips = referencestrips.join(
self.arcs, on='ArcID', rsuffix='_r')
referencestrips = geopandas.GeoDataFrame(referencestrips)
# Validate points passed
if validateroute is not None:
if isinstance(validateroute, str) and validateroute == 'route':
validateroute = self.route_arcs(routeID)
referencestrips['PointPassed'] = np.in1d(
referencestrips['ArcID'], validateroute['ArcID'])
referencestrips['geometry'] = referencestrips['geometry'].representative_point()
return referencestrips
def route_computerelaxation(self, tripID, LengthPenalty=None, WidthPenalty=None):
"""
For a given tripID, compute the relaxation per arc and in total
:param tripID:
:param LengthPenalty:
:param WidthPenalty:
:return:
"""
if (LengthPenalty is None) or (WidthPenalty is None):
LengthPenalty, WidthPenalty = \
self.scenario_parameters()[['RestrictionRelaxationLengthPenalty__min_m_km',
'RestrictionRelaxationWidthPenalty__min_dm_km']].values[0]
# Get parameters of ship
routestats = self.route_stats(tripID)
ship_length, ship_width = routestats.loc[0, ['Length__m', 'Width__m']]
# Get route of ship and properties of this route
arcs = self.route_arcs(tripID)
arcs = arcs.set_index('ArcID')[['MaximumLength__m', 'MaximumWidth__m', 'Length__m']]
arcs = arcs.replace({0: np.nan})
# Compute relaxation
arcs['relaxation_op_lengte'] = (ship_length - arcs['MaximumLength__m'])
arcs_sel = arcs[arcs['relaxation_op_lengte'] > 0]
arcs['Relaxation_penalty_length'] = arcs_sel['relaxation_op_lengte'] * (
arcs_sel['Length__m'] / 1000) * LengthPenalty
arcs['relaxation_op_breedte'] = (ship_width - arcs['MaximumWidth__m'])
arcs_sel = arcs[arcs['relaxation_op_breedte'] > 0]
arcs['Relaxation_penalty_width'] = (arcs_sel['relaxation_op_breedte'] * 10) * (
arcs_sel['Length__m'] / 1000) * WidthPenalty
arcs = arcs.fillna(0)
arcs['Relaxation_penalty'] = arcs['Relaxation_penalty_length'] + arcs['Relaxation_penalty_width']
# Summarise
penalty_per_arc = arcs['Relaxation_penalty']
penalty_total = penalty_per_arc.sum()
return penalty_per_arc, penalty_total
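# Hedged numeric sketch of the penalty formula above (illustrative values): a ship of 110 m on an
# arc with MaximumLength__m = 100 and Length__m = 2000 (i.e. 2 km), with a LengthPenalty of
# 1 min per m exceedance per km, yields Relaxation_penalty_length = (110 - 100) * (2000 / 1000) * 1
# = 20 minutes for that arc; the width term works the same way but in decimetres (hence the * 10).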
def infeasibletrips_timeseries(self):
"""Infeasible Trips in scenario per date"""
sql = """
SELECT DATE(trips_{0}.DateTime) AS date,
count(*) AS nTrips,
SUM(trips_{0}.TotalWeight__t) AS SumTotalWeight__t
FROM infeasible_trips_{0} AS infeasible_trips
LEFT JOIN trips_{0} ON infeasible_trips.TripID = trips_{0}.ID
GROUP BY DATE(DateTime)
""".format(self.scenarioID)
df = self.sql(sql)
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
return df
def infeasibletrips_tripdetails(self):
sql = """
SELECT nstr_mapping.GroupCode AS "NSTR",
appearance_types.Description AS "Vorm",
DATE(trips.DateTime) AS "Days",
trips.ID AS "ID",
(trips.NumberOfTrips) AS "Aantal Vaarbewegingen (-)",
(trips.TotalWeight__t * trips.NumberOfTrips) AS "Totale Vracht (ton)",
(trips.TwentyFeetEquivalentUnits) AS "Totale TEU (-)"
FROM infeasible_trips_{0} AS infeasible_trips
LEFT JOIN trips_{0} AS trips ON infeasible_trips.TripID = trips.ID
LEFT JOIN nstr_mapping ON trips.NstrGoodsClassification = nstr_mapping.GroupCode
LEFT JOIN appearance_types ON trips.AppearanceTypeID = appearance_types.ID
WHERE infeasible_trips.NumberOfTrips > 0
""".format(self.scenarioID)
df = self.sql(sql)
df['Days'] = pd.to_datetime(df['Days'])
df = df.replace({'NSTR': self.NSTR_shortnames})
df = df.set_index('ID')
return df
def waterscenario_countarcs(self):
"""Count number of Arcs in each water scenario"""
sql = """
SELECT ID,Description,COUNT(*) AS nArcs
FROM water_scenario_values
LEFT JOIN water_scenarios ON water_scenario_values.WaterScenarioID = water_scenarios.ID
WHERE SeasonID=1
GROUP BY ID
"""
return self.sql(sql)
def waterscenario_arcs_waterdepth(self, ArcIDs):
"""
For a list of ArcIDs get the waterdepth
"""
ArcIDsStr = str(ArcIDs).strip('[]')
sql = f"""
SELECT SeasonID, WaterDepth__m, ArcID
FROM water_scenario_values
WHERE ArcID IN ({ArcIDsStr}) AND WaterScenarioID={self.WaterScenarioID}
"""
df = self.sql(sql)
df = df.set_index('SeasonID')
gp = df.groupby('ArcID')
ndf = pd.DataFrame()
for name, group in gp:
ndf[name] = group['WaterDepth__m']
return ndf
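# Hedged note: the groupby loop above could probably be written as a single pivot, e.g.
# ndf = df.reset_index().pivot(index='SeasonID', columns='ArcID', values='WaterDepth__m')
# (kept as a comment because it is an untested alternative, not part of the original code).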
def arc_tripdetails(self, arcID, extended=True, group_by=None):
"""
This function returns all vessels passing a specified arc, together with
various information about those vessels.
NOTE: Not all columns give proper info when using group_by
TODO: Add support for multiple arcIDs and for an exclude_arcID option, as in route_stats
"""
if not group_by:
group_by = 'trips.ID'
if extended:
sql = f"""
SELECT trips.*,
routes.OriginalArcDirection,
route_statistics.*,
ship_types.Label AS ship_types_Label,
ship_types.Description AS ship_types_Description,
cemt_class.ID AS cemt_class_ID,
cemt_class.Description AS cemt_class_Description,
nstr_mapping.GroupCode AS NSTR,
nstr_mapping.Description AS nstr_Description,
nst2007_mapping.*,
appearance_types.Description AS appearance_types_Description,
dangerous_goods_levels.Description AS dangerous_goods_levels_Description,
{self.compute_route_statistics}
FROM routes_{self.scenarioID} AS routes
LEFT JOIN trips_{self.scenarioID} AS trips ON routes.TripID = trips.ID
LEFT JOIN ship_types ON trips.ShipTypeID = ship_types.ID
LEFT JOIN nstr_mapping ON trips.NstrGoodsClassification = nstr_mapping.GroupCode
LEFT JOIN nst2007_mapping ON trips.Nst2007GoodsClassification = nst2007_mapping.Id
LEFT JOIN cemt_class ON ship_types.CEMTTypeID = cemt_class.Id
LEFT JOIN appearance_types ON trips.AppearanceTypeID = appearance_types.ID
LEFT JOIN dangerous_goods_levels ON trips.DangerousGoodsLevelID = dangerous_goods_levels.ID
LEFT JOIN route_statistics_{self.scenarioID} AS route_statistics ON route_statistics.TripID = routes.TripID
LEFT JOIN load_types ON trips.LoadTypeID = load_types.ID
WHERE ArcID = {arcID} AND trips.NumberOfTrips > 0
GROUP BY {group_by}
"""
else:
sql = f"""
SELECT trips.*,
routes.OriginalArcDirection
FROM routes_{self.scenarioID} AS routes
LEFT JOIN trips_{self.scenarioID} AS trips ON routes.TripID = trips.ID
WHERE ArcID = {arcID}
"""
df = self.sql(sql)
df = df.replace({'NSTR': self.NSTR_shortnames})
df = df.replace({'appearance_types_Description': self.appeareance_rename})
# Extra columns:
if extended:
df['Beladingsgraad'] = df['TotalWeight__t'] / df['LoadCapacity__t']
C_w = 0.9  # could also be retrieved from the database, but it is constant anyway
df['TPCMI'] = 0.01 * df['Length__m'] * df['Width__m'] * C_w
df['Ledige_diepgang'] = df['Depth__m'] - df['TotalWeight__t'] / (df['TPCMI']*100)
df['Maximale_diepgang'] = df['Depth__m'] + (df['LoadCapacity__t'] - df['TotalWeight__t']) / (df['TPCMI']*100)
df['Totale Vaarkosten per TonKM'] = df['Totale Vaarkosten (EUR)'] / df['Totale TonKM (TONKM)']
if group_by == 'trips.ID':
df = df.set_index('ID')
else:
df = df.set_index(group_by)
df['DateTime'] = pd.to_datetime(df['DateTime'])
df = df.drop(['SeasonID', 'ShipTypeID', 'DangerousGoodsLevelID', 'LoadTypeID'], axis=1)
return df
def arc_routestatistics(self, arcID):
"""
Compute route statistics for a specific ArcID
"""
sql = f"""
SELECT trips.ID,
(trips.NumberOfTrips) AS "Aantal Vaarbewegingen (-)",
(trips.TotalWeight__t * trips.NumberOfTrips) AS "Totale Vracht (ton)",
(trips.TwentyFeetEquivalentUnits * trips.NumberOfTrips) AS "Totale TEU (-)",
{self.compute_route_statistics}
FROM routes_{self.scenarioID} AS routes
LEFT JOIN trips_{self.scenarioID} AS trips ON routes.TripID = trips.ID
LEFT JOIN route_statistics_{self.scenarioID} AS route_statistics ON route_statistics.TripID = routes.TripID
WHERE ArcID = {arcID} AND trips.NumberOfTrips > 0
"""
df = self.sql(sql)
df = df.set_index('ID')
return df
def arc_usagestatistics(self):
sql = """
SELECT ArcID,
SUM(arcStats.NumberOfTrips) AS "Aantal Vaarbewegingen (-)",
SUM(arcStats.NumberOfTrips * arcStats.AverageLoadWeight__t) AS "Totale Vracht (ton)",
SUM(arcStats.AverageCosts__Eur) / SUM(arcStats.AverageDistance__km) AS "Gemiddelde kosten/km"
FROM arc_usage_statistics_details_{0} AS arcStats
GROUP BY ArcID
""".format(self.scenarioID)
df = self.sql(sql)
df = df.set_index('ArcID')
return df
def arc_routes_on_network(self, arcID, not_passing_arcID=None):
"""
For a given arcID (or list), this function analyses how all trips passing this Arc are distributed over the network.
By giving an arcID (or list) as not_passing_arcID, it returns all trips that do pass arcID but do not pass not_passing_arcID.
NOTE: When multiple ArcIDs are given, one would expect them all to have an equal (maximum) trip count.
However, in BIVAS/IVS a ship can pass an arc multiple times, which can result in irregularities.
"""
if not not_passing_arcID and isinstance(arcID, int):
# All routes of ships passing 1 point
sql = f"""
SELECT routes.ArcID AS ArcID,
COUNT(*) AS "Aantal"
FROM routes_{self.scenarioID} AS routes_passing_arc
INNER JOIN routes_{self.scenarioID} AS routes ON routes_passing_arc.TripID = routes.TripID
WHERE routes_passing_arc.ArcID = {arcID}
GROUP BY routes.ArcID
"""
elif isinstance(arcID, int) and isinstance(not_passing_arcID, int):
# All routes of ships passing 1 point and not passing another point
sql = f"""
SELECT routes.ArcID AS ArcID,
COUNT(*) AS "Aantal"
FROM routes_{self.scenarioID} AS routes_passing_arc
LEFT JOIN
(SELECT ArcID, TripID FROM routes_{self.scenarioID} WHERE ArcID = {not_passing_arcID})
AS routes_passing_arc2 ON routes_passing_arc.TripID = routes_passing_arc2.TripID
INNER JOIN routes_{self.scenarioID} AS routes ON routes_passing_arc.TripID = routes.TripID
WHERE routes_passing_arc.ArcID = {arcID}
AND routes_passing_arc2.ArcID IS NULL
GROUP BY routes.ArcID"""
else:
# At least one of the inputs is a list of arcs. Let's make sure both are lists
if not isinstance(arcID, list): arcID = [arcID]
if not isinstance(not_passing_arcID, list):
if isinstance(not_passing_arcID, int):
not_passing_arcID = [not_passing_arcID]
else:
not_passing_arcID = []
leftjoins = ''
where = ''
join_id = 1
for a in not_passing_arcID:
join_id += 1
leftjoins += f"""
LEFT JOIN
(SELECT ArcID, TripID FROM routes_{self.scenarioID} WHERE ArcID = {a})
AS routes_passing_arc{join_id} ON routes_passing_arc.TripID = routes_passing_arc{join_id}.TripID
"""
where += f"""
AND routes_passing_arc{join_id}.ArcID IS NULL
"""
for a in arcID[1:]:
join_id += 1
leftjoins += f"""
INNER JOIN
(SELECT ArcID, TripID FROM routes_{self.scenarioID} WHERE ArcID = {a})
AS routes_passing_arc{join_id} ON routes_passing_arc.TripID = routes_passing_arc{join_id}.TripID
"""
sql = f"""
SELECT routes.ArcID AS ArcID,
COUNT(*) AS "Aantal"
FROM routes_{self.scenarioID} AS routes_passing_arc
{leftjoins}
INNER JOIN routes_{self.scenarioID} AS routes ON routes_passing_arc.TripID = routes.TripID
WHERE routes_passing_arc.ArcID = {arcID[0]}
{where}
GROUP BY routes.ArcID"""
df = self.sql(sql)
df = df.set_index('ArcID')
return df
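# Hedged usage sketch (the instance name `bivas` and the ArcIDs are illustrative):
# counts = bivas.arc_routes_on_network([123, 124], not_passing_arcID=[456])
# returns, per ArcID on the network, how many trips pass both arc 123 and arc 124 while
# avoiding arc 456; the result can be joined to the arcs GeoDataFrame for plotting.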
def arcs_timeseries(self, ArcIDs):
"""
For a list of ArcIDs give the number of daily routes
"""
ArcIDsStr = str(ArcIDs).strip('[]')
sql = """
SELECT DATE(trips_{0}.DateTime) AS date,
COUNT(trips_{0}.NumberOfTrips ) AS SumNumberOfTrips,
ArcID
FROM routes_{0}
LEFT JOIN trips_{0} ON routes_{0}.TripID = trips_{0}.ID
WHERE ArcID IN ({1})
GROUP BY DATE(trips_{0}.DateTime), ArcID
""".format(self.scenarioID, ArcIDsStr)
df = self.sql(sql)
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
gp = df.groupby('ArcID')
ndf =
|
pd.DataFrame()
|
pandas.DataFrame
|
import sncosmo
import numpy as np
from lsst.sims.photUtils import Bandpass, Sed
from lsst.sims.photUtils import SignalToNoise
from lsst.sims.photUtils import PhotometricParameters
from astropy.table import Table, Column
from lsst.sims.catUtils.dust import EBV
from scipy.interpolate import griddata
import h5py
from sn_simu_wrapper.sn_object import SN_Object
import time
from sn_tools.sn_utils import SNTimer
from sn_tools.sn_calcFast import srand
import pandas as pd
import operator
from astropy import units as u
class SN(SN_Object):
def __init__(self, param, simu_param, reference_lc=None, gamma=None, mag_to_flux=None, dustcorr=None, snr_fluxsec='interp',error_model=True):
super().__init__(param.name, param.sn_parameters, param.gen_parameters,
param.cosmology, param.telescope, param.SNID, param.area, param.x0_grid,
mjdCol=param.mjdCol, RACol=param.RACol, DecCol=param.DecCol,
filterCol=param.filterCol, exptimeCol=param.exptimeCol,
nexpCol=param.nexpCol,
m5Col=param.m5Col, seasonCol=param.seasonCol,
seeingEffCol=param.seeingEffCol, seeingGeomCol=param.seeingGeomCol,
airmassCol=param.airmassCol, skyCol=param.skyCol, moonCol=param.moonCol,
salt2Dir=param.salt2Dir)
""" SN class - inherits from SN_Object
Parameters
--------------
param: dict
parameters requested for the simulation (SN_Object)
simu_param : dict
parameters for the simulation:
name: simulator name (str)
model: model name for SN (exempla: salt2-extended) (str)
version: version of the model (str)
reference_lc : griddata,opt
reference_light curves (default: None)
gamma: griddata, opt
reference gamma values (default: None)
mag_to_flux: griddata, opt
reference mag->flux values (default: None)
snr_fluxsec: str, opt
type of method to estimate snr and flux in pe.s-1:
lsstsim: estimated from lsstsims tools
interp: estimated from interpolation (default)
all : estimated from the two above-mentioned methods
"""
model = simu_param['model']
version = str(simu_param['version'])
self.model = model
self.version = version
self.gamma = gamma
self.mag_to_flux = mag_to_flux
self.snr_fluxsec = snr_fluxsec
self.error_model = error_model
if model == 'salt2-extended':
model_min = 300.
model_max = 180000.
wave_min = 3000.
wave_max = 11501.
if model == 'salt2':
model_min = 3400.
model_max = 11501.
wave_min = model_min
wave_max = model_max
self.wave = np.arange(wave_min, wave_max, 1.)
if not self.error_model:
source = sncosmo.get_source(model, version=version)
else:
SALT2Dir = 'SALT2.Guy10_UV2IR'
self.SALT2Templates(SALT2Dir=SALT2Dir, blue_cutoff=10.*self.sn_parameters['blue_cutoff'])
source = sncosmo.SALT2Source(modeldir=SALT2Dir)
self.dustmap = sncosmo.OD94Dust()
self.lsstmwebv = EBV.EBVbase()
self.SN = sncosmo.Model(source=source,
effects=[self.dustmap, self.dustmap],
effect_names=['host', 'mw'],
effect_frames=['rest', 'obs'])
self.SN.set(z=self.sn_parameters['z'])
self.SN.set(t0=self.sn_parameters['daymax'] +
self.gen_parameters['epsilon_daymax'])
self.SN.set(c=self.sn_parameters['color'] +
self.gen_parameters['epsilon_color'])
self.SN.set(x1=self.sn_parameters['x1'] +
self.gen_parameters['epsilon_x1'])
# need to correct X0 for alpha and beta
lumidist = self.cosmology.luminosity_distance(
self.sn_parameters['z']).value*1.e3
self.X0 = self.x0(lumidist)
self.dL = lumidist
self.SN.set(x0=self.X0)
"""
self.SN.set_source_peakabsmag(self.sn_parameters['absmag'],
self.sn_parameters['band'], self.sn_parameters['magsys'])
self.X0 = self.SN.get('x0')
"""
self.defname = dict(zip(['healpixID', 'pixRA', 'pixDec'], [
'observationId', param.RACol, param.DecCol]))
# names for metadata
self.names_meta = ['RA', 'Dec',
'x0', 'epsilon_x0',
'x1', 'epsilon_x1',
'color', 'epsilon_color',
'daymax', 'epsilon_daymax',
'z', 'survey_area',
'healpixID', 'pixRA', 'pixDec',
'season', 'dL', 'ptime', 'snr_fluxsec_meth', 'status', 'ebvofMW']
self.mag_inf = 100. # mag values to replace infs
# band registry in sncosmo
for band in 'grizy':
throughput = self.telescope.atmosphere[band]
bandcosmo = sncosmo.Bandpass(
throughput.wavelen, throughput.sb, name='LSST::'+band, wave_unit=u.nm)
sncosmo.registry.register(bandcosmo, force=True)
def x0(self, lumidist):
""""
Method to estimate x0 from a griddata
Parameters
---------------
lumidist: float
luminosity distance
"""
X0_grid = griddata((self.x0_grid['x1'], self.x0_grid['color']), self.x0_grid['x0_norm'], (
self.sn_parameters['x1'], self.sn_parameters['color']), method='nearest')
X0 = X0_grid / lumidist ** 2
alpha = 0.13
beta = 3.
X0 *= np.power(10., 0.4*(alpha *
self.sn_parameters['x1'] - beta *
self.sn_parameters['color']))
X0 += self.gen_parameters['epsilon_x0']
return X0
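# Hedged numeric sketch of the standardization above (illustrative values): with alpha = 0.13,
# beta = 3.0, x1 = 1.0 and color = -0.1, the correction factor is
# 10 ** (0.4 * (0.13 * 1.0 - 3.0 * -0.1)) = 10 ** 0.172 ≈ 1.49, so X0 is scaled up by roughly
# 49% before epsilon_x0 is added.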
def SALT2Templates(self,SALT2Dir='SALT2.Guy10_UV2IR', blue_cutoff=3800.):
"""
Method to load SALT2 templates and apply cutoff on SED.
Parameters
--------------
SALT2Dir: str, opt
SALT2 directory (default: SALT2.Guy10_UV2IR)
blue_cutoff: float, opt
blue cutoff to apply (in angstrom - default: 3800.)
"""
for vv in ['salt2_template_0', 'salt2_template_1']:
fName = '{}/{}_orig.dat'.format(SALT2Dir, vv)
data = np.loadtxt(fName, dtype={'names': ('phase', 'wavelength', 'flux'),
'formats': ('f8', 'i4', 'f8')})
#print(data)
data['flux'][data['wavelength'] <= blue_cutoff] = 0.0
#print(data)
np.savetxt('{}/{}.dat'.format(SALT2Dir, vv),
data, fmt=['%1.2f', '%4d', '%.7e', ])
def __call__(self, obs, display=False, time_display=0.):
""" Simulation of the light curve
Parameters
--------------
obs: array
a set of observations
display: bool, opt
if True: the simulated LC is displayed
default: False
time_display: float
duration(sec) for which the display is visible
default: 0
Returns
-----------
astropy table:
metadata:
# SNID: ID of the supernova(int)
RA: SN RA(float)
Dec: SN Dec(float)
daymax: day of the max luminosity(float)
epsilon_daymax: epsilon added to daymax for simulation(float)
x0: SN x0(float)
epsilon_x0: epsilon added to x0 for simulation(float)
x1: SN x1(float)
epsilon_x1: epsilon added to x1 for simulation(float)
color: SN color(float)
epsilon_color: epsilon added to color for simulation(float)
z: SN redshift(float)
survey_area: survey area for this SN(float)
pixID: pixel ID
pixRA: pixel RA
pixDec: pixel Dec
season: season
dL: luminosity distance
fields:
flux: SN flux(Jy)
fluxerr: SN flux error (Jy)
snr_m5: Signal-to-Noise Ratio(float)
gamma: gamma parameter(see LSST: From Science...data products eq. 5)(float)
m5: five-sigma depth(float)
seeingFwhmEff: seeing eff(float)
seeingFwhmGeom: seeing geom(float)
flux_e_sec: flux in pe.s-1 (float)
mag: magnitude(float)
exptime: exposure time(float)
magerr: magnitude error (float)
band: filter(str)
zp: zeropoint(float)
zpsys: zeropoint system(float)
time: time(days)(float)
phase: phase(float)
"""
ra = np.mean(obs[self.RACol])
dec = np.mean(obs[self.DecCol])
area = self.area
season = np.unique(obs['season'])[0]
pix = {}
for vv in ['healpixID', 'pixRA', 'pixDec']:
if vv in obs.dtype.names:
pix[vv] = np.unique(obs[vv])[0]
else:
pix[vv] = np.mean(obs[self.defname[vv]])
ebvofMW = self.sn_parameters['ebvofMW']
# apply dust here since RA, Dec are known
if ebvofMW < 0.:
ebvofMW = self.lsstmwebv.calculateEbv(
equatorialCoordinates=np.array(
[[ra], [dec]]))[0]
self.SN.set(mwebv=ebvofMW)
# start timer
ti = SNTimer(time.time())
# Are there observations with the filters?
goodFilters = np.in1d(obs[self.filterCol],
np.array([b for b in 'grizy']))
if len(obs[goodFilters]) == 0:
return [self.nosim(ra, dec, pix, area, season, ti, self.snr_fluxsec, -1, ebvofMW)]
# Select obs depending on min and max phases
# blue and red cutoffs applied
if not self.error_model:
obs = self.cutoff(obs, self.sn_parameters['daymax'],
self.sn_parameters['z'],
self.sn_parameters['min_rf_phase'],
self.sn_parameters['max_rf_phase'],
self.sn_parameters['blue_cutoff'],
self.sn_parameters['red_cutoff'])
if len(obs) == 0:
return [self.nosim(ra, dec, pix, area, season, ti, self.snr_fluxsec, -1, ebvofMW)]
# Sort data according to mjd
obs.sort(order=self.mjdCol)
# preparing the results : stored in lcdf pandas DataFrame
outvals = [self.m5Col, self.mjdCol,
self.exptimeCol, self.nexpCol, self.filterCol]
for bb in [self.airmassCol, self.skyCol, self.moonCol, self.seeingEffCol, self.seeingGeomCol]:
if bb in obs.dtype.names:
outvals.append(bb)
lcdf =
|
pd.DataFrame(obs[outvals])
|
pandas.DataFrame
|
'''
Scripts for loading various experimental datasets.
Created on Jul 6, 2017
@author: <NAME>
'''
import os
import pandas as pd
import numpy as np
from evaluation.experiment import data_root_dir
all_root_dir = data_root_dir  # os.path.expanduser('~/data/bayesian_sequence_combination')
data_root_dir = os.path.join(all_root_dir, 'data')
def _load_bio_folder(anno_path_root, folder_name):
'''
Loads one data directory out of the complete collection.
:return: dataframe containing the data from this folder.
'''
from data.pico.corpus import Corpus
DOC_PATH = os.path.join(data_root_dir, "bio-PICO/docs/")
ANNOTYPE = 'Participants'
anno_path = anno_path_root + folder_name
anno_fn = anno_path + '/PICO-annos-crowdsourcing.json'
gt_fn = anno_path + '/PICO-annos-professional.json'
corpus = Corpus(doc_path=DOC_PATH, verbose=False)
corpus.load_annotations(anno_fn, docids=None)
if os.path.exists(gt_fn):
corpus.load_groundtruth(gt_fn)
# get a list of the docids
docids = []
workerids = np.array([], dtype=str)
all_data = None
#all_fv = _load_pico_feature_vectors_from_file(corpus)
for d, docid in enumerate(corpus.docs):
docids.append(docid)
annos_d = corpus.get_doc_annos(docid, ANNOTYPE)
spacydoc = corpus.get_doc_spacydoc(docid)
text_d = spacydoc #all_fv[d]
doc_length = len(text_d)
doc_data = None
for workerid in annos_d:
print('Processing data for doc %s and worker %s' % (docid, workerid))
if workerid not in workerids:
workerids = np.append(workerids, workerid)
# add the worker to the dataframe if not already there
if doc_data is None or workerid not in doc_data:
doc_data_w = np.ones(doc_length, dtype=int)  # default to label 1 ('O', outside any span)
if doc_data is None:
doc_data = pd.DataFrame(doc_data_w, columns=[workerid])
else:
doc_data_w = doc_data[workerid]
for span in annos_d[workerid]:
start = span[0]
fin = span[1]
doc_data_w[start] = 2
doc_data_w[start + 1:fin] = 0
doc_data[workerid] = doc_data_w
if os.path.exists(gt_fn):
gold_d = corpus.get_doc_groundtruth(docid, ANNOTYPE)
if 'gold' not in doc_data:
doc_data['gold'] = np.ones(doc_length, dtype=int)
for spans in gold_d:
start = spans[0]
fin = spans[1]
doc_data['gold'][start] = 2
doc_data['gold'][start + 1:fin] = 0
else:
doc_data['gold'] = np.zeros(doc_length, dtype=int) - 1 # -1 for missing gold values
text_d = [spacytoken.text for spacytoken in text_d]
doc_data['features'] = text_d
doc_start = np.zeros(doc_length, dtype=int)
doc_start[0] = 1
doc_gaps = doc_data['features'] == '\n\n' # sentence breaks
doc_start[doc_gaps[doc_gaps].index[:-1] + 1] = 1
doc_data['doc_start'] = doc_start
# doc_data = doc_data.replace(r'\n', ' ', regex=True)
doc_data = doc_data[np.invert(doc_gaps)]
doc_data['docid'] = docid
if all_data is None:
all_data = doc_data
else:
all_data = pd.concat([all_data, doc_data], axis=0)
# print('breaking for fast debugging')
# break
return all_data, workerids
def load_biomedical_data(regen_data_files, debug_subset_size=None, data_folder='bio'):
savepath = os.path.join(data_root_dir, data_folder)
if not os.path.isdir(savepath):
os.mkdir(savepath)
if regen_data_files or not os.path.isfile(savepath + '/annos.csv'):
print(regen_data_files)
print(os.path.isfile(savepath + '/annos.csv'))
anno_path_root = os.path.join(data_root_dir, 'bio-PICO/annos/')
# There are four folders here:
# acl17-test: the only one containing 'professional' annos. 191 docs
# train: 3549 docs
# dev: 500 docs
# test: 500 docs
folders_to_load = ['acl17-test', 'train', 'test', 'dev']
all_data = None
all_workerids = None
for folder in folders_to_load:
print('Loading folder %s' % folder)
folder_data, workerids = _load_bio_folder(anno_path_root, folder)
if all_data is None:
all_data = folder_data
all_workerids = workerids
else:
all_data = pd.concat([all_data, folder_data])
all_workerids = np.unique(np.append(workerids.flatten(), all_workerids.flatten()))
all_data.to_csv(savepath + '/annos.csv', columns=all_workerids, header=False, index=False)
all_data.to_csv(savepath + '/gt.csv', columns=['gold'], header=False, index=False)
all_data.to_csv(savepath + '/doc_start.csv', columns=['doc_start'], header=False, index=False)
all_data.to_csv(savepath + '/text.csv', columns=['features'], header=False, index=False)
print('loading annos...')
annos = pd.read_csv(savepath + '/annos.csv', header=None, nrows=debug_subset_size)
annos = annos.fillna(-1)
annos = annos.values
#np.genfromtxt(savepath + '/annos.csv', delimiter=',')
print('loading features data...')
text = pd.read_csv(savepath + '/text.csv', skip_blank_lines=False, header=None, nrows=debug_subset_size)
text = text.fillna(' ').values
print('loading doc starts...')
doc_start = pd.read_csv(savepath + '/doc_start.csv', header=None, nrows=debug_subset_size).values #np.genfromtxt(savepath + '/doc_start.csv')
print('Loaded %i documents' % np.sum(doc_start))
print('loading ground truth labels...')
gt = pd.read_csv(savepath + '/gt.csv', header=None, nrows=debug_subset_size).values # np.genfromtxt(savepath + '/gt.csv')
if len(text) == len(annos) - 1:
# sometimes the last line of features is blank and doesn't get loaded into features, but doc_start and gt contain labels
# for the newline token
annos = annos[:-1]
doc_start = doc_start[:-1]
gt = gt[:-1]
print('Creating dev/test split...')
# since there is no separate validation set, we split the test set
ndocs = np.sum(doc_start & (gt != -1))
#testdocs = np.random.randint(0, ndocs, int(np.floor(ndocs * 0.5)))
ntestdocs = int(np.floor(ndocs * 0.5))
docidxs = np.cumsum(doc_start & (gt != -1)) # gets us the doc ids
# # testidxs = np.in1d(docidxs, testdocs)
ntestidxs = np.argwhere(docidxs == (ntestdocs+1))[0][0]
# The first half of the labelled data is used as dev, second half as test
gt_test = np.copy(gt)
gt_test[ntestidxs:] = -1
gt_dev = np.copy(gt)
gt_dev[:ntestidxs] = -1
doc_start_dev = doc_start[gt_dev != -1]
text_dev = text[gt_dev != -1]
gt_task1_dev = gt_dev
gt_dev = gt_dev[gt_dev != -1]
return gt_test, annos, doc_start, text, gt_task1_dev, gt_dev, doc_start_dev, text_dev
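# Hedged usage sketch (argument values are illustrative): regenerate the CSV caches once, then
# reload them quickly on later runs, e.g.
# gt_test, annos, doc_start, text, gt_task1_dev, gt_dev, doc_start_dev, text_dev = \
#     load_biomedical_data(regen_data_files=False, debug_subset_size=10000)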
def _map_ner_str_to_labels(arr):
arr = arr.astype(str)
arr[arr == 'O'] = 1
arr[arr == 'B-ORG'] = 2
arr[arr == 'I-ORG'] = 0
arr[arr == 'B-PER'] = 4
arr[arr == 'I-PER'] = 3
arr[arr == 'B-LOC'] = 6
arr[arr == 'I-LOC'] = 5
arr[arr == 'B-MISC'] = 8
arr[arr == 'I-MISC'] = 7
arr[arr == '?'] = -1
try:
arr_ints = arr.astype(int)
except:
print("Could not map all annos to integers. The annos we found were:")
uannos = []
for anno in arr:
if anno not in uannos:
uannos.append(anno)
print(uannos)
return arr_ints
def _load_rodrigues_annotations(dir, worker_str, gold_char_idxs=None, gold_tokens=None, skip_imperfect_matches=False):
worker_data = None
for f in os.listdir(dir):
if not f.endswith('.txt'):
continue
doc_str = f.split('.')[0]
f = os.path.join(dir, f)
#print('Processing %s' % f)
new_data = pd.read_csv(f, names=['features', worker_str], skip_blank_lines=False,
dtype={'features':str, worker_str:str}, na_filter=False, delim_whitespace=True)
doc_gaps = (new_data['features'] == '') & (new_data[worker_str] == '')
doc_start = np.zeros(doc_gaps.shape[0], dtype=int)
doc_start[doc_gaps[:-1][doc_gaps[:-1]].index + 1] = 1 # the indexes after the gaps
doc_content = new_data['features'] != ''
new_data['doc_start'] = doc_start
new_data = new_data[doc_content]
new_data['doc_start'].iat[0] = 1
annos_to_keep = np.ones(new_data.shape[0], dtype=bool)
for t, tok in enumerate(new_data['features']):
if len(tok.split('/')) > 1:
tok = tok.split('/')[0]
new_data['features'].iat[t] = tok
if len(tok) == 0:
annos_to_keep[t] = False
# compare the tokens in the worker annos to the gold labels. They are misaligned in the dataset. We will
# skip labels in the worker annos that are assigned to only a part of a token in the gold dataset.
char_counter = 0
gold_tok_idx = 0
skip_sentence = False
sentence_start = 0
if gold_char_idxs is not None:
gold_chars = np.array(gold_char_idxs[doc_str])
last_accepted_tok = ''
last_accepted_idx = -1
for t, tok in enumerate(new_data['features']):
if skip_imperfect_matches and skip_sentence:
new_data[worker_str].iloc[t] = -1
if new_data['doc_start'].iat[t]:
skip_sentence = False
if new_data['doc_start'].iat[t]:
sentence_start = t
gold_char_idx = gold_chars[gold_tok_idx]
gold_tok = gold_tokens[doc_str][gold_tok_idx]
#print('tok = %s, gold_tok = %s' % (tok, gold_tok))
if not annos_to_keep[t]:
continue # already marked as skippable
if char_counter < gold_char_idx and \
(last_accepted_tok + tok) in gold_tokens[doc_str][gold_tok_idx-1]:
print('Correcting misaligned annos (split word in worker data): %i, %s' % (t, tok))
skip_sentence = True
last_accepted_tok += tok
annos_to_keep[last_accepted_idx] = False # skip the previous ones until the end
new_data['features'].iat[t] = last_accepted_tok
new_data['doc_start'].iat[t] = new_data['doc_start'].iat[last_accepted_idx]
last_accepted_idx = t
char_counter += len(tok)
elif tok not in gold_tok or (tok == '' and gold_tok != ''):
print('Correcting misaligned annos (spurious features in worker data): %i, %s vs. %s' % (t, tok, gold_tok))
skip_sentence = True
annos_to_keep[t] = False # skip the previous ones until the end
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
elif tok == gold_tok[:len(tok)]: # needs to match the first characters in the string, not just be there somewhere
gold_tok_idx += 1
if tok != gold_tok:
skip_sentence = True
while char_counter > gold_char_idx:
print('error in features alignment between worker and gold!')
len_to_skip = gold_chars[gold_tok_idx - 1] - gold_chars[gold_tok_idx - 2]
# move the gold counter along to the next token because gold is behind
gold_tok_idx += 1
gold_chars[gold_tok_idx:] -= len_to_skip
gold_char_idx = gold_chars[gold_tok_idx]
gold_char_idxs[doc_str] = gold_chars
last_accepted_tok = tok
last_accepted_idx = t
char_counter += len(tok)
else:
skip_sentence = True
annos_to_keep[t] = False
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
# no more features in this document, but the last sentence must be skipped
if skip_imperfect_matches and skip_sentence:
# annos_to_keep[sentence_start:t+1] = False
new_data[worker_str].iloc[sentence_start:t+1] = -1
new_data = new_data[annos_to_keep]
new_data[worker_str] = _map_ner_str_to_labels(new_data[worker_str])
new_data['doc_id'] = doc_str
new_data['tok_idx'] = np.arange(new_data.shape[0])
# add to data from this worker
if worker_data is None:
worker_data = new_data
else:
worker_data = pd.concat([worker_data, new_data])
return worker_data
def _load_rodrigues_annotations_all_workers(annotation_data_path, gold_data, skip_dirty=False):
worker_dirs = os.listdir(annotation_data_path)
data = None
annotator_cols = np.array([], dtype=str)
char_idx_word_starts = {}
chars = {}
char_counter = 0
for t, tok in enumerate(gold_data['features']):
if gold_data['doc_id'].iloc[t] not in char_idx_word_starts:
char_counter = 0
starts = []
toks = []
char_idx_word_starts[gold_data['doc_id'].iloc[t]] = starts
chars[gold_data['doc_id'].iloc[t]] = toks
starts.append(char_counter)
toks.append(tok)
char_counter += len(tok)
for widx, dir in enumerate(worker_dirs):
if dir.startswith("."):
continue
worker_str = dir
annotator_cols = np.append(annotator_cols, worker_str)
dir = os.path.join(annotation_data_path, dir)
print('Processing dir for worker %s (%i of %i)' % (worker_str, widx, len(worker_dirs)))
worker_data = _load_rodrigues_annotations(dir, worker_str,
char_idx_word_starts, chars, skip_dirty)
print("Loaded a dataset of size %s" % str(worker_data.shape))
# now need to join this to other workers' data
if data is None:
data = worker_data
else:
data = data.merge(worker_data, on=['doc_id', 'tok_idx', 'features', 'doc_start'], how='outer', sort=True, validate='1:1')
return data, annotator_cols
def IOB_to_IOB2(seq):
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in I_labels:
typeidx = I_labels.index(label)  # the position of this I- label gives the entity type
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != label):
# we have I preceded by O. This needs to be changed to a B.
seq[i] = B_labels[typeidx]
return seq
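# Hedged example using the integer mapping from _map_ner_str_to_labels above
# (O=1, B-ORG=2, I-ORG=0, B-PER=4, I-PER=3):
# IOB_to_IOB2([1, 0, 0, 1, 3, 3]) -> [1, 2, 0, 1, 4, 3]
# i.e. chunk-initial I- tokens are promoted to B- while continuation tokens stay I-.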
def IOB2_to_IOB(seq):
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in B_labels:
typeidx = B_labels.index(label)  # the position of this B- label gives the entity type
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != I_labels[typeidx]):
# in IOB1, a chunk-initial token only keeps B- when it directly follows a chunk
# of the same type, so this B needs to be changed to an I.
seq[i] = I_labels[typeidx]
return seq
def load_ner_data(regen_data_files, skip_sen_with_dirty_data=False):
# In Nguyen et al 2017, the original data has been separated out for task 1, aggregation of crowd labels. In this
# task, the original training data is further split into val and test -- to make our results comparable with Nguyen
# et al, we need to test on the test split for task 1, but train our model on both.
# To make them comparable with Rodrigues et al. 2014, we need to test on all data (check this in their paper).
# Task 2 is for prediction on a test set given a model trained on the training set and optimised on the validation
# set. It would be ideal to show both these results...
savepath = os.path.join(data_root_dir, 'ner') # location to save our csv files to
if not os.path.isdir(savepath):
os.mkdir(savepath)
# within each of these folders below is an mturk_train_data folder, containing crowd labels, and a ground_truth
# folder. Rodrigues et al. have assigned document IDs that allow us to match up the annos from each worker.
# Nguyen et al. have split the training set into the val/test folders for task 1. Data is otherwise the same as in
# the Rodrigues folder under mturk/extracted_data.
task1_val_path = os.path.join(data_root_dir, 'crf-ma-NER-task1/val/')
task1_test_path = os.path.join(data_root_dir, 'crf-ma-NER-task1/test')
# These are just two files that we use for features + ground truth labels.
task2_val_path = os.path.join(data_root_dir, 'English NER/eng.testa')
task2_test_path = os.path.join(data_root_dir, 'English NER/eng.testb')
if regen_data_files or not os.path.isfile(savepath + '/task1_val_annos.csv'):
# Steps to load data (all steps need to map annos to consecutive integer labels).
# 1. Create an annos.csv file containing all the annos in task1_val_path and task1_test_path.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(os.path.join(task1_val_path, 'ground_truth/'), 'gold')
# load the validation data
data, annotator_cols = _load_rodrigues_annotations_all_workers(
os.path.join(task1_val_path, 'mturk_train_data/'),
gold_data, skip_sen_with_dirty_data)
# 2. Create ground truth CSV for task1_val_path (for tuning the LSTM)
# merge gold with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'features'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annos per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annos
if len(np.unique(counts)) > 1:
print('Validation data: we have some misaligned labels.')
print(counts)
if np.any(counts == 0):
print('Removing document %s with no annos.' % doc)
# remove any lines with no annos
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_val_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the features in same order
data.to_csv(savepath + '/task1_val_text.csv', columns=['features'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_val_gt.csv', columns=['gold'], header=False, index=False)
# 3. Load worker annos for test set.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(
os.path.join(task1_test_path, 'ground_truth/'), 'gold')
# load the test data
data, annotator_cols = _load_rodrigues_annotations_all_workers(
os.path.join(task1_test_path, 'mturk_train_data/'),
gold_data, skip_sen_with_dirty_data)
# 4. Create ground truth CSV for task1_test_path
# merge with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'features'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annos per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annos
if len(np.unique(counts)) > 1:
print('Test data: we have some misaligned labels.')
print(counts)
if np.any(counts == 0):
print('Removing document %s with no annos.' % doc)
# remove any lines with no annos
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_test_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the features in same order
data.to_csv(savepath + '/task1_test_text.csv', columns=['features'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_test_gt.csv', columns=['gold'], header=False, index=False)
# 5. Create a file containing only the words for the task 2 validation set, i.e. like annos.csv with no annos.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_val_path but blank out the task_1 labels
# (for tuning the LSTM for task 2)
import csv
eng_val = pd.read_csv(task2_val_path, delimiter=' ', usecols=[0,3], names=['features', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_val.shape[0])
docstart_token = eng_val['features'][0]
doc_starts[1:] = (eng_val['features'] == docstart_token)[:-1]
eng_val['doc_start'] = doc_starts
eng_val['tok_idx'] = eng_val.index
eng_val = eng_val[eng_val['features'] != docstart_token] # remove all the docstart labels
eng_val['gold'] = _map_ner_str_to_labels(eng_val['gold'])
eng_val['gold'] = IOB_to_IOB2(eng_val['gold'].values)
eng_val.to_csv(savepath + '/task2_val_gt.csv', columns=['gold'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_text.csv', columns=['features'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 6. Create a file containing only the words for the task 2 test set, i.e. like annos.csv with no annos.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_test_path but blank out the task_1 labels/
eng_test = pd.read_csv(task2_test_path, delimiter=' ', usecols=[0,3], names=['features', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_test.shape[0])
docstart_token = eng_test['features'][0]
doc_starts[1:] = (eng_test['features'] == docstart_token)[:-1]
eng_test['doc_start'] = doc_starts
eng_test['tok_idx'] = eng_test.index
eng_test = eng_test[eng_test['features'] != docstart_token] # remove all the docstart labels
eng_test['gold'] = _map_ner_str_to_labels(eng_test['gold'])
eng_test['gold'] = IOB_to_IOB2(eng_test['gold'].values)
eng_test.to_csv(savepath + '/task2_test_gt.csv', columns=['gold'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_text.csv', columns=['features'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 7. Reload the data for the current run...
print('loading annos for task1 test...')
annos = pd.read_csv(savepath + '/task1_test_annos.csv', skip_blank_lines=False)
print('loading features data for task1 test...')
text = pd.read_csv(savepath + '/task1_test_text.csv', skip_blank_lines=False, header=None)
print('loading doc_starts for task1 test...')
doc_start = pd.read_csv(savepath + '/task1_test_doc_start.csv', skip_blank_lines=False, header=None)
print('loading ground truth for task1 test...')
gt_t = pd.read_csv(savepath + '/task1_test_gt.csv', skip_blank_lines=False, header=None)
print('Unique labels: ')
print(np.unique(gt_t))
print(gt_t.shape)
print('loading annos for task1 val...')
annos_v = pd.read_csv(savepath + '/task1_val_annos.csv', skip_blank_lines=False)
# remove any lines with no annos
# annotated_idxs = np.argwhere(np.any(annos_v != -1, axis=1)).flatten()
# annos_v = annos_v.iloc[annotated_idxs, :]
annos = pd.concat((annos, annos_v), axis=0)
annos = annos.fillna(-1)
annos = annos.values
print('loaded annos for %i tokens' % annos.shape[0])
print('loading features data for task1 val...')
text_v =
|
pd.read_csv(savepath + '/task1_val_text.csv', skip_blank_lines=False, header=None)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import xgboost as xgb
from lifelines import WeibullAFTFitter
from sklearn.neighbors import BallTree
# lib utils
from xgbse._base import XGBSEBaseEstimator
from xgbse.converters import convert_data_to_xgb_format, convert_y
# at which percentiles will the KM predict
from xgbse.non_parametric import get_time_bins, calculate_interval_failures
KM_PERCENTILES = np.linspace(0, 1, 11)
DEFAULT_PARAMS = {
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"aft_loss_distribution": "normal",
"aft_loss_distribution_scale": 1,
"tree_method": "hist",
"learning_rate": 5e-2,
"max_depth": 8,
"booster": "dart",
"subsample": 0.5,
"min_child_weight": 50,
"colsample_bynode": 0.5,
}
DEFAULT_PARAMS_WEIBULL = {}
class XGBSEStackedWeibull(XGBSEBaseEstimator):
"""
Perform stacking of an XGBoost survival model with a Weibull AFT parametric model.
The XGBoost fits the data and then predicts a value that is interpreted as a risk metric.
This risk metric is fed to the Weibull regression which uses it as its only independent variable.
Thus, we can get the benefit of XGBoost discrimination power alongside the Weibull AFT
statistical rigor (calibrated survival curves, confidence intervals)
"""
def __init__(
self,
xgb_params=DEFAULT_PARAMS,
weibull_params=DEFAULT_PARAMS_WEIBULL,
n_jobs=-1,
):
"""
Construct XGBSEStackedWeibull instance
Args:
xgb_params (Dict): parameters for XGBoost model, see
https://xgboost.readthedocs.io/en/latest/parameter.html
weibull_params (Dict): parameters for Weibull Regression model, see
https://lifelines.readthedocs.io/en/latest/fitters/regression/WeibullAFTFitter.html
"""
self.xgb_params = xgb_params
self.weibull_params = weibull_params
self.persist_train = False
def fit(
self,
X,
y,
num_boost_round=1000,
validation_data=None,
early_stopping_rounds=None,
verbose_eval=0,
persist_train=False,
index_id=None,
time_bins=None,
):
"""
Fit XGBoost model to predict a value that is interpreted as a risk metric.
Fit Weibull Regression model using risk metric as only independent variable.
Args:
X ([pd.DataFrame, np.array]): features to be used while fitting XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
num_boost_round (Int): Number of boosting iterations.
validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
if user desires to use early stopping
early_stopping_rounds (Int): Activates early stopping.
Validation metric needs to improve at least once
in every **early_stopping_rounds** round(s) to continue training.
See xgboost.train documentation.
verbose_eval ([Bool, Int]): level of verbosity. See xgboost.train documentation.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
Returns:
XGBSEStackedWeibull: Trained XGBSEStackedWeibull instance
"""
E_train, T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(T_train, E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# converting validation data to xgb format
evals = ()
if validation_data:
X_val, y_val = validation_data
dvalid = convert_data_to_xgb_format(
X_val, y_val, self.xgb_params["objective"]
)
evals = [(dvalid, "validation")]
# training XGB
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
evals=evals,
verbose_eval=verbose_eval,
)
# predicting risk from XGBoost
train_risk = self.bst.predict(dtrain)
# replacing 0 by minimum positive value in df
# so Weibull can be fitted
min_positive_value = T_train[T_train > 0].min()
T_train = np.clip(T_train, min_positive_value, None)
# creating df to use lifelines API
weibull_train_df = pd.DataFrame(
{"risk": train_risk, "duration": T_train, "event": E_train}
)
# fitting weibull aft
self.weibull_aft = WeibullAFTFitter(**self.weibull_params)
self.weibull_aft.fit(weibull_train_df, "duration", "event", ancillary=True)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
index_leaves = self.bst.predict(dtrain, pred_leaf=True)
self.tree = BallTree(index_leaves, metric="hamming")
self.index_id = index_id
return self
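# Hedged usage sketch (feature/target names are illustrative; convert_to_structured is assumed
# to be available from xgbse.converters as in the public xgbse package):
# from xgbse.converters import convert_to_structured
# model = XGBSEStackedWeibull()
# y_train = convert_to_structured(df_train["duration"], df_train["event"])
# model.fit(df_train[feature_cols], y_train, num_boost_round=500)
# surv = model.predict(df_valid[feature_cols])  # survival probabilities per time bin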
def predict(self, X, return_interval_probs=False):
"""
Predicts survival probabilities using the XGBoost + Weibull AFT stacking pipeline.
Args:
X (pd.DataFrame): Dataframe of features to be used as input for the
XGBoost model.
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Default is False.
Returns:
pd.DataFrame: A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
risk = self.bst.predict(d_matrix)
weibull_score_df =
|
pd.DataFrame({"risk": risk})
|
pandas.DataFrame
|