prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
"""Tools helping with the TIMIT dataset.
Based on the version from:
https://www.kaggle.com/mfekadu/darpa-timit-acousticphonetic-continuous-speech
"""
import re
from os.path import join, splitext, dirname
from pathlib import Path
import numpy as np
import pandas as pd
import soundfile as sf
from audio_loader.ground_truth.challenge import Challenge
PHON = ['b', 'd', 'g', 'p', 't', 'k', 'dx', 'q', # Stops
'bcl', 'dcl', 'gcl', 'kcl', 'pcl', 'tcl', # Closure
'jh', 'ch', # Affricates
's', 'sh', 'z', 'zh', 'f', 'th', 'v', 'dh', # Fricatives
'm', 'n', 'ng', 'em', 'en', 'eng', 'nx', # Nasals
'l', 'r', 'w', 'y', 'hh', 'hv', 'el', # Semivowels and Glides
'iy', 'ih', 'eh', 'ey', 'ae', 'aa', 'aw', 'ay', # Vowels
'ah', 'ao', 'oy', 'ow', 'uh', 'uw', 'ux', 'er',
'ax', 'ix', 'axr', 'ax-h',
'pau', 'h#', 'epi' # Non-speech event
]
SILENCES = ['pau', 'epi', 'h#']
CLOSURES = ['bcl', 'vcl', 'dcl', 'gcl', 'kcl', 'pcl', 'tcl']
DF_PHON = pd.read_csv(join(dirname(__file__), 'timit_map.csv'), names=["original", "phon_class1", "phon_class2", "phon_class3"])
class TimitGroundTruth(Challenge):
"""Ground truth getter for TIMIT like datasets."""
def __init__(self, timit_like_root_folderpath, datapath="data", gtpath="data", gt_grouped_file=None, with_silences=True, phon_class="original", fuse_closures=True, return_original_gt=False):
"""Compatible with the TIMIT DARPA dataset available on kaggle.
To use the TIMIT DARPA dataset leave the default arguments as is.
"""
super().__init__(timit_like_root_folderpath, datapath, gtpath)
self.with_silences = with_silences
self.phon_class = phon_class
self.fuse_closures = fuse_closures
self.return_original_gt = return_original_gt
if gt_grouped_file is None:
df_train = pd.read_csv(join(self.root_folderpath, "train_data.csv"))
df_train = df_train[ | pd.notnull(df_train['path_from_data_dir']) | pandas.notnull |
import os
import warnings
import pprint
from shutil import rmtree
import numpy as np
import textwrap
from astropy.convolution import convolve, Gaussian1DKernel
from astropy.modeling.functional_models import Gaussian2D
from scipy.interpolate import interp1d
from scipy.stats import circmean, wilcoxon, levene
from scipy.signal import convolve2d, savgol_filter
from copy import deepcopy
from scipy.stats import kruskal, mannwhitneyu, friedmanchisquare
from itertools import combinations
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.gridspec import GridSpecFromSubplotSpec
import seaborn as sns
from barrylab_ephys_analysis.blea_utils import euclidean_distance_between_rows_of_matrix
from barrylab_ephys_analysis.recording_io import Recording
from barrylab_ephys_analysis.scripts.exp_scales import snippets
from barrylab_ephys_analysis.scripts.exp_scales.params import Params
from barrylab_ephys_analysis.external.statannot import add_stat_annotation
from barrylab_ephys_analysis.external import ptitprince
from barrylab_ephys_analysis.spatial.fields import compute_field_contour
from barrylab_ephys_analysis.spatial.measures import ratemap_gradient, ratemap_fisher_information
from barrylab_ephys_analysis.spatial.ratemaps import SpatialRatemap
from barrylab_ephys_analysis.spatial.similarity import spatial_correlation
from barrylab_ephys_analysis.spikes.utils import count_spikes_in_sample_bins
pvalue_thresholds = [[0.001, '***'], [0.01, '**'], [0.05, '*'], [1, None]]
main_experiment_ids = ('exp_scales_a', 'exp_scales_b', 'exp_scales_c', 'exp_scales_d')
experiment_id_substitutes = {
'exp_scales_a': 'A',
'exp_scales_b': 'B',
'exp_scales_c': 'C',
'exp_scales_d': 'D',
'exp_scales_a2': "A'"
}
experiment_id_substitutes_inverse = {value: key for key, value in experiment_id_substitutes.items()}
spatial_windows = {
'exp_scales_a': (0, 87.5, 0, 125),
'exp_scales_b': (0, 175, 0, 125),
'exp_scales_c': (0, 175, 0, 250),
'exp_scales_d': (0, 350, 0, 250),
'exp_scales_a2': (0, 87.5, 0, 125)
}
def compute_distances_to_short_and_long_walls(df, xy):
longwallmap = {
'exp_scales_a': (xy[:, 0], spatial_windows['exp_scales_a'][0:2]),
'exp_scales_b': (xy[:, 1], spatial_windows['exp_scales_b'][2:4]),
'exp_scales_c': (xy[:, 0], spatial_windows['exp_scales_c'][0:2]),
'exp_scales_d': (xy[:, 1], spatial_windows['exp_scales_d'][2:4])
}
shortwallmap = {
'exp_scales_a': (xy[:, 1], spatial_windows['exp_scales_a'][2:4]),
'exp_scales_b': (xy[:, 0], spatial_windows['exp_scales_b'][0:2]),
'exp_scales_c': (xy[:, 1], spatial_windows['exp_scales_c'][2:4]),
'exp_scales_d': (xy[:, 0], spatial_windows['exp_scales_d'][0:2])
}
if np.any(df['experiment_id'] == 'exp_scales_a2'):
raise ValueError('exp_scales_a2 not handled')
df['distance to long wall (cm)'] = np.nan
df['distance to short wall (cm)'] = np.nan
df['distance_to_long_wall_prop'] = np.nan
df['distance_to_short_wall_prop'] = np.nan
for experiment_id in longwallmap:
idx = df['experiment_id'] == experiment_id
position, edges = longwallmap[experiment_id]
df.loc[idx, 'distance to long wall (cm)'] = \
np.min(np.abs(position[idx, np.newaxis] - np.array(edges)[np.newaxis, :]), axis=1)
short_wall_half_length = (edges[1] - edges[0]) / 2.
position, edges = shortwallmap[experiment_id]
df.loc[idx, 'distance to short wall (cm)'] = \
np.min(np.abs(position[idx, np.newaxis] - np.array(edges)[np.newaxis, :]), axis=1)
df.loc[idx, 'distance_to_short_wall_prop'] = \
df.loc[idx, 'distance to short wall (cm)'] / short_wall_half_length
df.loc[idx, 'distance_to_long_wall_prop'] = \
df.loc[idx, 'distance to long wall (cm)'] / short_wall_half_length
def compute_distance_to_nearest_corner(df, xy):
arena_sizes = {
'exp_scales_a': (spatial_windows['exp_scales_a'][1], spatial_windows['exp_scales_a'][3]),
'exp_scales_b': (spatial_windows['exp_scales_b'][1], spatial_windows['exp_scales_b'][3]),
'exp_scales_c': (spatial_windows['exp_scales_c'][1], spatial_windows['exp_scales_c'][3]),
'exp_scales_d': (spatial_windows['exp_scales_d'][1], spatial_windows['exp_scales_d'][3])
}
if np.any(df['experiment_id'] == 'exp_scales_a2'):
raise ValueError('exp_scales_a2 not handled')
df['distance_from_corner_prop'] = np.nan
df['distance_from_corner (cm)'] = np.nan
for experiment_id, arena_size in arena_sizes.items():
idx = df['experiment_id'] == experiment_id
df.loc[idx, 'distance_from_corner (cm)'] = \
snippets.compute_distance_to_nearest_corner_for_array(xy[idx, :], arena_size)
df.loc[idx, 'distance_from_corner_prop'] = \
df.loc[idx, 'distance_from_corner (cm)'] / (np.min(arena_size) / 2)
def compute_distances_to_landmarks(df, xy):
compute_distances_to_short_and_long_walls(df, xy)
compute_distance_to_nearest_corner(df, xy)
def cut_df_rows_by_distance_to_wall(df, bin_width, environment_column='environment',
distance_column='distance to wall (cm)',
reset_index=True):
match_to_exp_scales = np.any(np.array([x in experiment_id_substitutes for x in df[environment_column]]))
if match_to_exp_scales:
max_bin_centers = {
'exp_scales_a': (87.5 / 2. // bin_width) * bin_width - bin_width / 2.,
'exp_scales_b': (125 / 2. // bin_width) * bin_width - bin_width / 2.,
'exp_scales_c': (175 / 2. // bin_width) * bin_width - bin_width / 2.,
'exp_scales_d': (250 / 2. // bin_width) * bin_width - bin_width / 2.,
'exp_scales_a2': (87.5 / 2. // bin_width) * bin_width - bin_width / 2.
}
else:
max_bin_centers = {
'A': (87.5 / 2. // bin_width) * bin_width - bin_width / 2.,
'B': (125 / 2. // bin_width) * bin_width - bin_width / 2.,
'C': (175 / 2. // bin_width) * bin_width - bin_width / 2.,
'D': (250 / 2. // bin_width) * bin_width - bin_width / 2.,
'A*': (87.5 / 2. // bin_width) * bin_width - bin_width / 2.,
"A'": (87.5 / 2. // bin_width) * bin_width - bin_width / 2.
}
for environment_name in list(df[environment_column].unique()):
df.drop(df[(df[environment_column] == environment_name)
& (np.array(df[distance_column]) > max_bin_centers[environment_name] + 0.0001)].index,
inplace=True)
if reset_index:
df.reset_index(drop=True, inplace=True)
class SpatialFilteringLegend(object):
def __init__(self):
self._selection_maps = {}
for experiment_id in main_experiment_ids:
selection_map_shape = (
np.int16(spatial_windows[experiment_id][3] / Params.spatial_ratemap['bin_size']),
np.int16(spatial_windows[experiment_id][1] / Params.spatial_ratemap['bin_size']),
3
)
self._selection_maps[experiment_id] = 0.7 * np.ones(selection_map_shape)
dfs = []
for experiment_id in main_experiment_ids:
x_ind, y_ind = np.meshgrid(np.arange(self._selection_maps[experiment_id].shape[1]),
np.arange(self._selection_maps[experiment_id].shape[0]))
x_ind = x_ind.flatten()
y_ind = y_ind.flatten()
x_coord = (x_ind + 0.5) * Params.spatial_ratemap['bin_size']
y_coord = (y_ind + 0.5) * Params.spatial_ratemap['bin_size']
df = pd.DataFrame({'x_ind': x_ind, 'y_ind': y_ind, 'x_coord': x_coord, 'y_coord': y_coord})
df['experiment_id'] = experiment_id
self.compute_distance_to_wall(df, self._selection_maps[experiment_id].shape[:2],
spatial_windows[experiment_id])
dfs.append(df)
df = | pd.concat(dfs, axis=0, ignore_index=True) | pandas.concat |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import collections
from nltk.corpus import stopwords
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from textblob import Word
from textblob import TextBlob
# Import data from linkedin_data.db
import sqlite3
# Create connection
connection = sqlite3.connect("/data/linkedin_data.db3")
# Load data as data frame
job_details = pd.read_sql_query("SELECT position, company, location, details from positions", connection)
# Verify that result of SQL query is stored in the dataframe
print(job_details.head())
connection.close()
# Clean Data - Job Titles
counter = collections.Counter(job_details['position'])
print(counter)
print(counter.most_common(5))
# We understand the most common job titles in the data set are:
# Data Scientist, Senior Data Scientist, Data Scientist I, Machine Learning Engineer, Data Scientist II
# Let's group job titles by keyword - "Data Scientist" or "Machine Learning"
job_details['position_group'] = np.where(job_details.position.str.contains("Data Scientist"), "Data Scientist",
np.where(job_details.position.str.contains("Machine Learning"),
"Machine Learning Engineer", "Other Analytics"))
counter = collections.Counter(job_details["position_group"])
print(counter)
# Plot the Results
plt.figure()
job_details.position_group.hist() # improve visualizations
# plt.show()
# Let's group job titles by experience - i.e.(Senior v. Entry Level)
job_details['experience'] = np.where(job_details.position.str.contains("Senior"), "Senior",
np.where(job_details.position.str.contains("Sr."), "Senior",
np.where(job_details.position.str.contains("Director"), "Senior",
"Entry Level")))
counter = collections.Counter(job_details["experience"])
print(counter)
# Plot the Results
plt.figure()
job_details.experience.hist() # improve visualizations
# plt.show()
# Preprocess Text Data
# Lower case
job_details['details'] = job_details['details'].apply(lambda x: " ".join(x.lower() for x in x.split()))
# remove tabulation and punctuation
job_details['details'] = job_details['details'].str.replace('[^\w\s]', ' ')
# digits
job_details['details'] = job_details['details'].str.replace('\d+', '')
# remove stop words
stop = stopwords.words('english')
job_details['details'] = job_details['details'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
# lemmatization
job_details['details'] = job_details['details'].apply(
lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
print("Preprocessed data: \n")
print(job_details.head())
other_stop_words = ['junior', 'senior', 'experience', 'etc', 'job', 'work', 'company', 'technique',
'candidate', 'skill', 'skills', 'language', 'menu', 'inc', 'new', 'plus', 'years',
'technology', 'organization', 'ceo', 'cto', 'account', 'manager', 'data', 'scientist', 'mobile',
'developer', 'product', 'revenue', 'strong', 'business', 'team', 'science', 'e', 'sexual',
'orientation', 'equal', 'opportunity']
job_details['details'] = job_details['details'].apply(
lambda x: " ".join(x for x in x.split() if x not in other_stop_words))
# Visualize the Data
total_words = job_details.groupby(['position_group']).sum().reset_index()
total_words = total_words[["position_group", "details"]]
print("Aggregated job descriptions: \n")
print(total_words)
# Word Clouds
# Visualize data
jobs_list = total_words.position_group.unique().tolist()
for job in jobs_list:
# Start with one review:
text = total_words[total_words.position_group == job].iloc[0].details
# Create and generate a word cloud image:
wordcloud = WordCloud().generate(text)
print("\n***", job, "***\n")
# Display the generated image:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
# plt.show()
# Converting text to features
vectorizer = TfidfVectorizer()
# Tokenize and build vocabulary
top_5 = ('Data Scientist', 'Senior Data Scientist', 'Data Scientist I', 'Machine Learning Engineer',
'Data Scientist II') # try to pull value direct from counter, so it always pulls in top 5 job titles
# selecting rows based on condition
top_science = job_details[job_details['position'].isin(top_5)]
X = vectorizer.fit_transform(top_science.details)
y = top_science.position
# split data into 80% training and 20% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=109)
print("train data shape: ", X_train.shape)
print("test data shape: ", X_test.shape)
# Fit model
clf = MultinomialNB()
clf.fit(X_train, y_train)
# Predict
y_predicted = clf.predict(X_test)
y_train.hist()
y_test.hist()
# evaluate the predictions
print("Accuracy score is: ", accuracy_score(y_test, y_predicted))
print("Classes: (to help read Confusion Matrix)\n", clf.classes_)
print("Confusion Matrix: ")
print(confusion_matrix(y_test, y_predicted))
print("Classification Report: ")
print(classification_report(y_test, y_predicted))
print(clf.coef_)
print(clf.coef_.shape)
technical_skills = ['python', 'c', 'r', 'c++', 'java', 'hadoop', 'scala', 'flask', 'pandas', 'spark', 'scikit-learn',
'numpy', 'php', 'sql', 'mysql', 'css', 'mongdb', 'nltk', 'fastai', 'keras', 'pytorch', 'tensorflow',
'linux', 'Ruby', 'JavaScript', 'django', 'react', 'reactjs', 'ai', 'ui', 'tableau']
feature_array = vectorizer.get_feature_names()
# number of overall model features
features_numbers = len(feature_array)
# max sorted features number
n_max = int(features_numbers * 0.1)
# initialize output dataframe
output = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
#
# extract_reduced_data_set.py: extracts a smaller subset of all patients
# with a pre-defined size for a train, test, and validation data set.
#
# Example call: ./extract_reduced_data_set.py --Xy-directory /cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/7_ml_input/180830/reduced/temporal_5/AllLabels_0.0_8.0 /cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/misc_derived/temporal_split_180827.tsv --imputed-directory /cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/5_imputed/imputed_180827/reduced/temporal_5/ --output-directory /cluster/work/borgw/Bern_ICU_Sanctuary/v6/reduced/
import argparse
import glob
import logging
import os
import re
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from collections import defaultdict
from tqdm import tqdm
def decompose_path(path):
'''
Recursively decomposes a path into individual folders and file names
that are encountered while traversing it.
'''
folders = []
# Initialize the list of folders with the basename, provided such
# a thing exists.
basename = os.path.basename(path)
if basename:
folders.append(basename)
while True:
path = os.path.dirname(path)
basename = os.path.basename(path)
if basename:
folders.append(basename)
else:
break
folders.reverse()
return folders
def get_date_from_path(path, prefix=''):
'''
Attempts to parse a date portion, specified as YYMMDD, from a path.
This function looks for one folder within the decomposed path that
matches the specification. An optional prefix can be used to search
for folders of the form `prefixYYMMDD`.
Raises an exception if no date can be found.
'''
folders = decompose_path(path)
re_date = r'^' + prefix + '(\d{6})$'
for folder in folders:
m = re.match(re_date, folder)
if m:
return m.group(1)
raise RuntimeError('Unable to find expected date portion in path {}'.format(path))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--n-train', type=int, help='Number of patients in training data set', default=1000)
parser.add_argument('--n-test', type=int, help='Number of patients in test data set', default=200)
parser.add_argument('--n-val', type=int, help='Number of patients in validation data set', default=200)
parser.add_argument('--Xy-directory', type=str, help='Directory with X/y matrices')
parser.add_argument('--imputed-directory', type=str, help='Directory with imputed data')
parser.add_argument('--output-directory', type=str, help='Output directory', default='~/tmp')
parser.add_argument('SPLIT', type=str, help='Split file to use')
args = parser.parse_args()
# Ensures that '~' can be used to denote the user's home directory
# when specifying an output path.
args.output_directory = os.path.expanduser(args.output_directory)
# Create output directory if it does not already exist; all errors
# concerning this will be ignored.
os.makedirs(args.output_directory, exist_ok=True)
logging.basicConfig(level=logging.INFO)
logging = logging.getLogger(os.path.basename(__file__))
logging.info('Reading split file {}'.format(args.SPLIT))
split_date = get_date_from_path(os.path.splitext(os.path.basename(args.SPLIT))[0], prefix='temporal_split_')
logging.info('Date of split information is {}'.format(split_date))
full_split_df = pd.read_csv(args.SPLIT, sep='\t')
full_split_df = full_split_df[['pid', 'temporal_5']]
logging.info('Grouping patients according to "temporal_5" status')
# The key of this dictionary will be either 'train', 'test', or
# 'val'
split_data = {
split: data for split, data in full_split_df.groupby('temporal_5')
}
assert 'train' in split_data.keys()
assert 'test' in split_data.keys()
assert 'val' in split_data.keys()
train_patients = split_data['train']['pid'].values
test_patients = split_data['test']['pid'].values
val_patients = split_data['val']['pid'].values
np.random.seed(42)
logging.info('Selecting patients at random (seed = 42)')
train_patients = list(np.random.choice(train_patients, args.n_train))
test_patients = list(np.random.choice(test_patients, args.n_test))
val_patients = list(np.random.choice(val_patients, args.n_val))
patients = set(train_patients + test_patients + val_patients)
assert len(train_patients) == args.n_train
assert len(test_patients) == args.n_test
assert len(val_patients) == args.n_val
logging.info('Extracted {} patients (training), {} patients (test), and {} patients (validation)'.format(args.n_train, args.n_test, args.n_val))
# Reduce split data frame and store it in the output directory such
# that it can be picked up by subsequent scripts.
full_split_df = full_split_df.query('pid in @patients')
if not os.path.isdir(os.path.join(args.output_directory, split_date)):
os.makedirs(os.path.join(args.output_directory, split_date))
full_split_df_out = os.path.join(args.output_directory, split_date, 'split.tsv')
logging.info('Writing split file to {}'.format(full_split_df_out))
full_split_df.to_csv(full_split_df_out, sep='\t', index=False)
# Date portion of the feature matrix path; will be used in the
# subsequent steps to check validity.
Xy_date = None
# Keyword arguments for storing HDF5 files. These should be used
# whenever an HDF5 file has to be written.
hdf_kwargs = {
'complevel': 5,
}
# Prepare a new set of feature matrices (full feature matrices)
# based on the selected split.
if args.Xy_directory:
# The date portion of the X/y directory is *not* supposed to
# contain any prefix, so we can easily extract its date.
Xy_date = get_date_from_path(args.Xy_directory)
Xy_directory_out = os.path.join(args.output_directory, Xy_date, 'reduced', os.path.basename(args.Xy_directory))
if Xy_date != split_date:
logging.warning('Split date {} does not match X/y date {}; will continue nonetheless'.format(split_date, Xy_date))
logging.info('Using output directory {} and subordinate directories for X/y data'.format(Xy_directory_out))
os.makedirs(Xy_directory_out, exist_ok=True)
os.makedirs(os.path.join(Xy_directory_out, 'X'), exist_ok=True)
os.makedirs(os.path.join(Xy_directory_out, 'y'), exist_ok=True)
X_files = [f for f in sorted(glob.glob(os.path.join(args.Xy_directory, 'X/batch_*.h5')))]
y_files = [f for f in sorted(glob.glob(os.path.join(args.Xy_directory, 'y/batch_*.h5')))]
# Check that each `X` file has a corresponding `y` file and
# remove files that have no counterpart.
n_X_files = len(X_files)
n_y_files = len(y_files)
X_files = [X_file for X_file in X_files if os.path.basename(X_file) in map(os.path.basename, y_files)]
y_files = [y_file for y_file in y_files if os.path.basename(y_file) in map(os.path.basename, X_files)]
if n_X_files != len(X_files):
logging.info('Removed {} X files because they have no matching y file'.format(n_X_files - len(X_files)))
if n_y_files != len(y_files):
logging.info('Removed {} y files because they have no matching X file'.format(n_y_files - len(y_files)))
assert len(X_files) == len(y_files)
logging.info('Processing {} X/y files'.format(len(X_files)))
X = pd.HDFStore(os.path.join(Xy_directory_out, 'X/batch_0_reduced.h5'), mode='w', **hdf_kwargs)
y = pd.HDFStore(os.path.join(Xy_directory_out, 'y/batch_0_reduced.h5'), mode='w', **hdf_kwargs)
# Stores the patient IDs of all *processed* patients. The difference
# between this set and `patients` should hopefully be small, or even
# zero in the best case.
processed_patients = set()
for X_file, y_file in zip(X_files, y_files):
# The `HDFStore` class does *not* open these files in
# read-only mode by default, which may cause problems
# with locking.
y_store = pd.HDFStore(y_file, mode='r')
X_store = pd.HDFStore(X_file, mode='r')
logging.info('Processing {}...'.format(os.path.basename(X_file)))
# Only take patients that are represented in *both* files
# because we will be writing spurious data otherwise.
batch_patients = set(X_store.keys()).intersection(set(y_store.keys()))
# Convert patient IDs to `str` and prepend a '/' in order to match
# the format of the keys in the imputed file.
patients_str_keys = ['/' + str(patient) for patient in patients]
for patient_id in sorted(batch_patients.intersection(patients_str_keys)):
# Need to *remove* the leading '/' again in order to be
# consistent with the key format.
processed_patients.add(int(patient_id[1:]))
logging.debug('Storing patient {} in new matrix'.format(patient_id[1:]))
X_tmp = X_store.get(patient_id)
y_tmp = y_store.get(patient_id)
if X is None:
X = pd.DataFrame().reindex_like(X_tmp)
logging.info('Created columns for X from first entry')
logging.info('Columns: {}'.format(X.columns))
if y is None:
y = pd.DataFrame().reindex_like(y_tmp)
logging.info('Created columns for y from first entry')
logging.info('Columns: {}'.format(y.columns))
X_tmp.to_hdf(X, patient_id, **hdf_kwargs)
y_tmp.to_hdf(y, patient_id, **hdf_kwargs)
X_store.close()
y_store.close()
X.close()
y.close()
n_patients = len(patients)
n_processed_patients = len(processed_patients)
assert n_processed_patients <= n_patients
logging.info('Processed {}/{} patients for reduced feature matrix creation'.format(n_processed_patients, n_patients))
if n_patients != n_processed_patients:
missing_patients = patients.difference(processed_patients)
logging.warning('The following patients could not be processed because they were not found: {}'.format(missing_patients))
# Prepare a new set of imputed data files based on the selected
# split.
if args.imputed_directory:
imputed_date = get_date_from_path(args.imputed_directory, prefix='imputed_')
imputed_directory_out = os.path.join(args.output_directory, imputed_date, 'imputed')
if Xy_date and imputed_date != Xy_date:
logging.warning('X/y date {} does not match date {} of imputed data; will continue nonetheless'.format(Xy_date, imputed_date))
if imputed_date != split_date:
logging.warning('Split date {} does not match date {} of imputed data; will continue nonetheless'.format(split_date, imputed_date))
os.makedirs(imputed_directory_out, exist_ok=True)
logging.info('Using output directory {} for imputed data'.format(imputed_directory_out))
batch_files = [f for f in sorted(glob.glob(os.path.join(args.imputed_directory, 'batch_*.h5')))]
static_file = os.path.join(args.imputed_directory, 'static.h5')
################################################################
# Store batch information
################################################################
logging.info('Extracting data from batches')
X = pd.HDFStore(os.path.join(imputed_directory_out, 'batch_0_reduced.h5'), mode='w', **hdf_kwargs)
# Stores data over all batches; this is required because
# individual patients are collated within a single group
# so that we cannot write them per batch
df_all_batches = []
# Stores the patient IDs of all *processed* patients. The difference
# between this set and `patients` should hopefully be small, or even
# zero in the best case.
processed_patients = set()
for batch_file in batch_files:
X_store = pd.HDFStore(batch_file, mode='r')
logging.info('Processing {}...'.format(os.path.basename(batch_file)))
X_batch = X_store.get('imputed')
X_batch_grouped = X_batch.groupby('PatientID')
for patient_id, data in X_batch_grouped:
if patient_id in patients:
logging.debug('Storing patient {} for inclusion in new imputed matrix'.format(patient_id))
df_all_batches.append(data)
# Mark patient as being processed; note that this
# uses the original patient ID because we use the
# column of the respective data frame.
processed_patients.add(patient_id)
# Finally, store the data frame in the specified group;
# I have not found a way to append instead.
if df_all_batches:
pd.concat(df_all_batches).to_hdf(X, 'imputed', format='table', data_columns=['PatientID'], **hdf_kwargs)
X_store.close()
X.close()
n_patients = len(patients)
n_processed_patients = len(processed_patients)
assert n_processed_patients <= n_patients
logging.info('Processed {}/{} patients for reduced imputed matrix creation'.format(n_processed_patients, n_patients))
if n_patients != n_processed_patients:
missing_patients = patients.difference(processed_patients)
logging.warning('The following patients could not be processed because they were not found: {}'.format(missing_patients))
################################################################
# Store static information
################################################################
logging.info('Extracting data from static file')
X_static = pd.HDFStore(os.path.join(imputed_directory_out, 'static.h5'), mode='w', **hdf_kwargs)
static_data = | pd.read_hdf(static_file, 'data', mode='r') | pandas.read_hdf |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 12:41:09 2019
@author: sdenaro
"""
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta
import numpy as np
from numpy import matlib as matlib
import seaborn as sns
import statsmodels.api as sm
sns.set(style='whitegrid')
import matplotlib.cm as cm
#from sklearn.metrics import mean_squared_error, r2_score
from scipy import stats
def r2(x, y):
return stats.pearsonr(x, y)[0] ** 2
#Set Preference Customers reduction percent ('' or '_minus10' or '_minus20')
redux='_NEW'
##Load results
Results_d= pd.read_excel('BPA_net_rev_stoc_d' + redux + '.xlsx', sheet_name='Results_d')
#for e in range (1,60):
# Result_ensembles_d['ensemble' + str(e)]=pd.read_excel(BPA_net_rev_stoc_d' + redux + '.xlsx', sheet_name='ensemble' + str(e))
# print(str(e))
#
#for e in range (1,60):
# Result_ensembles_y['ensemble' + str(e)]=pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e))
# print(str(e))
#
#costs_y=pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx',sheet_name='Costs_y')
#PF_rates_avg=35.460833
#IP_rates_avg=44.030833
#Results Yearly Aggregates
Calendar_year=np.reshape(matlib.repmat(np.arange(1,1189),365,1), 1188*365, 'C' )
#PF_rev_y=Results_d.PF_rev.groupby(Calendar_year).sum()
#IP_rev_y=Results_d.IP_rev.groupby(Calendar_year).sum()
#SS_y=Results_d.SS.groupby(Calendar_year).sum()
#P_y=Results_d.P.groupby(Calendar_year).sum()
#BPA_hydro_y=Results_d.BPA_hydro.groupby(Calendar_year).sum()
PF_load_y=Results_d.PF_load.groupby(Calendar_year).sum()
IP_load_y=Results_d.IP_load.groupby(Calendar_year).sum()
MidC_y=Results_d.MidC.groupby(Calendar_year).mean()
CAISO_y=Results_d.CAISO.groupby(Calendar_year).mean()
Net_rev=pd.DataFrame(columns=['Net_Rev'])
for e in range (1,60):
Net_rev=Net_rev.append(pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[7]))
Net_rev.reset_index(inplace=True, drop=True)
Net_rev['positive']=Net_rev['Net_Rev']>0
Net_rev['negative']=Net_rev['Net_Rev']<0
####Weather data
#df_weather=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/Synthetic_weather/INDEX_synthetic_temp_wind.csv')
#df_weather.index=pd.DatetimeIndex(data=(t for t in dates if not isleap(t.year)))
#df_weather=df_weather[BPA_hydro.index[0]:BPA_hydro.index[-1]]
#
#Temp_Wind_y=df_weather.resample('D').sum()
#Temp_Wind_y=Temp_Wind_y.drop(index=pd.DatetimeIndex(data=(t for t in dates if isleap(t.year))))
#Temp_Wind_y=Temp_Wind_y.groupby(Calendar_year).max()
############ PLOTS ################################################
#Net revenue Bar plot
plt.rcParams.update({'font.size': 18})
plt.figure()
ax1 = plt.subplot()
ax1 = Net_rev['Net_Rev'].plot(kind="bar",
linewidth=0,
ax=ax1, color=Net_rev.positive.map({True:'blue', False:'red'})) # make bar plots
ax1.set_xticklabels(Net_rev.index, rotation = 0)
ax1.set_title('Yearly Net Revenue')
ax1.xaxis.set_ticks(np.arange(1, 1188, 20))
#ax1.set_xticklabels([i for i in range(1,1200,59)])
ax1.set_xticklabels([],[])
ax1.set_yticklabels([i for i in np.arange(-1,2,0.5)])
ax1.set_ylabel('B$')
ax1.grid(linestyle='-', linewidth=0.2)
#axbis = ax1.twinx()
#axbis.plot(TDA_y, 'steelblue')
#axbis.set_yticks([], [])
#plt.xlim(-1.7 ,20)
plt.tight_layout()
plt.savefig('figures/NetRev1200' + redux)
## Draw the density plot
plt.figure()
ax_pdf=sns.kdeplot(pow(10,-6)*Net_rev['Net_Rev'], shade=True)
# Plot formatting
ax_pdf.legend().set_visible(False)
plt.title('Yearly Net Revenue')
plt.xlabel('$MIllion per year')
ax_pdf.set_ylabel('density')
line = ax_pdf.get_lines()[-1]
x, y = line.get_data()
mask = x < 0
x, y = x[mask], y[mask]
ax_pdf.fill_between(x, y1=y, alpha=0.5, facecolor='red')
ax_pdf.ticklabel_format(style='sci', axis='y', scilimits=(-3,0))
#plt.text(0.5,1.5, 'mean=$M'+str(round(pow(10,-6)*Net_rev['Net_Rev'].mean()))\
# +'\n'+'std=$M'+str(pow(10,-6)*round(Net_rev['Net_Rev'].std())))
ax_pdf.set_xlim(-850,700)
#ax_pdf.set_ylim(0,3)
plt.show()
plt.savefig('figures/Rev_PDF' + redux, format='eps')
#Calculate VaR
#sort the net revs
Net_rev_sorted=Net_rev['Net_Rev'].sort_values(ascending=True)
Net_rev_sorted.reset_index(drop=True, inplace=True)
VaR_90 = Net_rev_sorted.quantile(0.1)
VaR_95 = Net_rev_sorted.quantile(0.05)
VaR_99 = Net_rev_sorted.quantile(0.01)
from tabulate import tabulate
print (tabulate([['90%', VaR_90],['95%', VaR_95], ['99%', VaR_99]], headers=['Confidence Level', 'Value at Risk']))
plt.axvline(x=VaR_90*pow(10,-6),color= 'yellow')
plt.text(VaR_90*pow(10,-6),1.5*pow(10,-3) , "VaR 90 %d" % VaR_90, rotation=90, verticalalignment='center')
plt.axvline(x=VaR_95*pow(10,-6),color= 'orange')
plt.text(VaR_95*pow(10,-6),1.5*pow(10,-3) , "VaR 95 %d" % VaR_95, rotation=90, verticalalignment='center')
plt.axvline(x=VaR_99*pow(10,-6),color= 'red')
plt.text(VaR_99*pow(10,-6),1.5*pow(10,-3) , "VaR 99 %d" % VaR_99, rotation=90, verticalalignment='center')
idx=np.where(np.diff(np.sign(Net_rev_sorted)))[0]
Negative_percent = 100*((idx+1)/len(Net_rev_sorted))
print ('Percent of negative net revs: %.2f' % Negative_percent )
plt.text(-700,1.5*pow(10,-3) , "perc negatives %f" % Negative_percent, rotation=90, verticalalignment='center')
Net_rev_avg=Net_rev['Net_Rev'].mean()
print('Average Net Revenue: %.2f' % Net_rev_avg)
plt.axvline(x=Net_rev_avg*pow(10,-6))
plt.text(Net_rev_avg*pow(10,-6),1.5*pow(10,-3) , "Average %d" % Net_rev_avg, rotation=90, verticalalignment='center')
plt.savefig('figures/Rev_PDF_lines' + redux + '.eps', format='eps')
plt.savefig('figures/Rev_PDF_lines' + redux, format='png')
#####################################################################
#### ENSEMBLE ANALYSIS ##############
#Create single ensemble horizonatal panels plot
plt.rcParams.update({'font.size': 12})
for e in range (1,60):
Net_rev_e=pow(10,-9)*pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[7])['Net_Rev']
Positive=Net_rev_e>0
fig, axes = plt.subplots(nrows=4, ncols=1)
ax1=axes[0]
Net_rev_e.plot(kind="bar",
linewidth=0.2,
ax=ax1,
color=Positive.map({True:'blue', False:'red'})) # make bar plots
ax1.set_title('Net Revenue Ensemble '+str(e), pad=0.6)
ax1.xaxis.set_ticks(range(1, 21, 1))
ax1.set_xticklabels([],[])
#ax1.set_xticklabels([i for i in np.arange(1,21,1)])
ax1.set_ylabel('B$')
ax1.set_xlim(-0.5,19.5)
ax1.grid(linestyle='-', linewidth=0.2, axis='x')
ax1.get_yaxis().set_label_coords(-0.08,0.5)
Reserves_e=pow(10,-9)*pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[1])
Reserves_e=Reserves_e.append(pd.Series(Reserves_e.iloc[19]))
Reserves_e.reset_index(inplace=True, drop=True)
Treas_fac1=320*pow(10,-3) # Treasury facility (1)
ax2 = axes[1]
ax2.axhline(0.608691000-Treas_fac1, color='r')
ax2.axhline(0, color='r')
ax2.plot(Reserves_e )
ax2.set_title('Reserves', pad=0.6)
ax2.xaxis.set_ticks(range(1, 21, 1))
ax2.set_xticklabels([],[])
#ax2.set_xticklabels([i for i in np.arange(1,21,1)])
ax2.set_ylabel('B$')
ax2.set_xlim(0.5,20.5)
ax2.grid(linestyle='-', linewidth=0.2, axis='x')
ax2.get_yaxis().set_label_coords(-0.08,0.5)
TF1=pow(10,-9)*pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[4])
TF2=pow(10,-9)*pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[5])
TF_e=pd.concat([TF1, TF2], axis=1)
TF_e.append(TF_e.iloc[19,:])
TF_e.reset_index(inplace=True, drop=True)
ax3 = axes[2]
TF_e.plot(ax=ax3, kind='bar', stacked=True, color=['g','y'], linewidth=0.2)
ax3.set_title('Treasury Facility', pad=0.6)
ax3.set_xticklabels([],[])
#ax3.set_xticklabels([i for i in np.arange(1,21,1)])
ax3.set_ylabel('B$')
ax3.xaxis.set_ticks(range(1, 21, 1))
ax3.set_ylabel('B$')
ax3.set_xlim(0.5,20.5)
ax3.grid(linestyle='-', linewidth=0.2, axis='x')
ax3.get_yaxis().set_label_coords(-0.08,0.5)
CRAC_e=pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[6])
CRAC_e=CRAC_e.append(pd.Series(CRAC_e.iloc[19]))
CRAC_e.reset_index(inplace=True, drop=True)
ax4 = axes[3]
#plot percent increase
#ax4.plot(CRAC_e*100/PF_rates_avg, 'darkviolet')
#plot $/MWh increase
ax4.plot(CRAC_e, 'darkviolet')
ax4.set_title('Surcharge', pad=0.6)
ax4.xaxis.set_ticks(range(1, 21, 1))
ax4.set_xticklabels([i for i in np.arange(1,21,1)])
#ax4.set_ylabel('%')
ax4.set_ylabel('$/MWh')
ax4.set_xlim(0.5,20.5)
ax4.grid(linestyle='-', linewidth=0.2, axis='x')
ax4.get_yaxis().set_label_coords(-0.08,0.5)
plt.subplots_adjust(left=0.11, bottom=0.065, right=0.985, top=0.945, wspace=0.2, hspace=0.345)
plt.savefig('figures/ensembles/Ensembles'+ redux + '/Ensemble'+ str(e))
########### QuantilePlots
# CRAC distribution
CRAC_e=pd.DataFrame()
for e in range (1,60):
CRAC_e=pd.concat([CRAC_e, pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[6])], axis=1)
#Qc=(100/PF_rates_avg)*CRAC_e.T
Qc=CRAC_e.T
Qc.reset_index(inplace=True, drop=True)
#CRAC distribution
count=np.sum(CRAC_e.any())
percent1=100*count/59 #BAU=11.86%
print ('Percent of CRAC ensembles: %.2f' % percent1 )
#Reserves ensembles
Reserves_e=pd.DataFrame()
for e in range (1,60):
Reserves_e=(pd.concat([Reserves_e, pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[1])['Reserves'] -
pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[4])['TF1']-
pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[5])['TF2'] ], axis=1))
Qr=pow(10,-9)*Reserves_e.T
Qr.reset_index(inplace=True, drop=True)
#Revenues ensembles
Revs_e=pd.DataFrame()
for e in range (1,60):
Revs_e=pd.concat([Revs_e, pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[7])['Net_Rev']], axis=1)
Qrev=pow(10,-9)*Revs_e.T
Qrev.reset_index(inplace=True, drop=True)
TTP_e=pd.DataFrame()
for e in range (1,60):
TTP_e=pd.concat([TTP_e, pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[2])], axis=1)
count=sum(-TTP_e.any()) #0% for both BAU and minus 10% and minus20%
## QuantilePlot ensembles function
def quantileplot(Q, ax, color, ci, name, start_day, end_day, realization, tick_interval, log):
# plot a selected streamflow realization (realization arg) over the
# quantiles of all streamflow realizations
if log:
Q = np.log10(Q)
ps = np.arange(0,1.01,0.05)*100
for j in range(1,len(ps)):
u = np.percentile(Q.iloc[:, start_day:end_day], ps[j], axis=0)
l = np.percentile(Q.iloc[:, start_day:end_day], ps[j-1], axis=0)
if ax == ax1:
ax.fill_between(np.arange(0,len(Q.iloc[0,start_day:end_day])), l, u, \
color=cm.twilight_shifted(ps[j-1]/100.0), alpha=0.75, edgecolor='none', label=[str(int(ps[j-1]))+'% to '+ str(int(ps[j])) +'%'])
#color=cm.PuOr(ps[j-1]/100.0), alpha=0.75, edgecolor='none')
else:
ax.fill_between(np.arange(0,len(Q.iloc[0,start_day:end_day])), l, u, \
color=cm.GnBu(ps[j-1]/100.0), alpha=0.75, edgecolor='none', label=[str(int(ps[j-1]))+'% to '+ str(int(ps[j])) +'%'])
#color=cm.RdYlBu_r(ps[j-1]/100.0), alpha=0.75, edgecolor='none')
ax.set_xlim([0, end_day-start_day])
ax.set_xticks(np.arange(0, end_day-start_day+tick_interval, tick_interval))
ax.set_xticklabels(np.arange(start_day+1, end_day+tick_interval, tick_interval))
ax.plot(np.arange(0,len(Q.iloc[0,start_day:end_day])), Q.median(), color='k', linewidth=2, label='median')
#ax.plot(np.arange(0,len(Q.iloc[0,start_day:end_day])), Q.iloc[(realization-1), \
# start_day:end_day], color='k', linewidth=2)
#ax.set_ylim([0, 5])
#ax.set_yticks(np.arange(6))
#ax.set_yticklabels([0, '', '', '', '', 5])
#ax.set_xticklabels(['Jan', 'Apr', 'Jul', 'Oct', 'Jan', 'Apr', 'Jul', 'Oct'])
ax.set_ylabel(name, fontsize=12)
#ax.set_xlabel('Simulation Day')
#for xl,yl in zip(ax.get_xgridlines(), ax.get_ygridlines()):
# xl.set_linewidth(0.5)
# yl.set_linewidth(0.5)
plt.legend()
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.subplots_adjust(left=0.075, right=0.82, top=0.96, bottom=0.055)
fig, ax1 = plt.subplots(1, 1)
quantileplot(Qr, color='k', ax=ax1, ci=90, name='B$', \
start_day=0, end_day=20, realization=59, tick_interval=1, log=False)
ax1.axhline(0, color='r', linestyle='--')
ax1.set_title('Net Reserves', size=15)
plt.xlim(0,19)
plt.ylim(-0.67,0.35)
plt.subplots_adjust(left=0.105, bottom=0.055, right=0.735, top=0.95)
plt.savefig('figures/Ensembles/Reserves' + redux, format='eps')
plt.savefig('figures/Ensembles/Reserves' + redux )
fig, ax1 = plt.subplots(1, 1)
quantileplot(Qrev, color='k', ax=ax1, ci=90, name='B$', \
start_day=0, end_day=20, realization=59, tick_interval=1, log=False)
ax1.axhline(0, color='r', linestyle='--')
ax1.set_title('Net Revenue', size=15)
plt.xlim(0,19)
plt.ylim(-0.8, 0.8)
plt.subplots_adjust(left=0.105, bottom=0.055, right=0.735, top=0.95)
plt.savefig('figures/Ensembles/Net_Rev' + redux, format='eps')
fig, ax1 = plt.subplots(1, 1)
quantileplot(Qc, color='k', ax=ax1, ci=90, name='$/MWh', \
start_day=0, end_day=20, realization=59, tick_interval=1, log=False)
ax1.axhline(0, color='r', linestyle='--')
ax1.set_title('Rate increase', size=15)
plt.xlim(0,19)
plt.ylim(0,6)
plt.subplots_adjust(left=0.105, bottom=0.055, right=0.735, top=0.95)
plt.savefig('figures/Ensembles/CRAC' + redux , format='eps')
#####################################################################
#### CORRELATION ANALYSIS ##############
# Load Streamflows
df_streamflow=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/Synthetic_streamflows/synthetic_streamflows_FCRPS.csv', header=None)
#cut to fit
df_streamflow = df_streamflow.iloc[365:len(df_streamflow)-(2*365),:]
df_streamflow.reset_index(inplace=True, drop=True)
##Total daily streamflow
FCRPS_d=pd.DataFrame(df_streamflow.sum(axis=1),columns=['FCRPS']).loc[0:365*1200 -1]
#remove missing years
FCRPS_d=pd.DataFrame(np.reshape(FCRPS_d.values, (365,1200), order='F'))
FCRPS_d.drop([82, 150, 374, 377, 540, 616, 928, 940, 974, 980, 1129, 1191],axis=1, inplace=True)
FCRPS_d=pd.DataFrame(np.reshape(FCRPS_d.values, (365*1188), order='F'))
#Cut from October (Water years)
#FCRPS_d=FCRPS_d[273:-92]
#FCRPS_d.reset_index(drop=True,inplace=True)
#same with The Dalles
TDA_d= | pd.DataFrame(df_streamflow[47].values,columns=['TDA']) | pandas.DataFrame |
from pydaisi import Daisi
import streamlit as st
import pandas as pd
import io
import base64
import numpy as np
import PIL
from PIL import Image
import pydeck as pdk
import time
import pickle
daisi_prophet = Daisi("Simple Prophet", base_url = "https://app.daisi.io")
geo = Daisi("GeoEncoder", base_url = "https://app.daisi.io")
def hello(name):
return name
@st.cache
def data_prep():
store_dataset = pd.read_csv("daily_sales_by_store-2.csv")
product_dataset = pd.read_csv("daily_sales_by_product_category.csv")
store_dataset.rename(columns={'Date':'ds'}, inplace=True)
store_dataset.rename(columns={'Sale_Dollars':'y'}, inplace=True)
product_dataset.rename(columns={'Date':'ds'}, inplace=True)
product_dataset.rename(columns={'Sale_Dollars':'y'}, inplace=True)
zip_codes = np.asarray(store_dataset['ZipCode'].tolist())
print(zip_codes)
zip_codes = np.unique(zip_codes)
zip_codes= np.array(zip_codes[~np.isnan(zip_codes)], dtype = np.int32)
print(zip_codes)
zip_codes = np.insert(zip_codes, -1, 0)
stores_names = ['Kum & Go', 'Casey', 'Fareway', 'Quik Trip', 'Walgreens', 'Hy-Vee', 'Wal-Mart', 'Smokin\' Joe', 'Sam\'s Club']
# new_names = [s if s in row['StoreName'] else 'other' for _, row in store_dataset.iterrows() for s in stores_names]
new_names = []
for _, row in store_dataset.iterrows():
exit = False
for s in stores_names:
if not exit:
if s in row['StoreName']:
new_names.append(s)
exit = True
if not exit:
new_names.append('other')
store_dataset['CleanNames'] = new_names
# result = latlon_per_zip_code(zip_codes)
result = pickle.load(open( "latlon.p", "rb" ))
# print(result)
zip_dict = dict()
for i, z in enumerate(zip_codes):
print(i, z)
try:
zip_dict[z] = {"lat": result[i]['latitude'], "lon": result[i]['longitude']}
except:
zip_dict[z] = {"lat": 0.0, "lon": 0.0}
print("problem with ", i, z, result[i])
lat = []
lon = []
for _, row in store_dataset.iterrows():
try:
z = int(row['ZipCode'])
lat.append(zip_dict[z]['lat'])
lon.append(zip_dict[z]['lon'])
except:
lat.append(np.nan)
lon.append(np.nan)
store_dataset['lat'] = lat
store_dataset['lon'] = lon
return store_dataset, product_dataset, zip_codes, stores_names, zip_dict
def filter_df(store_dataset, store_choice, zip_choice):
zip_select = (store_dataset['ZipCode'] == zip_choice)
store_select = (store_dataset['CleanNames'] == store_choice)
if len(zip_select.loc[zip_select == True]) + len(store_select.loc[store_select == True]) > 0:
filter = (zip_select & store_select)
message = "Forecasting for zip code " + str(zip_choice) + " and store " + store_choice
if zip_choice == 0:
zip_select = None
filter = store_select
message = "Forecasting for store " + store_choice + " in all zip codes"
if store_choice == 'All Stores':
store_select = None
filter = zip_select
print("Filter", filter)
message = "Forecasting for all stores in zip code " + str(zip_choice)
df = store_dataset.loc[filter]
else:
df = store_dataset
message = "Forecasting for all stores in all zip codes"
return df, message
@st.cache
def get_all_adresses(df):
addresses = [row['Address'] + ',' + row['City'] + ',' + row['ZipCode'] for i, row in df.iterrows()]
print(len(addresses))
futures = [geo.address_to_coordinates_(address=a) for a in addresses]
while futures[-1].get_status() == 'RUNNING':
time.sleep(0.1)
result = [latlon.value for latlon in futures]
lat = [result['latitude'] for latlon in futures]
lon = [result['longitude'] for latlon in futures]
df['lat'] = lat
df['lon'] = lon
return df
def get_store_zips(df):
store_zips = np.asarray(df['ZipCode'].tolist())
store_zips = np.unique(store_zips)
store_zips= np.array(store_zips[~np.isnan(store_zips)], dtype = np.int32)
print(store_zips)
return store_zips
def latlon_per_zip_code(store_zips):
futures = [geo.address_to_coordinates_(address=str(z) + ',USA') for z in store_zips]
while futures[-1].get_status() == 'RUNNING':
time.sleep(0.1)
result = [res.value for res in futures]
return result
def predict_per_zip_code(df, store_choice, store_zips, period):
dfs = []
for z in store_zips:
d, _ = filter_df(df, store_choice, z)
dfs.append(d)
futures = [daisi_prophet.predict_(d, period = period) for d in dfs]
while futures[-1].get_status() == 'RUNNING':
time.sleep(0.1)
result = []
for i, res in enumerate(futures):
try:
result.append([store_zips[i], np.sum(np.array(res[0]['yhat']).flatten())])
except:
result.append([store_zips[i],[0]])
return result
def get_geo_df(per_zip_code, lat_lon, zip_dict):
lat = [latlon['latitude'] for latlon in lat_lon]
lon = [latlon['longitude'] for latlon in lat_lon]
data = [np.sum(np.array(res[0]['yhat']).flatten()) for res in per_zip_code]
geo_df = pd.DataFrame()
geo_df['lat'] = lat
geo_df['lon'] = lon
geo_df['data'] = data
return geo_df
def get_geo_df_all(store_zips, zip_dict):
lat = [float(zip_dict[z]['lat']) for z in store_zips]
lon = [float(zip_dict[z]['lon']) for z in store_zips]
geo_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 17:57:37 2020
@author: matt
"""
import pandas as pd
import argparse
# reading in kraken2 reports
def reading_kraken2(basepath, metadata, level):
# filenames become the index
kraken_total = pd.concat(
map(lambda file:
pd.read_csv(file,
sep='\t',
names=('rel_ab', file[:-4], 'assigned', 'rank', 'tax_id', 'sci_name'),
usecols=(1,3,4,5),
index_col=('tax_id','sci_name','rank')).T,
basepath.split()))
if 'HV1' in kraken_total.index:
kraken_total.index=kraken_total.index.str.replace('V','V-')
if 'MetaHIT-MH0001' in kraken_total.index:
kraken_total.index=kraken_total.index.str.replace('MetaHIT-M','M')
# total values of abundances (unassigned+root)
total_ab_kraken = kraken_total.loc[:,[0, 1]].sum(axis=1)
# relative abundances
kraken_total = kraken_total.div(total_ab_kraken, axis=0)
if level == 'species':
# filter so that only species remain and drop rank column afterwards
kraken_total = kraken_total.loc[:,kraken_total.columns.get_level_values(2).isin(['S'])].droplevel('rank', axis=1)
if level == 'genus':
# filter so that only species remain and drop rank column afterwards
kraken_total = kraken_total.loc[:,kraken_total.columns.get_level_values(2).isin(['G'])].droplevel('rank', axis=1)
if metadata:
df_metadata = pd.read_csv(metadata, index_col=0)
kraken_total = pd.concat([kraken_total, df_metadata], axis=1)
kraken_total = kraken_total.set_index([kraken_total.index, 'disease'])
else:
kraken_total.columns = kraken_total.columns.to_series().apply(lambda x: "".join(str(x)))
# rename columns for XGBoost
kraken_total.columns = kraken_total.columns.to_series().apply(lambda x: "".join(str(x)).replace('[','(').replace(']',')').replace('<','_'))
return kraken_total.dropna()
# reading in metaphlan reports
def reading_metaphlan(basepath, metadata, level):
# clade names become column names, filenames the index
metaphlan_total = pd.concat(
map(lambda file:
pd.read_csv(file,
sep='\t',
skiprows=4,
names=('clade_name', 'path', file[:-8], 'add_clades'),
usecols=(0,2),
index_col='clade_name').T,
basepath.split()))
if 'HV1' in metaphlan_total.index:
metaphlan_total.index=metaphlan_total.index.str.replace('V','V-')
if 'MetaHIT-MH0001' in metaphlan_total.index:
metaphlan_total.index=metaphlan_total.index.str.replace('MetaHIT-M','M')
if metadata:
df_metadata = pd.read_csv(metadata, index_col=0)
metaphlan_total = pd.concat([metaphlan_total, df_metadata], axis=1)
metaphlan_total = metaphlan_total.set_index([metaphlan_total.index, 'disease'])
metaphlan_total = metaphlan_total[metaphlan_total.k__Bacteria.notnull()]
if level == 'species':
# filter that only species remain
metaphlan_total = metaphlan_total.filter(like='|s__')
# rename columns for better readability
metaphlan_total = metaphlan_total.rename(columns=lambda x: x.split('|s__')[1])
if level == 'genus':
# filter that only genus remain
metaphlan_total = metaphlan_total.filter(like='|g__')
metaphlan_total = metaphlan_total.drop(columns=metaphlan_total.filter(like='|s__'))
# rename columns for better readability
metaphlan_total = metaphlan_total.rename(columns=lambda x: x.split('|g__')[1])
# rename columns for XGBoost
metaphlan_total = metaphlan_total.rename(columns=lambda x: x.replace('[','(').replace(']',')').replace('<','_'))
return metaphlan_total.fillna(0)
# reading in marker based metaphlan reports
def reading_mpa_marker(basepath, metadata):
# clade names become column names, filenames the index
metaphlan_total = pd.concat(
map(lambda file:
pd.read_csv(file,
sep='\t',
skiprows=4,
names=('marker_name', file[:-11]),
index_col='marker_name').T,
basepath.split()))
if 'HV1' in metaphlan_total.index:
metaphlan_total.index=metaphlan_total.index.str.replace('V','V-')
if 'MetaHIT-MH0001' in metaphlan_total.index:
metaphlan_total.index=metaphlan_total.index.str.replace('MetaHIT-M','M')
if metadata:
df_metadata = pd.read_csv(metadata, index_col=0)
metaphlan_total = | pd.concat([metaphlan_total, df_metadata], axis=1) | pandas.concat |
import copy
import logging
import multiprocessing
import shutil
from multiprocessing import Pool
import traceback
import numpy as np
import matplotlib.pyplot as plt
import yaml
from astroquery.mast import TesscutClass
from lcbuilder.lcbuilder_class import LcBuilder
from lightkurve import TessLightCurve, KeplerLightCurve
from argparse import ArgumentParser
import sys
import pandas as pd
import os
import ast
import triceratops.triceratops as tr
from watson.watson import Watson
class Validator:
"""
This class intends to provide a statistical validation tool for SHERLOCK Candidates.
"""
def __init__(self, object_dir, validation_dir):
self.object_dir = os.getcwd() if object_dir is None else object_dir
self.data_dir = validation_dir
def validate(self, candidate, star, cpus, contrast_curve_file, bins=100, scenarios=5, sigma_mode="flux_err"):
"""
@param candidate: a candidate dataframe containing TICID, period, duration, t0, transits, depth, rp_rs, number,
curve and sectors data.
@param star: the star dataframe.
@param cpus: the number of cpus to be used.
@param contrast_curve_file: the auxiliary contrast curve file to give more information to the validation engine.
@param bins: the number of bins to resize the light curve
@param scenarios: the number of scenarios to compute the validation and get the average
@param sigma_mode: whether to compute the sigma for the validation from the 'flux_err' or the 'binning'.
"""
object_id = candidate["id"]
period = candidate.loc[candidate['id'] == object_id]['period'].iloc[0]
duration = candidate.loc[candidate['id'] == object_id]['duration'].iloc[0]
t0 = candidate.loc[candidate['id'] == object_id]['t0'].iloc[0]
transit_depth = candidate.loc[candidate['id'] == object_id]['depth'].iloc[0]
run = int(candidate.loc[candidate['id'] == object_id]['number'].iloc[0])
curve = int(candidate.loc[candidate['id'] == object_id]['curve'].iloc[0])
rp_rstar = candidate.loc[candidate['id'] == object_id]['rp_rs'].iloc[0]
a_rstar = candidate.loc[candidate['id'] == object_id]['a'].iloc[0] / star["R_star"]
logging.info("------------------")
logging.info("Candidate info")
logging.info("------------------")
logging.info("Period (d): %.2f", period)
logging.info("Epoch (d): %.2f", t0)
logging.info("Duration (min): %.2f", duration)
logging.info("Depth (ppt): %.2f", transit_depth)
logging.info("Run: %.0f", run)
logging.info("Detrend curve: %.0f", curve)
logging.info("Contrast curve file %s", contrast_curve_file)
lc_file = "/" + str(run) + "/lc_" + str(curve) + ".csv"
lc_file = self.data_dir + lc_file
try:
sectors_in = ast.literal_eval(str((candidate.loc[candidate['id'] == object_id]['sectors']).values[0]))
if (type(sectors_in) == int) or (type(sectors_in) == float):
sectors = [sectors_in]
else:
sectors = list(sectors_in)
except:
sectors = [0]
self.data_dir = validation_dir
object_id = object_id.iloc[0]
try:
Validator.execute_triceratops(cpus, validation_dir, object_id, sectors, lc_file, transit_depth,
period, t0, duration, rp_rstar, a_rstar, bins, scenarios, sigma_mode,
contrast_curve_file)
except Exception as e:
traceback.print_exc()
# try:
# self.execute_vespa(cpus, validation_dir, object_id, sectors, lc_file, transit_depth, period, t0, duration, rprs)
# except Exception as e:
# traceback.print_exc()
@staticmethod
def execute_triceratops(cpus, indir, object_id, sectors, lc_file, transit_depth, period, t0,
transit_duration, rp_rstar, a_rstar, bins, scenarios, sigma_mode, contrast_curve_file):
""" Calculates probabilities of the signal being caused by any of the following astrophysical sources:
TP No unresolved companion. Transiting planet with Porb around target star. (i, Rp)
EB No unresolved companion. Eclipsing binary with Porb around target star. (i, qshort)
EBx2P No unresolved companion. Eclipsing binary with 2 × Porb around target star. (i, qshort)
PTP Unresolved bound companion. Transiting planet with Porb around primary star. (i, Rp, qlong)
PEB Unresolved bound companion. Eclipsing binary with Porb around primary star. (i, qshort, qlong)
PEBx2P Unresolved bound companion. Eclipsing binary with 2 × Porb around primary star. (i, qshort, qlong)
STP Unresolved bound companion. Transiting planet with Porb around secondary star. (i, Rp, qlong)
SEB Unresolved bound companion. Eclipsing binary with Porb around secondary star. (i, qshort, qlong)
SEBx2P Unresolved bound companion. Eclipsing binary with 2 × Porb around secondary star. (i, qshort, qlong)
DTP Unresolved background star. Transiting planet with Porb around target star. (i, Rp, simulated star)
DEB Unresolved background star. Eclipsing binary with Porb around target star. (i, qshort, simulated star)
DEBx2P Unresolved background star. Eclipsing binary with 2 × Porb around target star. (i, qshort, simulated star)
BTP Unresolved background star. Transiting planet with Porb around background star. (i, Rp, simulated star)
BEB Unresolved background star. Eclipsing binary with Porb around background star. (i, qshort, simulated star)
BEBx2P Unresolved background star. Eclipsing binary with 2 × Porb around background star. (i, qshort, simulated star)
NTP No unresolved companion. Transiting planet with Porb around nearby star. (i, Rp)
NEB No unresolved companion. Eclipsing binary with Porb around nearby star. (i, qshort)
NEBx2P No unresolved companion. Eclipsing binary with 2 × Porb around nearby star. (i, qshort)
FPP = 1 - (TP + PTP + DTP)
NFPP = NTP + NEB + NEBx2P
Giacalone & Dressing (2020) define validated planets as TOIs with NFPP < 10−3 and FPP < 0.015 (or FPP ≤ 0.01,
when rounding to the nearest percent)
@param cpus: number of cpus to be used
@param indir: root directory to store the results
@param id_int: the object id for which the analysis will be run
@param sectors: the sectors of the tic
@param lc_file: the light curve source file
@param transit_depth: the depth of the transit signal (ppts)
@param period: the period of the transit signal /days)
@param t0: the t0 of the transit signal (days)
@param transit_duration: the duration of the transit signal (minutes)
@param rp_rstar: radius of planet divided by radius of star
@param a_rstar: semimajor axis divided by radius of star
@param bins: the number of bins to average the folded curve
@param scenarios: the number of scenarios to validate
@param sigma_mode: the way to calculate the sigma for the validation ['flux_err' | 'binning']
@param contrast_curve_file: the auxiliary contrast curve file to give more information to the validation engine.
"""
save_dir = indir + "/triceratops"
if os.path.exists(save_dir):
shutil.rmtree(save_dir, ignore_errors=True)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
duration = transit_duration / 60 / 24
logging.info("----------------------")
logging.info("Validation procedures")
logging.info("----------------------")
logging.info("Pre-processing sectors")
mission, mission_prefix, id_int = LcBuilder().parse_object_info(object_id)
if mission == "TESS":
sectors = np.array(sectors)
sectors_cut = TesscutClass().get_sectors("TIC " + str(id_int))
sectors_cut = np.array([sector_row["sector"] for sector_row in sectors_cut])
if len(sectors) != len(sectors_cut):
logging.warning("WARN: Some sectors were not found in TESSCUT")
logging.warning("WARN: Sherlock sectors were: " + str(sectors))
logging.warning("WARN: TESSCUT sectors were: " + str(sectors_cut))
sectors = np.intersect1d(sectors, sectors_cut)
if len(sectors) == 0:
logging.warning("There are no available sectors to be validated, skipping TRICERATOPS.")
return save_dir, None, None
logging.info("Will execute validation for sectors: " + str(sectors))
logging.info("Acquiring triceratops target")
target = tr.target(ID=id_int, mission=mission, sectors=sectors)
# TODO allow user input apertures
logging.info("Reading apertures from directory")
apertures = yaml.load(open(object_dir + "/apertures.yaml"), yaml.SafeLoader)
apertures = apertures["sectors"]
valid_apertures = {}
for sector, aperture in apertures.items():
if sector in sectors:
valid_apertures[sector] = aperture
target.plot_field(save=True, fname=save_dir + "/field_S" + str(sector), sector=sector,
ap_pixels=aperture)
apertures = np.array([aperture for sector, aperture in apertures.items()])
valid_apertures = np.array([aperture for sector, aperture in valid_apertures.items()])
depth = transit_depth / 1000
if contrast_curve_file is not None:
logging.info("Reading contrast curve %s", contrast_curve_file)
plt.clf()
cc = | pd.read_csv(contrast_curve_file, header=None) | pandas.read_csv |
import asyncio
import logging
import pandas as pd
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
from portfolio.portfolio import Portfolio
from clients.websocket_base import WebsocketBase
from clients.api_client_base import APIClientBase
from execution.base_execution_engine import BaseExecutionEngine
from core.instrument import Instrument
from core.events import Event, EventType, BarEvent, TradeExecutedEvent
rootLogger = logging.getLogger()
class BarStrategyBase(ABC):
def __init__(self, config: Dict, strategy_name: str):
super().__init__()
# General settings
self.strategy_name: str = strategy_name
self._instruments: List[Instrument] = config['instruments']
self._strategy_params: Dict = config['strategy_params']
self._trading_volume: float = config['trading_volume'] # Total value to trade volume in USD
# API keys and initialization of exchange clients
self._api_keys: Dict = config['exchange']['api_keys']
if 'subaccount' in config['exchange'].keys():
self._api_client: APIClientBase = config['exchange']['api_client'](api_keys=self._api_keys, subaccount=config['exchange']['subaccount'])
else:
self._api_client: APIClientBase = config['exchange']['api_client'](api_keys=self._api_keys)
self._websocket_client: WebsocketBase = config['exchange']['websocket_client'](
api_keys=self._api_keys,
subaccount=config['exchange']['subaccount']
)
# Initialize portfolio manager and execution engine
self._portfolio_manager: Portfolio = config['portfolio_manager'](
self._instruments,
save_path=config['position_save_path']
)
self._execution_engine: BaseExecutionEngine = config['execution_engine'](save_path=config['execution_save_path'])
self._execution_engine.set_api_client(self._api_client)
self._execution_engine.set_ws_client(self._websocket_client)
# Initialize class_variables to store price data
self._price_dfs: Dict[str, pd.DataFrame] = {}
self._price_df_rolled: Dict[str, bool] = {instrument.name: False for instrument in self._instruments}
self._last_roll_ts: Optional[pd.Timestamp] = None
async def start(self) -> None:
asyncio.create_task(self._websocket_client.start())
while not self._websocket_client.is_running:
await asyncio.sleep(0.1)
asyncio.create_task(self._execution_engine.start())
await self._subscribe_data_streams()
async def close(self):
await self._execution_engine.close()
await self._websocket_client.close()
async def _subscribe_data_streams(self) -> None:
await asyncio.gather(
*(
self._websocket_client.subscribe_bars(
instrument=instrument,
consumer=self,
freq=self._strategy_params['bar_freq']
)
for instrument in self._instruments
)
)
def _get_historical_price_data(self) -> None:
raise NotImplementedError('Loading of historical price data is not supported yet.')
def handle_event(self, event: Event) -> None:
try:
if isinstance(event, BarEvent):
self._handle_bar_update(event)
elif isinstance(event, TradeExecutedEvent):
self._handle_execution(event)
else:
raise ValueError(f'Received event with unknown key in {self.strategy_name}-strategy.')
except Exception as e:
rootLogger.error(f'Error in handle_event method of {self.strategy_name}-strategy: {e}')
def _handle_bar_update(self, event: BarEvent) -> None:
self._update_price_dfs(event)
if any(self._price_df_rolled.values()) and self._do_rebalance(event):
self._last_roll_ts = pd.Timestamp(event.data.timestamp, unit='s', tz='utc')
self._price_df_rolled = {instrument.name: False for instrument in self._instruments}
rootLogger.info("Candle rolled: {}".format(self._last_roll_ts))
# Get last common timestamp of all price dataframes
last_ts = min([v.index[-1] for v in self._price_dfs.values()])
price_dfs = {k: v.loc[:last_ts] for k, v in self._price_dfs.items()}
# Calculate position
target_position = self._calculate_target_position(price_dfs)
target_position = (target_position * self._trading_volume)
self._place_trades(target_position, price_dfs)
def _place_trades(self, target_position: pd.Series, price_dfs: Dict[str, pd.DataFrame]) -> None:
last_prices = pd.Series({k: price_dfs[k]['close'].iloc[-1] for k in target_position.keys()})
position_deltas = target_position / last_prices - self._portfolio_manager.get_current_position()
rootLogger.info('Target position {}'.format(target_position))
rootLogger.info('Initiating execution of position deltas: {}'.format(position_deltas))
for instrument in self._instruments:
position_delta = position_deltas.loc[instrument.name]
if position_delta != 0:
asyncio.create_task(
self._execution_engine.execute_trade(
instrument=instrument,
size=position_delta,
exec_callback=self.handle_event)
)
def _handle_execution(self, event: TradeExecutedEvent):
self._portfolio_manager.handle_execution(event)
def _do_rebalance(self, event: BarEvent) -> bool:
if self._last_roll_ts is None:
return True
# Only rebalance if difference between current event timestamp and last roll timestamp is larger than bar_freq
ts_delta = pd.Timestamp(event.data.timestamp, unit="s", tz="utc") - self._last_roll_ts
return ts_delta >= pd.Timedelta(self._strategy_params['bar_freq'])
def _update_price_dfs(self, bar_event: BarEvent) -> None:
bar = bar_event.data
# get data-frame for a given symbol (or create it if it is not there yet)
price_df = self._price_dfs.get(bar.instrument.name, pd.DataFrame(columns=['open', 'high', 'low', 'close', 'volume']))
# create index for the new row
candle_ts = | pd.Timestamp(bar.timestamp, unit="s", tz="utc") | pandas.Timestamp |
# thought for the paper and the high frequency/other variables validation we
# could limit ourselves (at least until we see what happens with this first
# exploration) to look at one stitched output for a model for which the
# emulation is very good, and one for a model for which the emulation shows
# big jumps. For these two cases I would like to get all the gridded output
# (but monthly only) TAS/PR and SLP.
# I think I would need both emulated and real stuff to compare. In the case of
# the bad one we only have one stitched result; in the case of the good one
# we have two, and either would do.
# #############################################################################
# General setup
# #############################################################################
# Import packages
import pandas as pd
import numpy as np
import stitches as stitches
import pkg_resources
import os
from pathlib import Path
# pd.set_option('display.max_columns', None)
# OUTPUT_DIR = pkg_resources.resource_filename('stitches', 'data/created_data')
OUTPUT_DIR = '/pic/projects/GCAM/stitches_pic/paper1_outputs'
# #############################################################################
# Experiment setup
# #############################################################################
# experiment parameters
tolerances = [0.075] # np.round(np.arange(0.07, 0.225, 0.005), 3)
Ndraws = 1
error_threshold = 0.1
# pangeo table of ESMs for reference
pangeo_path = pkg_resources.resource_filename('stitches', 'data/pangeo_table.csv')
pangeo_data = pd.read_csv(pangeo_path)
pangeo_data = pangeo_data[((pangeo_data['variable'] == 'tas') | (pangeo_data['variable'] == 'pr') | (pangeo_data['variable'] == 'psl'))
& ((pangeo_data['domain'] == 'Amon') | (pangeo_data['domain'] == 'day')) ].copy()
# Keep only the runs that have data for all vars X all timesteps:
pangeo_good_ensembles =[]
for name, group in pangeo_data.groupby(['model', 'experiment', 'ensemble']):
df = group.drop_duplicates().copy()
if len(df) == 6:
pangeo_good_ensembles.append(df)
del(df)
pangeo_good_ensembles = pd.concat(pangeo_good_ensembles)
pangeo_good_ensembles = pangeo_good_ensembles[['model', 'experiment', 'ensemble']].drop_duplicates().copy()
pangeo_good_ensembles = pangeo_good_ensembles.reset_index(drop=True).copy()
# won't use idealized runs
pangeo_good_ensembles = pangeo_good_ensembles[~((pangeo_good_ensembles['experiment'] == '1pctCO2') |
(pangeo_good_ensembles['experiment'] == 'abrupt-4xCO2')) ].reset_index(drop=True).copy()
esms = ['CMCC-CM2-SR5', 'NorESM2-MM']
# ['ACCESS-CM2', 'ACCESS-ESM1-5', 'AWI-CM-1-1-MR', 'BCC-CSM2-MR',
# 'BCC-ESM1', 'CESM2', 'CESM2-FV2', 'CESM2-WACCM', 'CMCC-CM2-HR4',
# 'CMCC-CM2-SR5', 'CMCC-ESM2', 'CanESM5', 'HadGEM3-GC31-LL',
# 'HadGEM3-GC31-MM', 'IITM-ESM', 'MIROC-ES2L', 'MIROC6',
# 'MPI-ESM-1-2-HAM', 'MPI-ESM1-2-HR', 'MPI-ESM1-2-LR', 'MRI-ESM2-0',
# 'NorESM2-LM', 'NorESM2-MM', 'SAM0-UNICON', 'TaiESM1',
# 'UKESM1-0-LL']
# #############################################################################
# Load full archive and target data
# #############################################################################
# Load the full archive of all staggered windows, which we will be matching on
full_archive_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_archive_data = pd.read_csv(full_archive_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_archive_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_archive_data= full_archive_data[i1.isin(i2)].copy()
del(i1)
del(i2)
# Load the original archive without staggered windows, which we will draw
# the target trajectories from for matching
full_target_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_target_data = pd.read_csv(full_target_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_target_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_target_data = full_target_data[i1.isin(i2)].copy()
del(i1)
del(i2)
del(keys)
# #############################################################################
# Some helper functions
# #############################################################################
def prep_target_data(target_df):
if not target_df.empty:
grped = target_df.groupby(['experiment', 'variable', 'ensemble', 'model'])
for name, group in grped:
df1 = group.copy()
# if it isn't a complete time series (defined as going to 2099 or 2100),
# remove it from the target data frame:
if max(df1.end_yr) < 2099:
target_df = target_df.loc[(target_df['ensemble'] != df1.ensemble.unique()[0])].copy().reset_index(
drop=True)
del (df1)
del (grped)
target_df = target_df.reset_index(drop=True).copy()
return(target_df)
def get_orig_data(target_df):
if not target_df.empty:
esm_name = target_df.model.unique()[0]
scn_name = target_df.experiment.unique()[0]
full_rawtarget_path = pkg_resources.resource_filename('stitches', ('data/tas-data/' + esm_name + '_tas.csv'))
full_rawtarget_data = | pd.read_csv(full_rawtarget_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
This module contains all the methods required to request the data from
a particular object, obtain it from the ESA NEOCC portal and parse it
to show it properly. The information of the object is shows in the
ESA NEOCC in different tabs that correspond to the different classes
within this module.
* Project: NEOCC portal Python interface
* Property: European Space Agency (ESA)
* Developed by: Elecnor Deimos
* Author: <NAME>
* Issue: 2.1.0
* Date: 01-03-2021
* Purpose: Module which request and parse list data from ESA NEOCC
* Module: tabs.py
* History:
======== =========== =====================================================
Version Date Change History
======== =========== =====================================================
1.0 26-02-2021 Initial version
1.1 26-03-2021 Physical properties and summary funcionalities added
1.2 17-05-2021 Adding *help* property for dataframes.\n
Parsing of diameter property in *summary* and
*physical_properties* has been modified to add
robustness.\n
In *physical_properties* the parsing of properties
has been modified to include cases with more
information.\n
Adding timeout of 90 seconds.
1.3 16-06-2021 URLs and timeout from configuration file for
astroquery implementation.\n
Change time format to datetime ISO format.\n
Change to correct types in attributes (e.g.,
matrices, etc.)\n
Change ephemerides skyfooter to fix bug.\n
Change *get_matrix* from *orbit_properties* for
objects with 2 non-gravitational parameters.
1.3.1 29-06-2021 No changes
1.4.0 29-10-2021 Tab physical_properties has been recoded to parse the
information through a request in the portal instead
of parsing the html.\n
Get URL function now contains the file extension for
physical properties.\n
Parsing of ephemerides has been change to adapt new
format.\n
Orb_type attribute added in tab *orbit_properties*.\n
Bug fix in tab *observations*.\n
Adding redundancy for tab *summary* parsing.
2.0.0 21-01-2022 Prepare module for Astroquery integration
2.1.0 01-03-2022 Remove *parse* dependency
======== =========== =====================================================
© Copyright [European Space Agency][2022]
All rights reserved
"""
import io
import logging
import time
import re
from datetime import datetime, timedelta
import pandas as pd
import requests
from bs4 import BeautifulSoup
from astroquery.esa.neocc import conf
# Import URLs and TIMEOUT
API_URL = conf.API_URL
EPHEM_URL = conf.EPHEM_URL
SUMMARY_URL = conf.SUMMARY_URL
TIMEOUT = conf.TIMEOUT
VERIFICATION = conf.SSL_CERT_VERIFICATION
def get_object_url(name, tab, **kwargs):
"""Get url from requested object and tab name.
Parameters
----------
name : str
Name of the requested object.
tab : str
Name of the request tab. Valid names are: *summary,
orbit_properties, physical_properties, observations,
ephemerides, close_approaches and impacts*.
**kwargs : str
orbit_properties and ephemerides tabs required additional
arguments to work:
* *orbit_properties*: the required additional arguments are:
* *orbital_elements* : str (keplerian or equinoctial)
* *orbit_epoch* : str (present or middle)
* *ephemerides*: the required additional arguments are:
* *observatory* : str (observatory code, e.g. '500', 'J04', etc.)
* *start* : str (start date in YYYY-MM-DD HH:MM)
* *stop* : str (end date in YYYY-MM-DD HH:MM)
* *step* : str (time step, e.g. '2', '15', etc.)
* *step_unit* : str (e.g. 'days', 'minutes', etc.)
Returns
-------
url : string
Final url from which data is requested.
Raises
------
KeyError
If the requested tab is not in the dictionary.
ValueError
If the elements requested are not valid.
"""
# Define the parameters of each list
tab_dict = {"impacts": '.risk',
"close_approaches": '.clolin',
"physical_properties" : '.phypro',
"observations": '.rwo',
"orbit_properties": ['.ke0', '.ke1', '.eq0', '.eq1']}
# Raise error is input is not in dictionary
if tab not in tab_dict:
raise KeyError('Valid list names are impacts, close_approaches'
' observations and orbit_properties')
# Check if orbital_elements is an input
if 'orbital_elements' in kwargs:
# Check if the elements are Keplerian or Equinoctial
if kwargs['orbital_elements'] == "keplerian":
#Check if the epoch is present day or middle obs. arch
if kwargs['orbit_epoch'] == "present":
url = str(name).replace(' ', '%20') + tab_dict[tab][1]
elif kwargs['orbit_epoch'] == "middle":
url = str(name).replace(' ', '%20') + tab_dict[tab][0]
elif kwargs['orbital_elements'] == "equinoctial":
if kwargs['orbit_epoch'] == "present":
url = str(name).replace(' ', '%20') + tab_dict[tab][3]
elif kwargs['orbit_epoch'] == "middle":
url = str(name).replace(' ', '%20') + tab_dict[tab][2]
else:
raise ValueError('The introduced file type does not exist.'
'Check that orbit elements (keplerian or '
'equinoctial) and orbit epoch (present or '
'middle).')
else:
url = str(name).replace(' ', '%20') + tab_dict[tab]
return url
def get_object_data(url):
"""Get object in byte format from requested url.
Parameters
----------
url : str
URL of the requested data.
Returns
-------
data_obj : object
Object in byte format.
"""
# Get data from URL
data_obj = requests.get(API_URL + url, timeout=TIMEOUT,
verify=VERIFICATION).content
# Parse data and assign attributes to object
return data_obj
def get_indexes(dfobj, value):
"""Get a list with location index of a value or string in the
DataFrame requested.
Parameters
----------
dfobj : pandas.DataFrame
Data frame where the value will be searched.
value : str, int, float
String, integer or float to be searched.
Returns
-------
listofpos : list
List which contains the location of the value in the Data
frame. The first elements will correspond to the index and
the second element to the columns
"""
# Empty list
listofpos = []
# isin() method will return a dataframe with boolean values,
# True at the positions where element exists
result = dfobj.isin([value])
# any() method will return a boolean series
seriesobj = result.any()
# Get list of column names where element exists
columnnames = list(seriesobj[seriesobj].index)
# Iterate over the list of columns and extract the row index
# where element exists
for col in columnnames:
rows = list(result[col][result[col]].index)
for row in rows:
listofpos.append((row, col))
return listofpos
class Impacts:
"""This class contains information of object possible impacts.
Attributes
---------
impacts : pandas.DataFrame
Data frame where are listed all the possible impactors.
arc_start : str
Starting date for optical observations.
arc_end : str
End date for optical observations.
observation_accepted : int
Total number of observations subtracting rejected
observations.
observation_rejected : int
Number of observations rejected.
computation : str
Date of computation (in format YYYYMMDD MJD TimeSys)
info : str
Information from the footer of the requested file.
additional_note : str
Additional information. Some objects (e.g. 99942 Apophis)
have an additional note after the main footer.
"""
def __init__(self):
"""Initialization of class attributes
"""
self.impacts = []
self.arc_start = []
self.arc_end = []
self.observation_accepted = []
self.observation_rejected = []
self.computation = []
self.info = []
self.additional_note = []
@staticmethod
def _get_footer(data_obj):
"""Get footer information for impacts content.
Parameters
----------
data_obj : object
Object in byte format.
Returns
-------
obs : list
Number of observations (total and rejected).
arc : list
Start and end dates.
comp : str
Computation date.
info : str
Additional information.
add_note : str
Addition note.
"""
# Decode data using UTF-8 and store in new space of memory
df_txt_d = io.StringIO(data_obj.decode('utf-8'))
# Read data as txt
df_txt = pd.read_fwf(df_txt_d, header=None)
# Check that there is not additonal note
index = get_indexes(df_txt, '<p> </p>')
# Assign the index for obtaining the rest of attributes and
# additional note value
if not index:
j = 0
add_note = 'There is no additional note for this object'
else:
j = 6
index = index[0][0]
add_note = df_txt.iloc[index+1, 0] + '\n' +\
df_txt.iloc[index+2, 0] + '\n' +\
df_txt.iloc[index+3, 0] + '\n' +\
df_txt.iloc[index+4, 0] + '\n' +\
df_txt.iloc[index+5, 0]
# Remove unnecessary words
add_note = add_note.replace('<p>','').replace('</p>','').\
replace('<span style="color: #0000CD;"><strong>','').\
replace('</strong></span>','').replace('<sup>','^').\
replace('</sup>','')
# Drop NaN values if necessary
df_txt = df_txt.dropna(how='all')
# Template for observations data:
# Based on {total} optical observations (of which {rejected}
# are rejected as outliers)
obs_total = df_txt.iloc[-7-j][0].split('on ')[1].\
split('optical')[0].strip()
obs_rejected = df_txt.iloc[-7-j][0].split('which ')[1].\
split('are')[0].strip()
obs = [obs_total, obs_rejected]
# Template for date of observations: from {start} to {end}.
arc_start = df_txt.iloc[-6-j][0].split('from ')[1].\
split('to ')[0].strip()
arc_end = df_txt.iloc[-6-j][0].split('to ')[1].\
split('.')[0] + '.' + df_txt.iloc[-6-j][0].\
split('to ')[1].split('.')[1]
arc = [arc_start, arc_end]
# Computation date
comp = df_txt.iloc[-1-j][0].split('=')[2].strip()
# Get information text
info = df_txt.iloc[-5-j][0] + '\n\n' + df_txt.iloc[-4-j][0] +\
'\n' + df_txt.iloc[-3-j][0] + '\n\n' + df_txt.iloc[-2-j][0]
return obs, arc, comp, info, add_note
def _impacts_parser(self, data_obj):
"""Parse and arrange the possible impacts data
Parameters
----------
data_obj : object
Object in byte format.
Raises
------
ValueError
If there is not risk file available for requested
object
"""
# Check that there is not additonal note
df_check_d = io.StringIO(data_obj.decode('utf-8'))
# Read as txt file
df_check = pd.read_fwf(df_check_d, engine='python')
index = get_indexes(df_check, '<p> </p>')
# Assign the skipfooter if there is or not additional note
if not index:
footer_num = 12
else:
footer_num = 21
# Decode data using UTF-8 and store in memory
df_impacts_d = io.StringIO(data_obj.decode('utf-8'))
# Read data as csv
df_impacts = pd.read_csv(df_impacts_d, skiprows=[0, 2, 3, 4],
skipfooter=footer_num,
delim_whitespace=True, engine='python')
# Check if there are information for the object
if len(df_impacts.index) == 0:
logging.warning('Required risk file is not '
'available for this object')
raise ValueError('Required risk file is not '
'available for this object')
# The previous skipfooter allow strange cases to show proper
# impacts table. For the rest of the cases an additional row
# must be dropped
if df_impacts.iloc[-1,0] == 'Based':
# Drop last row
df_impacts = df_impacts.iloc[:-1]
# Reassign numeric types to columns
df_impacts['MJD'] = pd.to_numeric(df_impacts['MJD'])
df_impacts['sigimp'] = pd.to_numeric(df_impacts['sigimp'])
df_impacts['dist'] = pd.to_numeric(df_impacts['dist'])
df_impacts['width'] = pd.to_numeric(df_impacts['width'])
df_impacts['p_RE'] = pd.to_numeric(df_impacts['p_RE'])
df_impacts['exp.'] = pd.to_numeric(df_impacts['exp.'])
df_impacts['en.'] = | pd.to_numeric(df_impacts['en.']) | pandas.to_numeric |
# Test script for Uncertainty Calibration in presence of common corruptions
# Evaluate calibration on clean and corrupted data
#
# Last updated: Dec 30 2021
import sys
import numpy as np
import torch
from torchvision import datasets, transforms
from DataLoad import *
from DiGN import DiGN
args = sys.argv[1:]
dataset = args[0]
architecture = args[1]
batch_size = int(args[2])
ensemble_eval = (args[3]=="True")
train_alg = args[4]
eval_noise = (args[5]=="True")
print('Dataset: %s | Architecture: %s | Batch size: %d' % (dataset, architecture, batch_size))
print('Ensemble_eval: %s | Train alg: %s' % (ensemble_eval, train_alg))
print('Evaluate noise only: %s' % (eval_noise))
if dataset in ['cifar10','cifar100']:
ensemble_stddev = 0.1 # CIFAR10/100
else:
ensemble_stddev = 0.3 # Tiny-ImageNet
if dataset=='cifar10':
data_path = './cifar10'
n_classes = 10
get_loaders = get_loaders_cifar10
corrupt_path = './CIFAR-10-C/'
elif dataset=='cifar100':
data_path = './cifar100'
n_classes = 100
get_loaders = get_loaders_cifar100
corrupt_path = './CIFAR-100-C/'
elif dataset=='tinyimagenet':
data_path = './tiny-imagenet-200'
n_classes = 200
get_loaders = get_loaders_tinyimagenet
corrupt_path = './Tiny-ImageNet-C/'
else:
raise ValueError('dataset not supported.')
if architecture=='resnet18':
arch = 'RN18'
elif architecture=='resnet18wide':
arch = 'WRN18'
elif architecture=='resnet18_64':
arch = 'RN18_64'
elif architecture=='resnet18wide_64':
arch = 'RN18W_64'
elif architecture=='densenet121':
arch = 'DN121'
elif architecture=='inceptionv3':
arch = 'IncV3'
else:
raise ValueError('architecture not supported.')
# data loader for training, eval
train_loader, val_loader = get_loaders(data_path=data_path,
batch_size_train=batch_size, batch_size_val=batch_size, num_workers=4)
print('# train batches = ', len(train_loader), ', # val batches = ', len(val_loader))
# architecture
dign = DiGN(architecture, n_classes=n_classes, dataset=dataset)
# number of runs
M = 3
# ======== Auxiliary Functions ===========
def get_corrupt_loader_cifar(corruption_path_base):
labels = np.load(corruption_path_base+'labels.npy')
if eval_noise:
corruption_list=['speckle_noise','impulse_noise','shot_noise']
else:
corruption_list=['saturate','spatter','gaussian_blur','speckle_noise','jpeg_compression','pixelate','elastic_transform','contrast','brightness','fog','frost','snow','zoom_blur','motion_blur','glass_blur','defocus_blur','impulse_noise','shot_noise'] #,'gaussian_noise']
corruption_list.sort()
x_all = np.zeros((50000*len(corruption_list),3,32,32))
labels_all = np.zeros((50000*len(corruption_list)))
start = 0
for i in range(len(corruption_list)):
x_corruption_i = np.load(corruption_path_base+corruption_list[i]+'.npy')
x_corruption_i = np.moveaxis(x_corruption_i, 3, 1)
x_all[start:start+50000] = x_corruption_i
labels_all[start:start+50000] = labels
start += 50000
corrupt_loader = get_loader_from_numpy(x_all, labels_all, batch_size=500)
return corrupt_loader
def get_corrupt_loader_tinyimagenet(corruption_path_base):
# 14 corruptions
if eval_noise:
corruption_list=['impulse_noise','shot_noise']
else:
corruption_list=['brightness','contrast','defocus_blur','elastic_transform','fog','frost','glass_blur','impulse_noise','jpeg_compression','motion_blur','pixelate','shot_noise','snow','zoom_blur'] #,'gaussian_noise']
corruption_list.sort()
# construct list of datasets
Datasets = []
for i in range(len(corruption_list)):
corruption = corruption_list[i]
for j in range(5):
path = corruption_path_base+'/'+corruption+'/'+str(j+1)+'/'
dataset = datasets.ImageFolder(path, transform=TEST_TRANSFORMS_DEFAULT(64))
Datasets.append(dataset)
# concatenate datasets
# from: https://discuss.pytorch.org/t/how-does-concatdataset-work/60083/2
all_datasets = torch.utils.data.ConcatDataset(Datasets)
# all_datasets = torch.utils.data.ConcatDataset([d for d in Datasets])
# construct dataloader for all corruptions, levels
corrupt_loader = torch.utils.data.DataLoader(all_datasets, batch_size=500, shuffle=False)
return corrupt_loader
# Measure how well prediction scores match the actual likelihood of a correct prediction
def calibr_metrics(p_pred, y_true, thres=0.5):
y_true = y_true.astype('int')
n = p_pred.shape[0]
Delta = 0.0666 # bin resolution
nbins = np.ceil(1/Delta).astype('int')
p_range = np.linspace(0,1,nbins+1)
# compute max-prob scores
p_max = np.max(p_pred, axis=1)
y_pred = np.argmax(p_pred, axis=1)
# for each bin, compute accuracy and confidence
acc = []
conf = []
ECE = 0.0 # expected calibration error (ECE)
RMSE = 0.0 # RMS calibration error (RMSE)
OE = 0.0 # overconfidence error (OE)
for m in range(nbins):
p_low, p_high = p_range[m], p_range[m+1]
idx_m = np.where((p_max>p_low) & (p_max<=p_high))[0]
card_Bm = len(idx_m)
if card_Bm>0:
conf_m = np.mean(p_max[idx_m])
acc_m = np.sum(y_true[idx_m]==y_pred[idx_m])/card_Bm
acc.append(acc_m)
conf.append(conf_m)
ECE += card_Bm/n*np.abs(acc_m-conf_m)
RMSE += card_Bm/n*((acc_m-conf_m)**2)
OE += card_Bm/n*conf_m*np.max([conf_m-acc_m,0])
conf = np.array(conf).reshape((len(conf),1))
acc = np.array(acc).reshape((len(acc),1))
RMSE = np.sqrt(RMSE)
return ECE, RMSE, OE, acc, conf
def aggregate_cal_results(dataset, arch, train_alg):
import pandas as pd
# Load results
ece1, rmse1, oe1, acc1, conf1, ece_cor1, rmse_cor1, oe_cor1, acc_cor1, conf_cor1 = np.load('./results/cal_'+arch+'_'+dataset+'_'+train_alg+'_run1.npy', allow_pickle=True)
ece2, rmse2, oe2, acc2, conf2, ece_cor2, rmse_cor2, oe_cor2, acc_cor2, conf_cor2 = np.load('./results/cal_'+arch+'_'+dataset+'_'+train_alg+'_run2.npy', allow_pickle=True)
ece3, rmse3, oe3, acc3, conf3, ece_cor3, rmse_cor3, oe_cor3, acc_cor3, conf_cor3 = np.load('./results/cal_'+arch+'_'+dataset+'_'+train_alg+'_run3.npy', allow_pickle=True)
# Average RMSE calibration errors
mean_clean_rmse, std_clean_rmse = np.mean([rmse1,rmse2,rmse3]), np.std([rmse1,rmse2,rmse3])
mean_corr_rmse, std_corr_rmse = np.mean([rmse_cor1,rmse_cor2,rmse_cor3]), np.std([rmse_cor1,rmse_cor2,rmse_cor3])
# [Metric] [Mean] [Std]
metrics = ['Clean RMSE', 'Corrupt RMSE']
mean_array = [mean_clean_rmse, mean_corr_rmse]
std_array = [std_clean_rmse, std_corr_rmse]
data = {
'Metric':metrics,
'Mean':list(100*np.array(mean_array)),
'Std':list(100*np.array(std_array))
}
results_df = | pd.DataFrame(data) | pandas.DataFrame |
import argparse
import plotly
import os
import numpy as np
import sys
import pandas as pd
import pickle
import time
import pyodbc
from pathlib import Path
from typing import List
import matplotlib.pyplot as plt
"""
Notes:
URM : under-represented minority = American Indian or Alaska Native total + Black or African American total +
Hispanic or Latino total + Native Hawaiian or Other Pacific Islander total
URM does not include Asian total or Race/ethnicity unknown total
degree_level_codes: { 1: less than 1 academic year, 2: at least 1 but less than 4 academic years,
3: Associate's degree, 5: Bachelor's degree, 7: Master's degree, 9: Doctor's degree,
10: Postbaccalaureate or Post-master's certificate }
"""
CIP_CODES_OF_INTEREST = {
'13.1311': 'Mathematics Teacher Education',
'27.0101': 'Mathematics, General',
'27.0199': 'Mathematics, Other',
'27.0301': 'Applied Mathematics, General',
'27.0303': 'Computational Mathematics',
'27.0304': 'Computational And Applied Mathematics',
'27.0305': 'Financial Mathematics',
'27.0306': 'Mathematical Biology',
'27.0399': 'Applied Mathematics, Other',
'27.0502': 'Mathematical Statistics And Probability',
'27.0503': 'Mathematics And Statistics',
'27.9999': 'Mathematics And Statistics, Other',
'30.0801': 'Mathematics And Computer Science',
'40.0810': 'Theoretical And Mathematical Physics'
}
CIP_ED = {'13.1311': 'Mathematics Teacher Education'}
CIP_GEN = {'27.0101': 'Mathematics, General'}
CIP_OTHER = {'27.0199': 'Mathematics, Other'}
CIP_APP_MATH = {'27.0301': 'Applied Mathematics, General',
'27.0303': 'Computational Mathematics',
'27.0304': 'Computational And Applied Mathematics',
'27.0305': 'Financial Mathematics',
'27.0306': 'Mathematical Biology',
'27.0399': 'Applied Mathematics, Other'}
CIP_MATH_STAT = {'27.0502': 'Mathematical Statistics And Probability',
'27.0503': 'Mathematics And Statistics',
'27.9999': 'Mathematics And Statistics, Other'}
CIP_MATH_CS = {'30.0801': 'Mathematics And Computer Science',}
CIP_MATH_PHYS = {'40.0810': 'Theoretical And Mathematical Physics'}
# Bureau of Economic Analysis (BEA) regions CODES
OBEREG = {
0: 'U.S. Service schools',
1: 'New England (CT, ME, MA, NH, RI, VT)',
2: 'Mid East (DE, DC, MD, NJ, NY, PA)',
3: 'Great Lakes (IL, IN, MI, OH, WI)',
4: 'Plains (IA, KS, MN, MO, NE, ND, SD)',
5: 'Southeast (AL, AR, FL, GA, KY, LA, MS, NC, SC, TN, VA, WV)',
6: 'Southwest (AZ, NM, OK, TX)',
7: 'Rocky Mountains (CO, ID, MT, UT, WY)',
8: 'Far West (AK, CA, HI, NV, OR, WA)',
9: 'Other U.S. jurisdictions (AS, FM, GU, MH, MP, PR, PW, VI)'
}
def parse_args(args: List[str]) -> argparse.Namespace:
# Argument Parser
parser = argparse.ArgumentParser()
parser.add_argument('--gender', default='all', type=str, help='What genders to include', choices=['all', 'male', 'female'])
parser.add_argument('--degree', default='all', nargs='+', type=str, help='Degrees to include')
parser.add_argument('--start_year', default='2019', type=int, help="First year in analysis range")
parser.add_argument('--end_year', default='2019', type=int, help="Last year in analysis range")
parser.add_argument('--min_awards_granted', default=0, type=int, help='Minimum number of degrees granted by institution per year')
parser.add_argument('--citizenship', default='US', type=str, help="Citizenships to consider", choices=['US', 'non-resident', 'all'])
parser.add_argument('--degree_cip', default='all', type=str, help='Concentration-type of degree granted')
parser.add_argument('--university_type', default='all', type=str, help='University type', choices = ['all','public','private not-for-profit', 'private for-profit'])
parser.add_argument('--uni_degree_type', default='any', type=str, help='Highest degree granted by university', choices=['PhD', 'MS', 'BS', 'A', 'any'])
parser.add_argument('--geo_region_max', default='all', type=str, help='Largest geographic region of the US', choices=['all', 'states', 'contiguous_48'])
parser.add_argument('--bea_region', default='all', nargs='+',
type=str, help='BEA-designated regions of the 50 states ',
choices=['all', 'New England', 'Mid East', 'Great Lakes', 'Plains',
'Southeast', 'Southwest','Rocky Mountains', 'Far West', 'Other U.S. jurisdictions'])
parser.add_argument('--state', default='', type=str, nargs='+', help='Individual state(s) to investigate')
return parser.parse_args(args[1:])
#Line plot. Probably not using at all as there is a plotly specific to react
def visualize_line_time_series(data_frame, y_values, y_label, time_label):
#line plot visualization
data_tuples = list(zip(data_frame[time_label],y_values))
frame = | pd.DataFrame(data_tuples, columns=['Date',y_label]) | pandas.DataFrame |
from lib import constants
from dataclasses import dataclass
from enum import Enum
from typing import List
from datetime import datetime
import json
import time
import json
import pandas as pd
import numpy as np
from scipy.signal import find_peaks
from scipy.special import expit
class AnomalyType(str, Enum):
ANTENNA_DEGRADATION = "ANTENNA_DEGRADATION"
INTERFERENCE = "INTERFERENCE"
@dataclass
class Anomaly:
lat1: float
lon1: float
lat2: float
lon2: float
type: AnomalyType
severeness: float
distanceOnTrack: int
peak: bool
detectedOn: datetime = datetime.now()
class DataAnalyzer:
@classmethod
def analyzeData(cls) -> None:
"""This function shall analyze the events file and write the found
anomalies to the anomalies file
"""
print("Analysis in progress")
nTimeChunks = 10
distThresh = 30
with open('dataInspection/mean_variance.json', "r") as f:
mean_var = json.load(f)
df = cls._mergeData(veloPath=constants.VELOCITY_FILE, rssiPath=constants.RSSI_FILE, disrPath=constants.DISRUPTIONS_FILE)
df['telDiff'] = df.deltaValidTel - mean_var['tel_mean']
df['rssiDiff'] = df.RSSI - mean_var['RSSI_mean']
meanGroupVals = df.loc[:,['telDiff', 'rssiDiff','TimeChunk', 'posChunk']].groupby(['TimeChunk', 'posChunk']).mean()
meanGroupVals['telDiff'] = meanGroupVals['telDiff'] / np.sqrt(mean_var['tel_var'])
meanGroupVals['rssiDiff'] = meanGroupVals['rssiDiff'] / np.sqrt(mean_var['RSSI_var'])
timeChunks = pd.Series(meanGroupVals.index.get_level_values(0))
timeChunks = timeChunks- timeChunks.max() + nTimeChunks
timeChunks.loc[timeChunks < 0] = 0
meanGroupVals['telDiff'] = (meanGroupVals.telDiff * timeChunks.values).abs()
meanGroupVals['rssiDiff'] = (meanGroupVals.rssiDiff * timeChunks.values).abs()
meanGroupVals = meanGroupVals.groupby(level=1).mean()/nTimeChunks
meanGroupVals.loc[0:1] = 0
# find peaks of anomalies:
telPeaks = find_peaks(meanGroupVals.telDiff.values, height=1, distance=distThresh)
rssiPeaks = find_peaks(meanGroupVals.rssiDiff.values, height=1, distance=distThresh)
# check if telPeak is actually also a rssi peak
peakDuplicate = []
for tp in telPeaks[0]:
if (np.abs(rssiPeaks[0]-tp) < (distThresh/distThresh)).any():
peakDuplicate.append(tp)
meanGroupVals['telDiff'] = expit(meanGroupVals.telDiff)*2 -1
meanGroupVals['rssiDiff'] = expit(meanGroupVals.rssiDiff)*2 -1
anomalies: List[Anomaly] = []
for idx, row in meanGroupVals.iterrows():
dfC = df.loc[(df.posChunk == idx)]
an = Anomaly(
lat1 = dfC.loc[dfC.Position == dfC.Position.min(), 'Latitude'].values[0],
lon1 = dfC.loc[dfC.Position == dfC.Position.min(), 'Longitude'].values[0],
lat2 = dfC.loc[dfC.Position == dfC.Position.max(), 'Latitude'].values[0],
lon2 = dfC.loc[dfC.Position == dfC.Position.max(), 'Longitude'].values[0],
type = AnomalyType.ANTENNA_DEGRADATION,
severeness = np.sum(row.values)/2,
distanceOnTrack = int(dfC.Position.mean()),
peak=False,
)
if (idx in telPeaks[0]) and (idx not in peakDuplicate):
an.type = AnomalyType.INTERFERENCE
an.peak = True
if idx in rssiPeaks[0]:
an.peak = True
anomalies.append(an)
print("Analysis finished")
cls._writeAnomalyFile(anomalies)
@staticmethod
def _writeAnomalyFile(anomalies: List[Anomaly]) -> None:
with constants.ANOMALIES_FILE.open("w") as f:
## Ugly hack to serialize dataclass
f.write(json.dumps({
"anomalies": [{
"lat1": anomaly.lat1,
"lon1": anomaly.lon1,
"lat2": anomaly.lat2,
"lon2": anomaly.lon2,
"type": anomaly.type,
"peak": anomaly.peak,
"severeness": anomaly.severeness,
"distanceOnTrack": anomaly.distanceOnTrack,
"detectedOn": anomaly.detectedOn.isoformat(),
} for anomaly in anomalies]
}))
@staticmethod
def _mergeData(rssiPath, veloPath, disrPath):
rssi = pd.read_csv(rssiPath)
rssi = rssi.loc[:, ['DateTime', 'PositionNoLeap','Latitude', 'Longitude',
'A1_ValidTel', 'A2_ValidTel', 'A2_RSSI']]
rssi.rename(columns={'PositionNoLeap':'Position'}, inplace=True)
# deltas
rssi['RSSI'] = rssi.A2_RSSI
rssi['deltaValidTel'] = (rssi.A1_ValidTel + rssi.A2_ValidTel).diff()
rssi.loc[0, 'deltaValidTel'] = 0
rssi.loc[rssi.deltaValidTel > 11, 'deltaValidTel'] = 5
rssi.loc[rssi.deltaValidTel < 0, 'deltaValidTel'] = 0
rssi.drop(['A2_RSSI', 'A1_ValidTel', 'A2_ValidTel'],
axis='columns',
inplace=True)
# import velocities
velo = | pd.read_csv(veloPath) | pandas.read_csv |
import os
import pandas as pd
from beholder.utils import (
BLogger,
)
LOG = BLogger()
def enqueue_rpu_calculation(
rpu_input_fp: str,
autofluorescence_input_fp: str,
):
rpu_segmentation_output_top_level = os.path.join(rpu_input_fp, 'segmentation_output')
af_segmentation_output_top_level = os.path.join(autofluorescence_input_fp, 'autofluorescence_correlation_value.csv')
top_level_dirs = os.listdir(rpu_segmentation_output_top_level)
top_level_dirs = list(filter(lambda x: len(x) < 10, top_level_dirs))
top_level_dirs = sorted(top_level_dirs, key=lambda x: int(x))
df = pd.DataFrame()
correction_df = | pd.read_csv(af_segmentation_output_top_level) | pandas.read_csv |
# run with python3
# General data manipulation packages
import numpy as np
import pandas as pd
import sqlite3 as sql
# Useful function to split data sets
from sklearn.model_selection import train_test_split
# Random Forest class:
from sklearn.ensemble import RandomForestClassifier
# For pickling results
import pickle
def reformat_as_dataframe(db_filename, labels=True, table="features"):
"""Concatenate features tables from many results.db files."""
con = None
con = sql.connect(db_filename)
cur = con.cursor()
# Gather combined features
cur.execute("SELECT * FROM %s" % table)
rows = cur.fetchall()
# Save in a different format
np_features = np.array([[hash(r[1]), hash(r[2])] + list(r[3:14]) for r in rows])
df_categories = | pd.DataFrame(
np_features[:,0:2], columns=['resname_hash', 'short_name_hash'], dtype=int) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2016-11-16 16:23:55
# @Last Modified by: <NAME>
# @Last Modified time: 2017-01-18 14:30:20
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import glob
import os
| pd.set_option("display.width", None) | pandas.set_option |
import xlsxwriter, io, time
from django.http import HttpResponse, request, response, HttpResponseRedirect
from django.template import RequestContext, context
from django.shortcuts import redirect, render
from .forms import TFCCForm
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import colors
from openpyxl import load_workbook
from .models import tfcc
def OnlyTFCC(request):
page_title = 'TFCC'
print('Počela je TFCC funkcija...')
tfcc_count = tfcc.objects.all().count()
if request.method == "POST":
form = TFCCForm(request.POST, request.FILES)
if form.is_valid():
p_start = time.time()
output = io.BytesIO()
newdoc = request.FILES['file']
wb = load_workbook(newdoc)
if 'Instructions_READ_FIRST' in wb.sheetnames:
wb.remove(wb['Instructions_READ_FIRST'])
print ('Obrisan je sheet "Instructions_READ_FIRST" ukoliko je postojao!')
dfs = pd.read_excel(wb, sheet_name=None, index_col=[0], engine='openpyxl')
writer = | pd.ExcelWriter(output) | pandas.ExcelWriter |
import sys
sys.path.append('.')
import pandas as pd
from trading_tool.db import create_connection, select_query
from trading_tool.binance import get_symbols
from trading_tool.client import CLIENT
def main():
conn = create_connection("trading_tool.db")
bi_symbols = get_symbols(CLIENT)
db_symbols = select_query(conn, table_name="symbols")["symbol"].values.tolist()
symbols_to_load = list(set(s["symbol"] for s in bi_symbols) - set(db_symbols))
df_assets = select_query(conn, table_name="assets")
df_symbols = | pd.DataFrame(bi_symbols) | pandas.DataFrame |
import logging
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
import sentry_sdk
from solarforecastarbiter import utils
def _make_aggobs(obsid, ef=pd.Timestamp('20191001T1100Z'),
eu=None, oda=None):
return {
'observation_id': obsid,
'effective_from': ef,
'effective_until': eu,
'observation_deleted_at': oda
}
nindex = pd.date_range(start='20191004T0000Z',
freq='1h', periods=10)
@pytest.fixture()
def ids():
return ['f2844284-ea0a-11e9-a7da-f4939feddd82',
'f3e310ba-ea0a-11e9-a7da-f4939feddd82',
'09ed7cf6-ea0b-11e9-a7da-f4939feddd82',
'0fe9f2ba-ea0b-11e9-a7da-f4939feddd82',
'67ea9200-ea0e-11e9-832b-f4939feddd82']
@pytest.fixture()
def aggobs(ids):
return tuple([
_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191004T0501Z')),
_make_aggobs(ids[2], eu=pd.Timestamp('20191004T0400Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0700Z'),
eu=pd.Timestamp('20191004T0800Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0801Z')),
_make_aggobs(ids[3], oda=pd.Timestamp('20191005T0000Z')),
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191003T0000Z'))
])
def test_compute_aggregate(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_missing_from_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
aggobs = list(aggobs[:-2]) + [
_make_aggobs('09ed7cf6-ea0b-11e9-a7da-f4939fed889')]
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_empty_data(aggobs, ids):
data = {}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:2], nindex)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_missing_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
data[ids[-1]] = pd.DataFrame({'value': [1] * 8, 'quality_flag': [0] * 8},
index=nindex[:-2])
aggobs = list(aggobs[:-2]) + [_make_aggobs(ids[-1])]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series(
[3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, None, None],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_deleted_not_removed(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids}
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_not_removed_yet(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# with last aggobs, would try and get data before effective_until,
# but was deleted, so raise error
aggobs = list(aggobs[:-2]) + [
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191004T0700Z'))]
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_but_removed_before(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# aggobs[-1] properly removed
aggobs = list(aggobs[:-2]) + [aggobs[-1]]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}))
def test_compute_aggregate_mean(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'mean', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([1.0] * 10, index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_no_overlap(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3], 'quality_flag': [2, 10, 338]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0230Z'])),
ids[1]: pd.DataFrame(
{'value': [3, 2, 1], 'quality_flag': [9, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0200Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'median', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, None, 2.5, None],
'quality_flag': [2, 10, 9, 338 | 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_before_effective(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3, 0, 0], 'quality_flag': [2, 10, 338, 0, 0]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z'])),
ids[1]: pd.DataFrame(
{'value': [None, 2.0, 1.0], 'quality_flag': [0, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0201Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'max', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, 3.0, 2.0, 1.0],
'quality_flag': [2, 10, 338, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_bad_cols():
data = {'a': pd.DataFrame([0], index=pd.DatetimeIndex(
['20191001T1200Z']))}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC',
'mean', [_make_aggobs('a')])
def test_compute_aggregate_index_provided(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
the_index = nindex.copy()[::2]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], the_index)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 3.0],
index=the_index),
'quality_flag': pd.Series([0]*5, index=the_index)})
)
@pytest.mark.parametrize('dfindex,missing_idx', [
(pd.date_range(start='20191004T0000Z', freq='1h', periods=11), -1),
(pd.date_range(start='20191003T2300Z', freq='1h', periods=11), 0),
])
def test_compute_aggregate_missing_values_with_index(
aggobs, ids, dfindex, missing_idx):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], dfindex)
assert pd.isnull(agg['value'][missing_idx])
def test_compute_aggregate_partial_missing_values_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
data[ids[2]] = pd.DataFrame({'value': [1] * 5, 'quality_flag': [0] * 5},
index=nindex[5:])
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], nindex)
expected = pd.DataFrame(
{'value': pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}
)
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_obs_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
aggobs[:-2], nindex)
def test_compute_aggregate_out_of_effective(aggobs, ids):
limited_aggobs = [aggob
for aggob in aggobs
if aggob['effective_until'] is not None]
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
max_time = pd.Series([o['effective_until'] for o in limited_aggobs]).max()
ooe_index = pd.date_range(
max_time + pd.Timedelta('1H'),
max_time + pd.Timedelta('25H'),
freq='60min'
)
with pytest.raises(ValueError) as e:
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
limited_aggobs, ooe_index)
assert str(e.value) == 'No effective observations in data'
def test__observation_valid(aggobs):
out = utils._observation_valid(
nindex, 'f2844284-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(True, index=nindex))
def test__observation_valid_ended(aggobs):
out = utils._observation_valid(
nindex, 'f3e310ba-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series([False] * 6 + [True] * 4,
index=nindex))
def test__observation_valid_many(aggobs):
out = utils._observation_valid(
nindex, '09ed7cf6-ea0b-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(
[True, True, True, True, True, False, False, True, True, True],
index=nindex))
def test__observation_valid_deleted(aggobs):
with pytest.raises(ValueError):
utils._observation_valid(
nindex, '0fe9f2ba-ea0b-11e9-a7da-f4939feddd82', aggobs)
def test__observation_valid_deleted_before(aggobs):
out = utils._observation_valid(
nindex, '67ea9200-ea0e-11e9-832b-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(False, index=nindex))
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0745Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z',
'20191004T0800Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
])
def test__make_aggregate_index(length, label, expected):
test_data = {
0: pd.DataFrame(range(5), index=pd.date_range(
'20191004T0700Z', freq='7min', periods=5)), # end 35
1: pd.DataFrame(range(4), index=pd.date_range(
'20191004T0015-0700', freq='10min', periods=4))} # end 45
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0715Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z'])),
])
def test__make_aggregate_index_offset_right(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0701Z', freq='7min', periods=6)) # end 35
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0645Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0600Z',
'20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0640Z', '20191004T0700Z', '20191004T0720Z'])),
('36min', 'ending', pd.DatetimeIndex(['20191004T0712Z',
'20191004T0748Z'])),
('36min', 'beginning', pd.DatetimeIndex(['20191004T0636Z',
'20191004T0712Z'])),
])
def test__make_aggregate_index_offset_left(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0658Z', freq='7min', periods=6)) # end 32
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_tz():
length = '30min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T1600Z', freq='5min', periods=6)) # end 30
}
expected = pd.DatetimeIndex(['20190101T0900'],
tz='America/Denver')
out = utils._make_aggregate_index(test_data, length, label,
'America/Denver')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_invalid_length():
length = '33min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0158Z', freq='7min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
def test__make_aggregate_index_instant():
length = '30min'
label = 'instant'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0100Z', freq='10min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('start,end', [
(pd.Timestamp('20190101T0000Z'), pd.Timestamp('20190102T0000')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000Z')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000')),
])
def test__make_aggregate_index_localization(start, end):
length = '30min'
label = 'ending'
test_data = {
0: pd.DataFrame(range(1), index=pd.DatetimeIndex([start])),
1: pd.DataFrame(range(1), index=pd.DatetimeIndex([end])),
}
with pytest.raises(TypeError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('inp,oup', [
(pd.DataFrame(dtype=float), pd.Series(dtype=float)),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float),
pd.DataFrame(dtype=float)),
(pd.Series([0, 1]), pd.Series([0, 1])),
(pd.DataFrame([[0, 1], [1, 2]]), pd.DataFrame([[0, 1], [1, 2]])),
pytest.param(
pd.Series([0, 1]),
pd.Series([0, 1], index=pd.date_range(start='now', freq='1min',
periods=2)),
marks=pytest.mark.xfail(type=AssertionError, strict=True)),
pytest.param(
pd.Series([0, 1]),
pd.Series([1, 0]),
marks=pytest.mark.xfail(type=AssertionError, strict=True))
])
def test_sha256_pandas_object_hash(inp, oup):
assert utils.sha256_pandas_object_hash(inp) == utils.sha256_pandas_object_hash(oup) # NOQA
def test_listhandler():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
def test_listhandler_recreate():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
l2 = logging.getLogger('testlist2')
h2 = utils.ListHandler()
l2.addHandler(h2)
l2.error('Second fail')
out = h2.export_records()
assert len(out) == 1
assert out[0].message == 'Second fail'
def test_hijack_loggers(mocker):
old_handler = mocker.MagicMock()
new_handler = mocker.MagicMock()
mocker.patch('solarforecastarbiter.utils.ListHandler',
return_value=new_handler)
logger = logging.getLogger('testhijack')
logger.addHandler(old_handler)
assert logger.handlers[0] == old_handler
with utils.hijack_loggers(['testhijack']):
assert logger.handlers[0] == new_handler
assert logger.handlers[0] == old_handler
def test_hijack_loggers_sentry(mocker):
events = set()
def before_send(event, hint):
events.add(event['logger'])
return
sentry_sdk.init(
"https://[email protected]/0",
before_send=before_send)
logger = logging.getLogger('testlog')
with utils.hijack_loggers(['testlog']):
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' not in events
events = set()
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' in events
@pytest.mark.parametrize('data,freq,expected', [
(pd.Series(index=pd.DatetimeIndex([]), dtype=float), '5min',
[pd.Series(index=pd.DatetimeIndex([]), dtype=float)]),
(pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z'])),
'5min',
[pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z']))]),
(pd.Series(
[1.0, 2.0, 3.0],
index=pd.date_range('2020-01-01T00:00Z', freq='1h', periods=3)),
'1h',
[pd.Series(
[1.0, 2.0, 3.0],
index=pd.date_range('2020-01-01T00:00Z', freq='1h', periods=3))]),
(pd.Series(
[1.0, 2.0, 4.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.Series(
[1.0, 2.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.Series(
[4.0],
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.Series(
[1.0, 3.0, 5.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T03:00Z',
'2020-01-01T05:00Z'])),
'1h',
[pd.Series(
[1.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z'])),
pd.Series(
[3.0],
index=pd.DatetimeIndex(['2020-01-01T03:00Z'])),
pd.Series(
[5.0],
index=pd.DatetimeIndex(['2020-01-01T05:00Z'])),
]),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float), '1h',
[pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float)]),
(pd.DataFrame(
{'a': [1.0, 2.0, 4.0], 'b': [11.0, 12.0, 14.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.DataFrame(
{'a': [1.0, 2.0], 'b': [11.0, 12.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.DataFrame(
{'a': [4.0], 'b': [14.0]},
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.DataFrame(
{'_cid': [1.0, 2.0, 4.0], '_cid0': [11.0, 12.0, 14.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.DataFrame(
{'_cid': [1.0, 2.0], '_cid0': [11.0, 12.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.DataFrame(
{'_cid': [4.0], '_cid0': [14.0]},
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.DataFrame(
[[0.0, 1.0], [2.0, 3.0]],
columns=pd.MultiIndex.from_product([[0], ['a', 'b']]),
index= | pd.DatetimeIndex(['2020-01-01T00:00Z', '2020-01-02T00:00Z']) | pandas.DatetimeIndex |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: 东方财富网-行情首页-沪深京 A 股
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
东方财富网-沪深京 A 股-实时行情
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: 实时行情
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_ | numeric(temp_df["成交额"], errors="coerce") | pandas.to_numeric |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test utilities.
"""
from matched_markets.methodology import common_classes
from matched_markets.methodology import utils
import altair as alt
import numpy as np
import pandas as pd
import unittest
TimeWindow = common_classes.TimeWindow
class UtilsTest(unittest.TestCase):
def testRandomizeStrata(self):
"""Check that randomize_strata() works."""
# Mappings are possible even when number of items is 1.
self.assertEqual(utils.randomize_strata(1, [1]), [1])
self.assertLess(set(utils.randomize_strata(1, [1, 2])), {1, 2})
# Mappings are possible even when number of items <= number of groups.
self.assertEqual(utils.randomize_strata(2, [1]), [1, 1])
self.assertEqual(utils.randomize_strata(3, [1]), [1, 1, 1])
# Check that the mapping contains the expected group ids.
self.assertCountEqual(utils.randomize_strata(2, [1, 2]), [1, 2])
self.assertCountEqual(utils.randomize_strata(4, [1, 2]), [1, 2] * 2)
self.assertCountEqual(utils.randomize_strata(30, [1, 2, 3]), [1, 2, 3] * 10)
# Mappings are possible also when the number of items is not a multiple of
# groups.
groups = utils.randomize_strata(4, [1, 2, 3])
self.assertTrue(len(groups) == 4) # pylint: disable=g-generic-assert
self.assertEqual(set(groups), set([1, 2, 3]))
# String-valued group ids are possible.
self.assertCountEqual(utils.randomize_strata(30, ['a', 'b', 'c']),
['a', 'b', 'c'] * 10)
def testBrownianBridgeBounds(self):
"""Check that brownian_bridge_bounds() are calculated correctly."""
with self.assertRaisesRegex(ValueError, 'n must be >= 1'):
utils.brownian_bridge_bounds(0, 1)
with self.assertRaisesRegex(ValueError, 'sd_bound_multiplier must be > 0'):
utils.brownian_bridge_bounds(1, 0)
with self.assertRaisesRegex(ValueError, 'sd_bound_multiplier must be > 0'):
utils.brownian_bridge_bounds(1, -1)
# Unit standard deviation.
self.assertEqual(utils.brownian_bridge_bounds(1, 1), [0.0])
self.assertEqual(utils.brownian_bridge_bounds(2, 1), [np.sqrt(0.5), 0.0])
expected_one = utils.brownian_bridge_bounds(3, 1)
self.assertAlmostEqual(expected_one[0], np.sqrt(2.0 / 3.0))
self.assertAlmostEqual(expected_one[1], np.sqrt(2.0 / 3.0))
self.assertAlmostEqual(expected_one[2], 0)
# S.d. not equal to 1.
self.assertEqual(utils.brownian_bridge_bounds(2, 2), [np.sqrt(2.0), 0.0])
expected_two = utils.brownian_bridge_bounds(3, np.sqrt(3))
self.assertAlmostEqual(expected_two[0], np.sqrt(2))
self.assertAlmostEqual(expected_two[1], np.sqrt(2))
self.assertAlmostEqual(expected_two[2], 0)
def testCredibleIntervalWholeNumbers(self):
simulations = np.arange(1, 101)
level = 0.9
expected = np.array([5.0, 50.0, 95.0])
obtained = utils.credible_interval(simulations, level)
np.testing.assert_array_almost_equal(expected, obtained)
def testCredibleIntervalInterpolation(self):
simulations = np.arange(1, 101)
level = 0.88
expected = np.array([6.0, 50.0, 94.0])
obtained = utils.credible_interval(simulations, level)
np.testing.assert_array_almost_equal(expected, obtained)
def testCredibleIntervalRaisesOnLargeLevel(self):
simulations = np.arange(1, 101)
level = 0.999
with self.assertRaises(ValueError):
utils.credible_interval(simulations, level)
def testFindDaysToExclude(self):
day_week_exclude = [
'2020/10/10', '2020/11/10-2020/12/10', '2020/08/10']
days_to_remove = utils.find_days_to_exclude(day_week_exclude)
expected_days = [
TimeWindow(pd.Timestamp('2020-10-10'), pd.Timestamp('2020-10-10')),
TimeWindow(pd.Timestamp('2020-11-10'), pd.Timestamp('2020-12-10')),
TimeWindow(pd.Timestamp('2020-08-10'), pd.Timestamp('2020-08-10')),
]
for x in range(len(expected_days)):
self.assertEqual(days_to_remove[x].first_day, expected_days[x].first_day)
self.assertEqual(days_to_remove[x].last_day, expected_days[x].last_day)
def testWrongDateFormat(self):
incorrect_day = ['2020/13/13', '2020/03/03']
with self.assertRaises(ValueError):
utils.find_days_to_exclude(incorrect_day)
incorrect_time_window = ['2020/10/13 - 2020/13/11', '2020/03/03']
with self.assertRaises(ValueError):
utils.find_days_to_exclude(incorrect_time_window)
incorrect_format = ['2020/10/13 - 2020/13/11 . 2020/10/10']
with self.assertRaises(ValueError):
utils.find_days_to_exclude(incorrect_format)
def testExpandTimeWindows(self):
day_week_exclude = [
'2020/10/10', '2020/11/10-2020/12/10', '2020/08/10']
days_to_remove = utils.find_days_to_exclude(day_week_exclude)
periods = utils.expand_time_windows(days_to_remove)
expected = [
pd.Timestamp('2020-10-10', freq='D'),
pd.Timestamp('2020-08-10', freq='D'),
]
expected += pd.date_range(start='2020-11-10', end='2020-12-10', freq='D')
self.assertEqual(len(periods), len(expected))
for x in periods:
self.assertIn(x, expected)
def testHumanReadableFormat(self):
numbers = [123, 10765, 13987482, 8927462746, 1020000000000]
numb_formatted = [
utils.human_readable_number(num) for num in numbers
]
self.assertEqual(numb_formatted, ['123', '10.8K', '14M', '8.93B', '1.02tn'])
def testDefaultGeoAssignment(self):
geo_level_time_series = pd.DataFrame({
'geo': [1, 2, 3, 4],
'response': [1.1, 2.2, 3.3, 4.4]
})
geo_eligibility = pd.DataFrame({
'geo': [1, 3],
'control': [1, 0],
'treatment': [0, 1],
'exclude': [0, 0]
})
updated_eligibility = utils.default_geo_assignment(geo_level_time_series,
geo_eligibility)
self.assertTrue(
updated_eligibility.equals(
pd.DataFrame({
'geo': [1, 2, 3, 4],
'control': [1, 1, 0, 1],
'treatment': [0, 1, 1, 1],
'exclude': [0, 1, 0, 1]
})))
def testPlotIroasOverTime(self):
iroas_df = pd.DataFrame({
'date': [
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
],
'lower': [0, 0.5, 1, 1.5, 2],
'mean': [1, 1.5, 2, 2.5, 3],
'upper': [2, 2.5, 3, 3.5, 4]
})
experiment_dates = pd.DataFrame({
'date': ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04'],
'color': [
'Pretest period', 'Pretest period', 'Experiment period',
'Experiment period'
]
})
cooldown_date = pd.DataFrame({
'date': ['2020-01-05'],
'color': ['End of cooldown period']
})
iroas_chart = utils.plot_iroas_over_time(iroas_df, experiment_dates,
cooldown_date)
self.assertIsInstance(iroas_chart, alt.LayerChart)
def testFindFrequency(self):
dates = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='D'))
geos = [1, 2, 3, 4]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
frequency = utils.infer_frequency(df, 'date', 'geo')
self.assertEqual(frequency, 'D')
weeks = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='W'))
df = pd.DataFrame({
'date': weeks * len(geos),
'geo': sorted(geos * len(weeks))
})
frequency = utils.infer_frequency(df, 'date', 'geo')
self.assertEqual(frequency, 'W')
def testDifferentFrequencies(self):
dates = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='D'))
weeks = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='W'))
geos = [1] * len(dates) + [2] * len(weeks)
df = pd.DataFrame({
'date': dates + weeks,
'geo': geos
})
with self.assertRaises(ValueError) as cm:
utils.infer_frequency(df, 'date', 'geo')
self.assertEqual(
str(cm.exception),
'The provided time series seem to have irregular frequencies.')
def testFindFrequencyDataNotSorted(self):
dates = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='D'))
geos = [1, 2, 3, 4]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
# permute the order of the rows, so that the dataset is not sorted by date
df = df.sample(frac=1, replace=False)
frequency = utils.infer_frequency(df, 'date', 'geo')
self.assertEqual(frequency, 'D')
def testInsufficientData(self):
dates = list( | pd.date_range(start='2020-01-01', end='2020-01-01', freq='D') | pandas.date_range |
from world_viewer.world import World
import pandas as pd
import numpy as np
import warnings
# from sensible_raw.loaders import loader
import json
from math import ceil
import os
os.environ['R_HOME'] = '/home/<EMAIL>/master/lib/R'
class CNSWorld(World):
PICKLE_PATH = './pickle/' # path for cached data
RELATION_NET_PICKLE = 'CNS_relation_net'
OPINIONS_PICKLE = 'CNS_opinions'
LIKE_MINDEDNESS_PICKLE = 'CNS_like_mindedness'
CNS_TIME_BEGIN = pd.Timestamp(pd.datetime(2013, 9, 2)) # first timestamp
CNS_TIME_END = pd.Timestamp(pd.datetime(2014, 12, 31)) # last timestamp
sigma = pd.to_timedelta(3, unit='d').total_seconds()
two_sigma_sqr = 2* sigma * sigma
def __init__(self, path='', start=pd.datetime(2013, 9, 2), end=pd.datetime(2014, 12, 31)):
super().__init__()
self.path = path
self.CNS_TIME_BEGIN = start
self.CNS_TIME_END = end
def load_world(self, opinions = ['smoking'], relation_agg = 2, read_cached = False, stop=False, write_pickle = True, continous_op = False):
self.name = "CNS" + '-'.join(opinions)
self.type = "CNS"
if continous_op:
warnings.warn("No comparison of continous opinions implementet yet!")
pickle_relation_net_filename = self.RELATION_NET_PICKLE \
+ "_" + str(relation_agg) \
+ ".pkl"
pickle_opinions_filename = self.OPINIONS_PICKLE \
+ "_" + '-'.join(opinions) \
+ ".pkl"
pickle_like_mindedness_filename = self.LIKE_MINDEDNESS_PICKLE \
+ "_" + '-'.join(opinions) \
+ ".pkl"
## 0. Load time
#time = pd.DataFrame(pd.date_range(self.CNS_TIME_BEGIN, self.CNS_TIME_END, freq='W-MON'),columns=['time'])
time = pd.DataFrame(pd.date_range(self.CNS_TIME_BEGIN, self.CNS_TIME_END, freq='d'),columns=['time'])
self.time = time
## 1. Load opinions
if read_cached:
opinions_cached = False
try:
op_nodes = pd.read_pickle(self.PICKLE_PATH + pickle_opinions_filename)
opinions_cached = True
except FileNotFoundError:
warnings.warn("No cached opinions found, read opinions from file.")
opinions_cached = False
if not (read_cached and opinions_cached):
op_nodes = pd.DataFrame() # general opinion dataframe
if len(list(set(opinions) & set(["smoking","physical"]))) > 0:
op_data = pd.DataFrame() # df for loaded data
# load data
for survey in np.arange(1,4):
print('Load survey ' + str(survey))
data_s = loader.load_data("questionnaires", "survey_"+str(survey), as_dataframe=True)
data_s = data_s[data_s.user < 1000] #clean strange users
op_time = self._get_op_time(survey)
data_s = data_s.set_index('user').join(op_time)
data_s = data_s[data_s.time.astype('int') > 10]
data_s[data_s.time < self.CNS_TIME_BEGIN] = self.CNS_TIME_BEGIN
data_s[data_s.time > self.CNS_TIME_END] = self.CNS_TIME_END
data_s['survey'] = survey
data_s.reset_index(inplace=True)
op_data = pd.concat([op_data,data_s],sort=False)
#possibilitie that users filled out more than one questionaires in one week
op_data.drop_duplicates(['user','time','variable_name'], keep='last', inplace=True)
# process opinions
for opinion in opinions:
# load smoking opinions
if opinion == "smoking":
print("Process opinion data for variable: smoking")
opinion = "op_" + opinion
smoking = op_data[op_data.variable_name == b'smoke_freq'].copy()
smoking[opinion] = (smoking.response != b'nej_jeg_har_aldrig_r') \
& (smoking.response != b'nej_men_jeg_har_rget')
smoking.reset_index(inplace=True)
smoking = smoking[['user', 'time', opinion, 'survey' ]]
smoking.rename(columns={'user':'node_id'},inplace=True)
smoking = self._add_time_to_op_nodes(smoking, time, opinion)
# write into general dataframe
if op_nodes.empty:
op_nodes = smoking
else:
op_nodes = op_nodes.set_index(['node_id','time']).join(smoking.set_index(['node_id','time']), how='outer')
op_nodes.reset_index(inplace=True)
# load physical opinions
elif opinion == "physical":
print("Process opinion data for variable: physical")
opinion = "op_" + opinion
physical = op_data[op_data.variable_name == b'physical_activity'].copy()
physical.response.replace(b'ingen',0,inplace=True)
physical.response.replace(b'ca__time_om_ugen',0,inplace=True)
physical.response.replace(b'ca_1-2_timer_om_ugen',1,inplace=True)
physical.response.replace(b'ca_3-4_timer_om_ugen',2,inplace=True)
physical.response.replace(b'ca_5-6_timer_om_ugen',3,inplace=True)
physical.response.replace(b'7_timer_om_ugen_elle',4,inplace=True)
physical.rename(columns={'response':opinion, 'user':'node_id'},inplace=True)
physical = physical[['node_id', 'time', opinion, 'survey' ]]
physical = self._add_time_to_op_nodes(physical, time, opinion)
# write into general dataframe
if op_nodes.empty:
op_nodes = physical
else:
op_nodes = op_nodes.set_index(['node_id','time','survey']) \
.join(physical.set_index(['node_id','time','survey']), how='outer')
op_nodes.reset_index(inplace=True)
elif opinion == "fitness":
print("Process opinion data for variable: fitness")
opinion = "op_" + opinion
fitness = pd.read_pickle('data/op_fitness.pkl').reset_index()
fitness = fitness[['node_id','time','op_fitness_abs']]
fitness = fitness.rename(columns={"op_fitness_abs":"fitness"})
fitness["op_fitness"] = 0
fitness.sort_values(['node_id', 'time'], inplace=True)
fitness = fitness[fitness.time >= self.CNS_TIME_BEGIN]
fitness = fitness[fitness.time <= self.CNS_TIME_END]
fitness.set_index('node_id', inplace=True)
fitness.reset_index(inplace=True)
# discretize opinion
fitness.loc[fitness.fitness >= 1, "op_fitness"] = True
fitness.loc[fitness.fitness < 1, "op_fitness"] = False
# write into general dataframe
if op_nodes.empty:
op_nodes = fitness
else:
op_nodes = op_nodes.set_index(['node_id','time','survey']) \
.join(fitness.set_index(['node_id','time','survey']), how='outer')
op_nodes.reset_index(inplace=True)
else:
raise ValueError('The opinion "' + opinion + '" is unknown.')
if write_pickle: op_nodes.to_pickle(self.PICKLE_PATH + pickle_opinions_filename)
#save opinions as instance variable
self.op_nodes = op_nodes
if stop: return 0
## 3. Load relation network
relations = pd.read_pickle("data/relations.pkl")
relations.reset_index(inplace=True)
relations = relations[relations.time >= self.CNS_TIME_BEGIN]
relations = relations[relations.time <= self.CNS_TIME_END]
# take only nodes for which the opinion is known
relations = relations[relations.id_A.isin(self.op_nodes.node_id)]
relations = relations[relations.id_B.isin(self.op_nodes.node_id)]
self.a_ij = relations[['id_A', 'id_B', 'time', 'edge']]
def _get_op_time(self, survey):
with open('user_scores'+str(survey)+'.json') as f:
op_time = json.load(f)
op_time = | pd.DataFrame(op_time) | pandas.DataFrame |
# Author: <NAME>
# Created on: August 2020
# Last modified on: September 18 2020
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from ipywidgets import interact, interact_manual, widgets, Layout, VBox, HBox, Button
from IPython.display import display, Javascript, Markdown, HTML, clear_output
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests as r
# The SEIR model differential equations.
def deriv(y, t, Delta, beta, mu, epsilon,gamma,alpha,delta):
"""
This function contains a system of equations for the S.E.I.R. model assuming non constant population
death (natural and due infection) and birth rates, as well as reinfection post recovery.
Args:
y (array): contains five floating point numbers S0, E0, I0, R0, D0 where each denotes initial conditions (float)
t (float): variable denoting time
Delta (float): rate of birth
beta (float): rate of contact with infectious
mu (float): rate of natural death
epsilon (float): rate of infectiousness
gamma (float): rate of recovery
alpha (float): rate of death due disease
delta (float): rate of reintegration into susceptible state
Returns:
[dS, dE, dI, dR, dD] (array)
dS: differential equation for Susceptible
dE: differential equation of Exposed
dI: differential equation for Infectious
dR: differential equation for Recovered
dD: differential equation for Deaths
"""
S, E, I, R, D = y
N = S + E + I + R
dS = Delta*N - beta*S*I/N - mu*S + delta*R
dE = beta*S*I/N - (mu + epsilon)*E
dI = epsilon*E - (gamma + mu + alpha)*I
dR = gamma*I - mu*R - delta*R
dD = alpha*I
return [dS,dE, dI, dR, dD]
def run_seir_model(Delta, beta, mu, epsilon,gamma,alpha,delta):
"""
This function creates an interactive plot simulating the S.E.I.R. model
Note that susceptible has been commented out for the Callysto teacher and student notebooks
Args:
Delta (float): rate of birth
beta (float): rate of contact with infectious
mu (float): rate of natural death
epsilon (float): rate of infectiousness
gamma (float): rate of recovery
alpha (float): rate of death due disease
delta (float): rate of reintegration into susceptible state
Returns:
seir_simulation (pandas DataFrame): contains data resulting from our model for each of the SEIRD stages
"""
# Initial number of infected and recovered individuals, I0 and R0.
S0, E0,I0, R0 ,D0 = 37000,0,1,0,0
# Total population, N.
N = S0 + E0 + I0 + R0
# Initial conditions vector
y0 = S0,E0, I0, R0, D0
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(Delta, beta, mu, epsilon,gamma,alpha,delta))
S, E,I, R, D = ret.T
# Build dataframe with the data from the model
seir_simulation = | pd.DataFrame({"Susceptible":S,"Exposed":E,"Infectious":I,"Recovered":R,"Deaths":D, "Time (days)":t}) | pandas.DataFrame |
import pandas as pd
from typing import Union
from .template import Processor, Settings
class ProcessVcf(Processor):
vcf: str
vcf_df: pd.DataFrame
cds_edit_df: pd.DataFrame
def __init__(self, settings: Settings):
super().__init__(settings=settings)
def main(self, vcf: str) -> pd.DataFrame:
self.vcf = vcf
self.read_vcf()
self.remove_conflict_variants()
self.vcf_df_to_cds_edit_df()
return self.cds_edit_df
def read_vcf(self):
self.vcf_df = ReadVcf(self.settings).main(vcf=self.vcf)
def remove_conflict_variants(self):
self.vcf_df = RemoveConflictVariants(self.settings).main(indf=self.vcf_df)
def vcf_df_to_cds_edit_df(self):
self.cds_edit_df = VcfDfToCdsEditDf(self.settings).main(vcf_df=self.vcf_df)
class ReadVcf(Processor):
vcf: str
temp_tsv: str
outdf: pd.DataFrame
def __init__(self, settings: Settings):
super().__init__(settings=settings)
def main(self, vcf: str) -> pd.DataFrame:
self.vcf = vcf
self.write_temp_tsv()
self.read_temp_tsv()
return self.outdf
def write_temp_tsv(self):
self.temp_tsv = f'{self.workdir}/temp.tsv'
with open(self.vcf) as reader:
with open(self.temp_tsv, 'w') as writer:
for line in reader:
if line.startswith('##'):
continue
elif line.startswith('#CHROM'):
writer.write(line[1:])
else:
writer.write(line)
def read_temp_tsv(self):
self.outdf = pd.read_csv(self.temp_tsv, sep='\t')
class RemoveConflictVariants(Processor):
COLUMNS_IN = [
'POS',
'REF',
'ALT',
'QUAL',
]
COLUMNS_OUT = COLUMNS_IN
indf: pd.DataFrame
snv_df: pd.DataFrame
del_df: pd.DataFrame
ins_df: pd.DataFrame
outdf: pd.DataFrame
def __init__(self, settings: Settings):
super().__init__(settings=settings)
def main(self, indf: pd.DataFrame) -> pd.DataFrame:
self.indf = indf
self.separate_three_dfs()
self.remove_snv_conflict()
self.remove_deletion_conflict()
self.remove_insertion_conflict()
self.merge_back()
return self.outdf
def separate_three_dfs(self):
df = self.indf
ref_len = df['REF'].apply(len)
alt_len = df['ALT'].apply(len)
self.snv_df = df[ref_len == alt_len].reset_index(drop=True)
self.del_df = df[ref_len > alt_len].reset_index(drop=True)
self.ins_df = df[ref_len < alt_len].reset_index(drop=True)
def remove_snv_conflict(self):
self.snv_df = RemoveSnvConflict(self.settings).main(indf=self.snv_df)
def remove_deletion_conflict(self):
if len(self.del_df) > 0:
self.del_df = RemoveDeletionConflict(self.settings).main(indf=self.del_df)
def remove_insertion_conflict(self):
if len(self.ins_df) > 0:
self.ins_df = RemoveInsertionConflict(self.settings).main(indf=self.ins_df)
def merge_back(self):
self.outdf = self.snv_df.append(
self.del_df, ignore_index=True
).append(
self.ins_df, ignore_index=True
).sort_values(
by='POS',
ascending=True
).reset_index(
drop=True
)
class RemoveSnvConflict(Processor):
indf: pd.DataFrame
outdf: pd.DataFrame
def __init__(self, settings: Settings):
super().__init__(settings=settings)
def main(self, indf: pd.DataFrame) -> pd.DataFrame:
self.indf = indf
self.outdf = self.indf.sort_values(
by='QUAL',
ascending=False
).drop_duplicates(
subset='POS',
keep='first'
).sort_values(
by='POS',
ascending=True
).reset_index(
drop=True
)
return self.outdf
class RemoveDeletionConflict(Processor):
COLUMNS_IN = [
'POS',
'REF',
'ALT',
'QUAL',
]
COLUMNS_OUT = COLUMNS_IN
indf: pd.DataFrame
outdf: pd.DataFrame
def __init__(self, settings: Settings):
super().__init__(settings=settings)
def main(self, indf: pd.DataFrame) -> pd.DataFrame:
self.indf = indf
self.outdf = pd.DataFrame(columns=self.COLUMNS_OUT)
self.sort_by_position()
self.add_deletion_range()
self.remove_overlapped()
self.drop_deletion_range()
return self.outdf
def sort_by_position(self):
self.indf = self.indf.sort_values(
by='POS',
ascending=True
).reset_index(
drop=True
)
def add_deletion_range(self):
df = self.indf
for i, row in df.iterrows():
pos, ref, alt = row['POS'], row['REF'], row['ALT']
df.loc[i, 'start'] = pos + len(alt)
df.loc[i, 'end'] = pos + len(ref) - 1 # -1 because the end is inclusive
def remove_overlapped(self):
df = self.indf
prev = None
for i, row in df.iterrows():
this = row
if prev is None:
prev = this
continue
if overlap(prev, this):
if this['QUAL'] > prev['QUAL']:
prev = this
else:
pass # Just skip this deletion (lower quality), and keep the previous one
else:
self.outdf = self.outdf.append(prev, ignore_index=True)
prev = this
self.outdf = self.outdf.append(prev, ignore_index=True) # Append the final one
def drop_deletion_range(self):
self.outdf = self.outdf.drop(columns=['start', 'end'])
def overlap(row1: pd.Series, row2: pd.Series) -> bool:
return (row2['start'] <= row1['end']) and (row1['start'] <= row2['end'])
class RemoveInsertionConflict(Processor):
COLUMNS_IN = [
'POS',
'REF',
'ALT',
'QUAL',
]
COLUMNS_OUT = COLUMNS_IN
indf: pd.DataFrame
outdf: pd.DataFrame
def __init__(self, settings: Settings):
super().__init__(settings=settings)
def main(self, indf: pd.DataFrame) -> pd.DataFrame:
self.indf = indf
self.outdf = pd.DataFrame(columns=self.COLUMNS_OUT)
self.sort_by_position()
self.add_insertion_position()
self.remove_conflict()
self.drop_insertion_position()
return self.outdf
def sort_by_position(self):
self.indf = self.indf.sort_values(
by='POS',
ascending=True
).reset_index(
drop=True
)
def add_insertion_position(self):
df = self.indf
for i, row in df.iterrows():
pos, ref = row['POS'], row['REF']
df.loc[i, 'position'] = pos + len(ref)
def remove_conflict(self):
self.outdf = self.indf.sort_values(
by='QUAL',
ascending=False
).drop_duplicates(
subset='position',
keep='first'
).sort_values(
by='POS',
ascending=True
).reset_index(
drop=True
)
def drop_insertion_position(self):
self.outdf = self.outdf.drop(columns='position')
class VcfDfToCdsEditDf(Processor):
COLUMNS_IN = [
'POS',
'REF',
'ALT',
]
COLUMNS_OUT = [
'Position',
'Type',
'Base',
]
vcf_df: pd.DataFrame
cds_edit_df: pd.DataFrame
def __init__(self, settings: Settings):
super().__init__(settings=settings)
def main(self, vcf_df: pd.DataFrame) -> pd.DataFrame:
self.vcf_df = vcf_df
self.cds_edit_df = | pd.DataFrame(columns=self.COLUMNS_OUT) | pandas.DataFrame |
# Copyright (c) 2020 <NAME>
import sklearn.metrics
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch
import scipy.sparse
import scipy.io
import scipy.special
import types
import json
import warnings
import math
import torch.nn.functional as F
import csv
from pynvml import *
from contextlib import redirect_stdout
from sparsechem import censored_mse_loss_numpy
from collections import namedtuple
from scipy.sparse import csr_matrix
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
class Nothing(object):
def __getattr__(self, name):
return Nothing()
def __call__(self, *args, **kwargs):
return Nothing()
def __repr__(self):
return "Nothing"
class Nothing(object):
def __getattr__(self, name):
return Nothing()
def __call__(self, *args, **kwargs):
return Nothing()
def __repr__(self):
return "Nothing"
# Control downsampling: how many scalar data do we keep for each run/tag
# combination?
SIZE_GUIDANCE = {'scalars': 10000}
def extract_scalars(multiplexer, run, tag):
"""Extract tabular data from the scalars at a given run and tag.
The result is a list of 3-tuples (wall_time, step, value).
"""
tensor_events = multiplexer.Tensors(run, tag)
return [
# (event.wall_time, event.step, tf.make_ndarray(event.tensor_proto).item())
(event.wall_time, event.step, event.tensor_proto.float_val[0])
for event in tensor_events
]
def create_multiplexer(logdir):
multiplexer = event_multiplexer.EventMultiplexer(
tensor_size_guidance=SIZE_GUIDANCE)
multiplexer.AddRunsFromDirectory(logdir)
multiplexer.Reload()
return multiplexer
def export_scalars(multiplexer, run, tag, filepath, write_headers=True):
data = extract_scalars(multiplexer, run, tag)
with open(filepath, 'w') as outfile:
writer = csv.writer(outfile)
if write_headers:
writer.writerow(('wall_time', 'step', 'value'))
for row in data:
writer.writerow(row)
def return_max_val(data):
max_val = 0
for row in data:
if row[2] > max_val:
max_val = row[2]
return max_val
def inverse_normalization(yr_hat_all, mean, variance, dev="cpu", array=False):
if array==False:
stdev = np.sqrt(variance)
diagstdev = scipy.sparse.diags(np.array(stdev)[0],0)
diag = torch.from_numpy(diagstdev.todense())
y_inv_norm = torch.matmul(yr_hat_all, diag.to(torch.float32).to(dev))
diagm = scipy.sparse.diags(mean, 0)
y_mask = np.ones(yr_hat_all.shape)
y_inv_norm = y_inv_norm + torch.from_numpy(y_mask * diagm).to(torch.float32).to(dev)
else:
y_mask = yr_hat_all.copy()
y_mask.data = np.ones_like(y_mask.data)
set_mask = set([(i,j) for i,j in zip(y_mask.nonzero()[0], y_mask.nonzero()[1])])
stdev = np.sqrt(variance)
diagstdev = scipy.sparse.diags(stdev,0)
y_inv_norm = yr_hat_all.multiply(y_mask * diagstdev)
diagm = scipy.sparse.diags(mean, 0)
y_inv_norm = y_inv_norm + y_mask * diagm
set_inv_norm = set([(i,j) for i,j in zip(y_inv_norm.nonzero()[0], y_inv_norm.nonzero()[1])])
set_delta = set_mask - set_inv_norm
for delta in set_delta:
y_inv_norm[delta[0],delta[1]]=0
assert yr_hat_all.shape == y_inv_norm.shape, "Shapes of yr_hat_all and y_inv_norm must be equal."
y_inv_norm.sort_indices()
assert (yr_hat_all.indptr == y_inv_norm.indptr).all(), "yr_hat_all and y_inv_norm must have the same .indptr"
assert (yr_hat_all.indices == y_inv_norm.indices).all(), "yr_hat_all and y_inv_norm must have the same .indices"
return y_inv_norm
def normalize_regr(y_regr, mean=None, std=None):
y_regr_64 = scipy.sparse.csc_matrix(y_regr, dtype=np.float64)
tot = np.array(y_regr_64.sum(axis=0).squeeze())[0]
set_regr = set([(i,j) for i,j in zip(y_regr_64.nonzero()[0], y_regr_64.nonzero()[1])])
N = y_regr_64.getnnz(axis=0)
m = tot/N
diagm = scipy.sparse.diags(m, 0)
y_mask = y_regr_64.copy()
y_mask.data = np.ones_like(y_mask.data)
y_normalized = y_regr_64 - y_mask * diagm
set_norm = set([(i,j) for i,j in zip(y_normalized.nonzero()[0], y_normalized.nonzero()[1])])
set_delta = set_regr - set_norm
sqr = y_regr_64.copy()
sqr.data **= 2
msquared = np.square(m)
variance = sqr.sum(axis=0)/N - msquared
stdev_inv = 1/np.sqrt(variance)
diagstdev_inv = scipy.sparse.diags(np.array(stdev_inv)[0],0)
y_normalized = y_normalized.multiply(y_mask * diagstdev_inv)
for delta in set_delta:
y_normalized[delta[0],delta[1]]=0
assert y_regr_64.shape == y_normalized.shape, "Shapes of y_regr and y_normalized must be equal."
y_normalized.sort_indices()
assert (y_regr_64.indptr == y_normalized.indptr).all(), "y_regr and y_normalized must have the same .indptr"
assert (y_regr_64.indices == y_normalized.indices).all(), "y_regr and y_normalized must have the same .indptr"
return y_normalized, m, variance
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def calc_acc_kappa(recall, fpr, num_pos, num_neg):
"""Calculates accuracy from recall and precision."""
num_all = num_neg + num_pos
tp = np.round(recall * num_pos).astype(np.int)
fn = num_pos - tp
fp = np.round(fpr * num_neg).astype(np.int)
tn = num_neg - fp
acc = (tp + tn) / num_all
pexp = num_pos / num_all * (tp + fp) / num_all + num_neg / num_all * (tn + fn) / num_all
kappa = (acc - pexp) / (1 - pexp)
return acc, kappa
def all_metrics(y_true, y_score, cal_fact_aucpr_task):
"""Compute classification metrics.
Args:
y_true true labels (0 / 1)
y_score logit values
"""
if len(y_true) <= 1 or (y_true[0] == y_true).all():
df = pd.DataFrame({"roc_auc_score": [np.nan], "auc_pr": [np.nan], "avg_prec_score": [np.nan], "f1_max": [np.nan], "p_f1_max": [np.nan], "kappa": [np.nan], "kappa_max": [np.nan], "p_kappa_max": [np.nan], "bceloss": [np.nan], "auc_pr_cal": [np.nan]})
return df
fpr, tpr, tpr_thresholds = sklearn.metrics.roc_curve(y_true=y_true, y_score=y_score)
roc_auc_score = sklearn.metrics.auc(x=fpr, y=tpr)
precision, recall, pr_thresholds = sklearn.metrics.precision_recall_curve(y_true = y_true, probas_pred = y_score)
with np.errstate(divide='ignore'):
#precision can be zero but can be ignored so disable warnings (divide by 0)
precision_cal = 1/(((1/precision - 1)*cal_fact_aucpr_task)+1)
bceloss = F.binary_cross_entropy_with_logits(
input = torch.FloatTensor(y_score),
target = torch.FloatTensor(y_true),
reduction="none").mean().item()
## calculating F1 for all cutoffs
F1_score = np.zeros(len(precision))
mask = precision > 0
F1_score[mask] = 2 * (precision[mask] * recall[mask]) / (precision[mask] + recall[mask])
f1_max_idx = F1_score.argmax()
f1_max = F1_score[f1_max_idx]
p_f1_max = scipy.special.expit(pr_thresholds[f1_max_idx])
auc_pr = sklearn.metrics.auc(x = recall, y = precision)
auc_pr_cal = sklearn.metrics.auc(x = recall, y = precision_cal)
avg_prec_score = sklearn.metrics.average_precision_score(
y_true = y_true,
y_score = y_score)
y_classes = np.where(y_score >= 0.0, 1, 0)
## accuracy for all thresholds
acc, kappas = calc_acc_kappa(recall=tpr, fpr=fpr, num_pos=(y_true==1).sum(), num_neg=(y_true==0).sum())
kappa_max_idx = kappas.argmax()
kappa_max = kappas[kappa_max_idx]
p_kappa_max = scipy.special.expit(tpr_thresholds[kappa_max_idx])
kappa = sklearn.metrics.cohen_kappa_score(y_true, y_classes)
df = pd.DataFrame({"roc_auc_score": [roc_auc_score], "auc_pr": [auc_pr], "avg_prec_score": [avg_prec_score], "f1_max": [f1_max], "p_f1_max": [p_f1_max], "kappa": [kappa], "kappa_max": [kappa_max], "p_kappa_max": p_kappa_max, "bceloss": bceloss, "auc_pr_cal": [auc_pr_cal]})
return df
def compute_corr(x, y):
if len(y) <= 1:
return np.nan
ystd = y.std()
xstd = x.std()
if ystd == 0 or xstd == 0:
return np.nan
return np.dot((x - x.mean()), (y - y.mean())) / len(y) / y.std() / x.std()
def all_metrics_regr(y_true, y_score, y_censor=None):
if len(y_true) <= 1:
df = pd.DataFrame({"rmse": [np.nan], "rmse_uncen": [np.nan], "rsquared": [np.nan], "corrcoef": [np.nan]})
return df
## censor0 means non-censored observations
censor0 = y_censor == 0 if y_censor is not None else slice(None)
mse_cen = censored_mse_loss_numpy(target=y_true, input=y_score, censor=y_censor).mean()
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
mse = ((y_true[censor0] - y_score[censor0])**2).mean()
yvar = y_true[censor0].var()
if yvar == 0 or np.isnan(yvar):
rsquared = np.nan
corr = np.nan
else:
rsquared = 1 - mse / yvar
corr = compute_corr(y_true[censor0], y_score[censor0])
df = pd.DataFrame({
"rmse": [np.sqrt(mse_cen)],
"rmse_uncen": [np.sqrt(mse)],
"rsquared": [rsquared],
"corrcoef": [corr],
})
return df
def compute_metrics(cols, y_true, y_score, num_tasks, cal_fact_aucpr):
if len(cols) < 1:
return pd.DataFrame({
"roc_auc_score": np.nan,
"auc_pr": np.nan,
"avg_prec_score": np.nan,
"f1_max": np.nan,
"p_f1_max": np.nan,
"kappa": np.nan,
"kappa_max": np.nan,
"p_kappa_max": np.nan,
"bceloss": np.nan}, index=np.arange(num_tasks))
df = pd.DataFrame({"task": cols, "y_true": y_true, "y_score": y_score})
if hasattr(cal_fact_aucpr, "__len__"):
metrics = df.groupby("task", sort=True).apply(lambda g:
all_metrics(
y_true = g.y_true.values,
y_score = g.y_score.values,
cal_fact_aucpr_task = cal_fact_aucpr[g['task'].values[0]]))
else:
metrics = df.groupby("task", sort=True).apply(lambda g:
all_metrics(
y_true = g.y_true.values,
y_score = g.y_score.values,
cal_fact_aucpr_task = 1.0))
metrics.reset_index(level=-1, drop=True, inplace=True)
return metrics.reindex(np.arange(num_tasks))
def compute_metrics_regr(cols, y_true, y_score, num_tasks, y_censor=None):
"""Returns metrics for regression tasks."""
if len(cols) < 1:
return pd.DataFrame({
"rmse": np.nan,
"rmse_uncen": np.nan,
"rsquared": np.nan,
"corrcoef": np.nan,
},
index=np.arange(num_tasks))
df = pd.DataFrame({
"task": cols,
"y_true": y_true,
"y_score": y_score,
"y_censor": y_censor,
})
metrics = df.groupby("task", sort=True).apply(lambda g:
all_metrics_regr(
y_true = g.y_true.values,
y_score = g.y_score.values,
y_censor = g.y_censor.values if y_censor is not None else None))
metrics.reset_index(level=-1, drop=True, inplace=True)
return metrics.reindex(np.arange(num_tasks))
def class_fold_counts(y_class, folding):
folds = np.unique(folding)
num_pos = []
num_neg = []
for fold in folds:
yf = y_class[folding == fold]
num_pos.append( np.array((yf == +1).sum(0)).flatten() )
num_neg.append( np.array((yf == -1).sum(0)).flatten() )
return np.row_stack(num_pos), np.row_stack(num_neg)
def print_metrics(epoch, train_time, metrics_tr, metrics_va, header):
if metrics_tr is None:
if header:
print("Epoch\tlogl_va | auc_va | aucpr_va | aucpr_cal_va | maxf1_va | tr_time")
output_fstr = (
f"{epoch}.\t{metrics_va['logloss']:.5f}"
f" | {metrics_va['roc_auc_score']:.5f}"
f" | {metrics_va['auc_pr']:.5f}"
f" | {metrics_va['auc_pr_cal']:.5f}"
f" | {metrics_va['f1_max']:.5f}"
f" | {train_time:6.1f}"
)
print(output_fstr)
return
## full print
if header:
print("Epoch\tlogl_tr logl_va | auc_tr auc_va | aucpr_tr aucpr_va | maxf1_tr maxf1_va | tr_time")
output_fstr = (
f"{epoch}.\t{metrics_tr['logloss']:.5f} {metrics_va['logloss']:.5f}"
f" | {metrics_tr['roc_auc_score']:.5f} {metrics_va['roc_auc_score']:.5f}"
f" | {metrics_tr['auc_pr']:.5f} {metrics_va['auc_pr']:.5f}"
f" | {metrics_tr['f1_max']:.5f} {metrics_va['f1_max']:.5f}"
f" | {train_time:6.1f}"
)
print(output_fstr)
def print_table(formats, data):
for key, fmt in formats.items():
print(fmt.format(data[key]), end="")
Column = namedtuple("Column", "key size dec title")
columns_cr = [
Column("epoch", size=6, dec= 0, title="Epoch"),
Column(None, size=1, dec=-1, title="|"),
Column("logloss", size=8, dec= 5, title="logl"),
Column("bceloss", size=8, dec= 5, title="bceloss"),
Column("roc_auc_score", size=8, dec= 5, title="aucroc"),
Column("auc_pr", size=8, dec= 5, title="aucpr"),
Column("auc_pr_cal", size=9, dec= 5, title="aucpr_cal"),
Column("f1_max", size=8, dec= 5, title="f1_max"),
Column(None, size=1, dec=-1, title="|"),
Column("rmse", size=9, dec= 5, title="rmse"),
Column("rsquared", size=9, dec= 5, title="rsquared"),
Column("corrcoef", size=9, dec= 5, title="corrcoef"),
Column(None, size=1, dec=-1, title="|"),
Column("train_time", size=6, dec= 1, title="tr_time"),
]
def print_cell(value, size, dec, left, end=" "):
align = "<" if left else ">"
if type(value) == str:
print(("{:" + align + str(size) + "}").format(value), end=end)
else:
print(("{:" + align + str(size) + "." + str(dec) + "f}").format(value), end=end)
def print_metrics_cr(epoch, train_time, results_tr, results_va, header):
data = pd.concat([results_va["classification_agg"], results_va["regression_agg"]])
data["train_time"] = train_time
data["epoch"] = epoch
if header:
for i, col in enumerate(columns_cr):
print_cell(col.title, col.size, dec=0, left=(i==0))
print()
## printing row with values
for i, col in enumerate(columns_cr):
print_cell(data.get(col.key, col.title), col.size, dec=col.dec, left=(i==0))
print()
def evaluate_binary(net, loader, loss, dev, progress=True):
net.eval()
logloss_sum = 0.0
logloss_count = 0
y_ind_list = []
y_true_list = []
y_hat_list = []
num_tasks = loader.dataset.y.shape[1]
with torch.no_grad():
for b in tqdm(loader, leave=False, disable=(progress == False)):
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], loader.dataset.input_size]).to(dev)
y_ind = b["y_ind"].to(dev)
y_data = b["y_data"].to(dev)
y_hat_all = net(X)
y_hat = y_hat_all[y_ind[0], y_ind[1]]
output = loss(y_hat, y_data).sum()
logloss_sum += output
logloss_count += y_data.shape[0]
## storing data for AUCs
y_ind_list.append(b["y_ind"])
y_true_list.append(b["y_data"])
y_hat_list.append(y_hat.cpu())
if len(y_ind_list) == 0:
return {
"metrics": compute_metrics([], y_true=[], y_score=[], num_tasks=num_tasks),
"logloss": np.nan,
}
y_ind = torch.cat(y_ind_list, dim=1).numpy()
y_true = torch.cat(y_true_list, dim=0).numpy()
y_hat = torch.cat(y_hat_list, dim=0).numpy()
metrics = compute_metrics(y_ind[1], y_true=y_true, y_score=y_hat, num_tasks=num_tasks)
return {
'metrics': metrics,
'logloss': logloss_sum.cpu().numpy() / logloss_count
}
def train_binary(net, optimizer, loader, loss, dev, task_weights, normalize_loss=None, num_int_batches=1, progress=True):
"""
Args:
net pytorch network
optimizer optimizer to use
loader data loader with training data
dev device
task_weights weights of the tasks
normalize_loss normalization value, if None then use batch size
num_int_batches number of internal batches to use
progress whether to show a progress bar
"""
net.train()
logloss_sum = 0.0
logloss_count = 0
int_count = 0
for b in tqdm(loader, leave=False, disable=(progress == False)):
if int_count == 0:
optimizer.zero_grad()
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], loader.dataset.input_size]).to(dev)
y_ind = b["y_ind"].to(dev)
y_w = task_weights[y_ind[1]]
y_data = b["y_data"].to(dev)
yhat_all = net(X)
yhat = yhat_all[y_ind[0], y_ind[1]]
norm = normalize_loss
if norm is None:
norm = b["batch_size"] * num_int_batches
output = (loss(yhat, y_data) * y_w).sum()
output_n = output / norm
output_n.backward()
int_count += 1
if int_count == num_int_batches:
optimizer.step()
int_count = 0
logloss_sum += output.detach() / y_data.shape[0]
logloss_count += 1
if int_count > 0:
## process tail batch (should not happen)
optimizer.step()
return logloss_sum / logloss_count
def batch_forward(net, b, input_size, loss_class, loss_regr, weights_class, weights_regr, censored_weight=[], dev="cpu", normalize_inv=None, y_cat_columns=None):
"""returns full outputs from the network for the batch b"""
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], input_size]).to(dev, non_blocking=True)
if net.cat_id_size is None:
yc_hat_all, yr_hat_all = net(X)
else:
yc_hat_all, yr_hat_all, ycat_hat_all = net(X)
if normalize_inv is not None:
#inverse normalization
yr_hat_all = inverse_normalization(yr_hat_all, normalize_inv["mean"], normalize_inv["var"], dev).to(dev)
out = {}
out["yc_hat_all"] = yc_hat_all
out["yr_hat_all"] = yr_hat_all
out["yc_loss"] = 0
out["yr_loss"] = 0
out["yc_weights"] = 0
out["yr_weights"] = 0
out["yc_cat_loss"] = 0
if net.class_output_size > 0:
yc_ind = b["yc_ind"].to(dev, non_blocking=True)
yc_w = weights_class[yc_ind[1]]
yc_data = b["yc_data"].to(dev, non_blocking=True)
yc_hat = yc_hat_all[yc_ind[0], yc_ind[1]]
out["yc_ind"] = yc_ind
out["yc_data"] = yc_data
out["yc_hat"] = yc_hat
out["yc_loss"] = (loss_class(yc_hat, yc_data) * yc_w).sum()
out["yc_weights"] = yc_w.sum()
if net.cat_id_size is not None and net.cat_id_size > 0:
yc_cat_ind = b["yc_cat_ind"].to(dev, non_blocking=True)
yc_cat_data = b["yc_cat_data"].to(dev, non_blocking=True)
yc_cat_hat = ycat_hat_all[yc_cat_ind[0], yc_cat_ind[1]]
if y_cat_columns is not None:
yc_hat_all[:,y_cat_columns] = ycat_hat_all
yc_hat = yc_hat_all[yc_ind[0], yc_ind[1]]
out["yc_hat"] = yc_hat
out["yc_cat_loss"] = loss_class(yc_cat_hat, yc_cat_data).sum()
if net.regr_output_size > 0:
yr_ind = b["yr_ind"].to(dev, non_blocking=True)
yr_w = weights_regr[yr_ind[1]]
yr_data = b["yr_data"].to(dev, non_blocking=True)
yr_hat = yr_hat_all[yr_ind[0], yr_ind[1]]
out["ycen_data"] = b["ycen_data"]
if out["ycen_data"] is not None:
out["ycen_data"] = out["ycen_data"].to(dev, non_blocking=True)
if len(censored_weight) > 0:
## updating weights of censored data
yrcen_w = yr_w * censored_weight[yr_ind[1]]
yr_w = torch.where(out["ycen_data"] == 0, yr_w, yrcen_w)
out["yr_ind"] = yr_ind
out["yr_data"] = yr_data
out["yr_hat"] = yr_hat
out["yr_loss"] = (loss_regr(input=yr_hat, target=yr_data, censor=out["ycen_data"]) * yr_w).sum()
out["yr_weights"] = yr_w.sum()
return out
def train_class_regr(net, optimizer, loader, loss_class, loss_regr, dev,
weights_class, weights_regr, censored_weight,
normalize_loss=None, num_int_batches=1, progress=True, reporter=None, writer=None, epoch=0, args=None, scaler=None, nvml_handle=None):
net.train()
int_count = 0
batch_count = 0
#scaler = torch.cuda.amp.GradScaler()
for b in tqdm(loader, leave=False, disable=(progress == False)):
if int_count == 0:
optimizer.zero_grad()
norm = normalize_loss
if norm is None:
norm = b["batch_size"] * num_int_batches
if args.mixed_precision == 1:
mixed_precision = True
else:
mixed_precision = False
with torch.cuda.amp.autocast(enabled=mixed_precision):
fwd = batch_forward(net, b=b, input_size=loader.dataset.input_size, loss_class=loss_class, loss_regr=loss_regr, weights_class=weights_class, weights_regr=weights_regr, censored_weight=censored_weight, dev=dev)
if writer is not None and reporter is not None:
info = nvmlDeviceGetMemoryInfo(nvml_handle)
#writer.add_scalar("GPUmem", torch.cuda.memory_allocated() / 1024 ** 2, 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"]))
writer.add_scalar("GPUmem", float("{}".format(info.used >> 20)), 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"]))
if batch_count == 1:
with open(f"{args.output_dir}/memprofile.txt", "a+") as profile_file:
with redirect_stdout(profile_file):
profile_file.write(f"\nForward pass model detailed report:\n\n")
reporter.report()
loss = fwd["yc_loss"] + fwd["yr_loss"] + fwd["yc_cat_loss"] + net.GetRegularizer()
loss_norm = loss / norm
#loss_norm.backward()
if mixed_precision:
scaler.scale(loss_norm).backward()
else:
loss_norm.backward()
if writer is not None and reporter is not None:
info = nvmlDeviceGetMemoryInfo(nvml_handle)
#writer.add_scalar("GPUmem", torch.cuda.memory_allocated() / 1024 ** 2, 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+1)
writer.add_scalar("GPUmem", float("{}".format(info.used >> 20)), 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+1)
int_count += 1
if int_count == num_int_batches:
if mixed_precision and not isinstance(optimizer,Nothing):
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
if writer is not None and reporter is not None:
info = nvmlDeviceGetMemoryInfo(nvml_handle)
#writer.add_scalar("GPUmem", torch.cuda.memory_allocated() / 1024 ** 2, 3*(int_count-1+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+2)
writer.add_scalar("GPUmem", float("{}".format(info.used >> 20)), 3*(int_count-1+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+2)
int_count = 0
batch_count+=1
if int_count > 0:
## process tail batch (should not happen)
if mixed_precision and not isinstance(optimizer,Nothing):
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
def aggregate_results(df, weights):
"""Compute aggregates based on the weights"""
wsum = weights.sum()
if wsum == 0:
return pd.Series(np.nan, index=df.columns)
df2 = df.where(pd.isnull, 1) * weights[:,None]
return (df2.multiply(1.0 / df2.sum(axis=0), axis=1) * df).sum(axis=0)
def evaluate_class_regr(net, loader, loss_class, loss_regr, tasks_class, tasks_regr, dev, progress=True, normalize_inv=None, cal_fact_aucpr=1):
class_w = tasks_class.aggregation_weight
regr_w = tasks_regr.aggregation_weight
net.eval()
loss_class_sum = 0.0
loss_regr_sum = 0.0
loss_class_weights = 0.0
loss_regr_weights = 0.0
data = {
"yc_ind": [],
"yc_data": [],
"yc_hat": [],
"yr_ind": [],
"yr_data": [],
"yr_hat": [],
"ycen_data": [],
}
num_class_tasks = loader.dataset.class_output_size
num_regr_tasks = loader.dataset.regr_output_size
with torch.no_grad():
for b in tqdm(loader, leave=False, disable=(progress == False)):
fwd = batch_forward(net, b=b, input_size=loader.dataset.input_size, loss_class=loss_class, loss_regr=loss_regr, weights_class=tasks_class.training_weight, weights_regr=tasks_regr.training_weight, dev=dev, normalize_inv=normalize_inv, y_cat_columns=loader.dataset.y_cat_columns)
loss_class_sum += fwd["yc_loss"]
loss_regr_sum += fwd["yr_loss"]
loss_class_weights += fwd["yc_weights"]
loss_regr_weights += fwd["yr_weights"]
## storing data for AUCs
for key in data.keys():
if (key in fwd) and (fwd[key] is not None):
data[key].append(fwd[key].cpu())
out = {}
if len(data["yc_ind"]) == 0:
## there are no data for classification
out["classification"] = compute_metrics([], y_true=[], y_score=[], num_tasks=num_class_tasks, cal_fact_aucpr=cal_fact_aucpr)
out["classification_agg"] = out["classification"].reindex(labels=[]).mean(0)
out["classification_agg"]["logloss"] = np.nan
else:
yc_ind = torch.cat(data["yc_ind"], dim=1).numpy()
yc_data = torch.cat(data["yc_data"], dim=0).numpy()
yc_hat = torch.cat(data["yc_hat"], dim=0).numpy()
out["classification"] = compute_metrics(yc_ind[1], y_true=yc_data, y_score=yc_hat, num_tasks=num_class_tasks, cal_fact_aucpr=cal_fact_aucpr)
out["classification_agg"] = aggregate_results(out["classification"], weights=class_w)
out["classification_agg"]["logloss"] = loss_class_sum.cpu().item() / loss_class_weights.cpu().item()
if len(data["yr_ind"]) == 0:
out["regression"] = compute_metrics_regr([], y_true=[], y_score=[], num_tasks=num_regr_tasks)
out["regression_agg"] = out["regression"].reindex(labels=[]).mean(0)
out["regression_agg"]["mseloss"] = np.nan
else:
yr_ind = torch.cat(data["yr_ind"], dim=1).numpy()
yr_data = torch.cat(data["yr_data"], dim=0).numpy()
yr_hat = torch.cat(data["yr_hat"], dim=0).numpy()
if len(data["ycen_data"]) > 0:
ycen_data = torch.cat(data["ycen_data"], dim=0).numpy()
else:
ycen_data = None
out["regression"] = compute_metrics_regr(yr_ind[1], y_true=yr_data, y_score=yr_hat, y_censor=ycen_data, num_tasks=num_regr_tasks)
out["regression"]["aggregation_weight"] = regr_w
out["regression_agg"] = aggregate_results(out["regression"], weights=regr_w)
out["regression_agg"]["mseloss"] = loss_regr_sum.cpu().item() / loss_regr_weights.cpu().item()
out["classification_agg"]["num_tasks_total"] = loader.dataset.class_output_size
out["classification_agg"]["num_tasks_agg"] = (tasks_class.aggregation_weight > 0).sum()
out["regression_agg"]["num_tasks_total"] = loader.dataset.regr_output_size
out["regression_agg"]["num_tasks_agg"] = (tasks_regr.aggregation_weight > 0).sum()
return out
def enable_dropout(m):
if type(m) == torch.nn.Dropout:
m.train()
def predict(net, loader, dev, progress=True, dropout=False, y_cat_columns=None):
"""
Makes predictions for all compounds in the loader.
"""
net.eval()
if dropout:
net.apply(enable_dropout)
y_class_list = []
y_regr_list = []
with torch.no_grad():
for b in tqdm(loader, leave=False, disable=(progress == False)):
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], loader.dataset.input_size]).to(dev)
if net.cat_id_size is None:
y_class, y_regr = net(X)
else:
y_class, y_regr, yc_cat = net(X)
if y_cat_columns is not None:
y_class[:,y_cat_columns] = yc_cat
y_class_list.append(torch.sigmoid(y_class).cpu())
y_regr_list.append(y_regr.cpu())
y_class = torch.cat(y_class_list, dim=0)
y_regr = torch.cat(y_regr_list, dim=0)
return y_class.numpy(), y_regr.numpy()
def predict_hidden(net, loader, dev, progress=True, dropout=False):
"""
Returns hidden values for all compounds in the loader.
"""
net.eval()
if dropout:
net.apply(enable_dropout)
out_list = []
with torch.no_grad():
for b in tqdm(loader, leave=False, disable=(progress == False)):
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], loader.dataset.input_size]).to(dev)
out_list.append( net(X, last_hidden=True).cpu() )
return torch.cat(out_list, dim=0)
class SparseCollector(object):
def __init__(self, label):
self.y_hat_list = []
self.y_row_list = []
self.y_col_list = []
self.label = label
self.row_count = 0
def append(self, batch, y_all):
"""Appends prediction for the given batch."""
dev = y_all.device
if self.label not in batch:
return
y_ind = batch[self.label].to(dev)
y_hat = y_all[y_ind[0], y_ind[1]]
self.y_hat_list.append(y_hat.cpu())
self.y_row_list.append(batch[self.label][0] + self.row_count)
self.y_col_list.append(batch[self.label][1])
self.row_count += batch["batch_size"]
def tocsr(self, shape, sigmoid):
"""
Returns sparse CSR matrix
shape shape of the matrix
sigmoid whether or not to apply sigmoid
"""
if len(self.y_row_list) == 0:
return csr_matrix(shape, dtype=np.float32)
y_hat = torch.cat(self.y_hat_list, dim=0)
if sigmoid:
y_hat = torch.sigmoid(y_hat)
y_row = torch.cat(self.y_row_list, dim=0).numpy()
y_col = torch.cat(self.y_col_list, dim=0).numpy()
return csr_matrix((y_hat.numpy(), (y_row, y_col)), shape=shape)
def predict_sparse(net, loader, dev, progress=True, dropout=False, y_cat_columns=None):
"""
Makes predictions for the Y values in loader.
Returns sparse matrix of the shape loader.dataset.y.
"""
net.eval()
if dropout:
net.apply(enable_dropout)
class_collector = SparseCollector("yc_ind")
regr_collector = SparseCollector("yr_ind")
with torch.no_grad():
for b in tqdm(loader, leave=False, disable=(progress == False)):
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], loader.dataset.input_size]).to(dev)
if net.cat_id_size is None:
yc, yr = net(X)
else:
yc, yr, yc_cat = net(X)
if y_cat_columns is not None:
yc[:,y_cat_columns] = yc_cat
class_collector.append(b, yc)
regr_collector.append(b, yr)
yc_shape = loader.dataset.y_class.shape
yr_shape = loader.dataset.y_regr.shape
yc_hat = class_collector.tocsr(shape=yc_shape, sigmoid=True)
yr_hat = regr_collector.tocsr(shape=yr_shape, sigmoid=False)
return yc_hat, yr_hat
def fold_transform_inputs(x, folding_size=None, transform="none"):
"""Fold and transform sparse matrix x:
Args:
x sparse matrix
folding_size modulo for folding
transform "none", "binarize", "tanh", "log1p"
Returns folded and transformed x.
"""
if folding_size is not None and x.shape[1] > folding_size:
## collapse x into folding_size columns
idx = x.nonzero()
folded = idx[1] % folding_size
x = scipy.sparse.csr_matrix((x.data, (idx[0], folded)), shape=(x.shape[0], folding_size))
x.sum_duplicates()
if transform is None or transform == "none":
pass
elif transform == "binarize":
x.data = (x.data > 0).astype(np.float32)
elif transform == "tanh":
x.data = np.tanh(x.data).astype(np.float32)
elif transform == "log1p":
x.data = np.log1p(x.data).astype(np.float32)
else:
raise ValueError(f"Unknown input transformation '{transform}'.")
return x
def set_weights(net, filename="./tf_h400_inits.npy"):
"""
Loads weights from disk and net parameters from them.
"""
print(f"Loading weights from '{filename}'.")
torch_to_value = np.load(filename, allow_pickle=True).item()
for name, param in net.named_parameters():
value = torch_to_value[name]
if value.shape != param.shape:
value = value.T
assert value.shape == param.shape
param.data.copy_(torch.FloatTensor(value))
print("Weights have been copied to Pytorch net.")
def load_sparse(filename):
"""Loads sparse from Matrix market or Numpy .npy file."""
if filename is None:
return None
if filename.endswith('.mtx'):
return scipy.io.mmread(filename).tocsr()
elif filename.endswith('.npy'):
return np.load(filename, allow_pickle=True).item().tocsr()
elif filename.endswith('.npz'):
return scipy.sparse.load_npz(filename).tocsr()
raise ValueError(f"Loading '{filename}' failed. It must have a suffix '.mtx', '.npy', '.npz'.")
def load_check_sparse(filename, shape):
y = load_sparse(filename)
if y is None:
return scipy.sparse.csr_matrix(shape, dtype=np.float32)
assert y.shape == shape, f"Shape of sparse matrix {filename} should be {shape} but is {y.shape}."
return y
def load_task_weights(filename, y, label):
"""Loads and processes task weights, otherwise raises an error using the label.
Args:
df DataFrame with weights
y csr matrix of labels
label name for error messages
Returns tuple of
training_weight
aggregation_weight
task_type
"""
res = types.SimpleNamespace(task_id=None, training_weight=None, aggregation_weight=None, task_type=None, censored_weight=torch.FloatTensor(), cat_id=None)
if y is None:
assert filename is None, f"Weights provided for {label}, please add also --{label}"
res.training_weight = torch.ones(0)
return res
if filename is None:
res.training_weight = torch.ones(y.shape[1])
return res
df = pd.read_csv(filename)
df.rename(columns={"weight": "training_weight"}, inplace=True)
## also supporting plural form column names:
df.rename(columns={c + "s": c for c in ["task_id", "training_weight", "aggregation_weight", "task_type", "censored_weight"]}, inplace=True)
assert "task_id" in df.columns, "task_id is missing in task info CVS file"
assert "training_weight" in df.columns, "training_weight is missing in task info CSV file"
df.sort_values("task_id", inplace=True)
for col in df.columns:
cols = ["", "task_id", "training_weight", "aggregation_weight", "task_type", "censored_weight","catalog_id"]
assert col in cols, f"Unsupported colum '{col}' in task weight file. Supported columns: {cols}."
assert y.shape[1] == df.shape[0], f"task weights for '{label}' have different size ({df.shape[0]}) to {label} columns ({y.shape[1]})."
assert (0 <= df.training_weight).all(), f"task weights (for {label}) must not be negative"
assert (df.training_weight <= 1).all(), f"task weights (for {label}) must not be larger than 1.0"
assert df.task_id.unique().shape[0] == df.shape[0], f"task ids (for {label}) are not all unique"
assert (0 <= df.task_id).all(), f"task ids in task weights (for {label}) must not be negative"
assert (df.task_id < df.shape[0]).all(), f"task ids in task weights (for {label}) must be below number of tasks"
res.training_weight = torch.FloatTensor(df.training_weight.values)
res.task_id = df.task_id.values
if "aggregation_weight" in df:
assert (0 <= df.aggregation_weight).all(), f"Found negative aggregation_weight for {label}. Aggregation weights must be non-negative."
res.aggregation_weight = df.aggregation_weight.values
if "task_type" in df:
res.task_type = df.task_type.values
if "censored_weight" in df:
assert (0 <= df.censored_weight).all(), f"Found negative censored_weight for {label}. Censored weights must be non-negative."
res.censored_weight = torch.FloatTensor(df.censored_weight.values)
if "catalog_id" in df:
res.cat_id = df.catalog_id.values
return res
def save_results(filename, conf, validation, training, stats=None):
"""Saves conf and results into json file. Validation and training can be None."""
out = {}
out["conf"] = conf.__dict__
if stats is not None:
out["stats"] = {}
for key in ["mean", "var"]:
#import ipdb; ipdb.set_trace()
out["stats"][key] = stats[key].tolist()
if validation is not None:
out["validation"] = {}
for key in ["classification", "classification_agg", "regression", "regression_agg"]:
out["validation"][key] = validation[key].to_json()
if training is not None:
out["training"] = {}
for key in ["classification", "classification_agg", "regression", "regression_agg"]:
out["training"][key] = training[key].to_json()
with open(filename, "w") as f:
json.dump(out, f)
def load_results(filename, two_heads=False):
"""Loads conf and results from a file
Args:
filename name of the json/npy file
two_heads set up class_output_size if missing
"""
if filename.endswith(".npy"):
return np.load(filename, allow_pickle=True).item()
with open(filename, "r") as f:
data = json.load(f)
for key in ["model_type"]:
if key not in data["conf"]:
data["conf"][key] = None
if two_heads and ("class_output_size" not in data["conf"]):
data["conf"]["class_output_size"] = data["conf"]["output_size"]
data["conf"]["regr_output_size"] = 0
data["conf"] = types.SimpleNamespace(**data["conf"])
if "results" in data:
for key in data["results"]:
data["results"][key] = | pd.read_json(data["results"][key]) | pandas.read_json |
import numpy as np
np.warnings.filterwarnings('ignore') #to not display numpy warnings... be careful
import pandas as pd
from mpi4py import MPI
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from subprocess import call
from orca import *
from orca.data import *
from datetime import datetime
import warnings
from ptreeopt.tree import PTree
warnings.filterwarnings('ignore')
# this whole script will run on all processors requested by the job script
with open('orca/data/scenario_names_all.txt') as f:
scenarios = f.read().splitlines()
with open('orca/data/demand_scenario_names_all.txt') as f:
demand_scenarios = f.read().splitlines()
calc_indices = False
climate_forecasts = False
simulation = True
tree_input_files = False
indicator_data_file = False
window_type = 'rolling'
window_length = 40
index_exceedence_sac = 8
shift = 0
SHA_shift = shift
ORO_shift = shift
FOL_shift = shift
SHA_baseline = pd.read_csv('orca/data/baseline_storage/SHA_storage.csv',parse_dates = True, index_col = 0)
SHA_baseline = SHA_baseline[(SHA_baseline.index >= '2006-09-30') & (SHA_baseline.index <= '2099-10-01')]
ORO_baseline = pd.read_csv('orca/data/baseline_storage/ORO_storage.csv',parse_dates = True, index_col = 0)
ORO_baseline = ORO_baseline[(ORO_baseline.index >= '2006-09-30') & (ORO_baseline.index <= '2099-10-01')]
FOL_baseline = pd.read_csv('orca/data/baseline_storage/FOL_storage.csv',parse_dates = True, index_col = 0)
FOL_baseline = FOL_baseline[(FOL_baseline.index >= '2006-09-30') & (FOL_baseline.index <= '2099-10-01')]
features = json.load(open('orca/data/json_files/indicators_whole_bounds.json'))
feature_names = []
feature_bounds = []
indicator_codes = []
min_depth = 4
for k,v in features.items():
indicator_codes.append(k)
feature_names.append(v['name'])
feature_bounds.append(v['bounds'])
action_dict = json.load(open('orca/data/json_files/action_list.json'))
actions = action_dict['actions']
snapshots = pickle.load(open('snapshots/training_scenarios_seed_2.pkl', 'rb'))
P = snapshots['best_P'][-1][0]
demand_indicators = {}
for D in demand_scenarios:
dfdemand = pd.read_csv('orca/data/demand_files/%s.csv'%D, index_col = 0, parse_dates = True)
dfdemand['demand_multiplier'] = dfdemand['combined_demand']
dfd_ind = pd.DataFrame(index = dfdemand.index)
for i in features: #indicators
ind = features[i]
if ind['type'] == 'demand':
if ind['delta'] == 'no':
if ind['stat'] == 'mu':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).mean()*100
elif ind['stat'] == 'sig':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).std()*100
elif ind['stat'] == 'max':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).max()*100
else:
if ind['stat'] == 'mu':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).mean().pct_change(periods=ind['delta'])*100
elif ind['stat'] == 'sig':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).std().pct_change(periods=ind['delta'])*100
elif ind['stat'] == 'max':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).max().pct_change(periods=ind['delta'])*100
elif ind['type'] == "discount":
discount_indicator = i
demand_indicators[D] = dfd_ind
indicator_columns = []
comm = MPI.COMM_WORLD # communication object
rank = comm.rank # what number processor am I?
sc = scenarios[rank]
call(['mkdir', 'orca/data/scenario_runs/%s'%sc])
if calc_indices:
gains_loop_df = | pd.read_csv('orca/data/historical_runs_data/gains_loops.csv', index_col = 0, parse_dates = True) | pandas.read_csv |
"""
GIS For Electrification (GISEle)
Developed by the Energy Department of Politecnico di Milano
Supporting Code
Group of supporting functions used inside all the process of GISEle algorithm
"""
import os
import requests
import pandas as pd
import geopandas as gpd
import numpy as np
import json
import shapely.ops
import iso8601
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist
from shapely.geometry import Point, box, LineString, MultiPoint
from shapely.ops import split
from gisele.michele.michele import start
from gisele.data_import import import_pv_data, import_wind_data
from datetime import datetime
def l():
"""Print long separating lines."""
print('-' * 100)
def s():
"""Print short separating lines."""
print("-" * 40)
def nearest(row, df, src_column=None):
"""
Find the nearest point and return the value from specified column.
:param row: Iterative row of the first dataframe
:param df: Second dataframe to be found the nearest value
:param src_column: Column of the second dataframe that will be returned
:return value: Value of the desired src_column of the second dataframe
"""
# Find the geometry that is closest
nearest_p = df['geometry'] == shapely.ops.nearest_points(row['geometry'],
df.unary_union)[1]
# Get the corresponding value from df2 (matching is based on the geometry)
value = df.loc[nearest_p, src_column].values[0]
return value
def distance_2d(df1, df2, x, y):
"""
Find the 2D distance matrix between two datasets of points.
:param df1: first point dataframe
:param df2: second point dataframe
:param x: column representing the x coordinates (longitude)
:param y: column representing the y coordinates (latitude)
:return value: 2D Distance matrix between df1 and df2
"""
d1_coordinates = {'x': df1[x], 'y': df1[y]}
df1_loc = pd.DataFrame(data=d1_coordinates)
df1_loc.index = df1['ID']
d2_coordinates = {'x': df2[x], 'y': df2[y]}
df2_loc = pd.DataFrame(data=d2_coordinates)
df2_loc.index = df2['ID']
value = distance_matrix(df1_loc, df2_loc)
return value
def distance_3d(df1, df2, x, y, z):
"""
Find the 3D euclidean distance matrix between two datasets of points.
:param df1: first point dataframe
:param df2: second point dataframe
:param x: column representing the x coordinates (longitude)
:param y: column representing the y coordinates (latitude)
:param z: column representing the z coordinates (Elevation)
:return value: 3D Distance matrix between df1 and df2
"""
d1_coordinates = {'x': df1[x], 'y': df1[y], 'z': df1[z]}
df1_loc = pd.DataFrame(data=d1_coordinates)
df1_loc.index = df1['ID']
d2_coordinates = {'x': df2[x], 'y': df2[y], 'z': df2[z]}
df2_loc = pd.DataFrame(data=d2_coordinates)
df2_loc.index = df2['ID']
value = pd.DataFrame(cdist(df1_loc.values, df2_loc.values, 'euclidean'),
index=df1_loc.index, columns=df2_loc.index)
return value
def river_intersection(gdf,resolution):
'''
Check which lines in the adjacency matrix cross the rivers and assign
a very high weight to those lines
:param gdf:
:return:
'''
print('begin river intersection')
n = gdf['X'].size
weight_columns_x1 = np.repeat(gdf['X'].values[:,np.newaxis], n,1)
weight_columns_y1 = np.repeat(gdf['Y'].values[:,np.newaxis], n,1)
weight_columns_x2 = np.repeat(gdf['X'].values[np.newaxis,:], n,0)
weight_columns_y2 = np.repeat(gdf['Y'].values[np.newaxis,:], n,0)
weight_columns_x1_res = np.reshape(weight_columns_x1, (n*n, 1), order='F')
weight_columns_x2_res = np.reshape(weight_columns_x2, (n*n, 1), order='F')
weight_columns_y1_res = np.reshape(weight_columns_y1, (n*n, 1), order='F')
weight_columns_y2_res = np.reshape(weight_columns_y2, (n*n, 1), order='F')
df=pd.DataFrame()
df['x1'] = weight_columns_x1_res[:,0]
df['x2'] = weight_columns_x2_res[:,0]
df['y1'] = weight_columns_y1_res[:,0]
df['y2'] = weight_columns_y2_res[:,0]
# todo-> very slow passage, need to speed it up, no sense to compute it each time,
#todo -> take only short lines in a predefined buffer around rivers
#import rivers, intersect them according to the area considered
#create a buffer around them
#filter geodf to take only point that are closer than 1.5*resolution to the river
# it would be better to associate weights in advance,
df['2d_dist'] = ((df['x1']-df['x2'])**2+(df['y1']-df['y2'])**2)**0.5
# select only short lines and create linestrings
df_short_lines = df.loc[(df['2d_dist']<resolution * 1.5) &(df['2d_dist']>0)]
df_short_lines['geometry'] = df.apply(
lambda x: LineString([(x['x1'], x['y1']), (x['x2'], x['y2'])]), axis=1)
geodf = gpd.GeoDataFrame(df_short_lines, geometry='geometry')
# todo -> automatize this step!!!
geodf.crs = 'epsg:22287'
case_study='test_3'
dir='Case studies/'+case_study
# intersect short lines
test_inters = gpd.read_file('C:/Users/silvi/Progetti Github/Gisele_development/Case studies/test_3/Input/rivers.shp')
a = np.empty(shape=(geodf['geometry'].size, test_inters['geometry'].size))
for i, row in test_inters.iterrows():
a[:, i] = geodf['geometry'].intersects(row['geometry'])
a_tot = np.amax(a, 1)
geodf['intersection'] = a_tot
df['a_tot'] = 0
df.loc[geodf.index,'a_tot'] = geodf['intersection']
matrix = df['a_tot'].values.reshape((n, n), order='F') * 100
# df['geometry']=df.apply(lambda x: LineString([(x['x1'], x['y1']),(x['x2'], x['y2']) ]),axis=1)
# geodf= gpd.GeoDataFrame(df,geometry='geometry')
# geodf.crs = 'epsg:32737'
#
# test_inters=gpd.read_file('test_inters.shp')
# a=np.empty(shape =(geodf['geometry'].size,test_inters['geometry'].size))
# for i, row in test_inters.iterrows():
# a[:,i] = geodf['geometry'].intersects(row['geometry'])
# a_tot = np.amax(a, 1)
# geodf['intersection']=a_tot
# matrix=a_tot.reshape((n, n), order='F')*100
return matrix
def river_intersection(graph_gdf,box,graph,rivers):
#todo ->create the rivers geodf
rivers_clipped = gpd.clip(rivers, box) # box needs to be gdf with same crs as rivers
graph_gdf.loc[graph_gdf[rivers_clipped['geometry'].intersects(graph_gdf['geometry'])],'Cost'] = \
graph_gdf.loc[graph_gdf[rivers_clipped['geometry'].intersects(graph_gdf['geometry'])],'Cost']*5
graph_gdf['inters'] =''
for i, row in graph_gdf.iterrows():
graph_gdf.loc[i,'inters'] = rivers_clipped['geometry'].intersects(row['geometry'])
graph_gdf.loc[graph_gdf['inters']==True,'Cost'] = graph_gdf.loc[graph_gdf['inters']==True,'Cost']*10
graph_intersect =graph_gdf[graph_gdf['inters']==True]
for i,row in graph_intersect.iterrows():
graph[row['ID1']][row['ID2']]['weight'] = row['Cost']
return (graph,graph_gdf)
def cost_matrix(gdf, dist_3d_matrix, line_bc,resolution,Rivers_option):
"""
Creates the cost matrix in €/km by finding the average weight between
two points and then multiplying by the distance and the line base cost.
:param gdf: Geodataframe being analyzed
:param dist_3d_matrix: 3D distance matrix of all points [meters]
:param line_bc: line base cost for line deployment [€/km]
:return value: Cost matrix of all the points present in the gdf [€]
"""
# Altitude distance in meters
weight = gdf['Weight'].values
n = gdf['X'].size
weight_columns = np.repeat(weight[:, np.newaxis], n, 1)
weight_rows = np.repeat(weight[np.newaxis, :], n, 0)
if Rivers_option:
river_inters =river_intersection(gdf,resolution)
total_weight = (weight_columns + weight_rows) / 2 + river_inters
else:
total_weight = (weight_columns + weight_rows) / 2
# 3D distance
value = (dist_3d_matrix * total_weight) * line_bc / 1000
return value
def line_to_points(line, df):
"""
Finds all the points of a linestring geodataframe correspondent to a
point geodataframe.
:param line: Linestring geodataframe being analyzed
:param df: Point geodataframe where the linestring nodes will be referenced
:return nodes_in_df: Point geodataframe containing all nodes of linestring
"""
nodes = list(line.ID1.astype(int)) + list(line.ID2.astype(int))
nodes = list(dict.fromkeys(nodes))
nodes_in_df = gpd.GeoDataFrame(crs=df.crs, geometry=[])
for i in nodes:
nodes_in_df = nodes_in_df.append(df[df['ID'] == i], sort=False)
nodes_in_df.reset_index(drop=True, inplace=True)
return nodes_in_df
def create_roads(gdf_roads, geo_df):
'''
Creates two geodataframes
:param gdf_roads: geodataframe with all roads in the area, as imported from OSM
:param geo_df: geodataframe with the grid of points
:return:line_gdf: point geodataframe containing verteces of the roads (in all the area)
segments: geodataframe containing all the roads segments (in all the area)
the GeoDataframes are also saves as shapefiles
'''
#w = geo_df.shape[0]
if not geo_df.empty: #in case we are just processing the roads without pre-existing grid of points
w = int(geo_df['ID'].max())+1 # this is because not all road points are included in the weighted grid of points. Basically,
#it could be 10500,10501 and then 10504. df.shape[0] will give us a bad starting point in this case-> we want to start from 10505
else:
w=0
line_vertices = pd.DataFrame(
index=pd.Series(range(w, w + len(gdf_roads.index))),
columns=['ID', 'X', 'Y', 'ID_line', 'Weight', 'Elevation'], dtype=int)
# create geodataframe with all the segments that compose the road
segments = gpd.GeoDataFrame(columns=['geometry', 'ID1', 'ID2'])
k = 0
gdf_roads.reset_index(inplace=True)
x = 0
for i, row in gdf_roads.iterrows():
for j in list(row['geometry'].coords):
line_vertices.loc[w, 'X'] = j[0]
line_vertices.loc[w, 'Y'] = j[1]
line_vertices.loc[w, 'ID_line'] = k
line_vertices.loc[w, 'ID'] = w
line_vertices.loc[w, 'Weight'] = 1
w = w + 1
k = k + 1
points_to_split = MultiPoint(
[Point(x, y) for x, y in row['geometry'].coords[1:]])
splitted = split(row['geometry'], points_to_split)
for j in splitted:
segments.loc[x, 'geometry'] = j
segments.loc[x, 'length'] = segments.loc[
x, 'geometry'].length / 1000
segments.loc[x, 'ID1'] = line_vertices[
(line_vertices['X'] == j.coords[0][0]) & (
line_vertices['Y'] == j.coords[0][1])][
'ID'].values[0]
segments.loc[x, 'ID2'] = line_vertices[
(line_vertices['X'] == j.coords[1][0]) & (
line_vertices['Y'] == j.coords[1][1])][
'ID'].values[0]
x = x + 1
print('\r' + str(i) + '/' + str(gdf_roads.index.__len__()),
sep=' ', end='', flush=True)
if not geo_df.empty:
line_vertices.loc[:, 'Elevation'] = geo_df[geo_df['Elevation']>0].Elevation.mean()
else:
line_vertices.loc[:, 'Elevation']=1000
# line_vertices.loc[:, 'Elevation'] = 300
geometry = [Point(xy) for xy in
zip(line_vertices['X'], line_vertices['Y'])]
line_gdf = gpd.GeoDataFrame(line_vertices, crs=geo_df.crs,
geometry=geometry)
#line_gdf.to_file('Output/Datasets/Roads/gdf_roads.shp')
#segments.to_file('Output/Datasets/Roads/roads_segments.shp')
#segments.crs=22287
# line_gdf.to_file('Testing_strategy/Points.shp')
# segments.to_file('Testing_strategy/lines.shp')
return line_gdf, segments
def create_roads2(gdf_roads, geo_df,crs):
'''
Creates two geodataframes
:param gdf_roads: geodataframe with all roads in the area, as imported from OSM
:param geo_df: geodataframe with the grid of points
:return:line_gdf: point geodataframe containing verteces of the roads (in all the area)
segments: geodataframe containing all the roads segments (in all the area)
the GeoDataframes are also saves as shapefiles
'''
#w = geo_df.shape[0]
if not geo_df.empty: #in case we are just processing the roads without pre-existing grid of points
w = int(geo_df['ID'].max())+1 # this is because not all road points are included in the weighted grid of points. Basically,
#it could be 10500,10501 and then 10504. df.shape[0] will give us a bad starting point in this case-> we want to start from 10505
else:
w=0
line_vertices = pd.DataFrame(
index=pd.Series(range(w, w + len(gdf_roads.index))),
columns=['ID', 'X', 'Y', 'ID_line', 'Weight', 'Elevation'], dtype=int)
# create geodataframe with all the segments that compose the road
segments = gpd.GeoDataFrame(columns=['geometry', 'ID1', 'ID2'])
k = 0
gdf_roads.reset_index(inplace=True)
x = 0
for i, row in gdf_roads.iterrows():
for j in list(row['geometry'].coords):
if not (j[0] in line_vertices['X'].to_list() and j[1] in line_vertices['Y'].to_list()):
line_vertices.loc[w, 'X'] = j[0]
line_vertices.loc[w, 'Y'] = j[1]
line_vertices.loc[w, 'ID_line'] = k
line_vertices.loc[w, 'ID'] = w
line_vertices.loc[w, 'Weight'] = 1
w = w + 1
else:
pass
#print('Double road point!')
k = k + 1
points_to_split = MultiPoint(
[Point(x, y) for x, y in row['geometry'].coords[1:]])
splitted = split(row['geometry'], points_to_split)
for j in splitted:
segments.loc[x, 'geometry'] = j
segments.loc[x, 'length'] = segments.loc[
x, 'geometry'].length / 1000
segments.loc[x, 'ID1'] = line_vertices[
(line_vertices['X'] == j.coords[0][0]) & (
line_vertices['Y'] == j.coords[0][1])][
'ID'].values[0]
segments.loc[x, 'ID2'] = line_vertices[
(line_vertices['X'] == j.coords[1][0]) & (
line_vertices['Y'] == j.coords[1][1])][
'ID'].values[0]
x = x + 1
print('\r' + str(i) + '/' + str(gdf_roads.index.__len__()),
sep=' ', end='', flush=True)
if not geo_df.empty:
line_vertices.loc[:, 'Elevation'] = geo_df[geo_df['Elevation']>0].Elevation.mean()
else:
line_vertices.loc[:, 'Elevation']=1000
# line_vertices.loc[:, 'Elevation'] = 300
geometry = [Point(xy) for xy in
zip(line_vertices['X'], line_vertices['Y'])]
line_gdf = gpd.GeoDataFrame(line_vertices, crs=crs,
geometry=geometry)
#line_gdf.to_file('Output/Datasets/Roads/gdf_roads.shp')
#segments.to_file('Output/Datasets/Roads/roads_segments.shp')
#segments.crs=22287
# line_gdf.to_file('Testing_strategy/Points.shp')
# segments.to_file('Testing_strategy/lines.shp')
return line_gdf, segments
def create_box(limits, df):
"""
Creates a delimiting box around a geodataframe.
:param limits: Linestring geodataframe being analyzed
:param df: Point geodataframe to be delimited
:return df_box: All points of df that are inside the delimited box
"""
x_min = min(limits.X)
x_max = max(limits.X)
y_min = min(limits.Y)
y_max = max(limits.Y)
dist = Point(x_min, y_min).distance(Point(x_max, y_max))
if dist < 5000:
extension = dist
elif dist < 15000:
extension = dist * 0.6
else:
extension = dist / 4
bubble = box(minx=x_min - extension, maxx=x_max + extension,
miny=y_min - extension, maxy=y_max + extension)
df_box = df[df.within(bubble)]
df_box.index = pd.Series(range(0, len(df_box.index)))
return df_box
def create_box(limits, df,resolution):
"""
Creates a delimiting box around a geodataframe.
:param limits: Linestring geodataframe being analyzed
:param df: Point geodataframe to be delimited
:return df_box: All points of df that are inside the delimited box
"""
x_min = min(limits.X)
x_max = max(limits.X)
y_min = min(limits.Y)
y_max = max(limits.Y)
dist = Point(x_min, y_min).distance(Point(x_max, y_max))
if dist < 5*resolution:
extension = dist
elif dist < 15*resolution:
extension = dist * 0.6
else:
extension = dist / 4
bubble = box(minx=x_min - extension, maxx=x_max + extension,
miny=y_min - extension, maxy=y_max + extension)
df_box = df[df.within(bubble)]
df_box.index = pd.Series(range(0, len(df_box.index)))
return df_box
def edges_to_line(path, df, edges_matrix):
"""
Transforms a list of NetworkX graph edges into a linestring geodataframe
based on a input point geodataframe
:param path: NetworkX graph edges sequence
:param df: Point geodataframe to be used as reference
:param edges_matrix: Matrix containing the cost to connect a pair of points
:return line: Linestring geodataframe containing point IDs and its cost
:return line_points: All points of df that are part of the linestring
"""
steps = len(path)
line = gpd.GeoDataFrame(index=range(0, steps),
columns=['ID1', 'ID2', 'Cost', 'geometry'],
crs=df.crs)
line_points = []
for h in range(0, steps):
line.at[h, 'geometry'] = LineString(
[(df.loc[path[h][0], 'X'],
df.loc[path[h][0], 'Y']),
(df.loc[path[h][1], 'X'],
df.loc[path[h][1], 'Y'])])
# int here is necessary to use the command .to_file
line.at[h, 'ID1'] = int(df.loc[path[h][0], 'ID'])
line.at[h, 'ID2'] = int(df.loc[path[h][1], 'ID'])
line.at[h, 'Cost'] = int(edges_matrix.loc[df.loc[path[h][0], 'ID'],
df.loc[path[h][1], 'ID']])
line_points.append(list(df.loc[path[h], 'ID']))
line.drop(line[line['Cost'] == 0].index, inplace=True)
line.Cost = line.Cost.astype(int)
return line, line_points
def load(clusters_list, grid_lifetime, input_profile,gisele_folder, case_study):
"""
Reads the input daily load profile from the input csv. Reads the number of
years of the project and the demand growth from the data.dat file of
Micehele. Then it multiplies the load profile by the Clusters' peak load
and append values to create yearly profile composed of 12 representative
days.
:param grid_lifetime: Number of years the grid will operate
:param clusters_list: List of clusters ID numbers
:return load_profile: Cluster load profile for the whole period
:return years: Number of years the microgrid will operate
:return total_energy: Energy provided by the grid in its lifetime [kWh]
"""
l()
print("5. Microgrid Sizing")
l()
case_folder = gisele_folder + '/Case studies/' + case_study
data_michele = pd.read_table(gisele_folder+"/gisele/michele/Inputs/data.dat", sep="=",
header=None)
print("Creating load profile for each cluster..")
daily_profile = pd.DataFrame(index=range(1, 25),
columns=clusters_list.Cluster)
for column in daily_profile:
daily_profile.loc[:, column] = \
(input_profile.loc[:, 'Hourly Factor']
* float(clusters_list.loc[clusters_list['Cluster']==column, 'Load [kW]'])).values
rep_days = int(data_michele.loc[0, 1].split(';')[0])
grid_energy = daily_profile.append([daily_profile] * 364,
ignore_index=True)
# append 11 times since we are using 12 representative days in a year
load_profile = daily_profile.append([daily_profile] * (rep_days - 1),
ignore_index=True)
years = int(data_michele.loc[1, 1].split(';')[0])
demand_growth = float(data_michele.loc[87, 1].split(';')[0])
daily_profile_new = daily_profile
# appending for all the years considering demand growth
for i in range(grid_lifetime - 1):
daily_profile_new = daily_profile_new.multiply(1 + demand_growth)
if i < (years - 1):
load_profile = load_profile.append([daily_profile_new] * rep_days,
ignore_index=True)
grid_energy = grid_energy.append([daily_profile_new] * 365,
ignore_index=True)
total_energy = pd.DataFrame(index=clusters_list.Cluster,
columns=['Energy'])
for cluster in clusters_list.Cluster:
total_energy.loc[cluster, 'Energy'] = \
grid_energy.loc[:, cluster].sum().round(2)
print("Load profile created")
total_energy.to_csv(case_folder +'/Intermediate/Microgrid/Grid_energy.csv')
return load_profile, years, total_energy
def shift_timezone(df, shift):
"""
Move the values of a dataframe with DateTimeIndex to another UTC zone,
adding or removing hours.
:param df: Dataframe to be analyzed
:param shift: Amount of hours to be shifted
:return df: Input dataframe with values shifted in time
"""
if shift > 0:
add_hours = df.tail(shift)
df = pd.concat([add_hours, df], ignore_index=True)
df.drop(df.tail(shift).index, inplace=True)
elif shift < 0:
remove_hours = df.head(abs(shift))
df = pd.concat([df, remove_hours], ignore_index=True)
df.drop(df.head(abs(shift)).index, inplace=True)
return df
def sizing(load_profile, clusters_list, geo_df_clustered, wt, mg_types, gisele_folder,case_study):
"""
Imports the solar and wind production from the RenewablesNinja api and then
Runs the optimization algorithm MicHEle to find the best microgrid
configuration for each Cluster.
:param load_profile: Load profile of all clusters during all years
:param clusters_list: List of clusters ID numbers
:param geo_df_clustered: Point geodataframe with Cluster identification
:param wt: Wind turbine model used for computing the wind velocity
:param mg_types: number of times to evaluate microgrids in each cluster.
renewables fraction in michele changes accordingly
:return mg: Dataframe containing the information of the Clusters' microgrid
"""
case_folder = gisele_folder + '/Case studies/' + case_study
geo_df_clustered = geo_df_clustered.to_crs(4326)
mg = {}
# mg = pd.DataFrame(index=clusters_list.index,
# columns=['Cluster','PV [kW]', 'Wind [kW]', 'Hydro [kW]'
# 'Diesel [kW]',
# 'BESS [kWh]', 'Inverter [kW]',
# 'Investment Cost [kEUR]', 'OM Cost [kEUR]',
# 'Replace Cost [kEUR]', 'Total Cost [kEUR]',
# 'Energy Demand [MWh]', 'Energy Produced [MWh]',
# 'LCOE [EUR/kWh]','CO2 [kg]', 'Unavailability [MWh/y]'],
# dtype=float)
for i in range(mg_types):
mg[i] = pd.DataFrame(index=clusters_list.index,
columns=['Cluster','Renewable fraction index', 'PV [kW]', 'Wind [kW]', 'Diesel [kW]',
'BESS [kWh]', 'Inverter [kW]',
'Investment Cost [kEUR]', 'OM Cost [kEUR]',
'Replace Cost [kEUR]', 'Total Cost [kEUR]',
'Energy Demand [MWh]', 'Energy Produced [MWh]',
'LCOE [EUR/kWh]','CO2 [kg]', 'Unavailability [MWh/y]'],
dtype=float)
#save useful values from michele input data
with open(gisele_folder+'/gisele/michele/Inputs/data.json') as f:
input_michele = json.load(f)
proj_lifetime = input_michele['num_years']
num_typ_days = input_michele['num_days']
clusters = clusters_list.Cluster
for index in range(len(clusters)): # fix because the Clusters are starting from 1 instead of 0
# try:
cluster_n = clusters[index]
l()
print('Creating the optimal Microgrid for Cluster ' + str(cluster_n))
l()
load_profile_cluster = load_profile.loc[:, cluster_n]
lat = geo_df_clustered[geo_df_clustered['Cluster']
== cluster_n].geometry.y.values[0]
lon = geo_df_clustered[geo_df_clustered['Cluster']
== cluster_n].geometry.x.values[0]
all_angles = | pd.read_csv(gisele_folder+'/general_input/TiltAngles.csv') | pandas.read_csv |
import argparse
import pandas as pd
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--query',
type=str,
help='path to the query csv')
parser.add_argument('-g', '--gallery',
type=str,
help='path to the gallery csv')
parser.add_argument('-d', '--distmat',
type=str,
help='path to distance matrix txt')
parser.add_argument('-o', '--output',
type=str,
help='output file name and directory')
return parser.parse_args()
def get_retrieved_labels(g_mapping, dist_vec):
order_list = np.argsort(dist_vec)
return np.array([
g_mapping[x]
for x in order_list
if x in g_mapping
])
def nearest_neighbor(q_label, retrieved_labels):
return int(q_label == retrieved_labels[0])
def first_tier(q_label, retrieved_labels):
n_relevant_objs = (retrieved_labels == q_label).sum()
retrieved_1st_tier = retrieved_labels[:n_relevant_objs]
return (retrieved_1st_tier == q_label).mean()
def second_tier(q_label, retrieved_labels):
n_relevant_objs = (retrieved_labels == q_label).sum()
retrieved_2nd_tier = retrieved_labels[:2*n_relevant_objs]
return (retrieved_2nd_tier == q_label).mean() * 2
def mean_average_precision(q_label, retrieved_labels):
score = 0.0
num_hits = 0.0
for i, p in enumerate(retrieved_labels):
if p == q_label and p not in retrieved_labels[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
return score
# Parse arguments
args = parse_args()
# Generate mapping of gallery ids to their corresponding label
g_df = pd.read_csv(args.gallery)
g_mapping = {
id: label
for id, label in zip(g_df['obj_id'].values, g_df['class_id'].values)
}
# Load dataset and distance matrix
q_df = pd.read_csv(args.query)
# Load predicted distance matrix
dist_mtx = np.loadtxt(args.distmat)
# Define metrics
METRICS = ['MAP', 'NN', 'FT', 'ST']
metric_compute = {
'NN': nearest_neighbor,
'FT': first_tier,
'ST': second_tier,
'MAP': mean_average_precision,
}
# Compute for each query
score = {
metric_id: [
metric_compute[metric_id](
q_label,
get_retrieved_labels(g_mapping, dist_mtx[qid])
)
for qid, q_label in zip(q_df['obj_id'].values, q_df['class_id'].values)
]
for metric_id in METRICS
}
# Output to dataframe
q_df = pd.concat([q_df, | pd.DataFrame(score) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import ipaddress
import codecs
import time
import pandas as pd
import urllib3
from urllib3 import util
from classifier4gyoithon.GyoiClassifier import DeepClassifier
from classifier4gyoithon.GyoiExploit import Metasploit
from classifier4gyoithon.GyoiReport import CreateReport
from util import Utilty
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Identify product name using signature.
def identify_product(categoy, target_url, response, utility):
product_list = []
reason_list = []
full_path = os.path.dirname(os.path.abspath(__file__))
file_name = 'signature_' + categoy + '.txt'
try:
with codecs.open(os.path.join(full_path + '/signatures/', file_name), 'r', 'utf-8') as fin:
matching_patterns = fin.readlines()
for pattern in matching_patterns:
items = pattern.replace('\r', '').replace('\n', '').split('@')
keyword_list = []
product = items[0]
signature = items[1]
list_match = re.findall(signature, response, flags=re.IGNORECASE)
if len(list_match) != 0:
# Output result (header)
keyword_list.append(list_match)
utility.print_message(OK, 'category : {}'.format(categoy))
utility.print_message(OK, 'product : {}'.format(product))
utility.print_message(OK, 'reason : {}'.format(keyword_list))
utility.print_message(OK, 'target url : {}'.format(target_url))
utility.print_message(NONE, '-' * 42)
product_list.append(product)
reason_list.append(keyword_list)
except Exception as err:
utility.print_exception(err, '{}'.format(err))
return product_list, reason_list
# Classifier product name using signatures.
def classifier_signature(ip_addr, port, target_url, response, log_file, utility):
utility.print_message(NOTE, 'Analyzing gathered HTTP response using Signature.')
ip_list = []
port_list = []
vhost_list = []
judge_list = []
version_list = []
reason_list = []
scan_type_list = []
ua_list = []
http_ver_list = []
ssl_list = []
sni_list = []
url_list = []
log_list = []
product_list = []
for category in ['os', 'web', 'framework', 'cms']:
products, keywords = identify_product(category, target_url, response, utility)
for product, keyword in zip(products, keywords):
ip_list.append(ip_addr)
port_list.append(port)
vhost_list.append(ip_addr)
judge_list.append(category + ':' + str(product))
version_list.append('-')
reason_list.append(keyword)
scan_type_list.append('[ip]')
ua_list.append('-')
http_ver_list.append('HTTP/1.1')
ssl_list.append('-')
sni_list.append('-')
url_list.append(target_url)
log_list.append(log_file)
product_list.append(product)
if len(product_list) == 0:
utility.print_message(WARNING, 'Product Not Found.')
return []
# logging.
series_ip = | pd.Series(ip_list) | pandas.Series |
import pymongo
from sqlalchemy import create_engine
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import re
import pandas as pd
import logging
s = SentimentIntensityAnalyzer()
# Establish a connection to the MongoDB server
client = pymongo.MongoClient(host="mongodb", port=27017)
# Select the database you want to use withing the MongoDB server
db = client.twitter
docs = db.tweets.find()
def clean_tweets(tweet):
"""clean the data with queries"""
tweet = re.sub('@[A-Za-z0-9]+', '', tweet) #removes @mentions
tweet = re.sub('#', '', tweet) #removes hashtag symbol
tweet = re.sub('RT\s', '', tweet) #removes RT to announce retweet
tweet = re.sub('https?:\/\/\S+', '', tweet) #removes most URLs
return tweet
def extract(docs):
""" Read data from MongoDB, clean them and return the entries"""
clean_entries = []
for doc in docs:
text = doc['text']
entry = clean_tweets(text)
clean_entries.append(entry)
return clean_entries
pg = create_engine('postgresql://postgres:password@postgresdb:5432/twitter', echo=True)
pg.execute('''
CREATE TABLE IF NOT EXISTS tweets (
text VARCHAR(500),
sentiment NUMERIC
);
''')
def transform(entries):
"""perform Sentiment Analysis and return entries with sentiments"""
for entry in entries:
logging.critical(entry)
sentiment = s.polarity_scores(entry)
score = sentiment['compound']
query = "INSERT INTO tweets VALUES (%s, %s);"
pg.execute(query, (entry, score))
print(sentiment)
entries = extract(docs)
transform(entries)
df = pd.DataFrame(entries, columns=['tweet_text'])
def load(df):
"""store the results in a dataframe and in a csv file"""
pol_scores = df['tweet_text'].apply(s.polarity_scores).apply(pd.Series)
df= | pd.concat([df, pol_scores], axis=1) | pandas.concat |
import os
import shutil
from glob import glob
import numpy as np
import pandas as pd
import pickle
def data_loader(data_path):
data = pd.read_csv(data_path, index_col=0)
data.index = | pd.to_datetime(data.index) | pandas.to_datetime |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# TODO: Modify DB to fix 1084
from unittest import TestCase, main
from datetime import datetime
from os import close, remove
from os.path import join, basename, exists
from tempfile import mkstemp
import pandas as pd
from qiita_core.util import qiita_test_checker
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_db.exceptions import (QiitaDBError, QiitaDBUnknownIDError,
QiitaDBStatusError, QiitaDBLookupError)
from qiita_db.study import Study, StudyPerson
from qiita_db.user import User
from qiita_db.util import get_mountpoint, get_count
from qiita_db.data import BaseData, RawData, PreprocessedData, ProcessedData
from qiita_db.metadata_template import PrepTemplate
@qiita_test_checker()
class BaseDataTests(TestCase):
"""Tests the BaseData class"""
def test_init(self):
"""Raises an error if trying to instantiate the base data"""
with self.assertRaises(IncompetentQiitaDeveloperError):
BaseData(1)
@qiita_test_checker()
class RawDataTests(TestCase):
"""Tests the RawData class"""
def setUp(self):
fd, self.seqs_fp = mkstemp(suffix='_seqs.fastq')
close(fd)
fd, self.barcodes_fp = mkstemp(suffix='_barcodes.fastq')
close(fd)
self.filetype = 2
self.filepaths = [(self.seqs_fp, 1), (self.barcodes_fp, 2)]
_, self.db_test_raw_dir = get_mountpoint('raw_data')[0]
with open(self.seqs_fp, "w") as f:
f.write("\n")
with open(self.barcodes_fp, "w") as f:
f.write("\n")
self._clean_up_files = []
# Create some new PrepTemplates
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
self.pt1 = PrepTemplate.create(metadata, Study(1), "16S")
self.pt2 = PrepTemplate.create(metadata, Study(1), "18S")
self.prep_templates = [self.pt1, self.pt2]
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates all the rows in the DB for the raw data"""
# Check that the returned object has the correct id
exp_id = get_count("qiita.raw_data") + 1
obs = RawData.create(self.filetype, self.prep_templates,
self.filepaths)
self.assertEqual(obs.id, exp_id)
# Check that the raw data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.raw_data WHERE raw_data_id=%d" % exp_id)
# raw_data_id, filetype, link_filepaths_status
self.assertEqual(obs, [[exp_id, 2, 'idle']])
# Check that the raw data has been correctly linked with the prep
# templates
sql = """SELECT prep_template_id
FROM qiita.prep_template
WHERE raw_data_id = %s
ORDER BY prep_template_id"""
obs = self.conn_handler.execute_fetchall(sql, (exp_id,))
self.assertEqual(obs, [[self.pt1.id], [self.pt2.id]])
# Check that the files have been copied to right location
exp_seqs_fp = join(self.db_test_raw_dir,
"%d_%s" % (exp_id, basename(self.seqs_fp)))
self.assertTrue(exists(exp_seqs_fp))
self._clean_up_files.append(exp_seqs_fp)
exp_bc_fp = join(self.db_test_raw_dir,
"%d_%s" % (exp_id, basename(self.barcodes_fp)))
self.assertTrue(exists(exp_bc_fp))
self._clean_up_files.append(exp_bc_fp)
# Check that the filepaths have been correctly added to the DB
top_id = self.conn_handler.execute_fetchone(
"SELECT count(1) FROM qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (top_id - 1, top_id))
exp_seqs_fp = "%d_%s" % (exp_id, basename(self.seqs_fp))
exp_bc_fp = "%d_%s" % (exp_id, basename(self.barcodes_fp))
# filepath_id, path, filepath_type_id
exp = [[top_id - 1, exp_seqs_fp, 1, '852952723', 1, 5],
[top_id, exp_bc_fp, 2, '852952723', 1, 5]]
self.assertEqual(obs, exp)
# Check that the raw data have been correctly linked with the filepaths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.raw_filepath WHERE raw_data_id=%d" % exp_id)
# raw_data_id, filepath_id
self.assertEqual(obs, [[exp_id, top_id - 1], [exp_id, top_id]])
def test_create_error(self):
with self.assertRaises(QiitaDBError):
RawData.create(self.filetype, [PrepTemplate(1)], self.filepaths)
def test_get_filepaths(self):
"""Correctly returns the filepaths to the raw files"""
rd = RawData(1)
obs = rd.get_filepaths()
exp = [
(1, join(self.db_test_raw_dir, '1_s_G1_L001_sequences.fastq.gz'),
"raw_forward_seqs"),
(2, join(self.db_test_raw_dir,
'1_s_G1_L001_sequences_barcodes.fastq.gz'), "raw_barcodes")]
self.assertEqual(obs, exp)
def test_studies(self):
"""Correctly returns the study ids"""
rd = RawData(1)
self.assertEqual(rd.studies, [1])
def test_data_types(self):
"""Correctly returns the data_types of raw_data"""
rd = RawData(1)
self.assertEqual(rd.data_types(), ["18S"])
def test_data_types_id(self):
"""Correctly returns the data_types of raw_data"""
rd = RawData(1)
self.assertEqual(rd.data_types(ret_id=True), [2])
def test_filetype(self):
rd = RawData(1)
self.assertEqual(rd.filetype, "FASTQ")
def test_prep_templates(self):
rd = RawData(1)
self.assertEqual(rd.prep_templates, [1])
def test_link_filepaths_status(self):
rd = RawData(1)
self.assertEqual(rd.link_filepaths_status, 'idle')
def test_link_filepaths_status_setter(self):
rd = RawData(1)
self.assertEqual(rd.link_filepaths_status, 'idle')
rd._set_link_filepaths_status('linking')
self.assertEqual(rd.link_filepaths_status, 'linking')
rd._set_link_filepaths_status('unlinking')
self.assertEqual(rd.link_filepaths_status, 'unlinking')
rd._set_link_filepaths_status('failed: error')
self.assertEqual(rd.link_filepaths_status, 'failed: error')
def test_link_filepaths_status_setter_error(self):
rd = RawData(1)
with self.assertRaises(ValueError):
rd._set_link_filepaths_status('not a valid status')
def test_is_preprocessed(self):
self.assertTrue(RawData(1)._is_preprocessed())
rd = RawData.create(self.filetype, self.prep_templates, self.filepaths)
self.assertFalse(rd._is_preprocessed())
def test_clear_filepaths(self):
rd = RawData.create(self.filetype, [self.pt1], self.filepaths)
self.assertTrue(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.raw_filepath "
"WHERE raw_data_id=%s)", (rd.id,))[0])
# add files to clean before cleaning the filepaths
study_id = rd.studies[0]
path_for_removal = join(get_mountpoint("uploads")[0][1], str(study_id))
self._clean_up_files = [join(path_for_removal,
basename(f).split('_', 1)[1])
for _, f, _ in rd.get_filepaths()]
# cleaning the filepaths
rd.clear_filepaths()
self.assertFalse(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.raw_filepath "
"WHERE raw_data_id=%s)", (rd.id,))[0])
def test_clear_filepaths_error(self):
with self.assertRaises(QiitaDBError):
RawData(1).clear_filepaths()
def test_exists(self):
self.assertTrue(RawData.exists(1))
self.assertFalse(RawData.exists(1000))
def test_delete_error_no_exists(self):
# the raw data doesn't exist
with self.assertRaises(QiitaDBUnknownIDError):
RawData.delete(1000, 0)
def test_delete_error_raw_data_not_linked(self):
# the raw data and the prep template id are not linked
with self.assertRaises(QiitaDBError):
RawData.delete(1, self.pt2.id)
def test_delete_error_prep_template_no_exists(self):
# the prep template does not exist
with self.assertRaises(QiitaDBError):
RawData.delete(1, 1000)
def test_delete_error_linked_files(self):
# the raw data has linked files
with self.assertRaises(QiitaDBError):
RawData.delete(1, 1)
def test_delete(self):
rd = RawData.create(self.filetype, self.prep_templates,
self.filepaths)
sql_pt = """SELECT prep_template_id
FROM qiita.prep_template
WHERE raw_data_id = %s
ORDER BY prep_template_id"""
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [[self.pt1.id], [self.pt2.id]])
# This delete call will only unlink the raw data from the prep template
RawData.delete(rd.id, self.pt2.id)
# Check that it successfully unlink the raw data from pt2
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [[self.pt1.id]])
self.assertEqual(self.pt2.raw_data, None)
# If we try to remove the RawData now, it should raise an error
# because it still has files attached to it
with self.assertRaises(QiitaDBError):
RawData.delete(rd.id, self.pt1.id)
# Clear the files so we can actually remove the RawData
study_id = rd.studies[0]
path_for_removal = join(get_mountpoint("uploads")[0][1], str(study_id))
self._clean_up_files.extend([join(path_for_removal,
basename(f).split('_', 1)[1])
for _, f, _ in rd.get_filepaths()])
rd.clear_filepaths()
RawData.delete(rd.id, self.pt1.id)
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [])
# Check that all expected rows have been deleted
sql = """SELECT EXISTS(
SELECT * FROM qiita.raw_filepath
WHERE raw_data_id = %s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (rd.id,))[0])
sql = """SELECT EXISTS(
SELECT * FROM qiita.raw_data
WHERE raw_data_id=%s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (rd.id,))[0])
def test_status(self):
rd = RawData(1)
s = Study(1)
self.assertEqual(rd.status(s), 'private')
# Since the status is inferred from the processed data, change the
# status of the processed data so we can check how it changes in the
# preprocessed data
pd = ProcessedData(1)
pd.status = 'public'
self.assertEqual(rd.status(s), 'public')
# Check that new raw data has sandbox as status since no
# processed data exists for them
rd = RawData.create(self.filetype, self.prep_templates, self.filepaths)
self.assertEqual(rd.status(s), 'sandbox')
def test_status_error(self):
# Let's create a new study, so we can check that the error is raised
# because the new study does not have access to the raw data
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
s = Study.create(User('<EMAIL>'), "Fried chicken microbiome",
[1], info)
rd = RawData(1)
with self.assertRaises(QiitaDBStatusError):
rd.status(s)
@qiita_test_checker()
class PreprocessedDataTests(TestCase):
"""Tests the PreprocessedData class"""
def setUp(self):
self.prep_template = PrepTemplate(1)
self.study = Study(1)
self.params_table = "preprocessed_sequence_illumina_params"
self.params_id = 1
fd, self.fna_fp = mkstemp(suffix='_seqs.fna')
close(fd)
fd, self.qual_fp = mkstemp(suffix='_seqs.qual')
close(fd)
self.filepaths = [(self.fna_fp, 4), (self.qual_fp, 5)]
_, self.db_test_ppd_dir = get_mountpoint(
'preprocessed_data')[0]
self.ebi_submission_accession = "EBI123456-A"
self.ebi_study_accession = "EBI123456-B"
with open(self.fna_fp, "w") as f:
f.write("\n")
with open(self.qual_fp, "w") as f:
f.write("\n")
self._clean_up_files = []
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates all the rows in the DB for preprocessed data"""
# Check that the returned object has the correct id
obs = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
self.assertEqual(obs.id, 3)
# Check that the preprocessed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_data WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, preprocessed_params_table,
# preprocessed_params_id, submitted_to_insdc_status,
# ebi_submission_accession, ebi_study_accession, data_type_id,
# link_filepaths_status, vamps_status, processing_status
exp = [[3, "preprocessed_sequence_illumina_params", 1,
'not submitted', "EBI123456-A", "EBI123456-B", 2, 'idle',
'not submitted', 'not_processed']]
self.assertEqual(obs, exp)
# Check that the preprocessed data has been linked with its study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_preprocessed_data WHERE "
"preprocessed_data_id=3")
exp = [[1, 3]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_fna_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.fna_fp))
self.assertTrue(exists(exp_fna_fp))
self._clean_up_files.append(exp_fna_fp)
exp_qual_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.qual_fp))
self.assertTrue(exists(exp_qual_fp))
self._clean_up_files.append(exp_qual_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (obs_id - 1, obs_id))
exp_fna_fp = "3_%s" % basename(self.fna_fp)
exp_qual_fp = "3_%s" % basename(self.qual_fp)
# filepath_id, path, filepath_type_id
exp = [[obs_id - 1, exp_fna_fp, 4, '852952723', 1, 3],
[obs_id, exp_qual_fp, 5, '852952723', 1, 3]]
self.assertEqual(obs, exp)
def test_create_data_type_only(self):
# Check that the returned object has the correct id
obs = PreprocessedData.create(self.study, self.params_table,
self.params_id, self.filepaths,
data_type="18S")
self.assertEqual(obs.id, 3)
# Check that the preprocessed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_data WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, preprocessed_params_table,
# preprocessed_params_id, submitted_to_insdc_status,
# ebi_submission_accession, ebi_study_accession, data_type_id,
# link_filepaths_status, vamps_status, processing_status
exp = [[3, "preprocessed_sequence_illumina_params", 1,
'not submitted', None, None, 2, 'idle', 'not submitted',
'not_processed']]
self.assertEqual(obs, exp)
# Check that the preprocessed data has been linked with its study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_preprocessed_data WHERE "
"preprocessed_data_id=3")
exp = [[1, 3]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_fna_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.fna_fp))
self.assertTrue(exists(exp_fna_fp))
self._clean_up_files.append(exp_fna_fp)
exp_qual_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.qual_fp))
self.assertTrue(exists(exp_qual_fp))
self._clean_up_files.append(exp_qual_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (obs_id - 1, obs_id))
exp_fna_fp = "3_%s" % basename(self.fna_fp)
exp_qual_fp = "3_%s" % basename(self.qual_fp)
# filepath_id, path, filepath_type_id
exp = [[obs_id - 1, exp_fna_fp, 4, '852952723', 1, 3],
[obs_id, exp_qual_fp, 5, '852952723', 1, 3]]
self.assertEqual(obs, exp)
# Check that the preprocessed data have been correctly
# linked with the filepaths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_filepath WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, filepath_id
self.assertEqual(obs, [[3, obs_id - 1], [3, obs_id]])
def test_delete_basic(self):
"""Correctly deletes a preprocessed data"""
# testing regular delete
ppd = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
PreprocessedData.delete(ppd.id)
# testing that the deleted preprocessed data can't be instantiated
with self.assertRaises(QiitaDBUnknownIDError):
PreprocessedData(ppd.id)
# and for completeness testing that it raises an error if ID
# doesn't exist
with self.assertRaises(QiitaDBUnknownIDError):
PreprocessedData.delete(ppd.id)
# testing that we can not remove cause the preprocessed data != sandbox
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(1)
def test_delete_advanced(self):
# testing that we can not remove cause preprocessed data has been
# submitted to EBI or VAMPS
ppd = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
# fails due to VAMPS submission
ppd.update_vamps_status('success')
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(ppd.id)
ppd.update_vamps_status('failed')
# fails due to EBI submission
ppd.update_insdc_status('success', 'AAAA', 'AAAA')
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(ppd.id)
def test_create_error_dynamic_table(self):
"""Raises an error if the preprocessed_params_table does not exist"""
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "foo", self.params_id,
self.filepaths, data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "preprocessed_foo",
self.params_id, self.filepaths,
data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "foo_params", self.params_id,
self.filepaths, data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "preprocessed_foo_params",
self.params_id, self.filepaths,
data_type="18S")
def test_create_error_data_type(self):
with self.assertRaises(QiitaDBLookupError):
PreprocessedData.create(self.study,
"preprocessed_sequence_illumina_params",
self.params_id, self.filepaths,
data_type="Metabolomics")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study,
"preprocessed_sequence_illumina_params",
self.params_id, self.filepaths,
data_type="Metabolomics",
prep_template=self.prep_template)
def test_get_filepaths(self):
"""Correctly returns the filepaths to the preprocessed files"""
ppd = PreprocessedData(1)
obs = ppd.get_filepaths()
exp = [(3, join(self.db_test_ppd_dir, '1_seqs.fna'),
"preprocessed_fasta"),
(4, join(self.db_test_ppd_dir, '1_seqs.qual'),
"preprocessed_fastq"),
(5, join(self.db_test_ppd_dir, '1_seqs.demux'),
"preprocessed_demux")]
self.assertItemsEqual(obs, exp)
def test_processed_data(self):
"""Correctly returns the processed data id"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.processed_data, [1])
def test_prep_template(self):
"""Correctly returns the prep template"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.prep_template, 1)
def test_study(self):
"""Correctly returns the study"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.study, 1)
def test_ebi_submission_accession(self):
"""Correctly returns the ebi_submission_accession"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.ebi_submission_accession, 'EBI123456-AA')
def test_ebi_ebi_study_accession(self):
"""Correctly returns the ebi_study_accession"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.ebi_study_accession, 'EBI123456-BB')
def test_set_ebi_submission_accession(self):
new = PreprocessedData.create(
self.study, self.params_table, self.params_id, self.filepaths,
prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
new.ebi_submission_accession = 'EBI12345-CC'
self.assertEqual(new.ebi_submission_accession, 'EBI12345-CC')
def test_ebi_study_accession(self):
new = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
new.ebi_study_accession = 'EBI12345-DD'
self.assertEqual(new.ebi_study_accession, 'EBI12345-DD')
def test_submitted_to_insdc_status(self):
"""submitted_to_insdc_status works correctly"""
# False case
pd = PreprocessedData(1)
self.assertEqual(pd.submitted_to_insdc_status(), 'submitting')
# True case
pd = PreprocessedData(2)
self.assertEqual(pd.submitted_to_insdc_status(), 'not submitted')
def test_update_insdc_status(self):
"""Able to update insdc status"""
pd = PreprocessedData(1)
self.assertEqual(pd.submitted_to_insdc_status(), 'submitting')
pd.update_insdc_status('failed')
self.assertEqual(pd.submitted_to_insdc_status(), 'failed')
pd.update_insdc_status('success', 'foo', 'bar')
self.assertEqual(pd.submitted_to_insdc_status(), 'success')
self.assertEqual(pd.ebi_study_accession, 'foo')
self.assertEqual(pd.ebi_submission_accession, 'bar')
with self.assertRaises(ValueError):
pd.update_insdc_status('not valid state')
with self.assertRaises(ValueError):
pd.update_insdc_status('success', 'only one accession')
def test_data_type(self):
"""Correctly returns the data_type of preprocessed_data"""
pd = ProcessedData(1)
self.assertEqual(pd.data_type(), "18S")
def test_data_type_id(self):
"""Correctly returns the data_type of preprocessed_data"""
pd = ProcessedData(1)
self.assertEqual(pd.data_type(ret_id=True), 2)
def test_link_filepaths_status(self):
ppd = PreprocessedData(1)
self.assertEqual(ppd.link_filepaths_status, 'idle')
def test_link_filepaths_status_setter(self):
ppd = PreprocessedData(1)
self.assertEqual(ppd.link_filepaths_status, 'idle')
ppd._set_link_filepaths_status('linking')
self.assertEqual(ppd.link_filepaths_status, 'linking')
ppd._set_link_filepaths_status('unlinking')
self.assertEqual(ppd.link_filepaths_status, 'unlinking')
ppd._set_link_filepaths_status('failed: error')
self.assertEqual(ppd.link_filepaths_status, 'failed: error')
def test_link_filepaths_status_setter_error(self):
ppd = PreprocessedData(1)
with self.assertRaises(ValueError):
ppd._set_link_filepaths_status('not a valid status')
def test_insdc_status(self):
ppd = PreprocessedData(1)
# verifying current value
self.assertEqual(ppd.submitted_to_insdc_status(), 'submitting')
# changing value and then verifying new value
ppd.update_insdc_status('failed')
self.assertEqual(ppd.submitted_to_insdc_status(), 'failed')
# checking failure
with self.assertRaises(ValueError):
ppd.update_insdc_status('not a valid status')
def test_vamps_status(self):
ppd = PreprocessedData(1)
# verifying current value
self.assertEqual(ppd.submitted_to_vamps_status(), 'not submitted')
# changing value and then verifying new value
ppd.update_vamps_status('failed')
self.assertEqual(ppd.submitted_to_vamps_status(), 'failed')
# checking failure
with self.assertRaises(ValueError):
ppd.update_vamps_status('not a valid status')
def test_processing_status(self):
"""processing_status works correctly"""
# Processed case
ppd = PreprocessedData(1)
self.assertEqual(ppd.processing_status, 'not_processed')
# not processed case
ppd = PreprocessedData.create(self.study, self.params_table,
self.params_id, self.filepaths,
data_type="18S")
self.assertEqual(ppd.processing_status, 'not_processed')
def test_processing_status_setter(self):
"""Able to update the processing status"""
ppd = PreprocessedData.create(self.study, self.params_table,
self.params_id, self.filepaths,
data_type="18S")
self.assertEqual(ppd.processing_status, 'not_processed')
ppd.processing_status = 'processing'
self.assertEqual(ppd.processing_status, 'processing')
ppd.processing_status = 'processed'
self.assertEqual(ppd.processing_status, 'processed')
state = 'failed: some error message'
ppd.processing_status = state
self.assertEqual(ppd.processing_status, state)
def test_processing_status_setter_valueerror(self):
"""Raises an error if the processing status is not recognized"""
ppd = PreprocessedData.create(self.study, self.params_table,
self.params_id, self.filepaths,
data_type="18S")
with self.assertRaises(ValueError):
ppd.processing_status = 'not a valid state'
def test_exists(self):
self.assertTrue(PreprocessedData.exists(1))
self.assertFalse(PreprocessedData.exists(1000))
def test_status(self):
ppd = PreprocessedData(1)
self.assertEqual(ppd.status, 'private')
# Since the status is inferred from the processed data, change the
# status of the processed data so we can check how it changes in the
# preprocessed data
pd = ProcessedData(1)
pd.status = 'public'
self.assertEqual(ppd.status, 'public')
# Check that new preprocessed data has sandbox as status since no
# processed data exists for them
ppd = PreprocessedData.create(self.study, self.params_table,
self.params_id, self.filepaths,
data_type="16S")
self.assertEqual(ppd.status, 'sandbox')
@qiita_test_checker()
class ProcessedDataTests(TestCase):
"""Tests the ProcessedData class"""
def setUp(self):
self.preprocessed_data = PreprocessedData(1)
self.params_table = "processed_params_uclust"
self.params_id = 1
fd, self.biom_fp = mkstemp(suffix='_table.biom')
close(fd)
self.filepaths = [(self.biom_fp, 6)]
self.date = datetime(2014, 5, 29, 12, 24, 51)
_, self.db_test_pd_dir = get_mountpoint(
'processed_data')[0]
with open(self.biom_fp, "w") as f:
f.write("\n")
self._clean_up_files = []
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates all the rows in the DB for the processed data"""
# Check that the returned object has the correct id
obs = ProcessedData.create(self.params_table, self.params_id,
self.filepaths,
preprocessed_data=self.preprocessed_data,
processed_date=self.date)
self.assertEqual(obs.id, 2)
# Check that the processed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.processed_data WHERE processed_data_id=2")
# processed_data_id, processed_params_table, processed_params_id,
# processed_date, data_type_id, link_filepaths_status,
# processed_data_status_id
exp = [[2, "processed_params_uclust", 1, self.date, 2, 'idle', 4]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_biom_fp = join(self.db_test_pd_dir,
"2_%s" % basename(self.biom_fp))
self.assertTrue(exists(exp_biom_fp))
self._clean_up_files.append(exp_biom_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d" % obs_id)
exp_biom_fp = "2_%s" % basename(self.biom_fp)
# Filepath_id, path, filepath_type_id
exp = [[obs_id, exp_biom_fp, 6, '852952723', 1, 4]]
self.assertEqual(obs, exp)
# Check that the processed data have been correctly linked
# with the fileapths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.processed_filepath WHERE processed_data_id=2")
# processed_data_id, filepath_id
self.assertEqual(obs, [[2, obs_id]])
# Check that the processed data have been correctly linked with the
# study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_processed_data WHERE "
"processed_data_id=2")
# study_id, processed_data
self.assertEqual(obs, [[1, 2]])
# Check that the processed data have been correctly linked with the
# preprocessed data
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_processed_data WHERE "
"processed_data_id=2")
# preprocessed_data_id, processed_Data_id
self.assertEqual(obs, [[1, 2]])
def test_delete(self):
"""Correctly deletes a processed data"""
# testing regular delete
pd = ProcessedData.create(self.params_table, self.params_id,
self.filepaths,
preprocessed_data=self.preprocessed_data,
processed_date=self.date)
ProcessedData.delete(pd.id)
# testing that it raises an error if ID doesn't exist
with self.assertRaises(QiitaDBUnknownIDError):
ProcessedData.delete(pd.id)
# testing that we can not remove cause the processed data != sandbox
with self.assertRaises(QiitaDBStatusError):
ProcessedData.delete(1)
# testing that we can not remove cause processed data has analyses
pd = ProcessedData(1)
pd.status = 'sandbox'
with self.assertRaises(QiitaDBError):
ProcessedData.delete(1)
def test_create_no_date(self):
"""Correctly adds a processed data with no date on it"""
# All the other settings have been already tested on test_create
# here we will only check that the code added a good date
before = datetime.now()
ProcessedData.create(self.params_table, self.params_id, self.filepaths,
preprocessed_data=self.preprocessed_data)
after = datetime.now()
obs = self.conn_handler.execute_fetchone(
"SELECT processed_date FROM qiita.processed_data WHERE "
"processed_data_id=2")[0]
# Make sure that we clean up the environment
exp_biom_fp = join(self.db_test_pd_dir,
"2_%s" % basename(self.biom_fp))
self._clean_up_files.append(exp_biom_fp)
self.assertTrue(before <= obs <= after)
def test_create_w_study(self):
"""Correctly adds a processed data passing a study"""
obs = ProcessedData.create(self.params_table, self.params_id,
self.filepaths, study=Study(1),
processed_date=self.date, data_type="18S")
# Check that the processed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.processed_data WHERE processed_data_id=2")
# processed_data_id, processed_params_table, processed_params_id,
# processed_date, data_type_id, link_filepaths_status,
# processed_data_status_id
exp = [[2, "processed_params_uclust", 1, self.date, 2, 'idle', 4]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_biom_fp = join(self.db_test_pd_dir,
"2_%s" % basename(self.biom_fp))
self.assertTrue(exists(exp_biom_fp))
self._clean_up_files.append(exp_biom_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d" % obs_id)
exp_biom_fp = "2_%s" % basename(self.biom_fp)
# Filepath_id, path, filepath_type_id
exp = [[obs_id, exp_biom_fp, 6, '852952723', 1, 4]]
self.assertEqual(obs, exp)
# Check that the processed data have been correctly linked
# with the fileapths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.processed_filepath WHERE processed_data_id=2")
# processed_data_id, filepath_id
self.assertTrue(obs, [[2, 10]])
# Check that the processed data have been correctly linked with the
# study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_processed_data WHERE "
"processed_data_id=2")
# study_id, processed_data
self.assertEqual(obs, [[1, 2]])
def test_create_params_table_error(self):
"""Raises an error if the processed_params_table does not exist"""
with self.assertRaises(IncompetentQiitaDeveloperError):
ProcessedData.create("foo", self.params_id, self.filepaths,
preprocessed_data=self.preprocessed_data)
with self.assertRaises(IncompetentQiitaDeveloperError):
ProcessedData.create("processed_params_foo", self.params_id,
self.filepaths,
preprocessed_data=self.preprocessed_data)
with self.assertRaises(IncompetentQiitaDeveloperError):
ProcessedData.create("processed_params_", self.params_id,
self.filepaths,
preprocessed_data=self.preprocessed_data)
def test_create_no_preprocessed_no_study_error(self):
with self.assertRaises(IncompetentQiitaDeveloperError):
ProcessedData.create(self.params_table, self.params_id,
self.filepaths)
def test_create_preprocessed_and_study_error(self):
with self.assertRaises(IncompetentQiitaDeveloperError):
ProcessedData.create(self.params_table, self.params_id,
self.filepaths,
preprocessed_data=self.preprocessed_data,
study=Study(1))
def test_create_preprocessed_and_data_type_error(self):
with self.assertRaises(IncompetentQiitaDeveloperError):
ProcessedData.create(self.params_table, self.params_id,
self.filepaths,
preprocessed_data=self.preprocessed_data,
data_type="Metabolomics",)
def test_create_no_preprocessed_and_study_error(self):
with self.assertRaises(IncompetentQiitaDeveloperError):
ProcessedData.create(self.params_table, self.params_id,
self.filepaths)
def test_get_filepath(self):
"""Correctly returns the filepaths to the processed files"""
# check the test data
pd = ProcessedData(1)
obs = pd.get_filepaths()
exp = [(9, join(self.db_test_pd_dir,
'1_study_1001_closed_reference_otu_table.biom'), "biom")]
self.assertEqual(obs, exp)
def test_get_filepath_ids(self):
pd = ProcessedData(1)
self.assertEqual(pd.get_filepath_ids(), [9])
def test_preprocessed_data(self):
"""Correctly returns the preprocessed_data"""
pd = ProcessedData(1)
self.assertEqual(pd.preprocessed_data, 1)
def test_data_type(self):
pd = ProcessedData(1)
self.assertEqual(pd.data_type(), "18S")
def test_data_type_id(self):
pd = ProcessedData(1)
self.assertEqual(pd.data_type(ret_id=True), 2)
def test_link_filepaths_status(self):
pd = ProcessedData(1)
self.assertEqual(pd.link_filepaths_status, 'idle')
def test_link_filepaths_status_setter(self):
pd = ProcessedData(1)
self.assertEqual(pd.link_filepaths_status, 'idle')
| pd._set_link_filepaths_status('linking') | pandas._set_link_filepaths_status |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/10/25 16:28
Desc: 新浪财经-所有指数-实时行情数据和历史行情数据
https://finance.sina.com.cn/realstock/company/sz399552/nc.shtml
"""
import datetime
import re
from mssdk.utils import demjson
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from mssdk.index.cons import (
zh_sina_index_stock_payload,
zh_sina_index_stock_url,
zh_sina_index_stock_count_url,
zh_sina_index_stock_hist_url,
)
from mssdk.stock.cons import hk_js_decode
def _replace_comma(x):
"""
去除单元格中的 ","
:param x: 单元格元素
:type x: str
:return: 处理后的值或原值
:rtype: str
"""
if ',' in str(x):
return str(x).replace(",", "")
else:
return x
def get_zh_index_page_count() -> int:
"""
指数的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 需要抓取的指数的总页数
:rtype: int
"""
res = requests.get(zh_sina_index_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
def stock_zh_index_spot() -> pd.DataFrame:
"""
新浪财经-指数
大量采集会被目标网站服务器封禁 IP
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 所有指数的实时行情数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = get_zh_index_page_count()
zh_sina_stock_payload_copy = zh_sina_index_stock_payload.copy()
for page in tqdm(range(1, page_count + 1)):
zh_sina_stock_payload_copy.update({"page": page})
res = requests.get(zh_sina_index_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(res.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
big_df = big_df.applymap(_replace_comma)
big_df["trade"] = big_df["trade"].astype(float)
big_df["pricechange"] = big_df["pricechange"].astype(float)
big_df["changepercent"] = big_df["changepercent"].astype(float)
big_df["buy"] = big_df["buy"].astype(float)
big_df["sell"] = big_df["sell"].astype(float)
big_df["settlement"] = big_df["settlement"].astype(float)
big_df["open"] = big_df["open"].astype(float)
big_df["high"] = big_df["high"].astype(float)
big_df["low"] = big_df["low"].astype(float)
big_df["low"] = big_df["low"].astype(float)
big_df.columns = [
'代码',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'_',
'_',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'_',
'_',
]
big_df = big_df[
[
'代码',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
]
]
return big_df
def stock_zh_index_daily(symbol: str = "sh000922") -> pd.DataFrame:
"""
新浪财经-指数-历史行情数据, 大量抓取容易封IP
https://finance.sina.com.cn/realstock/company/sh000909/nc.shtml
:param symbol: sz399998, 指定指数代码
:type symbol: str
:return: 历史行情数据
:rtype: pandas.DataFrame
"""
params = {"d": "2020_2_4"}
res = requests.get(zh_sina_index_stock_hist_url.format(symbol), params=params)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
temp_df = pd.DataFrame(dict_list)
temp_df['date'] = | pd.to_datetime(temp_df["date"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
HornMT_Dataset_Preparation
Created on Mon Dec 12 01:25:16 2021
@author: <NAME>
"""
# Import libs
import pandas as pd
# Load HornMT dataset
file_path = '/data/HornMT.xlsx'
HornMT = pd.read_excel(file_path)
#HornMT.head(1)
# Preprocess the dataframe
eng = pd.DataFrame(HornMT['eng'])
aaf = | pd.DataFrame(HornMT['aaf']) | pandas.DataFrame |
import pandas as pd
# load profile results
exp1 = pd.read_csv('./profiles/experiment1/variables_EtmEVsModel.csv')
exp2 = pd.read_csv('./profiles/experiment2/variables_EtmEVsModel.csv')
exp3 = pd.read_csv('./profiles/experiment3/variables_EtmEVsModel.csv')
# load samples
exp1_sample = pd.read_csv('./profiles/experiment1/parameters_sample.csv')
exp2_sample = | pd.read_csv('./profiles/experiment2/parameters_sample.csv') | pandas.read_csv |
from collections import Counter
import numpy as np
import pandas as pd
def neighbors_pairs(labels, neighbors):
p1, p2 = [], []
for l, ns in zip(labels, neighbors):
for n in ns:
if n > l:
p1.append(l)
p2.append(n)
return np.array([p1, p2], dtype=np.int32)
def overlap_genes(data_genes, lr_genes):
overlap_set = set([i.lower() for i in data_genes]).intersection(set([i.lower() for i in lr_genes]))
return list(overlap_set)
def balanced_pairs(pairs):
"""Generate the same number of negative pairs as positive pairs"""
pairs.iloc[:, 2] = pairs.iloc[:, 2].astype(dtype=float)
existed_pairs = [tuple(i) for i in pairs.iloc[:, [0, 1]].to_numpy()]
relationship = pairs.iloc[:, 2].values
count = Counter(relationship)
add_amount = count[1] - count.get(0, 0)
if add_amount > 0:
pos_pairs = pairs[pairs.iloc[:, 2] == 1]
p1 = pos_pairs.iloc[:, 0].values
p2 = pos_pairs.iloc[:, 1].values
neg_count = 0
neg_pairs = []
while neg_count < count[1]:
ix1, ix2 = np.random.randint(0, len(p1), 2)
p = (p1[ix1], p2[ix2])
if p not in existed_pairs:
neg_pairs.append((p1[ix1], p2[ix2], 0.0))
neg_count += 1
return pd.concat([pairs, pd.DataFrame(neg_pairs, columns=pairs.columns)]).reset_index(drop=True)
else:
return pairs
def train_test_split(pairs, partition=0.9):
"""Split the train and test dataset, ligand or receptor appear in train will not show in test
The negative pairs will be randomly generated, the number of negative pairs will be equal to positive pairs
"""
pairs = pairs.copy()
pairs.iloc[:, 2] = pairs.iloc[:, 2].astype(dtype=float)
ix = np.arange(0, len(pairs))
cut = int(len(ix) * partition)
train = pairs.iloc[:cut, :]
test = pairs.iloc[cut:, :]
train_pool = np.unique(train.iloc[:, [0, 1]].to_numpy())
clean_test = []
for i, row in test.iterrows():
p1, p2, _ = row
if (p1 not in train_pool) & (p2 not in train_pool):
clean_test.append(row)
test = | pd.DataFrame(clean_test) | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import re
import logging
import json
import math
from ConfigReader import configuration
import mysql.connector
from common import constants as constant
from mysql.connector import errorcode
from datetime import datetime
import pandas as pd
import jaydebeapi
class source(object):
def __init__(self):
logging.debug("Initiating schemaReader.source()")
def removeNewLine(self, _data):
if _data == None:
return None
else:
return _data
def readTableColumns(self, JDBCCursor, serverType = None, database = None, schema = None, table = None):
logging.debug("Executing schemaReader.readTableColumns()")
query = None
result_df = pd.DataFrame()
if serverType == constant.MSSQL:
query = "select "
query += " SchemaName = CAST((TBL.TABLE_SCHEMA) AS NVARCHAR(4000)), "
query += " TableName = CAST((TBL.TABLE_NAME) AS NVARCHAR(4000)), "
query += " TableDescription = CAST((tableProp.value) AS NVARCHAR(4000)), "
query += " ColumnName = CAST((COL.COLUMN_NAME) AS NVARCHAR(4000)), "
query += " ColumnDataType = CAST((COL.DATA_TYPE) AS NVARCHAR(4000)), "
query += " ColumnLength = COL.CHARACTER_MAXIMUM_LENGTH, "
query += " ColumnDescription = CAST((colDesc.ColumnDescription) AS NVARCHAR(4000)), "
query += " ColumnPrecision = CAST((COL.numeric_precision) AS NVARCHAR(128)), "
query += " ColumnScale = COL.numeric_scale, "
query += " IsNullable = CAST((COL.Is_Nullable) AS NVARCHAR(128)), "
query += " TableType = CAST((TBL.TABLE_TYPE) AS NVARCHAR(4000)), "
query += " CreateDate = sysTables.create_date "
query += "FROM INFORMATION_SCHEMA.TABLES TBL "
query += "INNER JOIN INFORMATION_SCHEMA.COLUMNS COL "
query += " ON COL.TABLE_NAME = TBL.TABLE_NAME "
query += " AND COL.TABLE_SCHEMA = TBL.TABLE_SCHEMA "
query += "LEFT JOIN sys.tables sysTables "
query += " ON sysTables.object_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += "LEFT JOIN sys.extended_properties tableProp "
query += " ON tableProp.major_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += " AND tableProp.minor_id = 0 "
query += " AND tableProp.name = 'MS_Description' "
query += "LEFT JOIN ( "
query += " SELECT "
query += " sc.object_id, "
query += " sc.column_id, "
query += " sc.name, "
query += " colProp.[value] AS ColumnDescription "
query += " FROM sys.columns sc "
query += " INNER JOIN sys.extended_properties colProp "
query += " ON colProp.major_id = sc.object_id "
query += " AND colProp.minor_id = sc.column_id "
query += " AND colProp.name = 'MS_Description' "
query += " ) colDesc "
query += " ON colDesc.object_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += " AND colDesc.name = COL.COLUMN_NAME "
query += "WHERE lower(TBL.TABLE_TYPE) in ('base table','view') "
query += " AND COL.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " AND COL.TABLE_NAME = '%s' "%(table)
query += "ORDER BY TBL.TABLE_SCHEMA, TBL.TABLE_NAME,COL.ordinal_position"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] in ("numeric", "decimal"):
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4],row[7], row[8] )
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
elif row[4] in ("geometry", "image", "ntext", "text", "xml"):
line_dict["SOURCE_COLUMN_TYPE"] = "%s"%(row[4])
elif row[4] == "varbinary":
if row[7] != None and row[7] > -1:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4],row[7], row[8] )
else:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[9]
line_dict["TABLE_TYPE"] = row[10]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.ORACLE:
# First determine if column ORIGIN_CON_ID exists in ALL_TAB_COMMENTS. If it does, we need to take that into consideration
oracle_OriginConId_exists = True
oracle_OriginConId = None
# query = "SELECT ORIGIN_CON_ID FROM ALL_TAB_COMMENTS WHERE 1 = 0"
query = "SELECT ORIGIN_CON_ID FROM ALL_TAB_COMMENTS "
query += "WHERE OWNER = '%s' "%(schema)
if table != None:
query += " AND TABLE_NAME = '%s' "%(table)
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
if "invalid identifier" in str(errMsg):
oracle_OriginConId_exists = False
else:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
if oracle_OriginConId_exists == True:
rowCount = 0
for row in JDBCCursor.fetchall():
oracle_OriginConId = row[0]
rowCount += 1
if rowCount != 1:
# If there are more than one originConId, it's impossible to determine what we will use. So then we go to default
oracle_OriginConId = None
query = "SELECT "
query += " ALL_TAB_COLUMNS.OWNER SCHEMA_NAME, "
query += " ALL_TAB_COLUMNS.TABLE_NAME, "
query += " ALL_TAB_COMMENTS.COMMENTS TABLE_COMMENT, "
query += " ALL_TAB_COLUMNS.COLUMN_NAME, "
query += " ALL_TAB_COLUMNS.DATA_TYPE, "
query += " ALL_TAB_COLUMNS.DATA_LENGTH, "
query += " ALL_COL_COMMENTS.COMMENTS COLUMN_COMMENT, "
query += " ALL_TAB_COLUMNS.CHAR_LENGTH, "
query += " ALL_TAB_COLUMNS.DATA_PRECISION, "
query += " ALL_TAB_COLUMNS.DATA_SCALE, "
query += " ALL_TAB_COLUMNS.NULLABLE, "
query += " ALL_OBJECTS.OBJECT_TYPE, "
query += " ALL_OBJECTS.CREATED "
query += "FROM ALL_TAB_COLUMNS ALL_TAB_COLUMNS "
query += "LEFT JOIN ALL_TAB_COMMENTS ALL_TAB_COMMENTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_TAB_COMMENTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_TAB_COMMENTS.TABLE_NAME "
if oracle_OriginConId_exists == True:
if oracle_OriginConId == None:
query += " AND ALL_TAB_COMMENTS.ORIGIN_CON_ID <= 1 "
else:
query += " AND ALL_TAB_COMMENTS.ORIGIN_CON_ID = %s "%(oracle_OriginConId)
query += "LEFT JOIN ALL_COL_COMMENTS ALL_COL_COMMENTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_COL_COMMENTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_COL_COMMENTS.TABLE_NAME "
query += " AND ALL_TAB_COLUMNS.COLUMN_NAME = ALL_COL_COMMENTS.COLUMN_NAME "
if oracle_OriginConId_exists == True:
if oracle_OriginConId == None:
query += " AND ALL_COL_COMMENTS.ORIGIN_CON_ID <= 1 "
else:
query += " AND ALL_COL_COMMENTS.ORIGIN_CON_ID = %s "%(oracle_OriginConId)
query += "LEFT JOIN ALL_OBJECTS ALL_OBJECTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_OBJECTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_OBJECTS.OBJECT_NAME "
query += " AND ALL_OBJECTS.OBJECT_TYPE IN ('TABLE', 'VIEW') "
query += "WHERE ALL_TAB_COLUMNS.OWNER = '%s' "%(schema)
if table != None:
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, ALL_TAB_COLUMNS.TABLE_NAME, ALL_TAB_COLUMNS.COLUMN_ID"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
if re.search('TIMESTAMP', row[4]) or row[4] in ("CLOB", "DATE", "LONG", "BLOB", "NCLOB", "LONG RAW"):
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
elif row[4] in ("VARCHAR", "VARCHAR2", "CHAR", "NCHAR", "NVARCHAR2"):
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[7]))
elif row[4] in ("NUMBER", "FLOAT", "BINARY_FLOAT", "BINARY_DOUBLE"):
if row[8] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
elif row[8] == 0: #("DATA_PRECISION") == 0) then use char_length
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[7]))
elif row[9]== None or row[9] == 0:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[8]))
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], int(row[8]), int(row[9]))
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[5]))
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[10]
line_dict["TABLE_TYPE"] = row[11]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[12], '%Y-%m-%d %H:%M:%S')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.MYSQL:
query = "select "
query += " c.table_schema as table_schema, "
query += " c.table_name, "
query += " t.table_comment, "
query += " c.column_name, "
query += " c.data_type, "
query += " c.character_maximum_length, "
query += " c.column_comment, "
query += " c.is_nullable, "
query += " c.numeric_precision, "
query += " c.numeric_scale, "
query += " t.table_type, "
query += " t.create_time "
query += "from information_schema.columns c "
query += "left join information_schema.tables t "
query += " on c.table_schema = t.table_schema and c.table_name = t.table_name "
query += "where c.table_schema = '%s' "%(database)
if table != None:
query += " and c.table_name = '%s' "%(table)
query += "order by c.table_schema,c.table_name, c.ordinal_position "
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "decimal":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(self.removeNewLine(row[4]), row[8], row[9])
elif row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == None or row[6] == "":
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[7]
line_dict["TABLE_TYPE"] = row[10]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_UDB:
query = "SELECT "
query += " TRIM(ST.CREATOR) as SCHEMA_NAME, "
query += " TRIM(ST.NAME) as TABLE_NAME, "
query += " TRIM(ST.REMARKS) as TABLE_COMMENT, "
query += " TRIM(SC.NAME) as SOURCE_COLUMN_NAME, "
query += " TRIM(SC.COLTYPE) SOURCE_COLUMN_TYPE, "
query += " SC.LENGTH as SOURCE_COLUMN_LENGTH, "
query += " SC.SCALE as SOURCE_COLUMN_SCALE, "
query += " TRIM(SC.REMARKS) as SOURCE_COLUMN_COMMENT, "
query += " SC.NULLS as IS_NULLABLE, "
query += " ST.TYPE as TABLE_TYPE, "
query += " ST.CTIME as CREATE_TIME "
query += "FROM SYSIBM.SYSTABLES ST "
query += "LEFT JOIN SYSIBM.SYSCOLUMNS SC "
query += " ON ST.NAME = SC.TBNAME "
query += " AND ST.CREATOR = SC.TBCREATOR "
query += "WHERE "
query += " ST.CREATOR = '%s' "%(schema)
if table != None:
query += " AND ST.NAME = '%s' "%(table)
query += "ORDER BY ST.CREATOR, ST.NAME"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "DECIMAL":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
elif row[4] in ("DOUBLE", "REAL", "SMALLINT", "DATE", "BLOB", "INTEGER", "TIMESTMP", "BIGINT", "CLOB"):
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[7] == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[10], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_AS400:
query = "SELECT "
query += " TRIM(ST.TABLE_SCHEMA) as SCHEMA_NAME, "
query += " TRIM(ST.TABLE_NAME) as TABLE_NAME, "
query += " ST.LONG_COMMENT as TABLE_COMMENT, "
query += " TRIM(SC.COLUMN_NAME) as SOURCE_COLUMN_NAME, "
query += " SC.TYPE_NAME as SOURCE_COLUMN_TYPE, "
query += " SC.COLUMN_SIZE as SOURCE_COLUMN_LENGTH, "
query += " SC.DECIMAL_DIGITS as SOURCE_COLUMN_SCALE, "
query += " SC.REMARKS as SOURCE_COLUMN_COMMENT, "
query += " SC.IS_NULLABLE, "
query += " ST.TABLE_TYPE, "
# ST.LAST_ALTERED_TIMESTAMP is not really correct, but it's the best we got
# https://www.ibm.com/support/knowledgecenter/SSAE4W_9.6.0/db2/rbafzcatsystbls.htm
query += " ST.LAST_ALTERED_TIMESTAMP "
query += "FROM QSYS2.SYSTABLES ST "
query += "LEFT JOIN SYSIBM.SQLCOLUMNS SC "
query += " ON ST.TABLE_SCHEMA = SC.TABLE_SCHEM "
query += " AND ST.TABLE_NAME= SC.TABLE_NAME "
query += "WHERE "
query += " ST.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " AND SC.TABLE_NAME = '%s' "%(table)
query += "ORDER BY ST.TABLE_SCHEMA, SC.TABLE_NAME, SC.ORDINAL_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "DECIMAL":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
elif row[4] in ("DOUBLE", "REAL", "SMALLINT", "DATE", "BLOB", "INTEGER", "TIMESTMP", "BIGINT", "CLOB"):
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if self.removeNewLine(row[7]) == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[10], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = | pd.DataFrame(rows_list) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pandas
import sys
if len(sys.argv) != 4:
print("Usage: python add_only_MS_counts_to_timeseries.py infected_int recovered_int deaths_int")
exit(1)
confirmed_df = | pandas.read_csv("data/time_series/time_series_covid-19_nrw_confirmed.csv") | pandas.read_csv |
import unittest
import canopy
import pandas as pd
import numpy as np
temperatureK = 373.15
temperatureC = 100.0
temperatureF = 212.0
temperatureK2 = 473.15
temperatureC2 = 200.0
temperatureF2 = 392.0
class UnitsTest(unittest.TestCase):
def setUp(self):
self.units = canopy.Units()
# Specific units.
def test_specific_units(self):
self.assertAlmostEqual(
self.units.convert_value_between_units(1, 'inHg', 'Pa'),
3386.39, delta=0.01);
self.assertAlmostEqual(
self.units.convert_value_between_units(1000, 'Pa', 'inHg'),
0.2953, delta=0.0001);
def test_try_get_conversion_to_si(self):
existing = self.units.try_get_conversion_to_si('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
missing = self.units.try_get_conversion_to_si('blah')
self.assertEqual(missing, None)
def test_get_conversion_to_si(self):
existing = self.units.get_conversion_to_si('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
with self.assertRaises(KeyError):
self.units.get_conversion_to_si('blah')
def test_get_conversion_to_si_or_default(self):
existing = self.units.get_conversion_to_si_or_default('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
missing = self.units.get_conversion_to_si_or_default('blah')
self.assertEqual(missing.factor, 1)
self.assertEqual(missing.offset, 0)
# FROM SI
def test_convert_value_from_si(self):
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'C'), temperatureC, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'F'), temperatureF, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'C', True), temperatureK, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'F', True), temperatureK * 9 / 5, delta=0.01)
self.assertEqual(self.units.convert_value_from_si(temperatureK, 'K'), temperatureK)
def test_convert_array_from_si(self):
data = np.array([temperatureK, temperatureK2])
data_copy = np.copy(data)
result = self.units.convert_array_from_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(np.array_equal(result, data))
self.assertTrue(np.array_equal(data, data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_array_from_si_no_conversion_required(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'K')
self.assertIs(result, data)
def test_convert_array_from_si_always_return_copy(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(np.array_equal(result, data))
def test_convert_array_from_si_inplace(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_series_from_si(self):
data = pd.Series([temperatureK, temperatureK2])
data_copy = data.copy()
result = self.units.convert_series_from_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(result.equals(data))
self.assertTrue(data.equals(data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_series_from_si_no_conversion_required(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'K')
self.assertIs(result, data)
def test_convert_series_from_si_always_return_copy(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(result.equals(data))
def test_convert_series_from_si_inplace(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
# TO SI
def test_convert_value_to_si(self):
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureC, 'C'), temperatureK, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureF, 'F'), temperatureK, delta=0.01)
self.assertEqual(self.units.convert_value_to_si(3, 'e-3'), 0.003)
self.assertEqual(self.units.convert_value_to_si(3, 'e-6'), 0.000003)
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureC, 'C', True), temperatureC, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureF, 'F', True), temperatureF * 5 / 9, delta=0.01)
self.assertEqual(self.units.convert_value_to_si(temperatureK, 'K'), temperatureK)
def test_convert_array_to_si(self):
data = np.array([temperatureF, temperatureF2])
data_copy = np.copy(data)
result = self.units.convert_array_to_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(np.array_equal(result, data))
self.assertTrue(np.array_equal(data, data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureK, delta=0.01)
self.assertAlmostEqual(result[1], temperatureK2, delta=0.01)
def test_convert_array_to_si_no_conversion_required(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_to_si(data, 'K')
self.assertIs(result, data)
def test_convert_array_to_si_always_return_copy(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_to_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(np.array_equal(result, data))
def test_convert_array_to_si_inplace(self):
data = np.array([temperatureF, temperatureF2])
result = self.units.convert_array_to_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureK, delta=0.01)
self.assertAlmostEqual(result[1], temperatureK2, delta=0.01)
def test_convert_series_to_si(self):
data = pd.Series([temperatureF, temperatureF2])
data_copy = data.copy()
result = self.units.convert_series_to_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(result.equals(data))
self.assertTrue(data.equals(data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureK, delta=0.01)
self.assertAlmostEqual(result[1], temperatureK2, delta=0.01)
def test_convert_series_to_si_no_conversion_required(self):
data = | pd.Series([temperatureK, temperatureK2]) | pandas.Series |
import sys
import pandas as pd
import argparse
import json
import numpy as np
from progressbar import ProgressBar
import copy
import os
DEFAULT_PROJECT_REPO = os.path.sep.join(__file__.split(os.path.sep)[:-2])
PROJECT_REPO_DIR = os.path.abspath(
os.environ.get('PROJECT_REPO_DIR', DEFAULT_PROJECT_REPO))
sys.path.append(os.path.join(PROJECT_REPO_DIR, 'src'))
from feature_transformation import (parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols, calc_start_and_stop_indices_from_percentiles)
def compute_mews(ts_df, args, mews_df):
id_cols = parse_id_cols(args.data_dict)
id_cols = remove_col_names_from_list_if_not_in_df(id_cols, ts_df)
feature_cols = ['systolic_blood_pressure', 'heart_rate', 'respiratory_rate', 'body_temperature']
time_col = parse_time_col(args.data_dict)
# Obtain fenceposts based on where any key differs
# Be sure keys are converted to a numerical datatype (so fencepost detection is possible)
keys_df = ts_df[id_cols].copy()
for col in id_cols:
if not | pd.api.types.is_numeric_dtype(keys_df[col].dtype) | pandas.api.types.is_numeric_dtype |
import os
import numpy as np
import pandas as pd
import shutil
def parseDataSet(data_dir, clear=False):
"""Merge Articles data of each tag of different years into one csv file
Arguments:
data_dir {str} -- path of directory where dataset is stored
Keyword Arguments:
clear {bool} -- After merging if True delete raw data (default: {False})
"""
for tag_dir in os.listdir(data_dir):
df_temp = pd.DataFrame()
tag_dir_path = os.path.join(data_dir, tag_dir)
dest_path = os.path.join(data_dir, f"{tag_dir}.csv")
print(f"Processing {tag_dir} tag")
for data_file in os.listdir(tag_dir_path):
file_path = os.path.join(tag_dir_path, data_file)
df = | pd.read_csv(file_path) | pandas.read_csv |
"""
Gradient boosting feature selection with preliminary scan of the hyperparameter space using the gride search method
Author: <NAME>
Email: <EMAIL>
"""
import os
import numpy as np
import pandas as pd
import joblib
import random
import seaborn as sns
import matplotlib.pyplot as plt
from copy import deepcopy
from time import time
from itertools import product
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, label_binarize
from sklearn import metrics
from sklearn.metrics import multilabel_confusion_matrix, roc_curve, roc_auc_score, max_error, \
auc, f1_score, classification_report, recall_score, precision_recall_curve, \
balanced_accuracy_score, confusion_matrix, accuracy_score, average_precision_score, \
hamming_loss, matthews_corrcoef, mean_squared_error, mean_absolute_error, r2_score
from imblearn.over_sampling import (RandomOverSampler,
SMOTE,
SMOTENC,
BorderlineSMOTE,
ADASYN)
from lightgbm.sklearn import LGBMClassifier, LGBMRegressor
from xgboost import XGBClassifier, XGBRegressor
class GBFS():
"""
Class used to select preliminary subset of features that maximizes the choice performance metric
args:
(1) path_to_file (type:str) - location of the training set
(2) path_to_save (type:str) - location to save new data files
(3) oversampled_it (type:bool) - whether to oversampled the training data; choose False if already oversampled
(4) problem (type:str) - whether it is a 'classification' or 'regression' problem
(5*) target_classes (type:int) - for classification, specify the number of target classes
return:
(1) list of features selected during GBFS
"""
def __init__(self, path_to_file, path_to_save, oversampled_it, problem, *args, **kwargs):
self.path_to_save = path_to_save
self.sample_train = joblib.load(path_to_file)
# Last column taken as the target variable or classes
self.features = self.sample_train.columns.values[:-1]
self.target = self.sample_train.columns.values[-1]
print('Name of target column: ', self.target)
print('No. of exploratory features: ', len(self.features) )
self.oversampled_it = oversampled_it
self.problem = problem
self.target_classes = kwargs.get('target_classes')
def oversample(self, df, technique, *args, **kwargs):
"""
Oversample with various technique:
(a) 'ros'
(b)'smoothed_ros'
(c)'smote'
(d)'smote_nc'
(e)'smote_borderline1'
(f)'smote_borderline2'
(g)'adasyn'
This function is embedded into the 'grid_search()' function
args:
(1) df (pandas.Dataframe) - training data
(2) technique (type:str) - oversampling technique to use
(b*) categorical_features (type:list); list of indices specifying the position of categorical columns; this is only applicable when using 'smote_nc'
return:
(1) pandas.Dataframe with oversampled data
"""
#Oversample the training set
x = df[self.features].values
y = df[self.target].values
#Different oversampling techniques
if technique == 'ros':
os = RandomOverSampler()
elif technique == 'smoothed_ros':
os = RandomOverSampler(shrinkage=0.15)
elif technique == 'smote':
os = SMOTE()
elif technique == 'smote_nc':
self.categorical_features = kwargs.get('categorical_features')
os = SMOTENC(categorical_features=categorical_features, k_neighbors=5)
elif technique == 'smote_borderline1':
os = BorderlineSMOTE(k_neighbors=3, m_neighbors=15, kind='borderline-1')
elif technique == 'smote_borderline2':
os = BorderlineSMOTE(k_neighbors=3, m_neighbors=15, kind='borderline-2')
elif technique == 'adasyn':
os = ADASYN()
# Fit on data
x_oversampled, y_oversampled = os.fit_resample(x, y)
# Create pandas.Dataframe
oversampled_train = pd.concat([pd.DataFrame(data=x_oversampled), pd.DataFrame(data=y_oversampled, columns=[self.target])], axis=1)
# Add column names
oversampled_train.columns = df.columns
print(' No. of rows in training set after oversampling:', len(oversampled_train))
return oversampled_train
def grid_search(self, model, params, stratify, cv_folds, oversample_technique, *args, **kwargs):
"""
Perform grid search to conduct a preliminary search of the hyperparameter space
This function takes either raw training data or oversampled training data (as specified during initialization i.e. 'oversample_it'),
Note 20% of training set is used as out-of-sample validation set
Oversample with various technique:
(a) 'ros'
(b)'smoothed_ros'
(c)'smote'
(d)'smote_nc'
(e)'smote_borderline1'
(f)'smote_borderline2'
(g)'adasyn'
args:
(1) model (sklearn.estimator) - the model to be optimised
(2) params (type:int or float) - hyperparameter values
(3) stratify (type:bool) - whether to stratify data while splitting into training and validation sets
(4) cv_folds (type:int) - number of cross validation
(4) oversample_technique (type:str) - oversample method to employ
Returns:
(1) model fitted with the optimal hyperparameters
"""
# Define the lowest score
if self.problem == 'classification':
max_score = 0
elif self.problem == 'regression':
max_score = float('-inf')
#Permutations based on the values of the hyperparameters
params_perm = list(product(*params.values()))
print('Total no. of permutations:', len(params_perm))
for i, chosen_param in enumerate(params_perm):
print('\n')
print(' (' + str(i+1) + ' of ' + str(len(params_perm)) + ')', ' Attempt: ', list(zip(params.keys(), chosen_param)))
metric_score = []
#Set the parameters for the chosen estimator/model
for p, v in zip(params.keys(), chosen_param):
model.set_params(**{p: v})
#Oversample data and train the model. Compute mean performance metric using out-of-sample validation set and the chosen CV fold
for fold in range(cv_folds):
#Each fold will adjust the random_state
if stratify == True:
sample_tr, sample_va = train_test_split(
self.sample_train,
test_size = 0.2,
random_state = fold + random.randint(0, 100),
stratify = self.sample_train[self.target].to_list()
)
elif stratify == False:
sample_tr, sample_va = train_test_split(
self.sample_train,
test_size = 0.2,
random_state = fold + random.randint(0, 100)
)
print(' No. of rows in the training set:', len(sample_tr))
if self.problem == 'classification':
if self.oversampled_it == True:
print(' Oversampling training data...')
# Oversample data
oversampled_tr = self.oversample(
df = sample_tr,
technique = oversample_technique
)
# Scale features
scaling = MinMaxScaler(feature_range=(0, 1)) #Range can be adjusted
sample_tr_features = pd.DataFrame(
scaling.fit_transform(oversampled_tr[self.features].values),
columns=oversampled_tr[self.features].columns,
index=oversampled_tr[self.features].index
)
sample_va_features = pd.DataFrame(
scaling.fit_transform(sample_va[self.features].values),
columns=sample_va[self.features].columns,
index=sample_va[self.features].index
)
oversampled_tr = pd.concat([sample_tr_features, oversampled_tr[self.target]], axis=1)
sample_va = pd.concat([sample_va_features, sample_va[self.target]], axis=1)
# Fit to model
model.fit(oversampled_tr[self.features], oversampled_tr[self.target].values.ravel())
elif self.oversampled_it == False:
# Scale features
scaling = MinMaxScaler(feature_range=(0, 1)) # Range can be adjusted
sample_tr_features = pd.DataFrame(
scaling.fit_transform(sample_tr[self.features].values),
columns=sample_tr[self.features].columns,
index=sample_tr[self.features].index
)
sample_va_features = pd.DataFrame(
scaling.fit_transform(sample_va[self.features].values),
columns=sample_va[self.features].columns,
index=sample_va[self.features].index
)
sample_tr = pd.concat([sample_tr_features, sample_tr[self.target]], axis=1)
sample_va = pd.concat([sample_va_features, sample_va[self.target]], axis=1)
# Fit to model
model.fit(sample_tr[self.features], sample_tr[self.target].values.ravel())
try:
score = roc_auc_score(
sample_va[self.target],
model.predict_proba(sample_va[self.features]),
average='weighted',
multi_class="ovr"
)
metric_score += [score]
except:
score = roc_auc_score(
sample_va[self.target],
model.predict(sample_va[self.features]),
average='weighted',
multi_class="ovr"
)
metric_score += [score]
elif self.problem == 'regression':
#Scale features
scaling = MinMaxScaler(feature_range=(0, 1)) #Range can be adjusted
sample_tr_features = pd.DataFrame(
scaling.fit_transform(sample_tr[self.features].values),
columns=sample_tr[self.features].columns,
index=sample_tr[self.features].index
)
sample_va_features = pd.DataFrame(
scaling.fit_transform(sample_va[self.features].values),
columns=sample_va[self.features].columns,
index=sample_va[self.features].index
)
sample_tr = pd.concat([sample_tr_features, sample_tr[self.target]], axis=1)
sample_va = | pd.concat([sample_va_features, sample_va[self.target]], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 29 11:50:26 2016
@author: sglyon
"""
#%% read data
import pandas as pd
url1 = "https://raw.githubusercontent.com/NYUDataBootcamp/"
url2 = "Materials/master/Data/fall16_ug_pace_raw.csv"
url = url1 + url2
df = pd.read_csv(url)
#%% rename columns
df.columns = ["time", "experience", "pace", "help"]
#%% clean up dates
df["time"] = | pd.to_datetime(df["time"]) | pandas.to_datetime |
from functools import partial, singledispatch
from types import MappingProxyType
from typing import Collection, Union
import joblib
import nltk
import pandas as pd
from ndg_tools._validation import _validate_strings
from ndg_tools.language.processors.tokens import fetch_stopwords, remove_stopwords
from ndg_tools.language.settings import DEFAULT_TOKENIZER
from ndg_tools.language.utils import chain_processors
from ndg_tools.typing import CallableOnStr, Documents, Tokenizer
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from sklearn.utils import deprecated
from tqdm.notebook import tqdm
NGRAM_FINDERS = MappingProxyType(
{
2: nltk.BigramCollocationFinder,
3: nltk.TrigramCollocationFinder,
4: nltk.QuadgramCollocationFinder,
}
)
"""Mapping for selecting ngram-finder."""
NGRAM_METRICS = MappingProxyType(
{
2: nltk.BigramAssocMeasures,
3: nltk.TrigramAssocMeasures,
4: nltk.QuadgramAssocMeasures,
}
)
"""Mapping for selecting ngram scoring object."""
def categorical_ngrams(
data: DataFrame,
*,
text: str,
cat: Union[str, Series],
n: int = 2,
metric: str = "pmi",
tokenizer: Tokenizer = DEFAULT_TOKENIZER,
preprocessor: CallableOnStr = None,
stopwords: Union[str, Collection[str]] = None,
min_freq: int = 0,
select_best: float = None,
fuse_tuples: bool = False,
sep: str = " ",
n_jobs=None,
):
get_ngrams = partial(
scored_ngrams,
n=n,
metric=metric,
stopwords=stopwords,
preprocessor=preprocessor,
tokenizer=tokenizer,
min_freq=min_freq,
fuse_tuples=fuse_tuples,
sep=sep,
)
get_ngrams = joblib.delayed(get_ngrams)
workers = joblib.Parallel(n_jobs=n_jobs, prefer="processes")
# Get aligned labels and group frames, ignoring empty
labels, groups = zip(
*[(lab, grp) for lab, grp in data.groupby(cat) if not grp.empty]
)
# Search for ngrams with optional multiprocessing
cat_ngrams = workers(get_ngrams(grp.loc[:, text]) for grp in groups)
# Turn each scored ngram Series into a DataFrame
cat_ngrams = [
ng.reset_index().assign(**{cat: lab})
for lab, ng in zip(labels, cat_ngrams)
if not ng.empty
]
# Select top scores in each category
if select_best is not None:
for i, group in enumerate(cat_ngrams):
cut = group.score.quantile(1 - select_best)
cat_ngrams[i] = group.loc[group.score >= cut]
# Stack frames vertically and renumber
return pd.concat(cat_ngrams).reset_index(drop=True)
@deprecated("Use `categorical_ngrams` instead.")
def stratified_ngrams(
data: DataFrame,
*,
text: str,
cat: Union[str, Series],
n: int = 2,
metric: str = "pmi",
tokenizer: Tokenizer = DEFAULT_TOKENIZER,
preprocessor: CallableOnStr = None,
stopwords: Union[str, Collection[str]] = None,
min_freq: int = 0,
select_best: float = None,
fuse_tuples: bool = False,
sep: str = " ",
n_jobs=None,
):
return categorical_ngrams(
data,
text=text,
cat=cat,
n=n,
metric=metric,
tokenizer=tokenizer,
preprocessor=preprocessor,
stopwords=stopwords,
min_freq=min_freq,
select_best=select_best,
fuse_tuples=fuse_tuples,
sep=sep,
n_jobs=n_jobs,
)
@singledispatch
def scored_ngrams(
docs: Documents,
n: int = 2,
metric: str = "pmi",
tokenizer: Tokenizer = DEFAULT_TOKENIZER,
preprocessor: CallableOnStr = None,
stopwords: Union[str, Collection[str]] = None,
min_freq: int = 0,
fuse_tuples: bool = False,
sep: str = " ",
) -> Series:
"""Get Series of collocations and scores.
Parameters
----------
docs : str or iterable of str
Documents to scan for ngrams.
n : int, optional
Size of collocations, by default 2.
metric : str, optional
Scoring metric to use. Valid options include:
'raw_freq', 'pmi', 'mi_like', 'likelihood_ratio',
'jaccard', 'poisson_stirling', 'chi_sq', 'student_t'.
See nltk.BigramAssocMeasures, nltk.TrigramAssocMeasures,
and nltk.QuadgramAssocMeasures for additional size-specific
options.
tokenizer : callable, optional
Callable for tokenizing docs.
preprocessor : callable, optional
Callable for preprocessing docs before tokenization, by default None.
stopwords : str or collection of str, optional
Name of known stopwords set or collection of stopwords to remove from docs.
By default None.
min_freq : int, optional
Drop ngrams below this frequency, by default 0.
fuse_tuples : bool, optional
Join ngram tuples with `sep`, by default True.
sep : str, optional
Separator to use for joining ngram tuples, by default " ".
Only relevant if `fuze_tuples=True`.
Returns
-------
Series
Series {ngrams -> scores}.
"""
_validate_strings(docs)
# Get collocation finder and measures
if not isinstance(n, int):
raise TypeError(f"Expected `n` to be int, got {type(n)}.")
if 1 < n < 5:
n = int(n)
finder = NGRAM_FINDERS[n]
measures = NGRAM_METRICS[n]()
else:
raise ValueError(f"Valid `n` values are 2, 3, and 4. Got {n}.")
pre_pipe = []
if preprocessor is not None:
# Apply preprocessing
pre_pipe.append(preprocessor)
# Tokenize
pre_pipe.append(tokenizer)
if stopwords is not None:
# Fetch stopwords if passed str
if isinstance(stopwords, str):
stopwords = fetch_stopwords(stopwords)
# Remove stopwords
pre_pipe.append(partial(remove_stopwords, stopwords=stopwords))
docs = chain_processors(docs, pre_pipe)
# Find and score collocations
ngrams = finder.from_documents(docs)
ngrams.apply_freq_filter(min_freq)
ngram_score = ngrams.score_ngrams(getattr(measures, metric))
# Put the results in a DataFrame, squeeze into Series
kind = {2: "bigram", 3: "trigram", 4: "quadgram"}[n]
ngram_score = | pd.DataFrame(ngram_score, columns=[kind, "score"]) | pandas.DataFrame |
import pandas as pd
import pytest
from maker import Board
from utils import match_count
nan = float("nan")
@pytest.mark.parametrize(('player', 'match'), [
(128, 64),
(127, 63),
(100, 36),
(65, 1),
(64, 32),
(3, 1),
(2, 1),
])
def test_match_count(player, match):
assert match_count(player) == match
def pytest_funcarg__board(request):
return Board(match_count=2, keys=["club", "region"])
@pytest.mark.parametrize(("a", "b", "expected"), [
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "a", "region": "west"}),
False),
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "b", "region": nan}),
True),
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "b", "region": "west"}),
True),
(pd.Series({"club": "a", "region": "west"}),
pd.Series({"club": "b", "region": "west"}),
False),
])
def test_valid_match(a, b, expected):
board = Board(match_count=2, keys=["club", "region"])
assert board._is_valid(a, b) is expected
@pytest.mark.parametrize(("data"), [
[pd.Series({"club": "a"}),
pd.Series({"club": "a"}),
pd.Series({"club": "a"})]
])
def test_same_club_in_a_row(data):
board = Board(match_count=10)
for d in data:
board.append(d)
assert len(board._upper) == 3
@pytest.mark.parametrize(("data"), [
[pd.Series({"club": "a", "name": "a"}),
pd.Series({"club": "a", "name": "b"}),
| pd.Series({"club": "a", "name": "c"}) | pandas.Series |
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def FPKM_statistic_options(target_file, option):
with open(target_file, 'r') as f:
data = f.readlines()
f.close()
cazyfamily_dic = {}
cazyFamily_count = {}
for row in data[2:]:
if row.startswith(option):
row = row.replace('\n', '')
row_infos = row.split(',')
split_tag = option + "="
cazyFamily_info = row_infos[0].split(split_tag)[1]
cazyFamilys = cazyFamily_info.split('+')
if cazyFamilys[0] == '-':
continue
for cazyFamily in cazyFamilys:
if cazyFamily in cazyfamily_dic.keys():
cazyFamily = cazyFamily.split('(')[0]
cazyFamily_count[cazyFamily] += 1
cazyfamily_dic[cazyFamily] = cazyfamily_dic[cazyFamily] + float(row_infos[-1])
else:
cazyFamily = cazyFamily.split('(')[0]
cazyfamily_dic[cazyFamily] = float(row_infos[-1])
cazyFamily_count[cazyFamily] = 1
cazyFamily_pairs = []
for cazyFamily in cazyfamily_dic.keys():
pair = [cazyFamily, cazyfamily_dic[cazyFamily] / cazyFamily_count[cazyFamily]]
cazyFamily_pairs.append(pair)
df = pd.DataFrame(cazyFamily_pairs)
df.to_csv('FPKM_statistic_options.csv', index=False, header=['cazyfamily_name', 'FPKM'])
def FPKM_statistic(target_file):
with open(target_file, 'r') as f:
data = f.readlines()
f.close()
FPKM_dic = {}
cazyFamily_count = {}
for row in data[2:]:
if 'FPKM' in row:
trans_info = row.split(';')[1]
if 'HMMER=-' not in trans_info:
cazyFamily_info = trans_info.split('HMMER=')[1].split('(')[0]
FPKM_value = float(row.split('FPKM \"')[1].split('\";')[0])
if cazyFamily_info in FPKM_dic:
FPKM_dic[cazyFamily_info] += FPKM_value
cazyFamily_count[cazyFamily_info] += 1
else:
FPKM_dic[cazyFamily_info] = FPKM_value
cazyFamily_count[cazyFamily_info] = 1
gff_statistic = []
for cazyFamily in FPKM_dic.keys():
processed_FPKM = FPKM_dic[cazyFamily] / cazyFamily_count[cazyFamily]
gff_statistic.append([cazyFamily, processed_FPKM])
df = | pd.DataFrame(gff_statistic) | pandas.DataFrame |
import pandas as pd
from pprint import pprint
from jellyfish import jaro_distance
import unidecode
from _Classes.PyscopusModified import ScopusModified
from _Classes.Author import Author
from _Classes.Individuals import Student, Egress
from _Classes.Indicators import Indicators
from _Funções_e_Valores.verify_authors import search_authors_list, treat_exceptions
from _Funções_e_Valores._exceptions import scopus_articles_exceptions
from _Funções_e_Valores.values import quadrennium, FILE, HAS_EVENTS, FULL_PERIOD_AUTHORS, REQUEST_SCOPUS_DATA, EGRESS, SCOPUS_APIKEY
class Data():
def __init__(self, professors, egress, students, qualis_2016, qualis_2020, qualis_2016_events, qualis_2020_events):
super(Data, self).__init__()
self.professors = professors
self.egress = egress
self.students = students
self.qualis_2016 = qualis_2016
self.qualis_2020 = qualis_2020
self.qualis_2016_events = qualis_2016_events
self.qualis_2020_events = qualis_2020_events
self.exceptions = {'Nome Trabalho':[], 'Nome Evento Cadastrado':[], 'Nome Evento Canônico':[]} # For the exceptions sheet from the excel file
self.reports = {'Author':[], 'Report':[]} # Reports by author
self.authors_dict = {"Author":[], "A/E":[]} # Dictionary of authors (Professors, Students and Egress)
columns = []
for year in quadrennium:
if year not in columns:
columns.append(year)
for col in columns:
self.authors_dict[f"20{col}"] = []
self.art_prof = | pd.DataFrame() | pandas.DataFrame |
#=======================================================================================================================
#
# ALLSorts v2 - Hierarchical Classifier Stage
# Author: <NAME>
# License: MIT
#
#=======================================================================================================================
''' --------------------------------------------------------------------------------------------------------------------
Imports
---------------------------------------------------------------------------------------------------------------------'''
''' Internal '''
from ALLSorts.common import _flatHierarchy, _pseudoCounts, message
''' External '''
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import LogisticRegression
import numpy as np
import math
import pandas as pd
from sklearn.base import clone
from joblib import Parallel, delayed
''' --------------------------------------------------------------------------------------------------------------------
Classes
---------------------------------------------------------------------------------------------------------------------'''
class HierarchicalClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, n_jobs=1, hierarchy=False, model=False, params=False):
self.n_jobs = n_jobs
self.hierarchy = hierarchy
self.params = params
self.model = model
self.fitted = {}
self.thresholds = {}
self.trained = False
# Choose model
if not self.model:
self.model = LogisticRegression(penalty="l1",
solver="liblinear",
class_weight="balanced")
#if self.params:
# self.model.set_params(**params)
def _flatHierarchy(self):
return _flatHierarchy(self.hierarchy)
def _pseudoCounts(self, X, y, name, parents):
return _pseudoCounts(X, y, name, parents, self.f_hierarchy)
def _setThresholds(self):
for subtype in self.f_hierarchy:
self.thresholds[subtype] = 0.5
def _getParents(self):
parents = []
for subtype in self.f_hierarchy:
if self.f_hierarchy[subtype]:
parents.append(subtype)
return parents
def _checkInput(self, X):
# Prepare
if isinstance(X, dict):
counts = X["counts"]
self.genes = X["genes"]
else:
counts = X.copy()
self.genes = False
return counts
def _clf(self, X, y, model, subtype, multi=False):
# Prepare new labels per this subtype
train_y = y.copy()
if multi:
train_y[~train_y.isin([subtype])] = "Others"
# Refine genes if custom feature selection
counts = X[self.genes[subtype]] if self.genes else X.copy()
fitted = model.fit(counts, train_y)
return fitted
def _recurseCLF(self, sub_hier, X, y, name="root"):
if sub_hier == False: # Recursion stop condition
return False
parents = list(sub_hier.keys())
# Create pseudo-counts based on hiearachy
X_p, y_p = self._pseudoCounts(X, y, name, parents)
subtypes = list(y_p.unique())
# OVR if multiple subtypes, single binary if 2.
if len(subtypes) > 2:
results = Parallel(n_jobs=1, prefer="threads")( \
delayed(self._clf)(X_p, y_p, clone(self.model), subtype, multi=True) \
for subtype in subtypes)
# Unpack results
for sub_pos in range(len(subtypes)):
self.fitted[subtypes[sub_pos]] = results[sub_pos]
else:
subtype = "_".join(subtypes)
fitted = self._clf(X_p, y_p, clone(self.model), subtype, multi=False)
self.fitted[subtype] = fitted
# Recurse through the hierarchy
for parent in parents:
self._recurseCLF(sub_hier[parent],
X, y, name=parent)
def _weightedProba(self, probs):
probs_weighted = probs.copy()
for parent in self.f_hierarchy:
if self.f_hierarchy[parent] != False: # has children
for child in self.f_hierarchy[parent]:
preds = probs_weighted[child].multiply(probs_weighted[parent], axis="index")
probs_weighted[child] = list(preds)
return probs_weighted
def _evalPreds(self, predicted_proba, p=False):
y_pred = []
# Define parent/child relationships
children = []
parents = []
for parent in self.f_hierarchy:
if self.f_hierarchy[parent] != False:
parents.append(parent)
children.append(self.f_hierarchy[parent])
# Check each result
for sample, probabilities in predicted_proba.iterrows():
# If unclassified, call it from the start
if probabilities["Pred"] == "":
y_pred.append("Unclassified")
continue
# Otherwise, unpack results
prediction = probabilities["Pred"][:-1] # Remove final comma
predictions = prediction.split(",")
# Cull if parent node is not successful
for i in range(0, len(children)):
pred_children = list(set(predictions).intersection(set(children[i])))
has_children = len(pred_children) > 0
if p:
if has_children and parents[i] not in predictions:
predictions = list(set(predictions).difference(set(children[i])))
elif has_children and parents[i] in predictions:
predictions.remove(parents[i])
# Groups at the moment are mutually exclusive
if len(pred_children) > 1 and len(predictions) > 1:
prob_no = probabilities[pred_children].apply(pd.to_numeric)
max_subtype = [prob_no.idxmax()]
drop_preds = list(set(prob_no.index.values).difference(set(max_subtype)))
for drop_pred in drop_preds:
try:
predictions.remove(drop_pred)
except ValueError:
continue
# Unclassified
if len(predictions) == 0:
y_pred.append("Unclassified")
# Multi-label
elif len(predictions) > 1:
y_pred.append(','.join(predictions))
# Single prediction
else:
y_pred.append(predictions[0])
return y_pred
def _filterHealthy(self, probabilities, counts):
probabilities.loc[(counts["B-ALL"] < -3) | (counts["B-ALL"] > 3)] = 0.0
return probabilities
def predict_proba(self, X, parents=False, filter_healthy=False):
'''
Retrieve the unweighted probabilities
Input: Pre-processed counts matrix
Output: DataFrame of probabilities
'''
counts = self._checkInput(X)
predicted_proba = | pd.DataFrame(index=counts.index) | pandas.DataFrame |
from io import StringIO
from string import ascii_uppercase as uppercase
import textwrap
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import (
CategoricalIndex,
MultiIndex,
Series,
date_range,
)
def test_info_categorical_column_just_works():
n = 2500
data = np.array(list("abcdefghij")).take(np.random.randint(0, 10, size=n))
s = Series(data).astype("category")
s.isna()
buf = StringIO()
s.info(buf=buf)
s2 = s[s == "d"]
buf = StringIO()
s2.info(buf=buf)
def test_info_categorical():
# GH14298
idx = CategoricalIndex(["a", "b"])
s = Series(np.zeros(2), index=idx)
buf = StringIO()
s.info(buf=buf)
@pytest.mark.parametrize("verbose", [True, False])
def test_info_series(lexsorted_two_level_string_multiindex, verbose):
index = lexsorted_two_level_string_multiindex
ser = Series(range(len(index)), index=index, name="sth")
buf = StringIO()
ser.info(verbose=verbose, buf=buf)
result = buf.getvalue()
expected = textwrap.dedent(
"""\
<class 'pandas.core.series.Series'>
MultiIndex: 10 entries, ('foo', 'one') to ('qux', 'three')
"""
)
if verbose:
expected += textwrap.dedent(
"""\
Series name: sth
Non-Null Count Dtype
-------------- -----
10 non-null int64
"""
)
expected += textwrap.dedent(
f"""\
dtypes: int64(1)
memory usage: {ser.memory_usage()}.0+ bytes
"""
)
assert result == expected
def test_info_memory():
s = Series([1, 2], dtype="i8")
buf = StringIO()
s.info(buf=buf)
result = buf.getvalue()
memory_bytes = float(s.memory_usage())
expected = textwrap.dedent(
f"""\
<class 'pandas.core.series.Series'>
RangeIndex: 2 entries, 0 to 1
Series name: None
Non-Null Count Dtype
-------------- -----
2 non-null int64
dtypes: int64(1)
memory usage: {memory_bytes} bytes
"""
)
assert result == expected
def test_info_wide():
s = Series(np.random.randn(101))
msg = "Argument `max_cols` can only be passed in DataFrame.info, not Series.info"
with pytest.raises(ValueError, match=msg):
s.info(max_cols=1)
def test_info_shows_dtypes():
dtypes = [
"int64",
"float64",
"datetime64[ns]",
"timedelta64[ns]",
"complex128",
"object",
"bool",
]
n = 10
for dtype in dtypes:
s = Series(np.random.randint(2, size=n).astype(dtype))
buf = StringIO()
s.info(buf=buf)
res = buf.getvalue()
name = f"{n:d} non-null {dtype}"
assert name in res
@pytest.mark.skipif(PYPY, reason="on PyPy deep=True doesn't change result")
def test_info_memory_usage_deep_not_pypy():
s_with_object_index = Series({"a": [1]}, index=["foo"])
assert s_with_object_index.memory_usage(
index=True, deep=True
) > s_with_object_index.memory_usage(index=True)
s_object = Series({"a": ["a"]})
assert s_object.memory_usage(deep=True) > s_object.memory_usage()
@pytest.mark.skipif(not PYPY, reason="on PyPy deep=True does not change result")
def test_info_memory_usage_deep_pypy():
s_with_object_index = Series({"a": [1]}, index=["foo"])
assert s_with_object_index.memory_usage(
index=True, deep=True
) == s_with_object_index.memory_usage(index=True)
s_object = | Series({"a": ["a"]}) | pandas.Series |
# -*- coding: utf-8 -*-
"""
To derive the MAG network and diffusion cascades we employed the tables Paper, Paper References, Author, PaperAuthorAffiliation, Fields of Study, Paper Fields of Study from MAG
https://docs.microsoft.com/en-us/academic-services/graph/reference-data-schema
Extract network and diffusion cascades from CS of MAG
"""
import pandas as pd
import os
import numpy as np
import networkx as nx
def clean_authors_by_name():
#---- Clean authors
auth = pd.read_csv("authors.txt")
auth = auth.ix[auth.columns[0,2]]
# the unique display names are 83209107 and the normalized are 60m
idx = auth.iloc[:,1].apply(lambda x:x[0]<"a" or x[0]>"z")
auth=auth[~idx]
auth.to_csv("authors.txt",index=False)
def prepare_fields():
#---- Keep the CS papers that have high confidence
f = pd.read_csv("fields.txt",sep="\t",header=None)
cs_fields = f.loc[f[4].str.contains('computer')==True,0].values
f1 = open("paper_fields.txt","r")
f2 = open("paper_fields_filtered.txt","w")
i = 0
for l in f1:
i+=1
if(i%1000000==0):
print(i)
parts = l.split("\t")
#-- check if the confidence is enough
try:
ty = int(parts[1])
conf= float(parts[2])
except:
next
if(conf>0.5):
if(ty in cs_fields):
f2.write("cs"+","+parts[0]+"\n")
f1.close()
def extract_network():
pap_auth = pd.read_csv("paper_author.txt")
pap_auth = pap_auth.drop(pap_auth.columns[0],axis=1)
pap_auth.to_csv("paper_author.txt",index=False)
fields = pd.read_csv("paper_fields_filtered.txt")
fields.columns = ["PapID","field"]
#--- Optional: Keep only the authors whose names do not have many possible IDs (name ambiguity)
to_remove = pd.read_csv("ambig_authors_papers.txt")
to_remove.columns = ["AuthID","PapID","label"]
to_remove = to_remove.loc[to_remove["label"]==0,"AuthID"].unique()
fields = pd.read_csv("paper_fields_filtered.txt")
fields.columns = ["PapID","field"]
for f in fields.field.unique():
print(f)
net_name="../network_"+f+".csv"
tmp_f = fields.loc[fields.field==f,"PapID"]
#---- First filtering (keep papers in field f)
tmp_f = pap_auth.merge(tmp_f.to_frame("PapID"),on="PapID")
#---- Second filtering (remove ambiguous authors with 1 paper)
#---- Create the edge list
tmp_f = tmp_f.merge(tmp_f,on="PapID")
tmp_f.loc[tmp_f.AuthID_x<tmp_f.AuthID_y,["AuthID_x","AuthID_y"]].to_csv(net_name,index=False)
def extract_cascades():
#----- Extract cascades
pap = pd.read_csv("papers_cs.txt",sep=";", encoding = "ISO-8859-1")
pap = pap.drop(pap.columns[[1,2,4]],axis=1)
pap["PapID"] = pap["PapID"].astype(int)
pap['Date'] = pd.to_datetime(pap["Date"]).values.astype(np.int64) // 10 ** 9
ref = pd.read_csv("references.txt",header=None)
ref.columns = ["PapID","RefID"]
auth = pd.read_csv("author_papers.txt")
auth["AuthID"] = auth["AuthID"].map(str)
auth = auth.groupby("PapID").agg(lambda x:"%s" % ', '.join(x)).reset_index()
pap_fields = | pd.read_csv("paper_fields.txt") | pandas.read_csv |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pandas as pd
import sys
from collections import Iterable
import numpy as np
import pkg_resources
import bz2
import pickle
import os
from clonesig.estimator import Estimator
from pandas.errors import EmptyDataError
from clonesig.evaluate import score_sig_1D_base
MIXTURE_THRESHOLD = 0.05
"""
folder_path = 'PhylogicNDT500/Sim_500_19_cst'
"""
folder_path = sys.argv[1]
signature_filename = 'data/sigProfiler_SBS_signatures_2018_03_28.csv'
sig = pd.read_csv(
pkg_resources.resource_stream(
'clonesig', signature_filename),
sep=',')
all_sigs = sig.columns[2:].to_list()
input_filename = '{}/input_t.tsv'.format(folder_path)
input_df = pd.read_csv(input_filename, sep='\t')
input_df = input_df.assign(signature=input_df.signature.astype(int))
true_signatures_1D_df = input_df[['mutation_id', 'signature']]
# tracksig
try:
mixture_file = pd.read_csv('{}/tracksig/tracksig_mixtures_cancertype.csv'.
format(folder_path), sep=',')
except FileNotFoundError:
print('non working')
try:
changepoint_file = pd.read_csv(
'{}/tracksig/tracksig_changepoints_cancertype.txt'.
format(folder_path), header=None, sep=' ')
changepoints_tracksig_list = changepoint_file.values[0]
except EmptyDataError:
changepoints_tracksig_list = np.array(list())
input_df = pd.read_csv('{}/input_t.tsv'.format(folder_path), sep='\t')
with open('{}/purity.txt'.format(folder_path), 'r') as f:
purity = float(f.read())
input_df = input_df.assign(mut_cn=1)
input_df = input_df.assign(vaf=input_df.var_counts /
(input_df.ref_counts + input_df.var_counts))
input_df = input_df.assign(
total_cn=lambda x: x['minor_cn'] + x['major_cn'])
input_df = input_df.assign(
vaf_cn=input_df.vaf * input_df['total_cn'] / input_df['mut_cn'])
input_df = input_df.assign(
vaf_purity=input_df.apply(
lambda x: x['vaf']/purity *
((1 - purity) * 2 + purity * x['total_cn']) /
x['mut_cn'], axis=1))
input_df.sort_values(by='vaf_purity', inplace=True)
input_df.reset_index(inplace=True, drop=True)
input_df = input_df.assign(mutation_group=lambda x: x.index//100)
nbin = len(input_df)//100
input_df_filter = input_df[input_df.mutation_group <= nbin - 1]
cluster_id_list = np.zeros(input_df_filter.mutation_group.nunique())
i = 1
for chg_point in changepoints_tracksig_list:
cluster_id_list[(chg_point - 1):] = i
i += 1
input_df_filter = input_df_filter.assign(
pred_cluster_id=input_df_filter.apply(
lambda x: int(cluster_id_list[x['mutation_group']]), axis=1))
pred_signatures = np.zeros(len(all_sigs))
filename = '{}/subMU.csv'.format(folder_path)
sub_matrix = pd.read_csv(filename, sep='\t')
mu_mat_setting = sub_matrix[sub_matrix.columns[1:]].values.T
sub_sigs = sub_matrix.columns[1:]
idx = [list(all_sigs).index(s) for s in sub_sigs]
est_sigs = mixture_file[mixture_file.columns[1:]].mean(axis=1).values
pred_signatures[idx] = est_sigs
pred_profile = est_sigs.dot(mu_mat_setting)
mut_sig = np.moveaxis(
np.repeat([mixture_file.values[:, 1:].T[input_df_filter.mutation_group.astype(int)]],
96, axis=0), [0, 1, 2], [2, 0, 1])
big_mu = np.repeat([mu_mat_setting], len(input_df_filter), axis=0)
big_everything = (mut_sig * big_mu / len(est_sigs))[np.arange(len(input_df_filter)), :, input_df_filter.trinucleotide]
signature_mut = np.argmax(big_everything, axis=1)
input_df_filter = input_df_filter.assign(signature=signature_mut)
tracksig_signatures_1D_df = input_df_filter[['mutation_id', 'signature']]
final_1D_df = pd.merge(true_signatures_1D_df, tracksig_signatures_1D_df, on='mutation_id', how='inner', suffixes=['_true', '_tracksig'])
score_1D_tracksig = score_sig_1D_base(final_1D_df.signature_true.values, final_1D_df.signature_tracksig.values.astype(int))
# tracksigfreq
try:
mixture_file = pd.read_csv('{}/tracksigfreq/tracksigfreq_mixtures_cancertype.csv'.
format(folder_path), sep=',')
except FileNotFoundError:
print('non working')
try:
changepoint_file = pd.read_csv(
'{}/tracksigfreq/tracksigfreq_changepoints_cancertype.txt'.
format(folder_path), header=None, sep=' ')
changepoints_tracksig_list = changepoint_file.values[0]
except EmptyDataError:
changepoints_tracksig_list = np.array(list())
data_df = pd.read_csv('{}/tracksigfreq/vcaf.csv'.
format(folder_path), sep='\t')
cluster_id_list = np.zeros(data_df.bin.nunique())
i = 1
for chg_point in changepoints_tracksig_list:
cluster_id_list[(chg_point - 1):] = i
i += 1
data_df = data_df.assign(
pred_cluster_id=data_df.apply(lambda x: int(cluster_id_list[x['bin']-1]),
axis=1))
J_pred = len(changepoints_tracksig_list) + 1
weights_pred = data_df.groupby('pred_cluster_id').phi.count().values/len(data_df)
phi_pred_values = data_df.groupby('pred_cluster_id').phi.mean().values
est_clonal_idx = data_df.groupby('pred_cluster_id').phi.mean().idxmax()
data_df = data_df.assign(
pred_subclonal=(data_df.pred_cluster_id != est_clonal_idx).astype(int))
pred_signatures = np.zeros(len(all_sigs))
filename = '{}/subMU.csv'.format(folder_path)
sub_matrix = | pd.read_csv(filename, sep='\t') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Generator reserve plots.
This module creates plots of reserve provision and shortage at the generation
and region level.
@author: <NAME>
"""
import logging
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import SetupSubplot, PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""reserves MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The reserves.py module contains methods that are
related to reserve provision and shortage.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def reserve_gen_timeseries(self, figure_name: str = None, prop: str = None,
start: float = None, end: float= None,
timezone: str = "", start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a generation timeseries stackplot of total cumulative reserve provision by tech type.
The code will create either a facet plot or a single plot depending on
if the Facet argument is active.
If a facet plot is created, each scenario is plotted on a separate facet,
otherwise all scenarios are plotted on a single plot.
To make a facet plot, ensure the work 'Facet' is found in the figure_name.
Generation order is determined by the ordered_gen_categories.csv.
Args:
figure_name (str, optional): User defined figure output name. Used here
to determine if a Facet plot should be created.
Defaults to None.
prop (str, optional): Special argument used to adjust specific
plot settings. Controlled through the plot_select.csv.
Opinions available are:
- Peak Demand
- Date Range
Defaults to None.
start (float, optional): Used in conjunction with the prop argument.
Will define the number of days to plot before a certain event in
a timeseries plot, e.g Peak Demand.
Defaults to None.
end (float, optional): Used in conjunction with the prop argument.
Will define the number of days to plot after a certain event in
a timeseries plot, e.g Peak Demand.
Defaults to None.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
# If not facet plot, only plot first scenario
facet=False
if 'Facet' in figure_name:
facet = True
if not facet:
Scenarios = [self.Scenarios[0]]
else:
Scenarios = self.Scenarios
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"reserves_generators_Provision",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for region in self.Zones:
self.logger.info(f"Zone = {region}")
ncols, nrows = self.set_facet_col_row_dimensions(facet,multi_scenario=Scenarios)
grid_size = ncols*nrows
excess_axs = grid_size - len(Scenarios)
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
data_tables = []
for n, scenario in enumerate(Scenarios):
self.logger.info(f"Scenario = {scenario}")
reserve_provision_timeseries = self["reserves_generators_Provision"].get(scenario)
#Check if zone has reserves, if not skips
try:
reserve_provision_timeseries = reserve_provision_timeseries.xs(region,level=self.AGG_BY)
except KeyError:
self.logger.info(f"No reserves deployed in: {scenario}")
continue
reserve_provision_timeseries = self.df_process_gen_inputs(reserve_provision_timeseries)
if reserve_provision_timeseries.empty is True:
self.logger.info(f"No reserves deployed in: {scenario}")
continue
# unitconversion based off peak generation hour, only checked once
if n == 0:
unitconversion = self.capacity_energy_unitconversion(reserve_provision_timeseries,
sum_values=True)
if prop == "Peak Demand":
self.logger.info("Plotting Peak Demand period")
total_reserve = reserve_provision_timeseries.sum(axis=1)/unitconversion['divisor']
peak_reserve_t = total_reserve.idxmax()
start_date = peak_reserve_t - dt.timedelta(days=start)
end_date = peak_reserve_t + dt.timedelta(days=end)
reserve_provision_timeseries = reserve_provision_timeseries[start_date : end_date]
Peak_Reserve = total_reserve[peak_reserve_t]
elif prop == 'Date Range':
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
reserve_provision_timeseries = reserve_provision_timeseries[start_date_range : end_date_range]
else:
self.logger.info("Plotting graph for entire timeperiod")
reserve_provision_timeseries = reserve_provision_timeseries/unitconversion['divisor']
scenario_names = pd.Series([scenario] * len(reserve_provision_timeseries),name = 'Scenario')
data_table = reserve_provision_timeseries.add_suffix(f" ({unitconversion['units']})")
data_table = data_table.set_index([scenario_names],append = True)
data_tables.append(data_table)
mplt.stackplot(reserve_provision_timeseries,
color_dict=self.PLEXOS_color_dict,
labels=reserve_provision_timeseries.columns,
sub_pos=n)
mplt.set_subplot_timeseries_format(sub_pos=n)
if prop == "Peak Demand":
axs[n].annotate('Peak Reserve: \n' + str(format(int(Peak_Reserve), '.2f')) + ' {}'.format(unitconversion['units']),
xy=(peak_reserve_t, Peak_Reserve),
xytext=((peak_reserve_t + dt.timedelta(days=0.25)), (Peak_Reserve + Peak_Reserve*0.05)),
fontsize=13, arrowprops=dict(facecolor='black', width=3, shrink=0.1))
if not data_tables:
self.logger.warning(f'No reserves in {region}')
out = MissingZoneData()
outputs[region] = out
continue
# Add facet labels
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
# Add legend
mplt.add_legend(reverse_legend=True, sort_by=self.ordered_gen)
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(region)
plt.ylabel(f"Reserve Provision ({unitconversion['units']})",
color='black', rotation='vertical', labelpad=40)
data_table_out = pd.concat(data_tables)
outputs[region] = {'fig': fig, 'data_table': data_table_out}
return outputs
def total_reserves_by_gen(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a generation stacked barplot of total reserve provision by generator tech type.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"reserves_generators_Provision",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for region in self.Zones:
self.logger.info(f"Zone = {region}")
Total_Reserves_Out = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
reserve_provision_timeseries = self["reserves_generators_Provision"].get(scenario)
#Check if zone has reserves, if not skips
try:
reserve_provision_timeseries = reserve_provision_timeseries.xs(region,level=self.AGG_BY)
except KeyError:
self.logger.info(f"No reserves deployed in {scenario}")
continue
reserve_provision_timeseries = self.df_process_gen_inputs(reserve_provision_timeseries)
if reserve_provision_timeseries.empty is True:
self.logger.info(f"No reserves deployed in: {scenario}")
continue
# Calculates interval step to correct for MWh of generation
interval_count = self.get_sub_hour_interval_count(reserve_provision_timeseries)
# sum totals by fuel types
reserve_provision_timeseries = reserve_provision_timeseries/interval_count
reserve_provision = reserve_provision_timeseries.sum(axis=0)
reserve_provision.rename(scenario, inplace=True)
Total_Reserves_Out = pd.concat([Total_Reserves_Out, reserve_provision], axis=1, sort=False).fillna(0)
Total_Reserves_Out = self.create_categorical_tech_index(Total_Reserves_Out)
Total_Reserves_Out = Total_Reserves_Out.T
Total_Reserves_Out = Total_Reserves_Out.loc[:, (Total_Reserves_Out != 0).any(axis=0)]
if Total_Reserves_Out.empty:
out = MissingZoneData()
outputs[region] = out
continue
Total_Reserves_Out.index = Total_Reserves_Out.index.str.replace('_',' ')
Total_Reserves_Out.index = Total_Reserves_Out.index.str.wrap(5, break_long_words=False)
# Convert units
unitconversion = self.capacity_energy_unitconversion(Total_Reserves_Out,
sum_values=True)
Total_Reserves_Out = Total_Reserves_Out/unitconversion['divisor']
data_table_out = Total_Reserves_Out.add_suffix(f" ({unitconversion['units']}h)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Reserves_Out.index
mplt.barplot(Total_Reserves_Out, color=self.PLEXOS_color_dict,
stacked=True,
custom_tick_labels=tick_labels)
ax.set_ylabel(f"Total Reserve Provision ({unitconversion['units']}h)",
color='black', rotation='vertical')
# Add legend
mplt.add_legend(reverse_legend=True, sort_by=self.ordered_gen)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(region)
outputs[region] = {'fig': fig, 'data_table': data_table_out}
return outputs
def reg_reserve_shortage(self, **kwargs):
"""Creates a bar plot of reserve shortage for each region in MWh.
Bars are grouped by reserve type, each scenario is plotted as a differnet color.
The 'Shortage' argument is passed to the _reserve_bar_plots() method to
create this plot.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._reserve_bar_plots("Shortage", **kwargs)
return outputs
def reg_reserve_provision(self, **kwargs):
"""Creates a bar plot of reserve provision for each region in MWh.
Bars are grouped by reserve type, each scenario is plotted as a differnet color.
The 'Provision' argument is passed to the _reserve_bar_plots() method to
create this plot.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._reserve_bar_plots("Provision", **kwargs)
return outputs
def reg_reserve_shortage_hrs(self, **kwargs):
"""creates a bar plot of reserve shortage for each region in hrs.
Bars are grouped by reserve type, each scenario is plotted as a differnet color.
The 'Shortage' argument and count_hours=True is passed to the _reserve_bar_plots() method to
create this plot.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._reserve_bar_plots("Shortage", count_hours=True)
return outputs
def _reserve_bar_plots(self, data_set: str, count_hours: bool = False,
start_date_range: str = None,
end_date_range: str = None, **_):
"""internal _reserve_bar_plots method, creates 'Shortage', 'Provision' and 'Shortage' bar
plots
Bars are grouped by reserve type, each scenario is plotted as a differnet color.
Args:
data_set (str): Identifies the reserve data set to use and pull
from the formatted h5 file.
count_hours (bool, optional): if True creates a 'Shortage' hours plot.
Defaults to False.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"reserve_{data_set}", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for region in self.Zones:
self.logger.info(f"Zone = {region}")
Data_Table_Out=pd.DataFrame()
reserve_total_chunk = []
for scenario in self.Scenarios:
self.logger.info(f'Scenario = {scenario}')
reserve_timeseries = self[f"reserve_{data_set}"].get(scenario)
# Check if zone has reserves, if not skips
try:
reserve_timeseries = reserve_timeseries.xs(region,level=self.AGG_BY)
except KeyError:
self.logger.info(f"No reserves deployed in {scenario}")
continue
interval_count = self.get_sub_hour_interval_count(reserve_timeseries)
reserve_timeseries = reserve_timeseries.reset_index(["timestamp","Type","parent"],drop=False)
# Drop duplicates to remove double counting
reserve_timeseries.drop_duplicates(inplace=True)
# Set Type equal to parent value if Type equals '-'
reserve_timeseries['Type'] = reserve_timeseries['Type'].mask(reserve_timeseries['Type'] == '-', reserve_timeseries['parent'])
reserve_timeseries.set_index(["timestamp","Type","parent"],append=True,inplace=True)
# Groupby Type
if count_hours == False:
reserve_total = reserve_timeseries.groupby(["Type"]).sum()/interval_count
elif count_hours == True:
reserve_total = reserve_timeseries[reserve_timeseries[0]>0] #Filter for non zero values
reserve_total = reserve_total.groupby("Type").count()/interval_count
reserve_total.rename(columns={0:scenario},inplace=True)
reserve_total_chunk.append(reserve_total)
if reserve_total_chunk:
reserve_out = | pd.concat(reserve_total_chunk,axis=1, sort='False') | pandas.concat |
# License: Apache-2.0
from gators.binning import QuantileDiscretizer
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
n_bins = 4
X = pd.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'C': ['a', 'b', 'c', 'd', 'e', 'f'],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X_expected = pd.DataFrame({
'A': {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
'B': {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
'C': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f'},
'D': {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
'F': {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
'A__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B__bin': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'D__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F__bin': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_int16():
n_bins = 4
X = pd.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'C': ['a', 'b', 'c', 'd', 'e', 'f'],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X[list('ABDF')] = X[list('ABDF')].astype(np.int16)
X_expected = pd.DataFrame({
'A': {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
'B': {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
'C': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f'},
'D': {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
'F': {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
'A__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B__bin': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'D__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F__bin': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
X_expected[list('ABDF')] = X_expected[list('ABDF')].astype(np.int16)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_inplace():
n_bins = 4
X = pd.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'C': ['a', 'b', 'c', 'd', 'e', 'f'],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X_expected = pd.DataFrame({
'A': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'C': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f'},
'D': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
obj = QuantileDiscretizer(n_bins, inplace=True).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_no_num():
X = pd.DataFrame({'C': ['a', 'b', 'c', 'd', 'e', 'f']})
X_expected = pd.DataFrame({'C': ['a', 'b', 'c', 'd', 'e', 'f']})
n_bins = 3
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_num():
n_bins = 4
X = pd.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X_expected = pd.DataFrame({
'A': {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
'B': {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
'D': {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
'F': {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
'A__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B__bin': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'D__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F__bin': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_num_inplace():
n_bins = 4
X = pd.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X_expected = pd.DataFrame({
'A': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'D': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
obj = QuantileDiscretizer(n_bins, inplace=True).fit(X)
return obj, X, X_expected
###
@pytest.fixture
def data_ks():
n_bins = 4
X = ks.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'C': ['a', 'b', 'c', 'd', 'e', 'f'],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X_expected = pd.DataFrame({
'A': {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
'B': {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
'C': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f'},
'D': {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
'F': {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
'A__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B__bin': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'D__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F__bin': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_int16_ks():
n_bins = 4
X = ks.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'C': ['a', 'b', 'c', 'd', 'e', 'f'],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X[list('ABDF')] = X[list('ABDF')].astype(np.int16)
X_expected = pd.DataFrame({
'A': {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
'B': {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
'C': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f'},
'D': {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
'F': {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
'A__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B__bin': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'D__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F__bin': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
X_expected[list('ABDF')] = X_expected[list('ABDF')].astype(np.int16)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_inplace_ks():
n_bins = 4
X = ks.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'C': ['a', 'b', 'c', 'd', 'e', 'f'],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X_expected = pd.DataFrame({
'A': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'C': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f'},
'D': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
obj = QuantileDiscretizer(n_bins, inplace=True).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_no_num_ks():
n_bins = 3
X = ks.DataFrame({'C': ['a', 'b', 'c', 'd', 'e', 'f']})
X_expected = pd.DataFrame({'C': ['a', 'b', 'c', 'd', 'e', 'f']})
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_num_ks():
n_bins = 4
X = ks.DataFrame({
'A': [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
'B': [1, 1, 0, 1, 0, 0],
'D': [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
'F': [3, 1, 2, 1, 2, 3]}
)
X_expected = | pd.DataFrame({
'A': {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
'B': {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
'D': {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
'F': {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
'A__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '3.0', 4: '1.0', 5: '2.0'},
'B__bin': {0: '2.0', 1: '2.0', 2: '0.0', 3: '2.0', 4: '0.0', 5: '0.0'},
'D__bin': {0: '0.0', 1: '3.0', 2: '0.0', 3: '2.0', 4: '2.0', 5: '1.0'},
'F__bin': {0: '3.0', 1: '0.0', 2: '1.0', 3: '0.0', 4: '1.0', 5: '3.0'}
})
obj = QuantileDiscretizer(n_bins).fit(X) | pandas.DataFrame |
"""
Load and process pokec social network
Data includes:
edge structure
user profiles
data from https://snap.stanford.edu/data/soc-Pokec.html
"""
import os
import numpy as np
import pandas as pd
import gzip
def _load_profiles(profile_file):
# df = pd.read_csv('filename.tar.gz', compression='gzip', header=0, sep=',', quotechar='"'
names = str.split("user_id public completion_percentage gender region last_login registration age body "
"I_am_working_in_field spoken_languages hobbies I_most_enjoy_good_food pets body_type "
"my_eyesight eye_color hair_color hair_type completed_level_of_education favourite_color "
"relation_to_smoking relation_to_alcohol sign_in_zodiac on_pokec_i_am_looking_for love_is_for_me "
"relation_to_casual_sex my_partner_should_be marital_status children relation_to_children "
"I_like_movies I_like_watching_movie I_like_music I_mostly_like_listening_to_music "
"the_idea_of_good_evening I_like_specialties_from_kitchen fun I_am_going_to_concerts "
"my_active_sports my_passive_sports profession I_like_books life_style music cars politics "
"relationships art_culture hobbies_interests science_technologies computers_internet education "
"sport movies travelling health companies_brands more")
usecols = str.split("user_id public completion_percentage gender region last_login registration age")
profiles = pd.read_csv(profile_file, names=names, index_col=False, usecols=usecols, compression='gzip', header=None, sep='\t')
profiles.set_index('user_id', inplace=True, drop=False)
return profiles
def _process_profiles(profiles):
"""
Subset the profiles to strip out attributes that are freely fillable by users
Fix datatypes for remainders
"""
# keep_attributes = str.split("user_id public completion_percentage gender region last_login registration age")
# p2=profiles[keep_attributes]
p2 = profiles
p2['region'] = p2['region'].astype('category')
p2['public'] = p2['public'].astype('category')
p2['gender'] = p2['gender'].astype('category')
p2['last_login'] = pd.to_datetime(p2['last_login'])
p2['registration'] = | pd.to_datetime(p2['registration']) | pandas.to_datetime |
import numpy as np
import pandas as pd
import datetime as dt
import os
import zipfile
from datetime import datetime, timedelta
from urllib.parse import urlparse
study_prefix = "U01"
def get_user_id_from_filename(f):
#Get user id from from file name
return(f.split(".")[3])
def get_file_names_from_zip(z, file_type=None, prefix=study_prefix):
#Extact file list
file_list = list(z.filelist)
if(filter is None):
filtered = [f.filename for f in file_list if (prefix in f.filename) and (".csv" in f.filename)]
else:
filtered = [f.filename for f in file_list if (file_type in f.filename and prefix in f.filename)]
return(filtered)
def get_data_catalog(catalog_file, data_file, data_dir, dict_dir):
dc=pd.read_csv(catalog_file)
dc=dc.set_index("Data Product Name")
dc.data_file=data_dir+data_file #add data zip file field
dc.data_dir=data_dir #add data zip file field
dc.dict_dir=dict_dir #add data distionary directory field
return(dc)
def get_data_dictionary(data_catalog, data_product_name):
dictionary_file = data_catalog.dict_dir + data_catalog.loc[data_product_name]["Data Dictionary File Name"]
dd=pd.read_csv(dictionary_file)
dd=dd.set_index("ElementName")
dd.data_file_name = data_catalog.loc[data_product_name]["Data File Name"] #add data file name pattern field
dd.name = data_product_name #add data product name field
dd.index_fields = data_catalog.loc[data_product_name]["Index Fields"] #add index fields
dd.description = data_catalog.loc[data_product_name]["Data Product Description"]
return(dd)
def get_df_from_zip(file_type,zip_file, participants):
#Get participant list from participants data frame
participant_list = list(participants["Participant ID"])
#Open data zip file
z = zipfile.ZipFile(zip_file)
#Get list of files of specified type
file_list = get_file_names_from_zip(z, file_type=file_type)
#Open file inside zip
dfs=[]
for file_name in file_list:
sid = get_user_id_from_filename(file_name)
if(sid in participant_list):
f = z.open(file_name)
file_size = z.getinfo(file_name).file_size
if file_size > 0:
df = pd.read_csv(f, low_memory=False)
df["Subject ID"] = sid
dfs.append(df)
else:
print('warning %s is empty (size = 0)' % file_name)
df = pd.concat(dfs)
return(df)
def fix_df_column_types(df, dd):
#Set Boolean/String fields to string type to prevent
#interpretation as numeric for now. Leave nans in to
#indicate missing data.
for field in list(df.keys()):
if not (field in dd.index): continue
dd_type = dd.loc[field]["DataType"]
if dd_type in ["Boolean","String","Categorical"]:
if field == 'url':
urls = df[field].values
for index, url in enumerate(urls):
parsed = urlparse(url)
df[field].values[index] = parsed.path[1:]
else:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else str(x))
elif dd_type in ["Ordinal"]:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else int(x))
elif dd_type in ["Time"]:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else pd.to_timedelta(x))
elif dd_type in ["Date"]:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else datetime.strptime(x, "%Y-%m-%d"))
elif dd_type in ["DateTime"]:
#Keep only time for now
max_length = max([len(str(x).split(':')[-1]) for x in df[field].values]) # length of last item after ':'
if max_length < 6: # this includes time with AM/PM
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else pd.to_timedelta(x[11:]))
else: # for example: 2020-06-12 23:00:1592002802
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else
pd.to_timedelta(pd.to_datetime(x[:16]).strftime("%H:%M:%S")))
#print('\n%s nlargest(10) =\n%s' % (field, df[field].value_counts().nlargest(10)))
return(df)
def get_participant_info(data_catalog):
file = data_catalog.data_dir + data_catalog.loc["Participant Information"]["Data File Name"]
df = pd.read_csv(file)
return(df)
def get_participants_by_type(data_catalog, participant_type):
pi = get_participant_info(data_catalog)
check_type = []
for type_i in pi["Participant Type"].values:
if str(type_i).find(participant_type) >= 0:
check_type.append(True)
else:
check_type.append(False)
pi = pi[check_type]
return(pi)
def crop_data(participants_df, df, b_display, b_crop_end=True):
#Crop before the intervention start date
#Set b_crop_end = True to also crop after the end date (for withdrew status)
participants_df = participants_df.set_index("Participant ID")
fields = list(df.keys())
#Create an observation indicator for an observed value in any
#of the above fields. Sort to make sure data frame is in date order
#per participant
obs_df = 0+((0+~df[fields].isnull()).sum(axis=1)>0)
obs_df.sort_index(axis=0, inplace=True,level=1)
#Get the participant ids according to the data frame
participants = list(obs_df.index.levels[0])
frames = []
for p in participants:
intervention_date = participants_df.loc[p]['Intervention Start Date']
dates = pd.to_datetime(obs_df[p].index)
#Check if there is any data for the participant
if(len(obs_df[p]))>0:
new_obs_df = obs_df[p].copy()
if str(intervention_date).lower() != "nan":
#Check if intervention date is past today's date
intervention_date = pd.to_datetime(intervention_date)
new_obs_df = new_obs_df.loc[dates >= intervention_date]
dates = pd.to_datetime(new_obs_df.index)
today = pd.to_datetime(dt.date.today())
if (intervention_date > today) and b_display:
print('{:<3} intervention date {} is past today\'s date {}'.format(
p, intervention_date.strftime('%Y-%m-%d'), today.strftime('%Y-%m-%d')))
#Crop before the intervention start date
dates_df = pd.to_datetime(df.loc[p].index)
new_df = df.loc[p].copy()
new_df = new_df.loc[dates_df >= intervention_date]
if b_crop_end:
status = participants_df.loc[p]["Participant Status"]
end_date = participants_df.loc[p]['End Date']
if status == 'withdrew':
end_date = pd.to_datetime(end_date)
dates_df = pd.to_datetime(new_df.index)
new_df = new_df.loc[dates_df <= end_date]
new_df['Subject ID'] = p
new_df = new_df.reset_index()
date_name = 'DATE'
columns = list(new_df.columns)
if date_name not in columns:
for col_name in columns:
if col_name.find('Date') >= 0:
date_name = col_name
new_df['Date'] = | pd.to_datetime(new_df[date_name]) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
#------------------------------
# Import the needed libraries
#------------------------------
import pandas as pd
import numpy as np
import csv, os, sys, re
import logging
import argparse
import gzip
logging.basicConfig(level=logging.INFO,
format='%(asctime)s : %(levelname)s : %(message)s',
datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)
'''
this file contains a VCF class to help parse VCF files
can be used by sourcing this file in python:
exec(open("path_to_script/Process_VCF.py").read())
then run:
vcf = VCF("path_to_VCF_file")
vcf.configure_vcf()
vcf.parseINFO()
vcf.parseEncoding()
vcf.fullVCF()
If you have edited a vcf and want to print the new vcf in the correct vcf format,
then run the following.
vcf.writeVCF(output = <outout path file name>)
'''
def unique(list_obj):
unique_list = []
for i in list_obj:
if i not in unique_list:
unique_list.append(i)
return unique_list
class VCF:
'''
Define the VCF object
'''
def __init__(self, vcf_path):
'''
Create properties for the new object and assign values to them
Parse the arguments given to the script
should include: The VCF file of interest
HapMap file destination
Output file location
'''
#***********************************
# create and assign the attributes
#***********************************
self.vcf_path = vcf_path
#------------------------------------------
# Read in the VCF file as a pandas dataframe and store it in the object
#------------------------------------------
logger.info("Reading VCF File")
self.vcf = pd.read_csv(self.vcf_path,
sep='\t', low_memory=False, comment='#', header =None, index_col = False,
names=["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "ENCODING"])
#********************************************
# Additional methods for the Adaboost Object
#********************************************
def configure_vcf(self):
'''
Function to read in and configure the VCF files
Configure the VCF and separate the lines into a dictionary by chromosome
Return the VCF header and dictionary
'''
logger.info("Processing the VCF")
vcf_header = []
vcf_dic = {}
# Check if the file was gunzipped and treat it appropriately
if self.vcf_path.endswith('.gz'):
vcf = gzip.open( self.vcf_path, mode='rt' )
else:
vcf = open( self.vcf_path, "r" )
for line in csv.reader(vcf, delimiter = "\t"):
# check to see if header, is so append the line to header
if line[0][0] == "#":
vcf_header.append(line)
continue
# else append the line to the dictionary based on the chromosome
chromosome = line[0]
position = line[1]
REF = line[3]
ALT = line[4]
new_key = str(chromosome) + ":" + str(position) + ":" + str(REF) + ":" + str(ALT)
if new_key in vcf_dic: #vcf_dic.has_key(new_key):
logger.warning("Two varinats with the same ID were found: \n\t{} \n\tDuplicates could be present".format(new_key))
vcf_dic[new_key] = line
vcf.close()
# Add the vcf header and body to the new object
self.vcf_header = vcf_header
self.vcf_body = vcf_dic
return( self )
def parseINFO(self):
'''
Parse the vcf INFO Column
Load in the data VCF then convert INFO column into pandas dataframe
'''
logger.info("Processing the VCF INFO")
## subset the data to get the get 'Chr', 'Pos','REF','ALT'
df_vcf_subset = self.vcf[['CHROM', 'POS','REF','ALT']]
#---------------------------------------------------------------------
# Load in the data VCF and convert info column into pandas dataframe
#---------------------------------------------------------------------
## Read in the data header of the vcf file to get info column ID's
## Separate the info column of the vcf for each variant
## create a pandas dataframe for the information column
# lists to hold the ID's and the values
info_id = []
all_info = []
# Get the ID's from the header
for i in self.vcf_header:
if i[0][0:11] == "##INFO=<ID=":
info_id.append(str(i[0].split(",")[0][11:]))
# print(info_id)
# Iterate through each variant
for i in self.vcf_body:
info_num = [None]*len(info_id)
## split the info section
info = self.vcf_body[i][7].split(";")
if "" in info:
info.remove("")
for value in info:
## pull out the ID and value 'ID=Value'
temp = value.split("=")
## If the ID has no value (given by IndexError), make binary indicator, 1=pressent
try:
info_num[info_id.index(temp[0])] = temp[1]
except IndexError:
info_num[info_id.index(temp[0])] = 1
all_info.append(info_num)
df_info = pd.DataFrame(data = all_info)
df_info.columns = info_id
# print(df_info.head())
self.info = df_info
return self
def parseEncoding(self):
'''
Parse the ENCODING column in the VCF
'''
logger.info("Parsing the encoding.")
# Get all possible names
FORMAT_row_lists = list(self.vcf.FORMAT.str.split(":"))
total = [j for i in FORMAT_row_lists for j in i]
column_names = unique(total)
# Parse the encodings
encoding = self.vcf.ENCODING.str.split(":")
# go over each row and combine the FORMAT with the ENCODING
all_rows = []
for i in range(len(FORMAT_row_lists)):
# make the dictionary and append to list
a = dict(zip(FORMAT_row_lists[i], encoding[i]))
all_rows.append(a)
# convert the list dictionaries into a dataframe
encoding_df = pd.DataFrame(all_rows)
self.encoding = encoding_df
return self
def fullVCF(self):
'''
Adding the parsed info columnn
'''
logger.info("Adding the parsed info and encoding sections.")
# check
# make sure each variant has its associated info
# checkDF(self.vcf, self.info)
# combine the two data frames
self.vcf_full = | pd.concat([self.vcf, self.info], axis=1) | pandas.concat |
#%%
#importing...
import yfinance as yf
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
from datetime import datetime as dt
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
Scaler = MinMaxScaler(feature_range=(0,1))
from sklearn.linear_model import LinearRegression
#imports for model
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
from sklearn.model_selection import train_test_split
import math
from sklearn.metrics import mean_squared_error,accuracy_score
import sys
#sys.path.append('../DLpart/')
#from PredictStock import Technicals
import datetime
class LSTMPrediction:
def __init__(self,symbol,look_back):
self.symbol = symbol
self.timeframe = look_back
def fetchFromYahoo(self):
yobj = yf.Ticker(self.symbol)
tickerDict = yobj.info
#print(yobj.info.keys())
df = yobj.history(period=self.timeframe)
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
#print('\n'+tickerDict['longBusinessSummary'])
print(df.tail())
plt.plot(df['Close'])
return df,tickerDict
def get_train_test_dataset(self,df,training_size=0.70,testing_size=0.30):
try:
print('this will return a training and test data')
print('\n'+'Recent Data' + '\n',df.tail())
print('MEAN CLOSE: ',df['Close'].mean())
print('MAX CLOSE: ',df['Close'].max())
print('MIN CLOSE: ',df['Close'].min())
close_price = df.reset_index()['Close']
close_price = Scaler.fit_transform(np.array(close_price).reshape(-1,1))
train_size = int(len(close_price)*training_size)
test_size = int(len(close_price*testing_size))
train_data = close_price[0:train_size,:]
test_data = close_price[train_size:len(close_price),:1]
return train_data,test_data
except ValueError:
print('Try a different Scrip')
def prepare_data_for_LSTM_krish(self,dataset,timestep=1):
dataX, dataY = [], []
for i in range(len(dataset)- timestep-1):
record = dataset[i:(i+timestep),0]
dataX.append(record)
dataY.append(dataset[i + timestep, 0])
return np.array(dataX), np.array(dataY)
def prepare_data_for_LSTM_kaggle(self,dataset):
dataX = []
dataY = []
for i in range(60, len(dataset)):
dataX.append(dataset[i-60:i, 0])
dataY.append(dataset[i, 0])
if i<=61 :
print(dataX)
print(dataY)
print()
dataX, dataY = np.array(dataX), np.array(dataY)
return dataX, dataY
def reshape_for_LSTM(self,train_data, test_data):
train_data = train_data.reshape(train_data.shape[0],train_data.shape[1],1)
test_data = test_data.reshape(test_data.shape[0],test_data.shape[1],1)
return train_data, test_data
def create_LSTM_model(self,lstm_layers_after_main=0,lstm_units=32,shape=(),loss='mean_squared_error',optimizer='adam'):
dropout = 0.0
model = Sequential()
model.add(LSTM(lstm_units,return_sequences=True,input_shape=shape))
if lstm_layers_after_main > 2 and lstm_layers_after_main < 5:
dropout = 0.4
elif lstm_layers_after_main <= 2:
dropout = 0.1
for i in range(lstm_layers_after_main):
model.add(LSTM(lstm_units,return_sequences=True))
if i % 2 == 0:
continue
model.add(Dropout(dropout))
model.add(LSTM(lstm_units))
model.add(Dense(1))
print('Dropping out ' + str(dropout*100) + '%')
model.summary()
model.compile(loss=loss,optimizer=optimizer)
return model
class LinearRegPrediction:
def get_preds_lin_reg(self, df, target_col='Close'):
regressor = LinearRegression()
x = df.drop(target_col, axis=1)
y = df[target_col]
xtrain, xtest, ytrain, ytest = train_test_split(x,y,test_size=0.1, random_state=0)
regressor.fit(xtrain, ytrain)
y_pred = regressor.predict(xtest)
ytest = np.array(ytest).reshape(-1,1)
y_pred = np.array(y_pred).reshape(-1,1)
print(regressor.score(ytest,y_pred))
#pred_min = min(y_pred)
#print(pred_min)
valid = pd.DataFrame()
valid['Valid'] = ytest
valid['Prediction'] = y_pred
print('Standard Deviation: ',np.std(y_pred))
print('RMSE: ' , np.sqrt(mean_squared_error(ytest,y_pred)))
class Technicals:
def __init__(self,symbol):
self.symbol = symbol
def EMA(self,timeframe=9,on_field='Close',plot=False, period = "1y", interval = "1d"):
df = yf.Ticker(self.symbol).history(period=period, interval=interval)
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
EMA = df[on_field].ewm(span=timeframe, adjust=False).mean()
df_new = df[[on_field]]
df_new.reset_index(level=0, inplace=True)
df_new.columns=['ds','y']
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_new.ds, df_new.y, label='price')
plt.plot(df_new.ds, EMA, label='EMA line',color='red')
plt.show()
#print('Latest EMA on '+on_field+': ',EMA[len(EMA)-1],'\n')
#return EMA
return EMA[len(EMA)-1]
def MACD(self,on_field='Close',plot=False):
df = yf.Ticker(self.symbol).history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
df_new = df[[on_field]]
df_new.reset_index(level=0, inplace=True)
df_new.columns=['ds','y']
#df_new.head()
EMA12 = df_new.y.ewm(span=12, adjust=False).mean()
EMA26 = df_new.y.ewm(span=26, adjust=False).mean()
MACD = EMA12-EMA26
EMA9 = MACD.ewm(span=9, adjust=False).mean()
#plt.plot(df_new.ds, df_new.y, label='price')
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_new.ds, MACD, label=self.symbol+' MACD', color='blue')
plt.plot(df_new.ds, EMA9, label=self.symbol+' Signal Line', color='red')
plt.legend(loc='upper left')
plt.show()
#print('\n')
#print(EMA9[len(EMA9)-1], MACD[len(MACD)-1])
if MACD[len(MACD)-1] > MACD[len(MACD)-2]:
return True
else:
return False
# if MACD[len(MACD)-1]-EMA9[len(EMA9)-1] <= 4 and MACD[len(MACD)-1]-EMA9[len(EMA9)-1] >= 0:
# print('ALERT: MACD crossover about to occur, Sell side')
# elif MACD[len(MACD)-1]-EMA9[len(EMA9)-1] >= -4 and MACD[len(MACD)-1]-EMA9[len(EMA9)-1] <= 0:
# print('ALERT: MACD crossover about to occur, Buy side')
# else:
# print('No MACD crossovers')
#return EMA9[len(EMA9)-1], MACD[len(MACD)-1] #latest value of EMA9 line and MACD value
def RSI_backUpCode(self, period = 14):
# If the RSI value is over 70, the security is considered overbought, if the value is lower than 30,
# it is considered to be oversold
# Using a conservative approach, sell when the RSI value intersects the overbought line
# buy when the value intersects the oversold line (for blue chip stocks)
yobj = yf.Ticker(self.symbol)
df = yobj.history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df_index = pd.to_datetime(df.index)
change = []
gain = []
loss = []
AvgGain = []
AvgLoss = []
RS = []
RSI = []
df_new = pd.DataFrame(df['Close'], index=df.index)
change.insert(0,0)
#change calc
for i in range(1,len(df_new)):
diff = df_new.Close[i] - df_new.Close[i-1]
change.append(diff)
df_new['Change'] = change
#Gain and loss
for i in range(len(df_new)):
if df_new.Change[i] > 0:
gain.append(df_new.Change[i])
loss.append(0)
elif df_new.Change[i] < 0:
loss.append(abs(df_new.Change[i]))
gain.append(0)
else:
gain.append(0)
loss.append(0)
df_new['Gain'] = gain
df_new['Loss'] = loss
#average gain/loss
averageSum_forgain = 0
averageSum_forloss = 0
averageGain = 0
averageLoss = 0
count = 1
for i in range(0,len(df_new)):
averageSum_forgain = averageSum_forgain + df_new.Gain[i]
averageGain = averageSum_forgain/count
AvgGain.insert(i,round(averageGain,4))
averageSum_forloss = averageSum_forloss + df_new.Loss[i]
averageLoss = averageSum_forloss/count
AvgLoss.insert(i,round(averageLoss,4))
count+=1
if averageGain == 0 or averageLoss == 0:
RS.append(0.0)
else:
RS.append(averageGain/averageLoss)
df_new['AvgGain'] = AvgGain
df_new['AvgLoss'] = AvgLoss
df_new['RS'] = RS
rsi = 0
for i in range(0,len(df_new)):
rsi = 100 - 100/(1+df_new.RS[i])
RSI.append(round(rsi,2))
df_new['RSI'] = RSI
plt.figure(figsize=(16,8))
plt.plot(df_index[len(df_new)-period:len(df_new)],df_new.iloc[len(df_new)-period:len(df_new),-1], label='RSI value')
plt.legend(loc='upper left')
plt.show()
print('\nCurrent RSI value: ' , df_new['RSI'][-1])
Latest_RSI_value = float(df_new['RSI'][-1])
return df_new, Latest_RSI_value
def RSI(self,period = 14, plot = False):
df = yf.Ticker(self.symbol).history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df_index = pd.to_datetime(df.index)
change = []
gain = []
loss = []
AvgGain = []
AvgLoss = []
RS = []
RSI = []
df_new = pd.DataFrame(df['Close'], index=df.index)
change.insert(0,0)
#change calc
for i in range(1,len(df_new)):
diff = df_new.Close[i] - df_new.Close[i-1]
change.append(diff)
df_new['Change'] = change
#Gain and loss
for i in range(len(df_new)):
if df_new.Change[i] > 0:
gain.append(df_new.Change[i])
loss.append(0)
elif df_new.Change[i] < 0:
loss.append(abs(df_new.Change[i]))
gain.append(0)
else:
gain.append(0)
loss.append(0)
df_new['Gain'] = gain
df_new['Loss'] = loss
#average gain/loss
averageSum_forgain = 0
averageSum_forloss = 0
averageGain = 0
averageLoss = 0
count = 1
for i in range(0,len(df_new)):
averageSum_forgain = averageSum_forgain + df_new.Gain[i]
averageGain = averageSum_forgain/count
AvgGain.insert(i,averageGain)
averageSum_forloss = averageSum_forloss + df_new.Loss[i]
averageLoss = averageSum_forloss/count
AvgLoss.insert(i,averageLoss)
count+=1
if averageGain == 0 or averageLoss == 0:
RS.append(0.0)
else:
RS.append(averageGain/averageLoss)
df_new['AvgGain'] = AvgGain
df_new['AvgLoss'] = AvgLoss
df_new['RS'] = RS
rsi = 0
for i in range(len(df)-14,len(df)):
rsi = 100 - 100/(1+df_new.RS[i])
RSI.append(round(rsi,2))
#df_new['RSI'] = RSI
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_index[len(df_new)-period:len(df_new)],RSI, label='RSI value')
plt.legend(loc='upper left')
plt.show()
print('\nCurrent RSI value: ' , RSI[len(RSI)-1])
Latest_RSI_value = RSI[-1]
Previous_day_rsi_value = RSI[-2]
if (Previous_day_rsi_value < Latest_RSI_value) and (Latest_RSI_value >= 40) and (Latest_RSI_value <= 60):
return True
else:
return False
#return df_new, RSI
#return RSI
#return Latest_RSI_value
def BollingerBands(self, degree_of_freedom = 20, period = 20, on_field = 'Close'):
yobj = yf.Ticker(self.symbol)
df = yobj.history(period="1mo")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df_index = pd.to_datetime(df.index)
#print(df[on_field].rolling(window = period).sum()/period)
#SMA calculated
MA = df[on_field].rolling(window = period).sum()/period
typical_price = []
#printing SMA
#printing BOLU
BOLU = []
BOLD = []
for i in range(len(df)-period,len(df)):
#typical price = (high+low+close)/3
typical_price.append((df.iloc[i,1] + df.iloc[i,2] + df.iloc[i,3]) / 3)
typical_price = pd.Series(typical_price)
for i in range(len(typical_price)):
std = 2*( math.sqrt( math.pow(i-typical_price.mean(),2) / len(typical_price) ) )
BOLU.append(typical_price[i] + std)
BOLD.append(typical_price[i] - std)
# BOLU = pd.Series(BOLU)
# BOLD = pd.Series(BOLD)
print("Middle value: " + str(MA.iloc[-1]))
print("Upper Band: " + str(BOLU[-1]))
print("Lower Band: " + str(BOLD[-1]))
#general analysis
class StockListAnalysis:
def __init__(self):
self.niftyColumns = ['SYMBOL','OPEN','HIGH','LOW','PREVCLOSE','LTP','TODAYS_CHANGE',
'CHANGE_%','VOLUME','VALUE','52WH','52WL','1Y_CHANGE%','1M_CHANGE%']
self.niftySectorColumns = ['INDEX','CURRENT','%CHANGE','OPEN','HIGH','LOW','PREVCLOSE','PREVDAY','1W_CLOSE','1M_CLOSE','1Y_CLOSE',
'52WH','52WL','1Y_CHANGE%','1M_CHANGE%']
try:
self.nifty_100_data = pd.read_csv('../PreFedIndexData/MW-NIFTY-100-'+datetime.datetime.strftime(datetime.datetime.today(),"%d-%b-%Y")+'.csv', names=self.niftyColumns,header=0)
self.nifty_sector_data = pd.read_csv('../PreFedIndexData/MW-All-Indices-'+datetime.datetime.strftime(datetime.datetime.today(),"%d-%b-%Y")+'.csv', names=self.niftySectorColumns, header=0)
except FileNotFoundError:
self.nifty_100_data = | pd.read_csv('PreFedIndexData/MW-NIFTY-100-'+'12-Jun-2021'+'.csv', names=self.niftyColumns,header=0) | pandas.read_csv |
import pandas as pd
from scripts.python.pheno.datasets.filter import filter_pheno, get_passed_fields
from scripts.python.pheno.datasets.features import get_column_name, get_default_statuses_ids, get_status_dict, get_default_statuses, get_sex_dict
from scripts.python.preprocessing.serialization.routines.pheno_betas_checking import get_pheno_betas_with_common_subjects
from scripts.python.routines.betas import betas_drop_na
import plotly.graph_objects as go
from scripts.python.routines.manifest import get_manifest
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.histogram import add_histogram_trace
from scripts.python.routines.plot.layout import add_layout
import json
from pathlib import Path
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = | pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset') | pandas.read_excel |
import pandas as pd
import numpy as np
from utils.cross_val import get_cv_results
import matplotlib.pyplot as plt
from utils.nemenyi import nemenyi, nemenyi_unrolled_plot
plot_pars = {"size": (5, 2.5),
"font_scale": 0.7,
"w": 0.3,
"h": 0.2,
"b": 0.2}
# ----------------------------------------------------------------------------------
# -------------------------------- Fourier Smoothing -------------------------------
# ----------------------------------------------------------------------------------
# variable: MAPE, independent var: n harmonics, groups: time series
results = np.load('data/cv_res_fourier_144.npy', allow_pickle=True).item()
k_cv = len(results[list(results.keys())[0]])
resall = pd.DataFrame()
for k in results.keys():
results_k = get_cv_results(results[k])
results_k['series'] = k
resall = pd.concat([resall, pd.DataFrame(results_k)], axis=0)
# rename columns for plot
resall = resall[['mape_miso', 'mape_mimo', 'mape']]
resall.columns = ['miso', 'mimo', 'mbt']
fig, ax = nemenyi_unrolled_plot(resall, 'rank [-]', 'n freq [-]', k_cv=k_cv, rot=60, **plot_pars)
plt.title('MAPE')
plt.savefig('figs/stats_fourier.pdf')
plt.show()
# ----------------------------------------------------------------------------------
# --------------------------------- Hierarchical -----------------------------------
# ----------------------------------------------------------------------------------
# variable: MAPE, independent var: group level, groups: first 3 steps ahead
results = np.load('data/hierarchical_scores.npy', allow_pickle=True)
k_cv = len(results)
results = get_cv_results(results)
sa_filter = np.arange(3).astype(float)
group_filter = ['bu', 'rec', 'mgb']
score_filter = 'mape'
res = pd.DataFrame(columns=group_filter)
for k, v in results.items():
if k not in group_filter:
continue
v = v[v['sa'].isin(sa_filter)]
v = v[score_filter]
res[k] = v
# rename columns for plot
res.columns = ['bu' ,'rec', 'mbt']
fig, ax = nemenyi_unrolled_plot(res, 'rank [-]', 'aggregation group [-]', k_cv=k_cv, **plot_pars)
plt.title('MAPE')
plt.savefig('figs/stats_hierarchical.pdf')
plt.show()
# ----------------------------------------------------------------------------------
# --------------------------------- Quantiles --------------------------------------
# ----------------------------------------------------------------------------------
# variable: QS, independent var: quantile, groups: 24 steps ahead
results_all = np.load('data/cv_res_quantiles_all_squared_partial.npy', allow_pickle=True).item()
refit = get_cv_results(results_all['refit']['all'])
squared_refit = get_cv_results(results_all['suqared_refit']['all'])
results_all = get_cv_results(results_all['no_refit']['all'])
results_all['mbt'] = results_all['mgb']
del results_all['mgb']
results_all['mbt refit'] = refit['mgb']
results_all['mbt lin-quad refit'] = squared_refit['mgb']
n_quantiles = 11
alphas = np.linspace(1/n_quantiles, 1-1/n_quantiles, n_quantiles)
group_filter = ['mimo', 'mbt refit', 'mbt lin-quad refit']
score_filter = 'skill'
res = pd.DataFrame()
for k in group_filter:
res_k = pd.DataFrame(results_all[k][score_filter], columns=np.arange(24), index=alphas)
res_k.index.name = 'quantile'
res_k.index = np.round(res_k.index, 2)
res_k = res_k.reset_index().melt('quantile').set_index('quantile')[['value']]
res_k.columns = [k]
res = pd.concat([res, res_k], axis=1)
fig, ax = nemenyi_unrolled_plot(res, 'rank [-]', r'$\alpha$ [-]', k_cv=k_cv, **plot_pars)
plt.title('QS')
plt.savefig('figs/stats_quantile_QS.pdf')
plt.show()
# variable: reliability distance, independent var: quantile, groups: 24 steps ahead
group_filter = ['mimo', 'mbt refit', 'mbt lin-quad refit']
score_filter = 'reliability'
res = pd.DataFrame()
for k in group_filter:
res_k = pd.DataFrame(results_all[k][score_filter], columns=np.arange(24), index=alphas)
# retrieve reliability distance from perfect reliability
res_k = (res_k - alphas.reshape(-1, 1)).abs()
res_k.index.name = 'quantile'
res_k.index = np.round(res_k.index, 2)
res_k = res_k.reset_index().melt('quantile').set_index('quantile')[['value']]
res_k.columns = [k]
res = | pd.concat([res, res_k], axis=1) | pandas.concat |
#! /usr/bin/env python
"""
Script to observe document counts by language and content type
"""
import os
import json
from argparse import ArgumentParser
from collections import Counter, defaultdict
from multiprocessing import Pool
from typing import Generator
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def count_docs(path: str, key: str) -> tuple[str, str]:
"""Retrieves language and content type for each file"""
print(f"Extracting data from {path}")
with open(path) as file:
data = json.load(file)
if key == "language":
language = data.get("site_language")
if key == "domain":
language = path.split("/")[-3]
content_type = data.get("content_type").capitalize()
return language, content_type
class DocumentCounter:
def __init__(self) -> None:
# Distribution of document counts by language and content type
self.doc_content_distribution: defaultdict[str, Counter[str]] = defaultdict(
Counter
)
def count(self, language: str, content_type: str) -> None:
"""Counts each document by language or domain"""
print(f"Counting {language}")
self.doc_content_distribution[language][content_type] += 1
def histogram(self) -> None:
"""Creates histogram from document counts distribution by language or domain"""
df = | pd.DataFrame.from_dict(self.doc_content_distribution, orient="index") | pandas.DataFrame.from_dict |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
from pandas.core import common as com
from pandas.util import testing as tm
class TestCaching:
def test_slice_consolidate_invalidate_item_cache(self):
# this is chained assignment, but will 'work'
with option_context('chained_assignment', None):
# #3970
df = DataFrame({"aa": np.arange(5), "bb": [2.2] * 5})
# Creates a second float block
df["cc"] = 0.0
# caches a reference to the 'bb' series
df["bb"]
# repr machinery triggers consolidation
repr(df)
# Assignment to wrong series
df['bb'].iloc[0] = 0.17
df._clear_item_cache()
tm.assert_almost_equal(df['bb'][0], 0.17)
def test_setitem_cache_updating(self):
# GH 5424
cont = ['one', 'two', 'three', 'four', 'five', 'six', 'seven']
for do_ref in [False, False]:
df = DataFrame({'a': cont,
"b": cont[3:] + cont[:3],
'c': np.arange(7)})
# ref the cache
if do_ref:
df.loc[0, "c"]
# set it
df.loc[7, 'c'] = 1
assert df.loc[0, 'c'] == 0.0
assert df.loc[7, 'c'] == 1.0
# GH 7084
# not updating cache on series setting with slices
expected = DataFrame({'A': [600, 600, 600]},
index=date_range('5/7/2014', '5/9/2014'))
out = DataFrame({'A': [0, 0, 0]},
index=date_range('5/7/2014', '5/9/2014'))
df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]})
# loop through df to update out
six = | Timestamp('5/7/2014') | pandas.Timestamp |
# !/usr/bin/env python
#
# classes.py
"""
Main classes for the worklist parser.
"""
#
# Copyright © 2020-2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# stdlib
from pprint import pformat
from typing import Any, Dict, List, Optional, Sequence, Union
from uuid import UUID
# 3rd party
import attr
import lxml.etree # type: ignore
import pandas # type: ignore
from attr_utils.docstrings import add_attrs_doc
from attr_utils.serialise import serde
from domdf_python_tools.bases import Dictable
from domdf_python_tools.doctools import prettify_docstrings
# this package
from mh_utils.utils import element_to_bool, strip_string
from mh_utils.worklist_parser.columns import Column, columns
from mh_utils.worklist_parser.enums import AttributeType
from mh_utils.worklist_parser.parser import parse_params, parse_sample_info
from mh_utils.xml import XMLFileMixin
__all__ = ["JobData", "Worklist", "Checksum", "Macro", "Attribute"]
pandas.DataFrame.__module__ = "pandas"
class JobData(Dictable):
"""
Represents an entry in the worklist.
:param id: The ID of the job.
:param job_type: The type of job. TODO: enum of values
:param run_status: The status of the analysis. TODO: enum of values
:param sample_info: Optional ``key: value`` mapping of information about the sample.
"""
def __init__(
self,
id: Union[str, UUID], # noqa: A002 # pylint: disable=redefined-builtin
job_type: int,
run_status: int,
sample_info: Optional[dict] = None,
):
super().__init__()
if isinstance(id, UUID):
self.id = id
else:
self.id = UUID(str(id))
self.job_type = int(job_type)
self.run_status = int(run_status)
if sample_info:
self.sample_info = sample_info
else:
self.sample_info = {}
__slots__ = ["id", "job_type", "run_status", "sample_info"]
# dtypes
# 8: Str
# Inj Vol, Dilution and Equilib Time (min) 5
@classmethod
def from_xml(
cls,
element: lxml.objectify.ObjectifiedElement,
user_columns: Optional[Dict[str, Column]] = None,
) -> "JobData":
"""
Construct a :class:`~.JobData` object from an XML element.
:param element: The XML element to parse the data from
:param user_columns: Optional mapping of user column labels to
:class:`~mh_utils.worklist_parser.columns.Column` objects.
"""
return cls(
id=element.ID,
job_type=element.JobType,
run_status=element.RunStatus,
sample_info=parse_sample_info(element.SampleInfo, user_columns),
)
@property
def __dict__(self):
data = {}
for key in self.__slots__:
if key == "id":
data[key] = str(self.id)
else:
data[key] = getattr(self, key)
return data
def __repr__(self) -> str:
values = ", ".join(f"{key}={val!r}" for key, val in iter(self) if key != "sample_info")
return f"{self.__class__.__name__}({values})"
@prettify_docstrings
class Worklist(XMLFileMixin, Dictable):
"""
Class that represents an Agilent MassHunter worklist.
:param version: WorklistInfo version number
:param locked_run_mode: Flag to indicate whether the data was acquired in locked mode. Yes = -1. No = 0.
:param instrument_name: The name of the instrument.
:param params: Mapping of parameter names to values. TODO: Check
:param user_columns: Mapping of user columns to ??? TODO
:param jobs:
:param checksum: The checksum of the worklist file. The format is unknown.
"""
def __init__(
self,
version: float,
locked_run_mode: bool,
instrument_name: str,
params: dict,
user_columns: dict,
jobs: Sequence[JobData],
checksum: "Checksum",
):
super().__init__()
self.version = float(version)
self.locked_run_mode = bool(locked_run_mode)
self.instrument_name = str(instrument_name)
self.params = params
self.user_columns = user_columns
self.jobs = list(jobs)
self.checksum = checksum
__slots__ = ["version", "user_columns", "jobs", "checksum", "locked_run_mode", "instrument_name", "params"]
@property
def __dict__(self):
data = {}
for key in self.__slots__:
data[key] = getattr(self, key)
return data
@classmethod
def from_xml(cls, element: lxml.objectify.ObjectifiedElement) -> "Worklist":
"""
Construct a :class:`~.Worklist` object from an XML element.
"""
version = float(element.Version)
checksum = Checksum.from_xml(element.Checksum)
WorklistInfo = element.WorklistInfo
if WorklistInfo.LockedRunMode == -1:
locked_run_mode = True
elif WorklistInfo.LockedRunMode == 0:
locked_run_mode = False
else:
raise ValueError("Unknown value for 'LockedRunMode'")
instrument_name = str(WorklistInfo.Instrument)
params = parse_params(WorklistInfo.Params)
attributes_list: List[Attribute] = []
jobs_list: List[JobData] = []
user_columns: Dict[str, Column] = {}
for attribute in WorklistInfo.AttributeInformation.iterchildren("Attributes"):
attribute = Attribute.from_xml(attribute)
attributes_list.append(attribute)
if attribute.attribute_type != AttributeType.SystemDefined:
column = Column.from_attribute(attribute)
user_columns[column.name] = column
for job in WorklistInfo.JobDataList.iterchildren("JobData"):
jobs_list.append(JobData.from_xml(job, user_columns))
return cls(
version=version,
locked_run_mode=locked_run_mode,
instrument_name=instrument_name,
params=params,
user_columns=user_columns,
jobs=jobs_list,
checksum=checksum,
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({pformat(dict(self))})"
def as_dataframe(self) -> pandas.DataFrame:
"""
Returns the :class:`~.Worklist` as a :class:`pandas.DataFrame`.
:rtype:
.. clearpage::
"""
headers = [col for col in columns] + [col for col in self.user_columns]
data = []
for job in self.jobs:
row = []
for header_label in headers:
row.append(job.sample_info[header_label])
data.append(row)
# TODO: Sort columns by "reorder_id"
return | pandas.DataFrame(data, columns=headers) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import scipy
import scipy.stats
import pypeliner
import remixt.seqdataio
import remixt.config
def infer_snp_genotype(data, base_call_error=0.005, call_threshold=0.9):
""" Infer snp genotype based on binomial PMF
Args:
data (pandas.DataFrame): input snp data
KwArgs:
base_call_error (float): per base sequencing error
call_threshold (float): posterior threshold for calling a genotype
Input dataframe should have columns 'ref_count', 'alt_count'
The operation is in-place, and the input dataframe after the call will
have 'AA', 'AB', 'BB' columns, in addition to others.
"""
data['total_count'] = data['ref_count'] + data['alt_count']
data['likelihood_AA'] = scipy.stats.binom.pmf(data['alt_count'], data['total_count'], base_call_error)
data['likelihood_AB'] = scipy.stats.binom.pmf(data['alt_count'], data['total_count'], 0.5)
data['likelihood_BB'] = scipy.stats.binom.pmf(data['ref_count'], data['total_count'], base_call_error)
data['evidence'] = data['likelihood_AA'] + data['likelihood_AB'] + data['likelihood_BB']
data['posterior_AA'] = data['likelihood_AA'] / data['evidence']
data['posterior_AB'] = data['likelihood_AB'] / data['evidence']
data['posterior_BB'] = data['likelihood_BB'] / data['evidence']
data['AA'] = (data['posterior_AA'] >= call_threshold) * 1
data['AB'] = (data['posterior_AB'] >= call_threshold) * 1
data['BB'] = (data['posterior_BB'] >= call_threshold) * 1
def read_snp_counts(seqdata_filename, chromosome, num_rows=1000000):
""" Count reads for each SNP from sequence data
Args:
seqdata_filename (str): sequence data filename
chromosome (str): chromosome for which to count reads
KwArgs:
num_rows (int): number of rows per chunk for streaming
Returns:
pandas.DataFrame: read counts per SNP
Returned dataframe has columns 'position', 'ref_count', 'alt_count'
"""
snp_counts = list()
for alleles_chunk in remixt.seqdataio.read_allele_data(seqdata_filename, chromosome, chunksize=num_rows):
if len(alleles_chunk.index) == 0:
snp_counts.append(pd.DataFrame(columns=['position', 'ref_count', 'alt_count'], dtype=int))
continue
snp_counts_chunk = (
alleles_chunk
.groupby(['position', 'is_alt'])
.size()
.unstack(fill_value=0)
.reindex(columns=[0, 1])
.fillna(0)
.astype(int)
.rename(columns=lambda a: {0:'ref_count', 1:'alt_count'}[a])
.reset_index()
)
snp_counts.append(snp_counts_chunk)
snp_counts = pd.concat(snp_counts, ignore_index=True)
if len(snp_counts.index) == 0:
return pd.DataFrame(columns=['position', 'ref_count', 'alt_count']).astype(int)
# Consolodate positions split by chunking
snp_counts = snp_counts.groupby('position').sum().reset_index()
snp_counts.sort_values('position', inplace=True)
return snp_counts
def infer_snp_genotype_from_normal(snp_genotype_filename, seqdata_filename, chromosome, config):
""" Infer SNP genotype from normal sample.
Args:
snp_genotype_filename (str): output snp genotype file
seqdata_filename (str): input sequence data file
chromosome (str): id of chromosome for which haplotype blocks will be inferred
config (dict): relavent shapeit parameters including thousand genomes paths
The output snp genotype file will contain the following columns:
'position': het snp position
'AA': binary indicator for homozygous reference
'AB': binary indicator for heterozygous
'BB': binary indicator for homozygous alternate
"""
sequencing_base_call_error = remixt.config.get_param(config, 'sequencing_base_call_error')
het_snp_call_threshold = remixt.config.get_param(config, 'het_snp_call_threshold')
# Call snps based on reference and alternate read counts from normal
snp_counts_df = read_snp_counts(seqdata_filename, chromosome)
infer_snp_genotype(snp_counts_df, sequencing_base_call_error, het_snp_call_threshold)
snp_counts_df.to_csv(snp_genotype_filename, sep='\t', columns=['position', 'AA', 'AB', 'BB'], index=False)
def infer_snp_genotype_from_tumour(snp_genotype_filename, seqdata_filenames, chromosome, config):
""" Infer SNP genotype from tumour samples.
Args:
snp_genotype_filename (str): output snp genotype file
seqdata_filenames (str): input tumour sequence data files
chromosome (str): id of chromosome for which haplotype blocks will be inferred
config (dict): relavent shapeit parameters including thousand genomes paths
The output snp genotype file will contain the following columns:
'position': het snp position
'AA': binary indicator for homozygous reference
'AB': binary indicator for heterozygous
'BB': binary indicator for homozygous alternate
"""
sequencing_base_call_error = remixt.config.get_param(config, 'sequencing_base_call_error')
homozygous_p_value_threshold = remixt.config.get_param(config, 'homozygous_p_value_threshold')
# Calculate total reference alternate read counts in all tumours
snp_counts_df = pd.DataFrame(columns=['position', 'ref_count', 'alt_count']).astype(int)
for tumour_id, seqdata_filename in seqdata_filenames.items():
snp_counts_df = pd.concat([snp_counts_df, read_snp_counts(seqdata_filename, chromosome)], ignore_index=True)
snp_counts_df = snp_counts_df.groupby('position').sum().reset_index()
snp_counts_df['total_count'] = snp_counts_df['alt_count'] + snp_counts_df['ref_count']
snp_counts_df = snp_counts_df[snp_counts_df['total_count'] > 50]
binom_test_ref = lambda row: scipy.stats.binom_test(
row['ref_count'], row['total_count'],
p=sequencing_base_call_error, alternative='greater')
snp_counts_df['prob_no_A'] = snp_counts_df.apply(binom_test_ref, axis=1)
binom_test_alt = lambda row: scipy.stats.binom_test(
row['alt_count'], row['total_count'],
p=sequencing_base_call_error, alternative='greater')
snp_counts_df['prob_no_B'] = snp_counts_df.apply(binom_test_alt, axis=1)
snp_counts_df['has_A'] = snp_counts_df['prob_no_A'] < homozygous_p_value_threshold
snp_counts_df['has_B'] = snp_counts_df['prob_no_B'] < homozygous_p_value_threshold
snp_counts_df['AA'] = (snp_counts_df['has_A'] & ~snp_counts_df['has_B']) * 1
snp_counts_df['BB'] = (snp_counts_df['has_B'] & ~snp_counts_df['has_A']) * 1
snp_counts_df['AB'] = (snp_counts_df['has_A'] & snp_counts_df['has_B']) * 1
snp_counts_df.to_csv(snp_genotype_filename, sep='\t', columns=['position', 'AA', 'AB', 'BB'], index=False)
def infer_haps(haps_filename, snp_genotype_filename, chromosome, temp_directory, config, ref_data_dir):
""" Infer haplotype blocks for a chromosome using shapeit
Args:
haps_filename (str): output haplotype data file
snp_genotype_filename (str): input snp genotype file
chromosome (str): id of chromosome for which haplotype blocks will be inferred
temp_directory (str): directory in which shapeit temp files will be stored
config (dict): relavent shapeit parameters including thousand genomes paths
ref_data_dir (str): reference dataset directory
The output haps file will contain haplotype blocks for each heterozygous SNP position. The
file will be TSV format with the following columns:
'chromosome': het snp chromosome
'position': het snp position
'allele': binary indicator for reference (0) vs alternate (1) allele
'hap_label': label of the haplotype block
'allele_id': binary indicator of the haplotype allele
"""
def write_null():
with open(haps_filename, 'w') as haps_file:
haps_file.write('chromosome\tposition\tallele\thap_label\tallele_id\n')
accepted_chromosomes = [str(a) for a in range(1, 23)] + ['X']
if str(chromosome) not in accepted_chromosomes:
write_null()
return
# Temporary directory for shapeit files
try:
os.makedirs(temp_directory)
except OSError:
pass
# If we are analyzing male data and this is chromosome X
# then there are no het snps and no haplotypes
if chromosome == 'X' and not remixt.config.get_param(config, 'is_female'):
write_null()
return
# Impute 2 files for thousand genomes data by chromosome
phased_chromosome = chromosome
if chromosome == 'X':
phased_chromosome = remixt.config.get_param(config, 'phased_chromosome_x')
genetic_map_filename = remixt.config.get_filename(config, ref_data_dir, 'genetic_map', chromosome=phased_chromosome)
hap_filename = remixt.config.get_filename(config, ref_data_dir, 'haplotypes', chromosome=phased_chromosome)
legend_filename = remixt.config.get_filename(config, ref_data_dir, 'legend', chromosome=phased_chromosome)
snp_genotype_df = | pd.read_csv(snp_genotype_filename, sep='\t') | pandas.read_csv |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix, eye
from .value_parser import parse_nodeid
def supertypes_of_nodes(
types_to_supertype: List[str],
type_nodes: pd.DataFrame,
type_references: pd.DataFrame,
) -> pd.DataFrame:
# Computes the closure
closure = typing_transitive_reflexive(type_nodes, type_references)
closure = closure.set_index("Trg")
typeindex = pd.Index(types_to_supertype)
closure = closure[closure.index.isin(typeindex)].reset_index()
closure = closure.rename(columns={"Src": "supertype", "Trg": "type"})
return closure
def hierarchical_references_trg_has_modelling_rule(
references: pd.DataFrame, type_references: pd.DataFrame, type_nodes: pd.DataFrame
):
hierarchical_refs = hierarchical_references(
inst_references=references,
type_references=type_references,
type_nodes=type_nodes,
)
has_modelling_rule = has_modelling_rule_references(
inst_references=references,
type_references=type_references,
type_nodes=type_nodes,
)[["Src"]].rename(columns={"Src": "id"})
hierarchical_refs_has_modelling_rule = hierarchical_refs.set_index("Trg").join(
has_modelling_rule.set_index("id"), how="inner"
)
hierarchical_refs_has_modelling_rule = (
hierarchical_refs_has_modelling_rule.reset_index().rename(
columns={"index": "Trg"}
)
)
return hierarchical_refs_has_modelling_rule[["Src", "Trg", "ReferenceType"]].copy()
# Useful for keeping forward refs for methods when instantiating
def hierarchical_references_trg_has_no_modelling_rule(
references: pd.DataFrame, type_references: pd.DataFrame, type_nodes: pd.DataFrame
):
hierarchical_refs = hierarchical_references(
inst_references=references,
type_references=type_references,
type_nodes=type_nodes,
)
has_modelling_rule = has_modelling_rule_references(
inst_references=references,
type_references=type_references,
type_nodes=type_nodes,
)[["Src"]].rename(columns={"Src": "id"})
has_modelling_rule = has_modelling_rule.set_index("id")
hierarchical_refs_has_no_modelling_rule = hierarchical_refs.set_index(
"Trg", drop=False
)
trg_no_modelling_rule = ~hierarchical_refs_has_no_modelling_rule.index.isin(
has_modelling_rule.index
)
hierarchical_refs_has_no_modelling_rule = hierarchical_refs_has_no_modelling_rule[
trg_no_modelling_rule
].copy()
return hierarchical_refs_has_no_modelling_rule[
["Src", "Trg", "ReferenceType"]
].copy()
def find_relatives(
nodes: pd.DataFrame,
nodes_key_col: str,
edges: pd.DataFrame,
relative_type: str,
cutoff: int = None,
keep_paths: bool = False,
) -> pd.DataFrame:
"""
Find the relatives to the provided nodes
The
Parameters:
nodes (pd.DataFrame): The dataframe with nodes to find relatives from.
nodes_key_col (str): The key column to use for indexing in the nodes DataFrame
edges(pd.DataFrame): The edges to follow in the traverse
cutoff (int): Max number of steps to go for the traverse
keep_paths(bool):
Returns:
"""
new_nodes = nodes.copy()
orig_cols = nodes.columns.values.tolist()
new_nodes[0] = nodes[nodes_key_col]
new_nodes["len_path"] = 0
new_nodes = new_nodes.set_index(0, drop=False)
path_cols = [0]
if relative_type.lower().startswith("a"):
col_src = "Trg"
col_trg = "Src"
else:
col_src = "Src"
col_trg = "Trg"
# Set the index on reference column based on relativetype(direction)
edge_join = edges.set_index(col_src)
result = [new_nodes]
i = 1
while (new_nodes.shape[0] > 0) and ((cutoff is None) or (cutoff >= i)):
joined_nodes = new_nodes.join(edge_join, how="inner")
#
new_nodes = joined_nodes[orig_cols + [col_trg] + path_cols].copy()
if len(new_nodes) > 0:
if keep_paths:
new_nodes[i] = new_nodes[col_trg]
path_cols.append(i)
else:
new_nodes[0] = new_nodes[col_trg]
new_nodes["len_path"] = i
new_nodes = new_nodes.set_index(col_trg)
result.append(new_nodes)
i = i + 1
if len(result) > 1:
resconc = pd.concat(result).reset_index()
else:
resconc = result[0]
resconc["end"] = resconc[0]
if keep_paths:
for i in range(1, i - 1):
resconc["end"] = resconc[i].combine_first(resconc["end"])
return resconc
def resolve_id_from_nodeid(nodes: pd.DataFrame, nodeid: str):
nodeid_instance = parse_nodeid(nodeid)
nodes = nodes.set_index("NodeId")
theid = nodes.loc[nodeid_instance, "id"]
return theid
def resolve_ids_from_browsenames(nodes: pd.DataFrame, browsenames: List[str]):
nodes = nodes.set_index("BrowseName")
hst_id = nodes.loc[browsenames, "id"].reset_index(drop=True)
return hst_id
def has_subtype_references(
references: pd.DataFrame, type_nodes: pd.DataFrame, id_col: str
) -> pd.DataFrame:
hst_id = type_nodes.loc[
(type_nodes["NodeClass"] == "UAReferenceType")
& (type_nodes["BrowseName"] == "HasSubtype"),
id_col,
].iloc[0]
subtype = references[references["ReferenceType"] == hst_id].copy()
return subtype
def has_type_definition_references(
references: pd.DataFrame, type_nodes: pd.DataFrame
) -> pd.DataFrame:
htd_id = type_nodes.loc[
(type_nodes["NodeClass"] == "UAReferenceType")
& (type_nodes["BrowseName"] == "HasTypeDefinition"),
"id",
].iloc[0]
htd = references[references["ReferenceType"] == htd_id].copy()
return htd
def has_property_references(
inst_references: pd.DataFrame,
type_references: pd.DataFrame,
type_nodes: pd.DataFrame,
) -> pd.DataFrame:
propref_id = type_nodes.loc[
(type_nodes["NodeClass"] == "UAReferenceType")
& (type_nodes["BrowseName"] == "HasProperty"),
"id",
].iloc[0]
prop_ref = constrain_to_reference_type(
inst_references, type_nodes, type_references, [propref_id]
)
return prop_ref
def has_modelling_rule_references(
inst_references: pd.DataFrame,
type_references: pd.DataFrame,
type_nodes: pd.DataFrame,
) -> pd.DataFrame:
propref_id = type_nodes.loc[
(type_nodes["NodeClass"] == "UAReferenceType")
& (type_nodes["BrowseName"] == "HasModellingRule"),
"id",
].iloc[0]
prop_ref = constrain_to_reference_type(
inst_references, type_nodes, type_references, [propref_id]
)
return prop_ref
def hierarchical_references(
inst_references: pd.DataFrame,
type_references: pd.DataFrame,
type_nodes: pd.DataFrame,
) -> pd.DataFrame:
hierref_id = type_nodes.loc[
(type_nodes["NodeClass"] == "UAReferenceType")
& (type_nodes["BrowseName"] == "HierarchicalReferences"),
"id",
].iloc[0]
prop_ref = constrain_to_reference_type(
inst_references, type_nodes, type_references, [hierref_id]
)
return prop_ref
def fast_transitive_closure(references: pd.DataFrame) -> pd.DataFrame:
# Quickly and memory-efficiently compute the transitive closure of a reference
# Transitive closure: https://en.wikipedia.org/wiki/Transitive_closure
# We do not consider ReferenceType or store underlying paths in this closure
# The references are converted to a matrix where each unique Src/Trg is assigned a column and row
# We have row(Src), col(Trg) == 1 if there is a reference from Src to Trg, 0 otherwise
# A sparse matrix representation is used in order to reduce memory consumption.
# The transitive closure is given by iterative dot products of this matrix with itself until we reach a fixed point
# At the end of each iteration, a cell is converted to 1 if it is above 0 or 0 otherwise.
# This is in order to ensure that the algorithm terminates
assert (
references["Src"] == references["Trg"]
).sum() == 0, "There should be no references r such that r(n,n)"
codes, uniques = pd.factorize(pd.concat([references["Src"], references["Trg"]]))
src_codes = codes[0 : references.shape[0]]
trg_codes = codes[references.shape[0] :]
data = np.ones(references.shape[0])
sparmat = coo_matrix(
(data, (src_codes, trg_codes)),
shape=(uniques.shape[0], uniques.shape[0]),
dtype=bool,
)
sparmat = sparmat + eye(uniques.shape[0])
fixedp = False
while not fixedp:
before = sparmat
sparmat = sparmat.dot(sparmat) > 0
if before.sum(axis=None) == sparmat.sum(axis=None):
fixedp = True
sparmat = sparmat - eye(uniques.shape[0])
sparmat = sparmat.tocoo()
src = sparmat.row
trg = sparmat.col
src_nids = uniques.take(src)
trg_nids = uniques.take(trg)
hierarchy = pd.DataFrame({"Src": src_nids, "Trg": trg_nids})
return hierarchy
def fast_hierarchy_transitive_closure(
inst_references: pd.DataFrame,
constrained_references: pd.DataFrame,
type_nodes: pd.DataFrame,
type_references: pd.DataFrame,
interesting_types: pd.Series,
) -> pd.DataFrame:
# Computes a navigation hierarchy between instances of interesting_types
# The navigation hierarchy is first computed between all types (given references in inst_references)
# Then, it is restricted to only concern instances of interesting_types
constrained_references = constrained_references[["Src", "Trg", "ReferenceType"]]
hierarchy = fast_transitive_closure(constrained_references)
types = type_nodes[type_nodes["NodeClass"].str.endswith("Type")].set_index(
"BrowseName"
)
interesting_ids_types = types.loc[pd.Index(interesting_types), "id"].to_list()
sub_inter_types = subtypes_of_nodes(
interesting_ids_types, type_nodes, type_references
)
inter_subtypes_index = pd.Index(sub_inter_types["subtype"])
htd = has_type_definition_references(inst_references, type_nodes).set_index("Trg")
htd = htd[htd.index.isin(inter_subtypes_index)]
inst_inter_type_index = pd.Index(htd["Src"])
hierarchy = hierarchy.set_index("Src")
hierarchy = hierarchy[hierarchy.index.isin(inst_inter_type_index)].reset_index()
hierarchy = hierarchy.set_index("Trg")
hierarchy = hierarchy[hierarchy.index.isin(inst_inter_type_index)].reset_index()
return hierarchy
def signal_variables(
instance_references: pd.DataFrame,
type_nodes: pd.DataFrame,
type_references: pd.DataFrame,
) -> pd.DataFrame:
reftypes = type_nodes[type_nodes["NodeClass"] == "UAVariableType"]
signalvar_type = reftypes.loc[
reftypes["BrowseName"] == "BaseDataVariableType", "id"
].iloc[0]
signalvar_types = subtypes_of_nodes([signalvar_type], type_nodes, type_references)
signalvar_types_index = pd.Index(signalvar_types["subtype"])
inst_htd = has_type_definition_references(instance_references, type_nodes)
inst_htd = inst_htd.set_index("Trg")
signal_vars = inst_htd[inst_htd.index.isin(signalvar_types_index)]["Src"]
return signal_vars
def property_variables(
instance_references: pd.DataFrame,
type_nodes: pd.DataFrame,
type_references: pd.DataFrame,
) -> pd.DataFrame:
reftypes = type_nodes[type_nodes["NodeClass"] == "UAVariableType"]
propvar_type = reftypes.loc[reftypes["BrowseName"] == "PropertyType", "id"].iloc[0]
propvar_types = subtypes_of_nodes([propvar_type], type_nodes, type_references)
propvar_types_index = pd.Index(propvar_types["subtype"])
inst_htd = has_type_definition_references(instance_references, type_nodes)
inst_htd = inst_htd.set_index("Trg")
prop_vars = inst_htd[inst_htd.index.isin(propvar_types_index)]["Src"]
return prop_vars
def typing_transitive_reflexive(
type_nodes: pd.DataFrame, type_references: pd.DataFrame
) -> pd.DataFrame:
subtype = has_subtype_references(type_references, type_nodes, "id")[["Src", "Trg"]]
# Add transitive closure
subtype_closure = fast_transitive_closure(subtype)
unique_types = pd.concat([type_references["Src"], type_references["Trg"]]).unique()
# Add reflexive closure
subtype_closure = pd.concat(
[subtype_closure, pd.DataFrame({"Src": unique_types, "Trg": unique_types})]
)
return subtype_closure
def subtypes_of_nodes(
types_to_subtype: Union[pd.Series, List[str]],
type_nodes: pd.DataFrame,
type_references: pd.DataFrame,
) -> pd.DataFrame:
# Computes the closure
closure = typing_transitive_reflexive(type_nodes, type_references)
closure = closure.set_index("Src")
typeindex = pd.Index(types_to_subtype)
closure = closure[closure.index.isin(typeindex)].reset_index()
closure = closure.rename(columns={"Trg": "subtype", "Src": "type"})
return closure
def closest_parent(
inst_nodes: pd.DataFrame,
inst_references: pd.DataFrame,
type_nodes: pd.DataFrame,
type_references: pd.DataFrame,
interesting_types: List[str],
uavar_type: str,
) -> pd.DataFrame:
sub_inter_types = subtypes_of_nodes(interesting_types, type_nodes, type_references)
inter_subtypes_index = pd.Index(sub_inter_types["subtype"])
htd = has_type_definition_references(inst_references, type_nodes).set_index("Trg")
htd = htd[htd.index.isin(inter_subtypes_index)]
inst_inter_type_index = pd.Index(htd["Src"])
# Remove incoming edges to objects of desired type
inst_references = inst_references.set_index("Trg")
inst_references = inst_references[
~inst_references.index.isin(inst_inter_type_index)
].reset_index()
signals = signal_variables(inst_references, type_nodes, type_references)
signals_index = | pd.Index(signals) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 10:55:13 2021
@author: <NAME>
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource
import numpy as np
import progressbar as pgb
from scipy import ndimage
from scipy.interpolate import RectBivariateSpline
from skimage import exposure, filters
from zipfile import ZipFile, is_zipfile
from copy import deepcopy
import stecto as st
try:
#delete the accesor to avoid warning from pandas
del pd.DataFrame.imlog
except AttributeError:
pass
@pd.api.extensions.register_dataframe_accessor('imlog')
class BHImage:
def __init__(self, df, *args, **kwargs):
self.data = df
@classmethod
# @staticmethod
def from_file(cls, fullpath, *args, **kwargs):
filename = kwargs.pop('filename', None)
image_type = kwargs.pop('image_type', 'single_array')
if is_zipfile(fullpath):
df_image = cls._from_zip_file(fullpath, filename, image_type, **kwargs)
else:
if image_type == 'rgb':
df_image = cls._read_rgb_file(fullpath, **kwargs)
elif image_type == 'single_array':
df_image = cls._read_single_array_file(fullpath, **kwargs)
return df_image.copy()
@classmethod
def _from_zip_file(cls, fullpath, filename, image_type, **kwargs):
with ZipFile(fullpath) as myzip:
check_file_lst = [filename in item for item in myzip.namelist()]
if any(check_file_lst):
file_found_index = check_file_lst.index(True)
log_file = myzip.namelist()[file_found_index]
else:
raise ValueError(f'Could not find log named {filename} inside zip file')
with myzip.open(log_file, 'r') as log:
if image_type == 'rgb':
df_image = cls._read_rgb_file(log, **kwargs)
elif image_type == 'single_array':
df_image = cls._read_single_array_file(log, **kwargs)
return df_image
@staticmethod
def _read_single_array_file(path, **kwargs):
print('\nloading single array log from file!!!')
start_md = kwargs.pop('start_md', -np.inf)
end_md = kwargs.pop('end_md', np.inf)
df_image = pd.read_csv(path, index_col=0, **kwargs)
df_image.index.name = 'depth'
#name columns
num_cols = len(df_image.columns)
df_image.columns=np.linspace(0, 360, num_cols)
df_image = df_image.imlog.crop_depth(start_md=start_md, end_md=end_md)
return df_image
@staticmethod
def _read_rgb_file(file, **kwargs):
print('\nloading rgb log from file. This might take a while!!!')
skiprows = kwargs.pop('skiprows', None)
start_md = kwargs.pop('start_md', -np.inf)
end_md = kwargs.pop('end_md', np.inf)
all_values_lst = []
depth_lst=[]
for n, line in pgb.progressbar(enumerate(file)):
line = line.decode()
if n in [0, skiprows]:
continue
lst = line.split(',')
depth_val = float(lst.pop(0))
if depth_val<start_md:
continue
elif depth_val>end_md:
break
depth_lst.append(depth_val)
line_lst=[]
for rgb_trio in lst:
line_lst.extend([int(v) for v in rgb_trio.strip().split('.')])
all_values_lst.append(line_lst)
index_tuples = []
n_cols = int(len(all_values_lst[0])/3)
for angle in np.linspace(0,360,n_cols):
for color in ['r','g','b']:
index_tuples.append((angle, color))
df = pd.DataFrame(all_values_lst, index=depth_lst,
columns=pd.MultiIndex.from_tuples(index_tuples))
return df
def crop_depth(self, start_md=None, end_md=None):
if (start_md is None) and (end_md is None):
return self.data
if start_md is None:
start_md = self.data.index.min()
if end_md is None:
end_md = self.data.index.max()
resolution = self.data.index.to_series().diff().mean()
new_index = pd.Index(np.arange(start_md,end_md,resolution))
df_image=self.data.reindex(index=new_index, method='nearest')
return df_image
def plot_image(self, *args, **kwargs):
df_image = self.data
ax = kwargs.pop('ax', None)
colorbar = kwargs.pop('colorbar', False)
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
if df_image.columns.nlevels > 1:
#the image is multichannel
try:
arr_to_plot = df_image.imlog.rgba_to_array()
print('image is rgba')
except KeyError:
#the image might be rgb
try:
arr_to_plot = df_image.imlog.rgb_to_array()
print('image is rgb')
except KeyError:
raise KeyError('The image seems in multichannel mode but could not be transformed to an array')
elif df_image.columns.nlevels == 1:
#the image is most certainly in single array mode
arr_to_plot = df_image.values
print('image is single array')
log_extent = (0, 360, df_image.index.max(), df_image.index.min())
cax = ax.imshow(arr_to_plot, extent=log_extent, aspect='auto', **kwargs)
if not ax.yaxis_inverted():
ax.invert_yaxis()
if colorbar:
cbar = fig.colorbar(cax)
ax.set_ylabel('MD (m)')
ax.set_xlabel('degrees (°)')
ax.set_xticks(range(0,360,90))
ax.grid()
if colorbar:
cbar.ax.set_ylabel('')
return fig, ax
def to_shaded_image(self, **kwargs):
'''
Uses matplotlibs's shade function to generate a shaded relief
version of the log
Parameters
----------
**kwargs : TYPE
DESCRIPTION.
Returns
-------
None.
'''
azdeg = kwargs.pop('azdeg', 0)
altdeg = kwargs.pop('altdeg', 20)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
cmap = kwargs.pop('cmap', 'viridis')
if vmin is None:
vmin = np.nanpercentile(self.data.values, 5)
if vmax is None:
vmax = np.nanpercentile(self.data.values, 95)
ls = LightSource(azdeg=azdeg,
altdeg=altdeg)
z = ls.shade(self.data.values, plt.get_cmap(cmap),
vmin=vmin,
vmax=vmax, **kwargs)
z = z.flatten().reshape((z.shape[0],-1))
new_columns= pd.MultiIndex.from_product((self.data.columns, ['r','g','b','a']))
df = pd.DataFrame(z, columns=new_columns, index=self.data.index)
return df
def to_binary(self, percentile_threshold=90):
limit = np.nanpercentile(self.data.values, percentile_threshold)
new_arr = np.empty_like(self.data.values)
new_arr[self.data.values>=limit]=1
new_arr[self.data.values<limit] =0
return self.array_to_df(new_arr)
def apply_median_filter(self, size=20):
data = ndimage.median_filter(self.data.values, size=size)
df = self.array_to_df(data)
return df
def equalize_adapthist(self, **kwargs):
if any(self.data.dtypes == float):
df = self.data.imlog.min_max_scaling()
else:
df = self.data
image_eq = exposure.equalize_adapthist(df.values, **kwargs)
df_eq = self.array_to_df(image_eq)
return df_eq
def equalize_histogram(self, **kwargs):
image_eq = exposure.equalize_hist(self.data.values, **kwargs)
df = self.array_to_df(image_eq)
return df
def min_max_scaling(self):
'''
Scales the dataframe so values are between 0 - 1
Returns
-------
pd.DataFrame
DESCRIPTION.
'''
df_max = self.data.max().max()
df_min = self.data.min().min()
df_scaled = self.data.applymap(lambda x: (x-df_min)/(df_max-df_min), na_action='ignore')
return df_scaled
def sharpen(self, kernel=None):
if kernel is None:
kernel = np.array([[-1, -1, -1],
[-1, 9, -1],
[-1, -1, -1]])
highpass = ndimage.convolve(self.data.values, kernel)
return self.array_to_df(highpass)
def unsharp_masking(self, kernel=None):
if kernel is None:
kernel = np.array([[1, 4, 6, 4, 1],
[4, 16, 24, 16, 4],
[6, 24, -476, 24, 6],
[4, 16, 24, 16, 4],
[1, 4, 6, 4, 1]])
blurred = ndimage.convolve(self.data.values, kernel)
sharp = self.data.values + blurred
return self.array_to_df(sharp)
def highpass_gaussian(self, sigma, **kwargs):
lowpass = ndimage.gaussian_filter(self.data.values, sigma, **kwargs)
gauss_highpass = self.data.values - lowpass
return self.array_to_df(gauss_highpass)
def difference(self, other):
other = other.reindex(index=self.data.index, method='nearest')
return other - self.data
def flatten_with_fracture(self, fracture, *args, **kwargs):
pivot_point_index = kwargs.pop('pivot_pt_index', 0)
df_im = self.data
df_fr = fracture.polyline
#MD that will be used as a pivot point
md_pivot = df_fr.md.iloc[pivot_point_index]
left_index = df_im.index - md_pivot
left_df = pd.DataFrame(df_im.iloc[:,0].values, index=left_index)
for im_col, polyline_md in zip(df_im.iloc[:,1:], df_fr.md.iloc[1:]):
right_index = df_im.index - polyline_md
right_df = pd.DataFrame(df_im[im_col].values, index=right_index)
left_df = pd.merge_asof(left_df.sort_index(), right_df,
left_index=True, right_index=True, direction='nearest')
if im_col == 10:
pass
left_df.columns = df_im.columns
im_flattened_by_polyline = left_df
return BHImage(data=im_flattened_by_polyline)
def extract_data_along_fracture(self, fracture):
y = self.data.columns
x = self.data.index
z = self.data.values
interp_func = RectBivariateSpline(x, y, z)
xi = fracture.polyline.md
yi = fracture.polyline.index
zi = interp_func.ev(xi, yi)
return zi
def build_fracture_df(self, *args, **kwargs):
path_out = kwargs.pop('path_out', None)
fig, ax = self.plot_image(shade=False, **kwargs)
coord_lst = fig.ginput(-1, timeout=0, mouse_pop=2, mouse_stop=3)
df = pd.DataFrame(coord_lst, columns=['x', 'md'])
if path_out is None:
pass
else:
df.to_csv(path_out, index=False)
return Fracture(polyline=df)
def is_rgb_image(self):
return all(self.data.dtypes=='object')
def rgb_to_array(self):
#suppose a uniform type
dtype = self.data.dtypes[0][0]
image = np.empty((self.data.shape[0], int(self.data.shape[1]/3), 3),
dtype= dtype)
# breakpoint()
image[:,:,0] = self.data.loc[:,(slice(None),'r')]
image[:,:,1] = self.data.loc[:,(slice(None),'g')]
image[:,:,2] = self.data.loc[:,(slice(None),'b')]
return image
def rgba_to_array(self):
#suppose a uniform type
dtype = self.data.dtypes[0][0]
image = np.empty((self.data.shape[0], int(self.data.shape[1]/4), 4),
dtype= dtype)
# breakpoint()
image[:,:,0] = self.data.loc[:,(slice(None),'r')]
image[:,:,1] = self.data.loc[:,(slice(None),'g')]
image[:,:,2] = self.data.loc[:,(slice(None),'b')]
image[:,:,3] = self.data.loc[:,(slice(None),'a')]
return image
def array_to_df(self, array):
df = pd.DataFrame(array, index=self.data.index, columns=self.data.columns)
return df
class Fracture():
def __init__(self, *args, **kwargs):
#polyline is a DataFrame with the coordinates of the "sinusoidal"
#representing the fracture in the borehole wall
self.polyline = kwargs.pop('polyline', None)
self.strike = kwargs.pop('strike', None)
self.dip = kwargs.pop('dip', None)
def load_polyline(self, path_in, *args, **kwargs):
df_stru = | pd.read_csv(path_in, index_col='x') | pandas.read_csv |
import pandas as pd
from pandas._testing import assert_frame_equal
from nempy.spot_markert_backend import objective_function
def test_energy():
bidding_ids = pd.DataFrame({
'unit': ['A', 'A', 'B', 'B'],
'capacity_band': ['1', '2', '1', '2'],
'service': ['energy', 'energy', 'energy', 'energy'],
'variable_id': [1, 2, 3, 4]
})
price_bids = pd.DataFrame({
'unit': ['A', 'B'],
'1': [16.0, 23.0],
'2': [17.0, 18.0]
})
unit_info = pd.DataFrame({
'unit': ['A', 'B'],
'dispatch_type': ['generator', 'load']
})
output = objective_function.bids(bidding_ids, price_bids, unit_info)
expected = pd.DataFrame({
'unit': ['A', 'A', 'B', 'B'],
'capacity_band': ['1', '2', '1', '2'],
'service': ['energy', 'energy', 'energy', 'energy'],
'variable_id': [1, 2, 3, 4],
'cost': [16.0, 17.0, -23.0, -18.0],
'dispatch_type': ['generator', 'generator', 'load', 'load']
})
expected.index = list(expected.index)
| assert_frame_equal(output, expected) | pandas._testing.assert_frame_equal |
"""
Test indicators.py functions for common indicators to be extracted from an OHLC dataset
Author: <NAME>
"""
import unittest
import indicators
import pandas as pd
class TestIndicators(unittest.TestCase):
def test_checkGreenCandle(self):
candleGreen = {"Open": 1.2, "Close": 1.5}
candleRed = {"Open": 3.4, "Close": 2}
self.assertEqual(indicators.checkGreenCandle(candleGreen),True)
self.assertEqual(indicators.checkGreenCandle(candleRed),False)
def test_checkEngulfingCandleOverPeriod(self):
candleSet = []
candleSet.append({"Open": 1, "Close": 2})
candleSet.append({"Open": 3, "Close": 0.5})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.checkEngulfingCandleOverPeriod(candleSet), [0,-1])
candleSet = []
candleSet.append({"Open": 5, "Close": 4})
candleSet.append({"Open": 3, "Close": 6})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.checkEngulfingCandleOverPeriod(candleSet), [0,1])
def test_sumGainsAndLossesOverPeriod(self):
candleSet = []
candleSet.append({"Open": 1, "Close": 2})
candleSet.append({"Open": 2, "Close": 1})
candleSet = pd.DataFrame(candleSet)
gainsTotal,lossesTotal = indicators.sumGainsAndLossesOverPeriod(candleSet)
self.assertEqual(gainsTotal,1)
self.assertEqual(lossesTotal,1)
candleSet = []
candleSet.append({"Open": 1, "Close": 2})
candleSet.append({"Open": 2, "Close": 3})
candleSet = pd.DataFrame(candleSet)
gainsTotal,lossesTotal = indicators.sumGainsAndLossesOverPeriod(candleSet)
self.assertEqual(gainsTotal,2)
self.assertEqual(lossesTotal,0)
candleSet = []
candleSet.append({"Open": 3, "Close": 2})
candleSet.append({"Open": 2, "Close": 1})
candleSet = pd.DataFrame(candleSet)
gainsTotal,lossesTotal = indicators.sumGainsAndLossesOverPeriod(candleSet)
self.assertEqual(gainsTotal,0)
self.assertEqual(lossesTotal,2)
"""
def test_computeRSI(self):
candleSet = []
for i in range (1,29):
candleSet.append({"Open": i, "Close": i+1})
candleSet = pd.DataFrame(candleSet)
"""
def test_computeSMAsOverPeriod(self):
candleSet = []
for i in range(250):
candleSet.append({"Open": 5, "Close": 5})
candleSet = pd.DataFrame(candleSet)
solution10,solution20,solution50,solution100,solution150,solution200 = [],[],[],[],[],[]
for i in range(250):
if i>10:
solution10.append(0)
else:
solution10.append(None)
if i>20:
solution20.append(0)
else:
solution20.append(None)
if i>50:
solution50.append(0)
else:
solution50.append(None)
if i>100:
solution100.append(0)
else:
solution100.append(None)
if i>150:
solution150.append(0)
else:
solution150.append(None)
if i>200:
solution200.append(0)
else:
solution200.append(None)
sMA10,sMA20,sMA50,sMA100,sMA150,sMA200 = indicators.computeSMAsOverPeriod(candleSet)
self.assertEqual(sMA10,solution10)
self.assertEqual(sMA20,solution20)
self.assertEqual(sMA50,solution50)
self.assertEqual(sMA100,solution100)
self.assertEqual(sMA150,solution150)
self.assertEqual(sMA200,solution200)
candleSet = []
for i in range(125):
candleSet.append({"Open": 5, "Close": 5})
candleSet.append({"Open": 10, "Close": 10})
candleSet = pd.DataFrame(candleSet)
solution10,solution20,solution50,solution100,solution150,solution200 = [],[],[],[],[],[]
for i in range(125):
if i==0:
solution10.append(None)
elif i*2-1>10:
solution10.append(0.25)
solution10.append(0.5)
else:
solution10.append(None)
solution10.append(None)
if i==0:
solution20.append(None)
elif i*2-1>20:
solution20.append(0.25)
solution20.append(0.5)
else:
solution20.append(None)
solution20.append(None)
if i==0:
solution50.append(None)
elif i*2-1>50:
solution50.append(0.25)
solution50.append(0.5)
else:
solution50.append(None)
solution50.append(None)
if i==0:
solution100.append(None)
elif i*2-1>100:
solution100.append(0.25)
solution100.append(0.5)
else:
solution100.append(None)
solution100.append(None)
if i==0:
solution150.append(None)
elif i*2-1>150:
solution150.append(0.25)
solution150.append(0.5)
else:
solution150.append(None)
solution150.append(None)
if i==0:
solution200.append(None)
elif i*2-1>200:
solution200.append(0.25)
solution200.append(0.5)
else:
solution200.append(None)
solution200.append(None)
solution10.append(0.25)
solution20.append(0.25)
solution50.append(0.25)
solution100.append(0.25)
solution150.append(0.25)
solution200.append(0.25)
sMA10,sMA20,sMA50,sMA100,sMA150,sMA200 = indicators.computeSMAsOverPeriod(candleSet)
self.assertEqual(sMA10,solution10)
self.assertEqual(sMA20,solution20)
self.assertEqual(sMA50,solution50)
self.assertEqual(sMA100,solution100)
self.assertEqual(sMA150,solution150)
self.assertEqual(sMA200,solution200)
def test_computeAverageTrueRange(self):
candleSet = []
for i in range (1,29):
candleSet.append({"Low": i, "High": i+1,"Close": i+1})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.computeAverageTrueRange(candleSet),1)
candleSet = []
for i in range (1,29):
candleSet.append({"Low": i, "High": i+5,"Close": i+5})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.computeAverageTrueRange(candleSet),5)
candleSet = []
for i in range (1,29):
candleSet.append({"Low": 1, "High": 1,"Close": 1})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.computeAverageTrueRange(candleSet),0)
def test_checkIncreaseTomorrow(self):
candleSet = []
candleSet.append({"Open": 1, "Close": 1})
candleSet.append({"Open": 1, "Close": 2})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.checkIncreaseTomorrow(candleSet["Close"],0.02),[1,0])
candleSet = []
candleSet.append({"Open": 1, "Close": 1})
candleSet.append({"Open": 1, "Close": 1})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.checkIncreaseTomorrow(candleSet["Close"],0.02),[0,0])
candleSet = []
candleSet.append({"Open": 1, "Close": 1})
candleSet.append({"Open": 1, "Close": 0.5})
candleSet = | pd.DataFrame(candleSet) | pandas.DataFrame |
import pandas as pd
import plotly.express as px
def iplot_bar_polar(self, theta, color, r='auto', template='xgridoff',
color_continuous_scale='auto', **kwds):
"""
It uses plotly.express.bar_polar.
In a polar bar plot, each row of 'color' is represented as a wedge mark in polar coordinates.
Parameters
----------
theta: str
wf.data colum with the directions in degrees (0 - 360)
color: str
wf.data colum with the data to plot with colors
r: str
wf.data column with the data to use as a radium.
If r = 'auto', r is the counts of 'theta' en each direction.
template: str
Plotly express style templates.
Options:
'ggplot2'
'seaborn'
'simple_white'
'plotly'
'plotly_white'
'plotly_dark'
'presentation'
'xgridoff'
'ygridoff'
'gridon'
'none'
color_continuous_scale: plotly.express.sequential
View https://plotly.com/python/colorscales/.
If color_continuous_scale = 'auto', color_continuous_scale = px.colors.sequential.Rainbow
**kwds: plotly.express.bar_polar arguments
Returns
-------
fig: plotly.graph_objects.Figure
"""
if color_continuous_scale == 'auto':
color_continuous_scale = px.colors.sequential.Rainbow
df = self.data.copy()
# Create directions
df['direction'] = 'N'
df.loc[df[theta].between(11.25, 33.75) , 'direction'] = 'NNE'
df.loc[df[theta].between(33.75, 56.25) , 'direction'] = 'NE'
df.loc[df[theta].between(56.25, 78.75) , 'direction'] = 'ENE'
df.loc[df[theta].between(78.75, 101.25) , 'direction'] = 'E'
df.loc[df[theta].between(101.25, 123.75) , 'direction'] = 'ESE'
df.loc[df[theta].between(123.75, 146.25) , 'direction'] = 'SE'
df.loc[df[theta].between(146.25, 168.75) , 'direction'] = 'SSE'
df.loc[df[theta].between(168.75, 191.25) , 'direction'] = 'S'
df.loc[df[theta].between(191.25, 213.75) , 'direction'] = 'SSW'
df.loc[df[theta].between(213.75, 236.25) , 'direction'] = 'SW'
df.loc[df[theta].between(236.25, 258.75) , 'direction'] = 'WSW'
df.loc[df[theta].between(258.75, 281.25) , 'direction'] = 'W'
df.loc[df[theta].between(281.25, 303.75) , 'direction'] = 'WNW'
df.loc[df[theta].between(303.75, 326.25) , 'direction'] = 'NW'
df.loc[df[theta].between(326.25, 348.75) , 'direction'] = 'NNW'
new_index = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S',
'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
# Create serie of counts for directions
s_dir = df['direction'].value_counts()
s_dir.rename('frequency', inplace=True)
# Create mean of directions
df_mean = df.groupby(['direction']).mean()
df_work = | pd.merge(s_dir, df_mean[color], right_index=True, left_index=True) | pandas.merge |
#<NAME> 5 Feb 2022
import sys
import pandas as pd
from sqlalchemy import create_engine
import numpy as np
def load_data(messages_filepath, categories_filepath):
"""
Returns the pandas DataFrame after merging 2 data sources (csv)
Parameters:
messages_filepath (str): file path of messages data
categories_filepath (str): file path of categoriess data
Returns:
pandas.core.frame.DataFrame after merging 2 files
"""
messages = | pd.read_csv(messages_filepath) | pandas.read_csv |
import requests
import datetime
import pandas as pd
import numpy as np
from bokeh.plotting import figure
from bokeh.models import (
Plot, graphs, ColumnDataSource,
Circle, Div,
HoverTool, BoxZoomTool, ResetTool, WheelZoomTool, PanTool,
Panel, Tabs, Span
)
from bokeh.palettes import Spectral4
from bokeh.io import show
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
import networkx as nx
from bokeh.io import output_file
from bokeh.models import Jitter
from bokeh.layouts import gridplot
from bokeh.transform import jitter
from bokeh.palettes import Category20
# Utils
def jitter_coordinates(df):
G = nx.DiGraph()
G.add_nodes_from(df.id.tolist())
edge_list = df[df["infectionSource"].apply(lambda x: isinstance(x, int))].apply(lambda row: (row["id"], str(row["infectionSource"])), axis=1).tolist()
G.add_edges_from(
edge_list
)
jittering = nx.spring_layout(G)
df["x"] = df.apply(lambda row: (row["x"] + jittering[row["id"]][0] * 5000), axis=1) #df["x"] + np.random.normal(0, 2000,df.shape[0])
df["y"] = df.apply(lambda row: (row["y"] + jittering[row["id"]][1] * 5000), axis=1) # df["y"] + np.random.normal(0, 2000,df.shape[0])
del G
return df
def set_case_status(df, page_json):
df["infectionSourceCountry"] = df["infectionSourceCountry"].fillna("Unknown")
for case in ("confirmed", "recovered", "deaths"):
cases = [str(person["id"]) for person in page_json[case]]
mask = df["id"].isin(cases)
df.loc[mask, "case"] = case
return df
def set_coordinates(df):
df_locs = | pd.read_csv("sairaanhoitopiirit.csv", sep=";") | pandas.read_csv |
"""
@author: <NAME>
"""
import numpy as np
import cv2
from keras_facenet import FaceNet
embedder = FaceNet()
from pil import Image
from matplotlib import pyplot
from numpy import asarray
import datetime
import os
import pandas as pd
import openpyxl
from openpyxl import load_workbook
trail = embedder.extract('images//' + 'sai (1).jpg', threshold=0.95)
def show_face(filename,box,name,info,required_size=(160, 160)):
# load image from file
image = Image.open(filename)
# convert to RGB, if needed
image = image.convert('RGB')
# convert to array
pixels = asarray(image)
x1,y1,width,height = box
x2, y2 = x1 + width, y1 + height
# extract the face
face = pixels[y1:y2, x1:x2]
# resize pixels to the model size
image = Image.fromarray(face)
# image = image.resize(required_size)
face_array = asarray(image)
pyplot.figure("Please Close this Window")
pyplot.title(name + '\n Date & Time ' + info)
pyplot.imshow(face_array)
pyplot.show()
return
def runit():
try:
detections = embedder.extract('image_0.png', threshold=0.95)
face_info = detections[0]
face_embedding = face_info['embedding']
box = face_info['box']
face_embedding=face_embedding.reshape(1,-1)
import facetrain
name_dict = np.load('name_dict.npy',allow_pickle='TRUE').item()
# names = np.load('names.npy')
# names = names.tolist()
model = facetrain.model
a = int(model.predict(face_embedding))
print(name_dict[a])
now= datetime.datetime.now()
date = datetime.datetime.now().date()
time = datetime.datetime.now().time()
print (now.strftime("%d-%m-%Y %H:%M:%S"))
show_face('image_0.png',box,name_dict[a],now.strftime("%d-%m-%Y %H:%M:%S"))
df = pd.DataFrame({'Name': [name_dict[a]],'Date': [date.strftime("%d-%m-%Y")],'Time': [time.strftime("%H:%M:%S")]})
folder = 'Attendence//'
sheet_name = folder + "Attendence_{}.xlsx".format(date.strftime("%d-%m-%Y"))
writer = | pd.ExcelWriter(sheet_name, engine='openpyxl') | pandas.ExcelWriter |
# --------------
import pandas as pd
from collections import Counter
# Load dataset
data= | pd.read_csv(path) | pandas.read_csv |
#!/usr/bin/env python
"""Module to deal with halos, to be used with HaloMaker.
This module is heavily inspired by the set of IDL routines originally
found in the Ramses Analysis ToolSuite (RATS).
TODO: Some more documentation
"""
import numpy as np
import pandas as pd
import yt
from yt.utilities.logger import ytLogger as mylog
import yt.utilities.fortran_utils as fpu
from yt.funcs import get_pbar
import os
import pandas as pd
class HaloList(object):
def __init__(self, ds, folder='.', contam=False):
"""
PandaList with halos and their properties
"""
self.folder = folder
self.iout = int(str(ds).split('_')[1])
if os.path.exists(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self)):
self.halos = pd.read_hdf(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self))
else:
self.halos = self._read_halos(data_set=ds, with_contam_option=contam)
if self.halos.index.size > 0:
self.halos.to_hdf(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self), 'hdf')
self.ds = ds
self.halos['bhid'] = -1 ; self.halos['galID'] = -1
self.halos['mgal'] = 0 ; self.halos['msink'] = 0
# read purity of halos
self.halos['pollution'] = 0
contam_file_path = '{s.folder}/Halos/{s.iout}/contam_halos{s.iout:03d}'.format(
s=self)
if os.path.exists(contam_file_path):
p = np.loadtxt(contam_file_path)
if len(p) > 0:
p = p.T
self.halos.loc[p[0], 'pollution'] = p[1]/p[2]
def get_halo(self, hid, fname=None):
halo = self.halos.loc[hid]
scale_mpc = float(self.ds.length_unit.in_units('Mpc'))
halostr = ("Halo {hid:.0f} (level {h.level:.0f}):\n"
"\tContains {h.nbpart:.0f} particles and {h.nbsub:.0f} subhalo(s)\n"
"\tCenter:\t\t ({h.x}, {h.y}, {h.z}) box units\n"
"\tVelocity:\t ({h.vx}, {h.vy}, {h.vz}) km/s\n"
"\tL:\t\t ({h.Lx}, {h.Ly}, {h.Lz}) ToCheck\n"
"\tMass:\t\t {h.m:.3e} Msun\n"
"\tMvir:\t\t {h.mvir:.3e} Msun\n"
"\tRadius:\t\t {h.r:.3e} Mpc ({rcodeunits:.3e} box units)\n"
"\tRvir:\t\t {h.rvir:.3e} Mpc ({rvcodeunits:.3e} box units)\n"
"\tTvir:\t\t {h.tvir:.3e} K".format(hid=hid,
h=halo,
rcodeunits=halo.r / scale_mpc,
rvcodeunits=halo.rvir / scale_mpc))
if fname is not None:
with open(fname, 'w') as f:
f.write(halostr)
return halostr
def get_halo_sphere(self, hid, rvir_factor=5):
halo_spheres = getattr(self, '_halo_spheres', {})
if (hid, rvir_factor) in halo_spheres:
return halo_spheres[hid, rvir_factor]
tmp = self.halos.loc[hid, ['x', 'y', 'z', 'rvir', 'vx', 'vy', 'vz']]\
.values
center = self.ds.arr(tmp[:3], 'code_length')
radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
vel = self.ds.arr(tmp[4:7], 'km/s')
# Get a sphere centered on the halo
sphere = self.ds.sphere(center, radius)
sphere.set_field_parameter('bulk_velocity', vel)
halo_spheres[(hid, rvir_factor)] = sphere
self._halo_spheres = halo_spheres
return sphere
def plot_halo(self, hid, rvir_factor=5, field=('deposit', 'all_density'), folder='./',
weight_field=('index', 'ones'), cmap='viridis', slice=False,
axis='z', **kwargs):
'''Plot a given halo.
Parameters
----------
* hid, integer
The halo id to plot
* rvir_factor, float, default=5
Size of the region to plot in unit of Rvir
* field, tuple
The yt field to plot
* folder, string
The folder where to save the data
* weight_field, tuple
The field to weight the projection by.
* cmap, string
The colormap to use
* slice, boolean
If true, do a slice plot instead of a projection plot
* axis, 'x', 'y' or 'z'
The axis to project onto
'''
for k, v in kwargs.items():
print('%s: %s not supported' % (k, v))
if hid not in self.halos.index:
mylog.error('%s not found.' % hid)
return
# Get position
tmp = np.array(self.halos.loc[hid, ['x', 'y', 'z', 'rvir']])
center = self.ds.arr(tmp[:3], 'code_length')
radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
# Get a sphere centered on the halo
sphere = self.ds.sphere(center, radius)
# Make a projection plot
p = yt.ProjectionPlot(self.ds, axis, field, data_source=sphere,
weight_field=weight_field)
p.set_cmap(field=field, cmap=cmap)
p.annotate_timestamp(corner='upper_left', time=True, redshift=True)
p.annotate_scale(corner='upper_right')
# TODO: annotate halos
# TODO: better name
p.save(folder)
# Accessors
def __getitem__(self, item):
if str(item) in self.halos:
return self.halos[item]
else:
return self.halos.ix[item]
# def __getattr__(self, name):
# return self.halos.__getattr__(name) # self.halos[name]
def __len__(self):
return len(self.halos)
def __iter__(self):
return self.halos.iterrows()
# Printing functions
def __str__(self):
return self.halos.__str__()
# Convenience functions
def _read_halos(self, data_set, with_contam_option=False):
halo_keys = ('ID', 'nbpart', 'level', 'min_part_id',
'host', 'hostsub', 'nbsub', 'nextsub',
'x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel')
filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(
s=self)
data = np.empty(shape=(0, len(halo_keys)), dtype=object)
yt.funcs.mylog.debug('Reading halo catalog %s (ds=%s)' % (filename, data_set))
offsets = {}
if os.path.exists(filename):
with open(filename, 'rb') as f:
[npart] = fpu.read_vector(f, 'i')
[massp] = fpu.read_vector(f, 'f')
[aexp] = fpu.read_vector(f, 'f')
[omega_t] = fpu.read_vector(f, 'f')
[age] = fpu.read_vector(f, 'f')
[nhalos, nsubs] = fpu.read_vector(f, 'i')
# Save the age/aexp, the mass of the particle,
# as well as the number of (sub)halos
self.nhalos = nhalos
self.nsubs = nsubs
self.aexp = aexp
self.age = age
self.massp = massp
data = np.empty(shape=(nhalos + nsubs, len(halo_keys)), dtype=object)
mylog.info('Brick: halos : %s' % nhalos)
mylog.info('Brick: sub halos : %s' % nsubs)
mylog.info('Brick: aexp : %s' % aexp)
#pbar = get_pbar('', nhalos+nsubs)
for ihalo in range(nhalos + nsubs):
pos = f.tell()
[nbpart] = fpu.read_vector(f, 'i') # Number of particles
listp = fpu.read_vector(f, 'i') # List of the particles IDs
[ID] = fpu.read_vector(f, 'i') # Halo ID
fpu.skip(f, 1) # Skip timestep
[level, host, hostsub, nbsub, nextsub] = fpu.read_vector(f, 'i')
[m] = fpu.read_vector(f, 'f') # Total mass
[x, y, z] = fpu.read_vector(f, 'f') # Center
[vx, vy, vz] = fpu.read_vector(f, 'f') # Velocity
[Lx, Ly, Lz] = fpu.read_vector(f, 'f') # Angular momentum
[r, a, b, c] = fpu.read_vector(f, 'f') # Shape (ellipticity)
[ek, ep, et] = fpu.read_vector(f, 'f') # Energetics
[spin] = fpu.read_vector(f, 'f') # Total angular momentum
[rvir, mvir, tvir, cvel] = fpu.read_vector(f, 'f') # Virial parameters
[rho0, r_c] = fpu.read_vector(f, 'f') # NFW params
if with_contam_option:
[contam] = fpu.read_vector(f, 'i') # Contamination
# Add the halo to the list
# halos.loc[ihalo] = [ID, nbpart, level, listp.min(),
# host, hostsub, nbsub, nextsub,
# x, y, z, vx, vy, vz, Lx, Ly, Lz,
# a, b, c, ek, ep, et, rho0, r_c,
# spin, m, r, mvir, rvir, tvir, cvel]
data[ihalo] = [ID, nbpart, level, listp.min(),
host, hostsub, nbsub, nextsub,
x, y, z, vx, vy, vz, Lx, Ly, Lz,
a, b, c, ek, ep, et, rho0, r_c,
spin, m, r, mvir, rvir, tvir, cvel]
#pbar.update()
offsets[ID] = pos
print('')
types = {}
for k in ('ID', 'nbpart', 'level', 'min_part_id',
'host', 'hostsub', 'nbsub', 'nextsub'):
types[k] = np.int64
for k in ('x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel'):
types[k] = np.float64
dd = {k: data[:, i].astype(types[k])
for i, k in enumerate(halo_keys)}
halos = | pd.DataFrame(dd) | pandas.DataFrame |
import matplotlib.cm as cm
import pandas as pd
import seaborn as sns
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import matplotlib.pyplot as plt
import numpy as np
###############################################################################################################
# IMPORTANT: USE ONLY WITH LIST OF TWEETS CONTAINING A SIGNIFICANT AMOUNT FROM EACH USER PRESENT IN THE LIST #
# FOR EXAMPLE TWEETS OBTAINED WITH data-mining/getTimelines.py #
###############################################################################################################
FILENAME_TWEET = "../data-mining/results/timeline.csv" # List of tweets to consider
OUTPUT_FILENAME = "ReactionsVsFollowers.pdf" # Filename to store the plot
BUBBLE_SCALE = (300, 1600) # Scale of the bubbles
X_LOG = True # Wether or not to use log scale on X axis
Y_LOG = True # Wether or not to use log scale on Y axis
# Load all tweets
tweets = pd.read_csv(FILENAME_TWEET, dtype='str')
tweets.date = pd.to_datetime(tweets.date)
tweets.likes = pd.to_numeric(tweets.likes)
tweets.retweets = | pd.to_numeric(tweets.retweets) | pandas.to_numeric |
#!/home/pict/anaconda3/bin/python3.8
import pandas as pd
import numpy as np
filename = 'subword.mr'
mr = None
with open(filename) as f:
x = f.readlines()
if x != '\n':
mr = x
mr = [x.replace('\n', '') for x in mr]
mr = list(filter(None, mr))
filename = 'subword.en'
en = None
with open(filename) as f:
x = f.readlines()
if x != '\n':
en = x
en = [x.replace('\n', '') for x in en]
en = list(filter(None, en))
# df
mr_en = pd.DataFrame()
# add columns
mr_en['mr'] = mr[1:]
mr_en['en'] = en[1:]
def train_validate_test_split(df, train_percent=.6, validate_percent=.2, seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
validate_end = int(validate_percent * m) + train_end
train = df.iloc[perm[:train_end]]
validate = df.iloc[perm[train_end:validate_end]]
test = df.iloc[perm[validate_end:]]
return train, validate, test
# train test split
# train, validate, test = \
# np.split(mr_en.sample(frac=1, random_state=42),
# [int(.6*len(mr_en)), int(.8*len(mr_en))])
train, validate, test = \
train_validate_test_split(df=mr_en, train_percent=0.98, validate_percent=0.01, seed=42)
# train
pd.DataFrame(train['mr']).to_csv('train.mr', index=False)
pd.DataFrame(train['en']).to_csv('train.en', index=False)
# validate
| pd.DataFrame(validate['mr']) | pandas.DataFrame |
import pandas as pd
import re
import sys
from pymicruler.utils import util
from pymicruler.bp.BlockProcessor import BlockProcessor as BP
from pymicruler.bp.BlockInterpreter import BlockInterpreter as BI
from pymicruler.bp.NoteAnalysis import NoteAnalysis as NA
from pymicruler.bp.TaxonomyHandler import TaxonomyHandler as TH
from pymicruler.bp.ResourceCompiler import ResourceCompiler as RC
class EucastParser:
def __init__(self):
self.ires = pd.read_csv(util.Path.IRES.value)
self.note_analyser = NA()
self.b_i = BI()
self.r_c = RC()
self.all_sheets = pd.DataFrame()
self.table = pd.DataFrame()
self.guidelines = pd.DataFrame()
def run_eucast_parser(self, bp_path):
"""
Starts the parsing workflow for a given Eucast breakpoint table.
:param bp_path: Path to Eucast breakpoint table.
:type: String
:return: Parsed breakpoints
:rtype: Pandas DataFrame
"""
self.all_sheets = pd.DataFrame()
raw_table = pd.read_excel(
bp_path, sheet_name=None, na_values=[''], keep_default_na=False)
relevant_sheets = self._filter_sheets(raw_table)
for title, sheet in relevant_sheets.items():
organism = list(sheet)[0]
note_col = self._column_check(sheet)
reduced_sheet = self._column_removal(sheet, note_col)
breakpoints = self._process_sheet(reduced_sheet, organism)
self.all_sheets = pd.concat((self.all_sheets, breakpoints))
self.note_analyser.summarize_note_analysis()
self.table = self.b_i.run_block_interpreter(
self.all_sheets, self.note_analyser)
TH.run_quality_check(self.table)
TH.run_quality_check(self.ires)
out_table = self.r_c.run_compilation(self.table, self.ires)
return out_table
@staticmethod
def _filter_sheets(table):
"""
Removes sheets that do not contain breakpoints.
:param table: The parsed EUCAST clinical breakpoint table.
:type: Ordered Dictionary.
:return: Table containing only relevant sheets.
:rtype: Ordered Dictionary
"""
sheets = set(table.keys())
remove_sheets = sheets.intersection(set(util.Info.GEN_SHEETS.value))
[table.pop(k) for k in remove_sheets]
return table
@staticmethod
def _column_removal(sheet, note_col):
"""
Removes all columns that do not contain information for AST
interpretation.
:param sheet: Currently processed sheet of the EUCAST breakpoint table.
:type: Pandas Dataframe.
:param note_col: Column index of note text
:type: Integer
:return: Reduced table containing compound, R_value, S_value and Notes.
"""
relevant_cols = [k.value for k in util.ColPosStd]
relevant_cols.append(note_col)
sheet = sheet.iloc[:, relevant_cols]
sheet.columns = util.Cols.BP_OUT.value
return sheet
def _process_sheet(self, sheet, organism):
"""
Detects blocks within sheet, initializes analyses and returns
information.
:param sheet: One sheet of the Eucast breakpoint table
:type: Pandas DataFrame
:param organism: Name of the organism the breakpoints are applicable to
:type: String
:return: Dataframe with ordered breakpoint information
:rtype: Pandas DataFrame
"""
all_breakpoints = pd.DataFrame()
block_starters = self._get_blocks(sheet)
for idx in range(len(block_starters)-1):
block = sheet.iloc[block_starters[idx]:block_starters[idx+1]]
bp = BP()
breakpoints = bp.process_block(block, organism, self.note_analyser)
all_breakpoints = | pd.concat((all_breakpoints, breakpoints)) | pandas.concat |
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from arguments import get_args_parser
RELATION_CLASS = [
'no_relation',
'org:top_members/employees',
'org:members',
'org:product',
'per:title',
'org:alternate_names',
'per:employee_of',
'org:place_of_headquarters',
'per:product',
'org:number_of_employees/members',
'per:children',
'per:place_of_residence',
'per:alternate_names',
'per:other_family',
'per:colleagues',
'per:origin',
'per:siblings',
'per:spouse',
'org:founded',
'org:political/religious_affiliation',
'org:member_of',
'per:parents',
'org:dissolved',
'per:schools_attended',
'per:date_of_death',
'per:date_of_birth',
'per:place_of_birth',
'per:place_of_death',
'org:founded_by',
'per:religion'
]
NUM2LABEL = {k:v for k, v in enumerate(RELATION_CLASS)}
LABEL2NUM = {v:k for k, v in NUM2LABEL.items()}
def softmax(arr: np.ndarray, axis: int = -1):
c = arr.max(axis=axis, keepdims=True)
s = arr - c
nominator = np.exp(s)
denominator = nominator.sum(axis=axis, keepdims=True)
probs = nominator / denominator
return probs
def get_temps(tokenizer):
args = get_args_parser()
temps = {}
with open(args.data_dir + "/" + args.temps, "r") as f:
for i in f.readlines():
i = i.strip().split("\t")
info = {}
info['name'] = i[1].strip()
info['temp'] = [
[tokenizer.mask_token, ':'],
# ['the', tokenizer.mask_token],
[tokenizer.mask_token, tokenizer.mask_token, tokenizer.mask_token],
[tokenizer.mask_token, ':'],
# ['the', tokenizer.mask_token],
]
print (i)
info['labels'] = [
(i[2],),
(i[3],i[4],i[5]),
(i[6],)
]
print (info)
temps[info['name']] = info
return temps
def set_seeds(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False # for faster training, but not deterministic
class FocalLoss(torch.nn.Module):
def __init__(self, alpha: float = None, gamma: float = 0.5, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim()>2:
input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
input = input.transpose(1,2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1,1)
logpt = F.log_softmax(input)
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type()!=input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0,target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average: return loss.mean()
else: return loss.sum()
def get_confusion_matrix(logit_or_preds, labels, is_logit=True):
preds = np.argmax(logit_or_preds, axis=1).ravel() if is_logit else logit_or_preds
cm = confusion_matrix(labels, preds)
norm_cm = cm / np.sum(cm, axis=1)[:,None]
cm = | pd.DataFrame(norm_cm, index=RELATION_CLASS, columns=RELATION_CLASS) | pandas.DataFrame |
#Code for Preprocessing the COVID-19 data
import pandas as pd
import numpy as np
import datetime
train_df = pd.read_csv("/opt/train.csv")
test_df = pd.read_csv("/opt/test.csv")
train_df.apply(lambda col: col.isnull().value_counts(), axis=0)
test_df.apply(lambda col: col.isna().value_counts(), axis=0)
train_df["Province_State"] = train_df["Province_State"].fillna("")
test_df["Province_State"] = test_df["Province_State"].fillna("")
train_df["Date"] = | pd.to_datetime(train_df["Date"]) | pandas.to_datetime |
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import h5py
import scipy
from astropy.time import Time
from icepyx import icesat2data as ipd
def getATL03(f,beam):
# height of each received photon, relative to the WGS-84 ellipsoid (with some, not all corrections applied, see background info above)
heights=f[beam]['heights']['h_ph'][:]
# latitude (decimal degrees) of each received photon
lats=f[beam]['heights']['lat_ph'][:]
# longitude (decimal degrees) of each received photon
lons=f[beam]['heights']['lon_ph'][:]
# seconds from ATLAS Standard Data Product Epoch. use the epoch parameter to convert to gps time
dt=f[beam]['heights']['delta_time'][:]
# confidence level associated with each photon event
# -2: TEP
# -1: Events not associated with a specific surface type
# 0: noise
# 1: buffer but algorithm classifies as background
# 2: low
# 3: medium
# 4: high
# Surface types for signal classification confidence
# 0=Land; 1=Ocean; 2=SeaIce; 3=LandIce; 4=InlandWater
conf=f[beam]['heights']['signal_conf_ph'][:,2] #choose column 2 for confidence of sea ice photons
# number of ATL03 20m segments
n_seg, = f[beam]['geolocation']['segment_id'].shape
# first photon in the segment (convert to 0-based indexing)
Segment_Index_begin = f[beam]['geolocation']['ph_index_beg'][:] - 1
# number of photon events in the segment
Segment_PE_count = f[beam]['geolocation']['segment_ph_cnt'][:]
# along-track distance for each ATL03 segment
Segment_Distance = f[beam]['geolocation']['segment_dist_x'][:]
# along-track distance (x) for photon events
x_atc = np.copy(f[beam]['heights']['dist_ph_along'][:])
# cross-track distance (y) for photon events
y_atc = np.copy(f[beam]['heights']['dist_ph_across'][:])
for j in range(n_seg):
# index for 20m segment j
idx = Segment_Index_begin[j]
# number of photons in 20m segment
cnt = Segment_PE_count[j]
# add segment distance to along-track coordinates
x_atc[idx:idx+cnt] += Segment_Distance[j]
df03=pd.DataFrame({'lats':lats,'lons':lons,'x':x_atc,'y':y_atc,'heights':heights,'dt':dt,'conf':conf})
return df03
def getATL07(f,beam):
lats = f[beam+'/sea_ice_segments/latitude'][:]
lons = f[beam+'/sea_ice_segments/longitude'][:]
dt = f[beam+'/sea_ice_segments/delta_time'][:]
seg_x = f[beam+'/sea_ice_segments/seg_dist_x'][:]
heights = f[beam+'/sea_ice_segments/heights/height_segment_height'][:]
conf = f[beam+'/sea_ice_segments/heights/height_segment_confidence'][:]
stype = f[beam+'/sea_ice_segments/heights/height_segment_type'][:]
ssh_flag = f[beam+'/sea_ice_segments/heights/height_segment_ssh_flag'][:]
gauss = f[beam+'/sea_ice_segments/heights/height_segment_w_gaussian'][:]
photon_rate = f[beam+'/sea_ice_segments/stats/photon_rate'][:]
cloud = f[beam+'/sea_ice_segments/stats/cloud_flag_asr'][:]
mss = f[beam+'/sea_ice_segments/geophysical/height_segment_mss'][:]
ocean_tide = f[beam+'/sea_ice_segments/geophysical/height_segment_ocean'][:]
lpe_tide = f[beam+'/sea_ice_segments/geophysical/height_segment_lpe'][:]
ib = f[beam+'/sea_ice_segments/geophysical/height_segment_ib'][:]
df07=pd.DataFrame({'lats':lats,'lons':lons,'heights':heights,'dt':dt,'conf':conf,'stype':stype,'ssh_flag':ssh_flag, 'gauss':gauss,'photon_rate':photon_rate,'cloud':cloud,'mss':mss,'ocean':ocean_tide,'lpe':lpe_tide,'ib':ib})
return df07
#--------- READERS COPIED FROM 2019 HACKWEEK TUTORIALS (not including the dictionary/xarray readers) -----
def getATL03data(fileT, numpyout=False, beam='gt1l'):
""" Pandas/numpy ATL03 reader
Written by <NAME>, June 2018 (<EMAIL>)
I've picked out the variables from ATL03 I think are of most interest to
sea ice users, but by no means is this an exhastive list.
See the xarray or dictionary readers to load in the more complete ATL03 dataset
or explore the hdf5 files themselves (I like using the app Panpoly for this) to
see what else you might want
Args:
fileT (str): File path of the ATL03 dataset
numpy (flag): Binary flag for outputting numpy arrays (True) or pandas dataframe (False)
beam (str): ICESat-2 beam (the number is the pair, r=strong, l=weak)
returns:
either: select numpy arrays or a pandas dataframe
"""
# Open the file
try:
ATL03 = h5py.File(fileT, 'r')
except:
'Not a valid file'
lons=ATL03[beam+'/heights/lon_ph'][:]
lats=ATL03[beam+'/heights/lat_ph'][:]
# Number of seconds since the GPS epoch on midnight Jan. 6, 1980
delta_time=ATL03[beam+'/heights/delta_time'][:]
# #Add this value to delta time parameters to compute the full gps_seconds
atlas_epoch=ATL03['/ancillary_data/atlas_sdp_gps_epoch'][:]
# Conversion of delta_time to a calendar date
# This function seems pretty convoluted but it works for now..
# I'm sure there is a simpler function we can use here instead.
temp = ut.convert_GPS_time(atlas_epoch[0] + delta_time, OFFSET=0.0)
# Express delta_time relative to start time of granule
delta_time_granule=delta_time-delta_time[0]
year = temp['year'][:].astype('int')
month = temp['month'][:].astype('int')
day = temp['day'][:].astype('int')
hour = temp['hour'][:].astype('int')
minute = temp['minute'][:].astype('int')
second = temp['second'][:].astype('int')
dFtime=pd.DataFrame({'year':year, 'month':month, 'day':day,
'hour':hour, 'minute':minute, 'second':second})
# Primary variables of interest
# Photon height
heights=ATL03[beam+'/heights/h_ph'][:]
print(heights.shape)
# Flag for signal confidence
# column index: 0=Land; 1=Ocean; 2=SeaIce; 3=LandIce; 4=InlandWater
# values:
#-- -1: Events not associated with a specific surface type
#-- 0: noise
#-- 1: buffer but algorithm classifies as background
#-- 2: low
#-- 3: medium
#-- 4: high
signal_confidence=ATL03[beam+'/heights/signal_conf_ph'][:,2]
# Add photon rate, background rate etc to the reader here if we want
ATL03.close()
dF = pd.DataFrame({'heights':heights, 'lons':lons, 'lats':lats,
'signal_confidence':signal_confidence,
'delta_time':delta_time_granule})
# Add the datetime string
dFtimepd=pd.to_datetime(dFtime)
dF['datetime'] = pd.Series(dFtimepd, index=dF.index)
# Filter out high elevation values
#dF = dF[(dF['signal_confidence']>2)]
# Reset row indexing
#dF=dF.reset_index(drop=True)
return dF
# Or return as numpy arrays
# return along_track_distance, heights
def getATL07data(fileT, numpy=False, beamNum=1, maxElev=1e6):
""" Pandas/numpy ATL07 reader
Written by <NAME>, June 2018 (<EMAIL>)
I've picked out the variables from ATL07 I think are of most interest to sea ice users,
but by no means is this an exhastive list.
See the xarray or dictionary readers to load in the more complete ATL07 dataset
or explore the hdf5 files themselves (I like using the app Panpoly for this) to see what else
you might want
Args:
fileT (str): File path of the ATL07 dataset
numpy (flag): Binary flag for outputting numpy arrays (True) or pandas dataframe (False)
beamNum (int): ICESat-2 beam number (1 to 6)
maxElev (float): maximum surface elevation to remove anomalies
returns:
either: select numpy arrays or a pandas dataframe
Updates:
V3 (June 2018) added observatory orientation flag, read in the beam number, not the string
V2 (June 2018) used astropy to more simply generate a datetime instance form the gps time
"""
# Open the file
try:
ATL07 = h5py.File(fileT, 'r')
except:
return 'Not a valid file'
#flag_values: 0, 1, 2; flag_meanings : backward forward transition
orientation_flag=ATL07['orbit_info']['sc_orient'][:]
if (orientation_flag==0):
print('Backward orientation')
beamStrs=['gt1l', 'gt1r', 'gt2l', 'gt2r', 'gt3l', 'gt3r']
elif (orientation_flag==1):
print('Forward orientation')
beamStrs=['gt3r', 'gt3l', 'gt2r', 'gt2l', 'gt1r', 'gt1l']
elif (orientation_flag==2):
print('Transitioning, do not use for science!')
beamStr=beamStrs[beamNum-1]
print(beamStr)
lons=ATL07[beamStr+'/sea_ice_segments/longitude'][:]
lats=ATL07[beamStr+'/sea_ice_segments/latitude'][:]
# Along track distance
# I removed the first point so it's distance relative to the start of the beam
along_track_distance=ATL07[beamStr+'/sea_ice_segments/seg_dist_x'][:] - ATL07[beamStr+'/sea_ice_segments/seg_dist_x'][0]
# Height segment ID (10 km segments)
height_segment_id=ATL07[beamStr+'/sea_ice_segments/height_segment_id'][:]
# Number of seconds since the GPS epoch on midnight Jan. 6, 1980
delta_time=ATL07[beamStr+'/sea_ice_segments/delta_time'][:]
# Add this value to delta time parameters to compute full gps time
atlas_epoch=ATL07['/ancillary_data/atlas_sdp_gps_epoch'][:]
leapSecondsOffset=37
gps_seconds = atlas_epoch[0] + delta_time - leapSecondsOffset
# Use astropy to convert from gps time to datetime
tgps = Time(gps_seconds, format='gps')
tiso = Time(tgps, format='datetime')
# Primary variables of interest
# Beam segment height
elev=ATL07[beamStr+'/sea_ice_segments/heights/height_segment_height'][:]
# Flag for potential leads, 0=sea ice, 1 = sea surface
ssh_flag=ATL07[beamStr+'/sea_ice_segments/heights/height_segment_ssh_flag'][:]
#Quality metrics for each segment include confidence level in the surface height estimate,
# which is based on the number of photons, the background noise rate, and the error measure provided by the surface-finding algorithm.
# Height quality flag, 1 for good fit, 0 for bad
quality=ATL07[beamStr+'/sea_ice_segments/heights/height_segment_quality'][:]
elev_rms = ATL07[beamStr+'/sea_ice_segments/heights/height_segment_rms'][:] #RMS difference between modeled and observed photon height distribution
seg_length = ATL07[beamStr+'/sea_ice_segments/heights/height_segment_length_seg'][:] # Along track length of segment
height_confidence = ATL07[beamStr+'/sea_ice_segments/heights/height_segment_confidence'][:] # Height segment confidence flag
reflectance = ATL07[beamStr+'/sea_ice_segments/heights/height_segment_asr_calc'][:] # Apparent surface reflectance
ssh_flag = ATL07[beamStr+'/sea_ice_segments/heights/height_segment_ssh_flag'][:] # Flag for potential leads, 0=sea ice, 1 = sea surface
seg_type = ATL07[beamStr+'/sea_ice_segments/heights/height_segment_type'][:] # 0 = Cloud covered
gauss_width = ATL07[beamStr+'/sea_ice_segments/heights/height_segment_w_gaussian'][:] # Width of Gaussian fit
# Geophysical corrections
# NOTE: All of these corrections except ocean tides, DAC,
# and geoid undulations were applied to the ATL03 photon heights.
# AVISO dynamic Atmospheric Correction (DAC) including inverted barometer (IB) effect (±5cm)
dac = ATL07[beamStr+'/sea_ice_segments/geophysical/height_segment_dac'][:]
# Solid Earth Tides (±40 cm, max)
earth = ATL07[beamStr+'/sea_ice_segments/geophysical/height_segment_earth'][:]
# Geoid (-105 to +90 m, max)
geoid = ATL07[beamStr+'/sea_ice_segments/geophysical/height_segment_geoid'][:]
# Local displacement due to Ocean Loading (-6 to 0 cm)
loadTide = ATL07[beamStr+'/sea_ice_segments/geophysical/height_segment_load'][:]
# Ocean Tides including diurnal and semi-diurnal (harmonic analysis),
# and longer period tides (dynamic and self-consistent equilibrium) (±5 m)
oceanTide = ATL07[beamStr+'/sea_ice_segments/geophysical/height_segment_ocean'][:]
# Deformation due to centrifugal effect from small variations in polar motion
# (Solid Earth Pole Tide) (±1.5 cm, the ocean pole tide ±2mm amplitude is considered negligible)
poleTide = ATL07[beamStr+'/sea_ice_segments/geophysical/height_segment_pole'][:]
# Mean sea surface (±2 m)
# Taken from ICESat and CryoSat-2, see Kwok and Morison [2015])
mss = ATL07[beamStr+'/sea_ice_segments/geophysical/height_segment_mss'][:]
# Photon rate of the given segment
photon_rate = ATL07[beamStr+'/sea_ice_segments/stats/photon_rate'][:]
# Estimated background rate from sun angle, reflectance, surface slope
background_rate = ATL07[beamStr+'/sea_ice_segments/stats/backgr_calc'][:]
ATL07.close()
if numpy:
# list the variables you want to output here..
return along_track_dist, elev
else:
dF = pd.DataFrame({'elev':elev, 'lons':lons, 'lats':lats, 'ssh_flag':ssh_flag,
'quality_flag':quality,
'delta_time':delta_time,
'along_track_distance':along_track_distance,
'height_segment_id':height_segment_id,
'photon_rate':photon_rate,'background_rate':background_rate,
'datetime':tiso, 'mss': mss, 'seg_length':seg_length})
# Add the datetime string
#dFtimepd=pd.to_datetime(dFtime)
#dF['datetime'] = pd.Series(dFtimepd, index=dF.index)
# Filter out high elevation values
dF = dF[(dF['elev']<maxElev)]
# Reset row indexing
dF=dF.reset_index(drop=True)
return dF
def getATL10data(fileT, beam='gt1r', minFreeboard=0, maxFreeboard=10):
""" Pandas/numpy ATL10 reader
Written by <NAME>, June 2018 (<EMAIL>)
I've picked out the variables from ATL10 I think are of most interest to sea ice users,
but by no means is this an exhastive list.
See the xarray or dictionary readers to load in the more complete ATL10 dataset
or explore the hdf5 files themselves (I like using the app Panpoly for this) to see what else you might want
Args:
fileT (str): File path of the ATL10 dataset
beamStr (str): ICESat-2 beam (the number is the pair, r=strong, l=weak)
maxFreeboard (float): maximum freeboard (meters)
returns:
pandas dataframe
Versions:
v1: June 2018
v2: June 2020 - cleaned things up, changed the time function slightly to be consistent with Ellen's ATL07 reader.
"""
print('ATL10 file:', fileT)
f1 = h5py.File(fileT, 'r')
# Freeboards
freeboard=f1[beam]['freeboard_beam_segment']['beam_freeboard']['beam_fb_height'][:]
ssh_flag=f1[beam]['freeboard_beam_segment']['height_segments']['height_segment_ssh_flag'][:]
# ATL07 heights
height=f1[beam]['freeboard_beam_segment']['height_segments']['height_segment_height'][:]
# Freeboard confidence and freeboard quality flag
freeboard_confidence=f1[beam]['freeboard_beam_segment']['beam_freeboard']['beam_fb_confidence'][:]
freeboard_quality=f1[beam]['freeboard_beam_segment']['beam_freeboard']['beam_fb_quality_flag'][:]
# Delta time in gps seconds
delta_time = f1[beam]['freeboard_beam_segment']['beam_freeboard']['delta_time'][:]
# Along track distance from the equator
seg_x = f1[beam]['freeboard_beam_segment']['beam_freeboard']['seg_dist_x'][:]
# Height segment ID (10 km segments)
height_segment_id=f1[beam]['freeboard_beam_segment']['beam_freeboard']['height_segment_id'][:]
lons=f1[beam]['freeboard_beam_segment']['beam_freeboard']['longitude'][:]
lats=f1[beam]['freeboard_beam_segment']['beam_freeboard']['latitude'][:]
# Time since the start of the granule
deltaTimeRel=delta_time-delta_time[0]
# #Add this value to delta time parameters to compute full gps_seconds
atlas_epoch=f1['/ancillary_data/atlas_sdp_gps_epoch'][0]
gps_seconds = atlas_epoch + delta_time
## Use astropy to convert GPS time to UTC time
tiso=Time(gps_seconds,format='gps').utc.datetime
dF = | pd.DataFrame({'freeboard':freeboard, 'freeboard_quality':freeboard_quality, 'height':height, 'ssh_flag':ssh_flag, 'lon':lons, 'lat':lats, 'delta_time':delta_time,'deltaTimeRel':deltaTimeRel,
'height_segment_id':height_segment_id,'datetime': tiso, 'seg_x':seg_x}) | pandas.DataFrame |
# coding=utf-8
import pandas as pd
import numpy as np
import re
from matplotlib.ticker import FuncFormatter
def number_formatter(number, pos=None):
"""Convert a number into a human readable format."""
magnitude = 0
while abs(number) >= 1000:
magnitude += 1
number /= 1000.0
return '%.1f%s' % (number, ['', 'K', 'M', 'B', 'T', 'Q'][magnitude])
def cuenta_tipo_de_dato(df,tipo):
"""
Esta función crea la tabla con información sobre la cantidad de cada tipo de dato encontrado en el csv
==========
* Args:
- df: el data frame al que se le va a realizar el conteo del tipo de dato.
- tipo: El nombre del tipo de dato que estamos buscando.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
# Para encontrar el tipo de dato numérico
>>conteo_nuericos = cuenta_tipo_de_dato(df, 'numerico')
# Para encontrar el tipo de dato texto
>>conteo_texto = cuenta_tipo_de_dato(df, 'object')
"""
vars_type = df.dtypes
vars_type = pd.DataFrame(vars_type, columns = ['tipo'])
if tipo == 'numerico':
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == "int64"])
cantidad_tipo = cantidad_tipo + len(vars_type.loc[vars_type["tipo"] == "float64"])
else:
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == tipo])
return cantidad_tipo
def cuenta_nulos_por_columnas(df):
"""
Función que realiza una tabla con la cuenta de missing values por columna y obtiene la proporción que estos missing
values representan del total.
==========
* Args:
- df: el data frame al que se le va a realizar el conteo de los nulos por cada columna.
* Return:
- Data Frame: entrega el data frame que indica cuantos elementos nulos fueron encontrados en cada columna.
==========
Ejemplo:
>>faltates_por_columna = cuenta_nulos_por_columnas(df)
"""
valores_nulos = df.isnull().sum()
porcentaje_valores_nulos = 100 * df.isnull().sum() / len(df)
tabla_valores_nulos = pd.concat([valores_nulos, porcentaje_valores_nulos], axis=1)
tabla_valores_nulos_ordenada = tabla_valores_nulos.rename(
columns={0: 'Missing Values', 1: '% del Total'})
tabla_valores_nulos_ordenada = tabla_valores_nulos_ordenada[
tabla_valores_nulos_ordenada.iloc[:, 1] != 0].sort_values(
'% del Total', ascending=False).round(1)
print("El dataframe tiene " + str(df.shape[1]) + " columnas.\n"
"Hay " + str(tabla_valores_nulos_ordenada.shape[0]) +
" columnas que tienen NA's.")
return tabla_valores_nulos_ordenada
def CreaTablaConteoPorcentaje(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = pd.DataFrame(data=df_resultado)
df_resultado = df_resultado[nomColumna].map('{:,}'.format)
df_resultado = pd.DataFrame(data=df_resultado)
#obteniendo los porcentajes
df_resultado['porcentaje'] = df[nomColumna].value_counts(dropna=booleanNA, normalize=True).mul(100).round(2).astype(str)+'%'
return df_resultado
def CreaTablaConteoPorcentaje_sin_stringformat(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = pd.DataFrame(data=df_resultado)
#obteniendo los porcentajes
df_resultado['porcentaje'] = df[nomColumna].value_counts(dropna=booleanNA, normalize=True).mul(100).round(2).astype(str)+'%'
return df_resultado
def StringLowercase(df):
"""
Función cambiar todos los strings de un dataframe a lowercase
(columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringLowercase(df)
"""
### Columnas
DataFrameColumns = df.columns
for col in DataFrameColumns:
df.rename(columns={col:col.lower()}, inplace=True)
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].str.lower()
return df
def StringAcentos(df):
"""
Función para eliminar acentos, dieresis y eñes de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringAcentos(df)
"""
### Columnas
df.columns = df.columns.str.replace('á', 'a')
df.columns = df.columns.str.replace('é', 'e')
df.columns = df.columns.str.replace('í', 'i')
df.columns = df.columns.str.replace('ó', 'o')
df.columns = df.columns.str.replace('ú', 'u')
df.columns = df.columns.str.replace('ü', 'u')
df.columns = df.columns.str.replace('ñ', 'n')
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
return df
def StringStrip(df):
"""
Función para eliminar espacios al inicio y al final de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringStrip(df)
"""
### Columnas
df.columns = [col.strip() for col in df.columns]
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].apply(lambda x: x.strip() if isinstance(x, str) else x)
return df
def StringEspacios(df):
"""
Función para eliminar espacios dobles (o mas) de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringEspacios(df)
"""
### Columnas
df.columns = [re.sub(' +', ' ', col) for col in df.columns]
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].apply(lambda x: re.sub(' +', ' ', x) if isinstance(x, str) else x)
return df
def EstandarizaFormato(df):
"""
Función para estandarizar un dataframe: minúsculas, sin espacios en blanco,
sin signos de puntuación (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = EstandarizaFormato(df)
"""
### Minúsculas
df = StringLowercase(df)
### Acentos
df = StringAcentos(df)
### Quitamos espacios al principio y al final
df = StringStrip(df)
### Quitamos espacios
df = StringEspacios(df)
### Quita espacios en columnas
df.columns = df.columns.str.replace(' ', '_')
return df
def prepara_dataset(df):
"""
Esta función hace las correcciones al dataset.
==========
* Args:
- df: el data frame al que se le van a hacer las correcciones.
* Return:
- Data Frame: entrega el data frame corregido.
==========
Ejemplo:
# Para encontrar el tipo de dato numérico
>>df = prepara_dataset(df)
"""
# Estandarizamos formato
df = EstandarizaFormato(df)
# cambiamos los tipos de variable
df = df.astype({"año_hechos":'category', "mes_hechos":'category', "delito":'category', "categoria_delito":'category',"fiscalia":'category', "agencia":'category'})
# cambiamos la columna geo_point
new = df['geo_point'].str.split(",", n = 1, expand = True)
df["latitud"]= new[0]
df["longitud"]= new[1]
# cambiamos el tipo para latitud y longitud
df = df.astype({"latitud":'float64', "longitud":'float64'})
# Eliminamos la columna geo_point
#df.drop(columns =["geo_point"], inplace = True)
# Eliminamos la columna geo_shape
#df.drop(columns =["geo_shape"], inplace = True)
return df
def genera_profiling_de_numericos(df,lista_numericas,vars_type):
"""
Función que genera un perfilamiento para los datos numéricos.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento para variables numéricas.
- lista_numericas: una lista con el nombre de las variables que son de tipo numérico.
- vars_type: tabla generada por la función cuenta_tipo_de_dato de este mismo script.
* Return:
- Data Frame: Data Frame con el perfilamiento para las variables numéricas.
==========
Ejemplo:
>>vars_type = cuenta_tipo_de_dato(df)
# Extraemos el nombre de las variables numericas:
>>variables_int = vars_type.loc[vars_type["tipo"] == "int64"]
>>variables_float = vars_type.loc[vars_type["tipo"] == "float64"]
>>variables_numericas = variables_int.append(variables_float, ignore_index=True)
>>lista_numericas = list(variables_numericas['variable'])
# Generamos el perfilamiento para esas variables
>>perfilamiento_de_numericas = genera_profiling_de_numericos(df,lista_numericas,vars_type)
"""
# Obtenemos los estadísticos de la columna si es numérica
lista_perfilamiento_numerico = ['Tipo','Número de observaciones', 'Media', 'Desviación estándar',
'Cuartil 25%','Cuartil 50%','Cuartil 75%','Mínimo','Máximo',
'Número de observaciones únicas','Número de faltantes','Top1/veces/%',
'Top2/veces/%','Top3/veces/%'
,'Top4/veces/%','Top5/veces/%']
datos_dataframe_profiling_numericas = {'Métrica':lista_perfilamiento_numerico}
dataframe_profiling_numericas = pd.DataFrame(data=datos_dataframe_profiling_numericas)
for col in lista_numericas:
# tipo de dato
vars_type_num = pd.DataFrame(vars_type)
#vars_type_num
df_tipo = pd.DataFrame(data=vars_type_num.loc[vars_type_num["variable"] == col])
tipo_dato=df_tipo['tipo'][0]
#print(tipo_dato)
# Obtenemos las métricas relevantes
descr_col = df[col].describe()
descr_col = pd.DataFrame(descr_col)
descr_col['Métrica']=descr_col.index
descr_col.columns=['valor','Métrica']
# número de observaciones
medida = 'count'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
num_observaciones_num = metrica['valor'][0]
#print(num_observaciones_num)
# media
medida = 'mean'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
media_obs_num = metrica['valor'][0]
media_obs_num = media_obs_num.round(2)
#print(media_obs_num)
# desviacion estándar
medida = 'std'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
sd_obs_num = metrica['valor'][0]
sd_obs_num = sd_obs_num.round(2)
#print(sd_obs_num)
# cuartil 25
medida = '25%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_25_obs_num = metrica['valor'][0]
cuant_25_obs_num = cuant_25_obs_num.round(2)
#print(cuant_25_obs_num)
# cuartil 50
medida = '50%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_50_obs_num = metrica['valor'][0]
cuant_50_obs_num = cuant_50_obs_num.round(2)
#print(cuant_50_obs_num)
#cuant_50_obs_num = agua.quantile(q=0.25)
#print(cuant_50_obs_num)
# cuartil 75
medida = '75%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_75_obs_num = metrica['valor'][0]
cuant_75_obs_num = cuant_75_obs_num.round(2)
#print(cuant_75_obs_num)
#cuant_75_obs_num = agua.quantile(q=0.25)
#print(cuant_75_obs_num)
# minimo
medida = 'min'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
minimo_obs_num = metrica['valor'][0]
minimo_obs_num = minimo_obs_num.round(2)
#print(minimo_obs_num)
# maximo
medida = 'max'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
maximo_obs_num = metrica['valor'][0]
maximo_obs_num = maximo_obs_num.round(2)
#print(maximo_obs_num)
# numero de observaciones unicas
num_obs_unicas_obs_num = df[col].nunique()
#print(num_obs_unicas_obs_num)
# Número de observaciones con valores faltantes
obs_faltantes_obs_num = df[col].isna().sum()
# top 5 observaciones repetidas
# df_resultado = df[col].value_counts(dropna=True)
# df_resultado = pd.DataFrame(df_resultado)
# df_resultado.columns=['conteo_top_5']
# df_resultado=df_resultado.sort_values('conteo_top_5', ascending = False)
#top5 = df_resultado.head(5)
#print(top5)
# generamos tabla para las modas
tabla_importantes = CreaTablaConteoPorcentaje(df,str(col),True)
tabla_importantes.columns = ['conteo','porcentaje']
top1 = tabla_importantes.index[0]
veces1 = list(tabla_importantes['conteo'])[0]
porcentaje1 = list(tabla_importantes['porcentaje'])[0]
datos_top1 = [top1,veces1,porcentaje1]
# #datos_top1 = list([tabla_importantes[0:1]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1]
if(len(tabla_importantes)>1):
top2 = tabla_importantes.index[1]
veces2 = list(tabla_importantes['conteo'])[1]
porcentaje2 = list(tabla_importantes['porcentaje'])[1]
datos_top2 = [top2,veces2,porcentaje2]
# datos_top2 = list([tabla_importantes[1:2]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2]
else:
datos_top2 = ['N/A','N/A','N/A']
if(len(tabla_importantes)>2):
top3 = tabla_importantes.index[2]
veces3 = list(tabla_importantes['conteo'])[2]
porcentaje3 = list(tabla_importantes['porcentaje'])[2]
datos_top3 = [top3,veces3,porcentaje3]
# # datos_top3 = list([tabla_importantes[2:3]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje','top3/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2,datos_top3]
else:
datos_top3 = ['N/A','N/A','N/A']
if(len(tabla_importantes)>3):
top4 = tabla_importantes.index[3]
veces4 = list(tabla_importantes['conteo'])[3]
porcentaje4 = list(tabla_importantes['porcentaje'])[3]
datos_top4 = [top4,veces4,porcentaje4]
# datos_top4 = list([tabla_importantes[3:4]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje','top3/veces/porcentaje'
# ,'top4/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2,datos_top3,
# datos_top4]
else:
datos_top4 = ['N/A','N/A','N/A']
if(len(tabla_importantes)>4):
top5 = tabla_importantes.index[4]
veces5 = list(tabla_importantes['conteo'])[4]
porcentaje5 = list(tabla_importantes['porcentaje'])[4]
datos_top5 = [top5,veces5,porcentaje5]
# datos_top5 = list([tabla_importantes[4:5]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje','top3/veces/porcentaje'
# ,'top4/veces/porcentaje','top5/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2,datos_top3,
# datos_top4,datos_top5]
else:
datos_top5 = ['N/A','N/A','N/A']
#print(obs_faltantes_obs_num)
datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
maximo_obs_num,num_obs_unicas_obs_num,obs_faltantes_obs_num,datos_top1,datos_top2,datos_top3,
datos_top4,datos_top5]
# datos_dataframe_profiling_numericas = {'metrica':lista_perfilamiento_numerico}
# dataframe_profiling_numericas = pd.DataFrame(data=datos_dataframe_profiling_numericas)
dataframe_profiling_numericas[col]=datos_variable
return dataframe_profiling_numericas
def genera_profiling_general(df):
"""
Función que genera la tabla con un perfilamiento general del data set, sin entrar al detalle por variable.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento general.
* Return:
- Data Frame: entrega el data frame con un perfilamiento general del data set.
==========
Ejemplo:
>>perfilamiento_general = genera_profiling_general(df)
"""
cuenta_de_variables = len(df.columns)
cuenta_observaciones = len(df)
total_celdas = cuenta_de_variables*cuenta_observaciones
# Contamos el tipo de datos del dataset
vars_type = df.dtypes
vars_type = pd.DataFrame(vars_type, columns = ['tipo'])
# Asignamos un valor para cada tipo
## Numéricas
cantidad_numericas = len(vars_type.loc[vars_type["tipo"] == "int64"])
cantidad_numericas = cantidad_numericas + len(vars_type.loc[vars_type["tipo"] == "float64"])
#print(cantidad_numericas)
## Fechas
cantidad_fecha = len(vars_type.loc[vars_type["tipo"] == "datetime64[ns]"])
#print(cantidad_fecha)
## Categoricas
cantidad_categoricas = len(vars_type.loc[vars_type["tipo"] == "category"])
#print(cantidad_categoricas)
## Texto
cantidad_texto = len(vars_type.loc[vars_type["tipo"] == "object"])
#print(cantidad_texto)
# Contamos los faltantes
nulos_totales = cuenta_nulos_por_columnas(df)['Missing Values'].sum()
#print(nulos_totales)
# Obtenemos el porcentaje de datos que son faltantes
nulos_porcentaje = ((nulos_totales/(total_celdas))*100).round(1).astype(str)+'%'
#print(nulos_porcentaje)
# Obtenemos el total de columnas duplicadas
ds_duplicados = df.duplicated(subset=None, keep='first')
ds_duplicados = pd.DataFrame(ds_duplicados,columns = ['duplicated'])
numero_de_duplicados = len(ds_duplicados.loc[ds_duplicados["duplicated"] == True])
#print(numero_de_duplicados)
# Obtenemos el porcentaje de duplicados
porcentaje_de_duplicados = str(((numero_de_duplicados/(total_celdas))*100))+'%'
#print(porcentaje_de_duplicados)
estadisticas = ['Total de variables','Conteo de observaciones','Total de celdas',
'Cantidad de variables numéricas','Cantidad de variables de fecha',
'Cantidad de variables categóricas', 'Cantidad de variables de texto',
'Valores faltantes','Porcentaje de valores faltantes',
'Renglones duplicados', 'Porcentaje de valores duplicados']
valores_estadisticas = [cuenta_de_variables,cuenta_observaciones,total_celdas,cantidad_numericas,
cantidad_fecha,cantidad_categoricas,cantidad_texto,nulos_totales,nulos_porcentaje,
numero_de_duplicados,porcentaje_de_duplicados]
valores = {'Estadísticas':estadisticas,'Resultado':valores_estadisticas}
df_perfilamiento_general = pd.DataFrame(data=valores)
return df_perfilamiento_general
def genera_profiling_de_categorias(df, lista_category,vars_type):
"""
Función que genera un perfilamiento para los datos categóricos.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento para variables categóricas.
- lista_category: una lista con el nombre de las variables que son de tipo categórico.
- vars_type: tabla generada por la función cuenta_tipo_de_dato de este mismo script.
* Return:
- Data Frame: Data Frame con el perfilamiento para las variables categóricas.
==========
Ejemplo:
>>vars_type = cuenta_tipo_de_dato(df)
# Extraemos el nombre de las variables categoricas:
>>variables_category = vars_type.loc[vars_type["tipo"] == "category"]
>>lista_category = list(variables_category['variable'])
# Generamos el perfilamiento para esas variables
>>profiling_de_categorias = genera_profiling_de_categorias(df,lista_category,vars_type)
"""
# Obtenemos los estadísticos de la columna si es catagorica
lista_perfilamiento_categorico = ['Tipo','Número de categorías', 'Número de observaciones',
'Observaciones nulas','% Observaciones nulas', 'Valores únicos',
'Moda1/veces/%','Moda2/veces/%','Moda3/veces/%']
datos_dataframe_profiling_categoricos = {'Métrica':lista_perfilamiento_categorico}
dataframe_profiling_categoricas = | pd.DataFrame(data=datos_dataframe_profiling_categoricos) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/31 4:01 下午
# @File : compare_eval_result.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
import json
import pandas as pd
import requests
def collect_data(devfile="../data_root_dir/newcos/dev.json", eval_results="../output_root_dir/newcos/eval_results-newcos.json"):
"""
生成excel, 对比main.trainer.py生成的结果和devfile
:param devfile: 训练文件,格式是 [(text, keyword, labels),..]
:param eval_results: main.trainer.py生成的文件output文件中的json文件 [(predid, probality)]
:return:
"""
labels = ["是","否"]
with open(devfile) as f:
dev_data = json.load(f)
with open(eval_results) as f:
eval_data = json.load(f)
assert len(dev_data) == len(eval_data)
data = []
for d, res in zip(dev_data, eval_data):
one_data = {"text": d[0], "keyword":d[1], "label": d[2], "predict":labels[res[0]], "probability": format(res[1], "0.3f")}
data.append(one_data)
df = pd.DataFrame(data)
excel_file = "result2.xlsx"
writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{excel_file}")
return data
def compare_model(hostname='http://127.0.0.1:3314'):
"""
把收集到的数据,放到线上,对比一下准确率,不是和咱们自己的模型对比
:param hostname:
:return:
"""
url = hostname + '/lavector/rest/aspect-sentiment-batch'
headers = {'Content-Type': 'application/json'}
mydata = collect_data()
post_data = []
for d in mydata:
one = (d["text"], [d["keyword"]])
post_data.append(one)
data = {'channel': 'jd', 'data': post_data}
print(f"发送请求到{url}, 数据量{len(post_data)}")
res = requests.post(url, data=json.dumps(data), headers=headers)
result = res.json()
myresults = []
for r in result['result']:
keyword_list = list(r.keys())
pres_list = list(r.values())
assert len(keyword_list) == 1
assert len(pres_list) == 1
keyword = keyword_list[0]
pres = pres_list[0]
for k,v in pres.items():
if v == 1:
if k == "负向":
predict = "消极"
elif k =="正向":
predict = "积极"
else:
predict = "中性"
myresults.append([keyword,predict])
assert len(post_data) == len(myresults)
#保存到文件
newdata = []
for d, res in zip(mydata, myresults):
if res[0] != d["keyword"]:
print(f"这条数据预测回来的关键字不一致{res[0]}")
continue
d["online_predict"] = res[1]
newdata.append(d)
df = pd.DataFrame(newdata)
excel_file = "result_online.xlsx"
writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{excel_file}")
return newdata
def read_result_online():
"""
读取result_online.xlsx,比较
上文,关键字,下午的字数比
pretext + keyword + posttest
predict 表示的结果是75个字的,25+25+25的结果
online_predict 表示的结果是 15+30+20
:return:
"""
df = pd.read_excel("result_online.xlsx")
total = 0
predict_yes = 0
online_yes = 0
for index, row in df.iterrows():
label = row['label']
predict = row['predict']
online_predict = row['online_predict']
if predict != online_predict:
total += 1
if predict == label:
predict_yes +=1
elif online_predict == label:
online_yes +=1
else:
print("都没预测正确")
print(row)
print()
print(f"共有{total}个不一样, 75个字预测的结果是{predict_yes}, 线上65个字的预测结果是{online_yes}")
def dopredict(test_data, url="http://127.0.0.1:5000/api/predict_macbert", type=None):
"""
预测结果
:param test_data:
:return:
"""
if type:
data = {'data': test_data, 'type':type}
else:
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
result = r.json()
return result
def download_data_and_compare(hostname=["http://192.168.50.139:8081/api/"], dirpath="/opt/lavector/absa/", jsonfile=["192.168.50.139_500_8081_0129.json"], isabsa=True, result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx", type=None):
"""
从label_studio的某个hostname下载数据,然后预测,最后给出结果
:return:
"""
from absa_api import export_data
#从label-studio下载文
original_data = []
for hname, jfile in zip(hostname,jsonfile):
json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)
#加载从label-studio获取的到json文件
with open(json_file, 'r') as f:
data = json.load(f)
print(f"共收集主机{hname}的数据{len(data)} 条")
original_data.extend(data)
data = predict_comare_excel(original_data, result_excel=result_excel, export_wrong_examples_excel=export_wrong_examples_excel,correct_examples_excel= correct_examples_excel, isabsa=isabsa, type=type)
return data
def download_data_and_compare_same(hostname=["http://192.168.50.139:8081/api/","http://192.168.50.139:8080/api/"], dirpath="/opt/lavector/absa/", jsonfile=["192.168.50.139_500_8081_0129.json","192.168.50.139_500_8080_0129.json"], isabsa=True):
"""
对比相同的hostname的数据
从label_studio的某个hostname下载数据,然后预测,最后给出结果
:return:
"""
from absa_api import export_data
#从label-studio下载文
if len(hostname) != 2:
raise Exception("必须准2个hostname,里面包含相同的评估数据")
result = []
for hname, jfile in zip(hostname,jsonfile):
original_data = []
json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)
#加载从label-studio获取的到json文件
with open(json_file, 'r') as f:
data = json.load(f)
print(f"共收集主机{hname}的数据{len(data)} 条")
original_data.extend(data)
predict_data, excel_data = predict_comare_excel(original_data, isabsa=isabsa)
result.append([hname, predict_data, excel_data])
#对比2个人标注的数据
diffrent_data = []
print(f"对比host为 {result[0][0], result[1][0]}")
hname1, data1, pre1 = result[0]
hname2, data2, pre2 = result[1]
if len(data1) != len(data2):
raise Exception("两个人标注的数据总数不一致")
for d1, d2 in zip(data1,data2):
if d1[0] != d2[0]:
print("这条数据不一致")
else:
if d1[4] != d2[4]:
print(f"2个人标注的标签不一致")
print(d1[0])
print(d1[1])
print(d1[4])
print(d2[4])
one_data = {"text": d1[0], "keyword": d1[1], "P1_label": d1[4], "P2_label": d2[4], "location": d1[2:4]}
diffrent_data.append(one_data)
print(f"不一致的数据总量是{len(diffrent_data)}")
df = pd.DataFrame(diffrent_data)
writer = | pd.ExcelWriter("diffrent.xlsx", engine='xlsxwriter') | pandas.ExcelWriter |
from flask import *
import pandas as pd
import os
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from flask_ngrok import run_with_ngrok
import numpy as np
app = Flask(__name__)
run_with_ngrok(app)
basedir = os.path.abspath(os.path.dirname(__file__))
DIR = 'static/data/'
bitcoin_time_series = pd.read_csv(DIR + "cmc_plus_gold_fixed.csv", parse_dates = ['date'])
gtrend_time_series = pd.read_csv(DIR + "daily_gtrend_data_cmc.csv", parse_dates = ['date'])
dataset = bitcoin_time_series.copy()
dataset['gtrend'] = gtrend_time_series['bitcoin']
train_dates = dataset['date']
del gtrend_time_series
dataset = dataset.drop('date', axis = 1)
dataset = dataset.drop('index', axis = 1)
scaler = MinMaxScaler().fit(dataset)
dataset_scaled = scaler.transform(dataset)
@app.route('/')
def index():
loss_adam = pd.read_csv('static/data/loss_ogru/loss_history.csv')
loss_adam_h = pd.read_csv('static/data/loss_ogru/loss_history_H.csv')
loss_adam_hg = pd.read_csv('static/data/loss_ogru/loss_history_HG.csv')
loss_adam_htrend = pd.read_csv('static/data/loss_ogru/loss_history_HTrend.csv')
loss_sgd = pd.read_csv('static/data/loss_ogru/loss_history_sgd.csv')
loss_sgd_h = pd.read_csv('static/data/loss_ogru/loss_history_sgd_H.csv')
loss_sgd_hg = pd.read_csv('static/data/loss_ogru/loss_history_sgd_HG.csv')
loss_sgd_htrend = | pd.read_csv('static/data/loss_ogru/loss_history_sgd_HTrend.csv') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: <NAME>, <NAME>
# @Date: Monday, July 16th 2018, 1:28:46 pm
"""
Raw Data Loader functions for PyBpod rig
Module contains one loader function per raw datafile
"""
import json
import logging
import wave
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from typing import Union
import numpy as np
import pandas as pd
from iblutil.io import jsonable
from ibllib.io.video import assert_valid_label
from ibllib.misc import version
from ibllib.time import uncycle_pgts, convert_pgts
_logger = logging.getLogger('ibllib')
def trial_times_to_times(raw_trial):
"""
Parse and convert all trial timestamps to "absolute" time.
Float64 seconds from session start.
0---BpodStart---TrialStart0---------TrialEnd0-----TrialStart1---TrialEnd1...0---ts0---ts1---
tsN...absTS = tsN + TrialStartN - BpodStart
Bpod timestamps are in microseconds (µs)
PyBpod timestamps are is seconds (s)
:param raw_trial: raw trial data
:type raw_trial: dict
:return: trial data with modified timestamps
:rtype: dict
"""
ts_bs = raw_trial['behavior_data']['Bpod start timestamp']
ts_ts = raw_trial['behavior_data']['Trial start timestamp']
# ts_te = raw_trial['behavior_data']['Trial end timestamp']
def convert(ts):
return ts + ts_ts - ts_bs
converted_events = {}
for k, v in raw_trial['behavior_data']['Events timestamps'].items():
converted_events.update({k: [convert(i) for i in v]})
raw_trial['behavior_data']['Events timestamps'] = converted_events
converted_states = {}
for k, v in raw_trial['behavior_data']['States timestamps'].items():
converted_states.update({k: [[convert(i) for i in x] for x in v]})
raw_trial['behavior_data']['States timestamps'] = converted_states
shift = raw_trial['behavior_data']['Bpod start timestamp']
raw_trial['behavior_data']['Bpod start timestamp'] -= shift
raw_trial['behavior_data']['Trial start timestamp'] -= shift
raw_trial['behavior_data']['Trial end timestamp'] -= shift
assert(raw_trial['behavior_data']['Bpod start timestamp'] == 0)
return raw_trial
def load_bpod(session_path):
"""
Load both settings and data from bpod (.json and .jsonable)
:param session_path: Absolute path of session folder
:return: dict settings and list of dicts data
"""
return load_settings(session_path), load_data(session_path)
def load_data(session_path: Union[str, Path], time='absolute'):
"""
Load PyBpod data files (.jsonable).
Bpod timestamps are in microseconds (µs)
PyBpod timestamps are is seconds (s)
:param session_path: Absolute path of session folder
:type session_path: str, Path
:return: A list of len ntrials each trial being a dictionary
:rtype: list of dicts
"""
if session_path is None:
_logger.warning("No data loaded: session_path is None")
return
path = Path(session_path).joinpath("raw_behavior_data")
path = next(path.glob("_iblrig_taskData.raw*.jsonable"), None)
if not path:
_logger.warning("No data loaded: could not find raw data file")
return None
data = jsonable.read(path)
if time == 'absolute':
data = [trial_times_to_times(t) for t in data]
return data
def load_camera_frameData(session_path, camera: str = 'left', raw: bool = False) -> pd.DataFrame:
""" Loads binary frame data from Bonsai camera recording workflow.
Args:
session_path (StrPath): Path to session folder
camera (str, optional): Load FramsData for specific camera. Defaults to 'left'.
raw (bool, optional): Whether to return raw or parsed data. Defaults to False.
Returns:
parsed: (raw=False, Default)
pandas.DataFrame: 4 int64 columns: {
Timestamp, # float64 (seconds from session start)
embeddedTimeStamp, # float64 (seconds from session start)
embeddedFrameCounter, # int64 (Frame number from session start)
embeddedGPIOPinState # object (State of each of the 4 GPIO pins as a
# list of numpy boolean arrays
# e.g. np.array([True, False, False, False])
}
raw:
pandas.DataFrame: 4 int64 columns: {
Timestamp, # UTC ticks from BehaviorPC
# (100's of ns since midnight 1/1/0001)
embeddedTimeStamp, # Camera timestamp (Needs unclycling and conversion)
embeddedFrameCounter, # Frame counter (int)
embeddedGPIOPinState # GPIO pin state integer representation of 4 pins
}
"""
camera = assert_valid_label(camera)
fpath = Path(session_path).joinpath("raw_video_data")
fpath = next(fpath.glob(f"_iblrig_{camera}Camera.frameData*.bin"), None)
assert fpath, f"{fpath}\nFile not Found: Could not find bin file for cam <{camera}>"
rdata = np.fromfile(fpath, dtype=np.float64)
assert rdata.size % 4 == 0, "Dimension mismatch: bin file length is not mod 4"
rows = int(rdata.size / 4)
data = np.reshape(rdata.astype(np.int64), (rows, 4))
df_dict = dict.fromkeys(
["Timestamp", "embeddedTimeStamp", "embeddedFrameCounter", "embeddedGPIOPinState"]
)
df = pd.DataFrame(data, columns=df_dict.keys())
if raw:
return df
df_dict["Timestamp"] = (data[:, 0] - data[0, 0]) / 10_000_000 # in seconds from first frame
camerats = uncycle_pgts(convert_pgts(data[:, 1]))
df_dict["embeddedTimeStamp"] = camerats - camerats[0] # in seconds from first frame
df_dict["embeddedFrameCounter"] = data[:, 2] - data[0, 2] # from start
gpio = (np.right_shift(np.tile(data[:, 3], (4, 1)).T, np.arange(31, 27, -1)) & 0x1) == 1
df_dict["embeddedGPIOPinState"] = [np.array(x) for x in gpio.tolist()]
parsed_df = pd.DataFrame.from_dict(df_dict)
return parsed_df
def load_camera_ssv_times(session_path, camera: str):
"""
Load the bonsai frame and camera timestamps from Camera.timestamps.ssv
NB: For some sessions the frame times are in the first column, in others the order is reversed.
NB: If using the new bin file the bonsai_times is a float in seconds since first frame
:param session_path: Absolute path of session folder
:param camera: Name of the camera to load, e.g. 'left'
:return: array of datetimes, array of frame times in seconds
"""
camera = assert_valid_label(camera)
video_path = Path(session_path).joinpath('raw_video_data')
if next(video_path.glob(f'_iblrig_{camera}Camera.frameData*.bin'), None):
df = load_camera_frameData(session_path, camera=camera)
return df['Timestamp'].values, df['embeddedTimeStamp'].values
file = next(video_path.glob(f'_iblrig_{camera.lower()}Camera.timestamps*.ssv'), None)
if not file:
raise FileNotFoundError()
# NB: Numpy has deprecated support for non-naive timestamps.
# Converting them is extremely slow: 6000 timestamps takes 0.8615s vs 0.0352s.
# from datetime import timezone
# c = {0: lambda x: datetime.fromisoformat(x).astimezone(timezone.utc).replace(tzinfo=None)}
# Determine the order of the columns by reading one line and testing whether the first value
# is an integer or not.
with open(file, 'r') as f:
line = f.readline()
type_map = OrderedDict(bonsai='<M8[ns]', camera='<u4')
try:
int(line.split(' ')[1])
except ValueError:
type_map.move_to_end('bonsai')
ssv_params = dict(names=type_map.keys(), dtype=','.join(type_map.values()), delimiter=' ')
ssv_times = np.genfromtxt(file, **ssv_params) # np.loadtxt is slower for some reason
bonsai_times = ssv_times['bonsai']
camera_times = uncycle_pgts(convert_pgts(ssv_times['camera']))
return bonsai_times, camera_times
def load_embedded_frame_data(session_path, label: str, raw=False):
"""
Load the embedded frame count and GPIO for a given session. If the file doesn't exist,
or is empty, None values are returned.
:param session_path: Absolute path of session folder
:param label: The specific video to load, one of ('left', 'right', 'body')
:param raw: If True the raw data are returned without preprocessing, otherwise frame count is
returned starting from 0 and the GPIO is returned as a dict of indices
:return: The frame count, GPIO
"""
count = load_camera_frame_count(session_path, label, raw=raw)
gpio = load_camera_gpio(session_path, label, as_dicts=not raw)
return count, gpio
def load_camera_frame_count(session_path, label: str, raw=True):
"""
Load the embedded frame count for a given session. If the file doesn't exist, or is empty,
a None value is returned.
:param session_path: Absolute path of session folder
:param label: The specific video to load, one of ('left', 'right', 'body')
:param raw: If True the raw data are returned without preprocessing, otherwise frame count is
returned starting from 0
:return: The frame count
"""
if session_path is None:
return
label = assert_valid_label(label)
video_path = Path(session_path).joinpath('raw_video_data')
if next(video_path.glob(f'_iblrig_{label}Camera.frameData*.bin'), None):
df = load_camera_frameData(session_path, camera=label)
return df['embeddedFrameCounter'].values
# Load frame count
glob = video_path.glob(f'_iblrig_{label}Camera.frame_counter*.bin')
count_file = next(glob, None)
count = np.fromfile(count_file, dtype=np.float64).astype(int) if count_file else []
if len(count) == 0:
return
if not raw:
count -= count[0] # start from zero
return count
def load_camera_gpio(session_path, label: str, as_dicts=False):
"""
Load the GPIO for a given session. If the file doesn't exist, or is empty, a None value is
returned.
The raw binary file contains uint32 values (saved as doubles) where the first 4 bits
represent the state of each of the 4 GPIO pins. The array is expanded to an n x 4 array by
shifting each bit to the end and checking whether it is 0 (low state) or 1 (high state).
:param session_path: Absolute path of session folder
:param label: The specific video to load, one of ('left', 'right', 'body')
:param as_dicts: If False the raw data are returned boolean array with shape (n_frames, n_pins)
otherwise GPIO is returned as a list of dictionaries with keys ('indices', 'polarities').
:return: An nx4 boolean array where columns represent state of GPIO pins 1-4.
If as_dicts is True, a list of dicts is returned with keys ('indices', 'polarities'),
or None if the dictionary is empty.
"""
if session_path is None:
return
raw_path = Path(session_path).joinpath('raw_video_data')
label = assert_valid_label(label)
# Load pin state
if next(raw_path.glob(f'_iblrig_{label}Camera.frameData*.bin'), False):
df = load_camera_frameData(session_path, camera=label, raw=False)
gpio = np.array([x for x in df['embeddedGPIOPinState'].values])
if len(gpio) == 0:
return [None] * 4 if as_dicts else None
else:
GPIO_file = next(raw_path.glob(f'_iblrig_{label}Camera.GPIO*.bin'), None)
# This deals with missing and empty files the same
gpio = np.fromfile(GPIO_file, dtype=np.float64).astype(np.uint32) if GPIO_file else []
# Check values make sense (4 pins = 16 possible values)
if not np.isin(gpio, np.left_shift(np.arange(2 ** 4, dtype=np.uint32), 32 - 4)).all():
_logger.warning('Unexpected GPIO values; decoding may fail')
if len(gpio) == 0:
return [None] * 4 if as_dicts else None
# 4 pins represented as uint32
# For each pin, shift its bit to the end and check the bit is set
gpio = (np.right_shift(np.tile(gpio, (4, 1)).T, np.arange(31, 27, -1)) & 0x1) == 1
if as_dicts:
if not gpio.any():
_logger.error('No GPIO changes')
return [None] * 4
# Find state changes for each pin and construct a dict of indices and polarities for each
edges = np.vstack((gpio[0, :], np.diff(gpio.astype(int), axis=0)))
# gpio = [(ind := np.where(edges[:, i])[0], edges[ind, i]) for i in range(4)]
# gpio = [dict(zip(('indices', 'polarities'), x)) for x in gpio_] # py3.8
gpio = [{'indices': np.where(edges[:, i])[0],
'polarities': edges[edges[:, i] != 0, i]}
for i in range(4)]
# Replace empty dicts with None
gpio = [None if x['indices'].size == 0 else x for x in gpio]
return gpio
def load_settings(session_path: Union[str, Path]):
"""
Load PyBpod Settings files (.json).
[description]
:param session_path: Absolute path of session folder
:type session_path: str, Path
:return: Settings dictionary
:rtype: dict
"""
if session_path is None:
_logger.warning("No data loaded: session_path is None")
return
path = Path(session_path).joinpath("raw_behavior_data")
path = next(path.glob("_iblrig_taskSettings.raw*.json"), None)
if not path:
_logger.warning("No data loaded: could not find raw settings file")
return None
with open(path, 'r') as f:
settings = json.load(f)
if 'IBLRIG_VERSION_TAG' not in settings.keys():
settings['IBLRIG_VERSION_TAG'] = ''
return settings
def load_stim_position_screen(session_path):
path = Path(session_path).joinpath("raw_behavior_data")
path = next(path.glob("_iblrig_stimPositionScreen.raw*.csv"), None)
data = pd.read_csv(path, sep=',', header=None, error_bad_lines=False)
data.columns = ['contrast', 'position', 'bns_ts']
data['bns_ts'] = pd.to_datetime(data['bns_ts'])
return data
def load_encoder_events(session_path, settings=False):
"""
Load Rotary Encoder (RE) events raw data file.
Assumes that a folder called "raw_behavior_data" exists in folder.
Events number correspond to following bpod states:
1: correct / hide_stim
2: stim_on
3: closed_loop
4: freeze_error / freeze_correct
>>> data.columns
>>> ['re_ts', # Rotary Encoder Timestamp (ms) 'numpy.int64'
'sm_ev', # State Machine Event 'numpy.int64'
'bns_ts'] # Bonsai Timestamp (int) 'pandas.Timestamp'
# pd.to_datetime(data.bns_ts) to work in datetimes
:param session_path: [description]
:type session_path: [type]
:return: dataframe w/ 3 cols and (ntrials * 3) lines
:rtype: Pandas.DataFrame
"""
if session_path is None:
return
path = Path(session_path).joinpath("raw_behavior_data")
path = next(path.glob("_iblrig_encoderEvents.raw*.ssv"), None)
if not settings:
settings = load_settings(session_path)
if settings is None or settings['IBLRIG_VERSION_TAG'] == '':
settings = {'IBLRIG_VERSION_TAG': '100.0.0'}
# auto-detect old files when version is not labeled
with open(path) as fid:
line = fid.readline()
if line.startswith('Event') and 'StateMachine' in line:
settings = {'IBLRIG_VERSION_TAG': '0.0.0'}
if not path:
return None
if version.ge(settings['IBLRIG_VERSION_TAG'], '5.0.0'):
return _load_encoder_events_file_ge5(path)
else:
return _load_encoder_events_file_lt5(path)
def _load_encoder_ssv_file(file_path, **kwargs):
file_path = Path(file_path)
if file_path.stat().st_size == 0:
_logger.error(f"{file_path.name} is an empty file. ")
raise ValueError(f"{file_path.name} is an empty file. ABORT EXTRACTION. ")
return | pd.read_csv(file_path, sep=' ', header=None, error_bad_lines=False, **kwargs) | pandas.read_csv |
"""
Calculate L and T velocities from LL and LT backwalls.
Raises
------
IndefiniteVelocityError
Output
------
conf.d/30_block_velocities.yaml
velocity_L.png
velocity_T.png
"""
import logging
import numpy as np
import matplotlib.pyplot as plt
import arim
import arim.ray
from arim.im.das import lanczos_interpolation
from tqdm import tqdm
import pandas as pd
import yaml
import arim.models.block_in_immersion as bim
import numba
from . import common
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging.getLogger("arim").setLevel(logging.WARNING)
class IndefiniteVelocityError(RuntimeError):
pass
def _time_of_flights_backwall_LL(conf):
probe = common.load_probe(conf)
examination_object = arim.io.examination_object_from_conf(conf)
tx_list, rx_list = arim.ut.fmc(probe.numelements)
# Backwall paths
backwall_paths = bim.backwall_paths(
examination_object.couplant_material,
examination_object.block_material,
probe.to_oriented_points(),
examination_object.frontwall,
examination_object.backwall,
)
path = backwall_paths["LL"]
arim.ray.ray_tracing_for_paths([path])
return path.rays.times
def _time_of_flights_backwall_LT(conf):
probe = common.load_probe(conf)
examination_object = arim.io.examination_object_from_conf(conf)
tx_list, rx_list = arim.ut.fmc(probe.numelements)
# Backwall paths
backwall_paths = bim.backwall_paths(
examination_object.couplant_material,
examination_object.block_material,
probe.to_oriented_points(),
examination_object.frontwall,
examination_object.backwall,
)
path = backwall_paths["LT"]
arim.ray.ray_tracing_for_paths([path])
return path.rays.times
@numba.njit(parallel=True)
def _wall_intensities_lanczos(scanlines, tof_arr, tx, rx, t0, invdt, a):
res = 0.0
for scan in range(scanlines.shape[0]):
tof = tof_arr[tx[scan], rx[scan]]
tof_idx = (tof - t0) * invdt
res += lanczos_interpolation(tof_idx, scanlines[scan], a)
return res
def _wall_intensities(frame, tof_arr):
return _wall_intensities_lanczos(
frame.scanlines,
tof_arr,
frame.tx,
frame.rx,
frame.time.start,
1 / frame.time.step,
a=3,
)
def _measure_l_vel(conf, frame, l_vel_range):
intensities = []
for l_vel in tqdm(l_vel_range, desc="L velocity"):
conf["block_material"]["longitudinal_vel"] = l_vel
tof = _time_of_flights_backwall_LL(conf)
intensities.append(_wall_intensities(frame, tof))
return pd.Series(intensities, index=l_vel_range)
def _measure_t_vel(conf, frame, t_vel_range):
intensities = []
for t_vel in tqdm(t_vel_range, desc="T velocity"):
conf["block_material"]["transverse_vel"] = t_vel
tof = _time_of_flights_backwall_LT(conf)
intensities.append(_wall_intensities(frame, tof))
return pd.Series(intensities, index=t_vel_range)
def measure_velocities_from_timetraces(dataset_name, save, noshow=False):
"""
maximise Sum_i(Envelope(TimeTrace[tof_backwall_i]))
"""
conf = arim.io.load_conf(dataset_name)
# conf["frontwall"]["numpoints"] = 1000
# conf["backwall"]["numpoints"] = 1000
root_dir = conf["root_dir"]
result_dir = conf["result_dir"]
frame = common.load_frame(
conf, apply_filter=True, expand=True, warn_if_fallback_vel=False
)
frame.scanlines = np.abs(frame.scanlines)
# === L velocity ===
# First pass
base_l_vel = (
conf["block_material"]["longitudinal_vel"] // 10
) * 10 # make round numbers
l_vel_range_1 = np.arange(base_l_vel - 100, base_l_vel + 100.1, 10.0)
intensities_1 = _measure_l_vel(conf, frame, l_vel_range_1)
l_vel_1_idx = intensities_1.values.argmax()
if l_vel_1_idx == 0 or l_vel_1_idx == (len(l_vel_range_1) - 1):
# we're on a bound, that's bad
raise IndefiniteVelocityError
# Second pass
l_vel_range_2 = np.arange(
l_vel_range_1[l_vel_1_idx - 1] + 1, l_vel_range_1[l_vel_1_idx + 1], 1.0
)
intensities_2 = _measure_l_vel(conf, frame, l_vel_range_2)
# agregate results
intensities = | pd.concat([intensities_1, intensities_2]) | pandas.concat |
import pandas as pd
import numpy as np
from texthero import representation
from texthero import preprocessing
from . import PandasTestCase
import doctest
import unittest
import string
import math
import warnings
from parameterized import parameterized
"""
Test doctest
"""
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(representation))
return tests
"""
Helper functions for the tests.
"""
def _tfidf(term, corpus, document_index):
idf = (
math.log((1 + len(corpus)) / (1 + len([doc for doc in corpus if term in doc])))
+ 1
)
tfidf_value = idf * corpus[document_index].count(term)
return tfidf_value
"""
Test functions in representation module in a
parameterized way.
"""
# Define valid inputs / outputs / indexes for different functions.
s_not_tokenized = pd.Series(["This is not tokenized!"])
s_tokenized = pd.Series([["Test", "Test", "TEST", "!"], ["Test", "?", ".", "."]])
s_tokenized_with_noncontinuous_index = pd.Series(
[["Test", "Test", "TEST", "!"], ["Test", "?", ".", "."]], index=[5, 7]
)
s_tokenized_output_index = pd.MultiIndex.from_tuples(
[(0, "!"), (0, "TEST"), (0, "Test"), (1, "."), (1, "?"), (1, "Test")],
)
s_tokenized_output_noncontinuous_index = pd.MultiIndex.from_tuples(
[(5, "!"), (5, "TEST"), (5, "Test"), (7, "."), (7, "?"), (7, "Test")],
)
s_tokenized_output_min_df_index = pd.MultiIndex.from_tuples([(0, "Test"), (1, "Test")],)
test_cases_vectorization = [
# format: [function_name, function, correct output for tokenized input above, dtype of output]
["count", representation.count, [1, 1, 2, 2, 1, 1], "int"],
[
"term_frequency",
representation.term_frequency,
[0.125, 0.125, 0.250, 0.250, 0.125, 0.125],
"float",
],
[
"tfidf",
representation.tfidf,
[_tfidf(x[1], s_tokenized, x[0]) for x in s_tokenized_output_index],
"float",
],
]
test_cases_vectorization_min_df = [
# format: [function_name, function, correct output for tokenized input above, dtype of output]
["count", representation.count, [2, 1], "int"],
["term_frequency", representation.term_frequency, [0.666667, 0.333333], "float",],
["tfidf", representation.tfidf, [2.0, 1.0], "float",],
]
class AbstractRepresentationTest(PandasTestCase):
"""
Class for representation test cases. Most tests are
parameterized, some are implemented individually
(e.g. to test a formula manually).
"""
"""
Vectorization.
"""
@parameterized.expand(test_cases_vectorization)
def test_vectorization_simple(
self, name, test_function, correct_output_values, int_or_float
):
if int_or_float == "int":
s_true = pd.Series(
correct_output_values, index=s_tokenized_output_index, dtype="int"
).astype(pd.SparseDtype(np.int64, 0))
else:
s_true = pd.Series(
correct_output_values, index=s_tokenized_output_index, dtype="float"
).astype(pd.SparseDtype("float", np.nan))
result_s = test_function(s_tokenized)
pd.testing.assert_series_equal(s_true, result_s)
@parameterized.expand(test_cases_vectorization)
def test_vectorization_noncontinuous_index_kept(
self, name, test_function, correct_output_values, int_or_float
):
if int_or_float == "int":
s_true = pd.Series(
correct_output_values,
index=s_tokenized_output_noncontinuous_index,
dtype="int",
).astype(pd.SparseDtype(np.int64, 0))
else:
s_true = pd.Series(
correct_output_values,
index=s_tokenized_output_noncontinuous_index,
dtype="float",
).astype(pd.SparseDtype("float", np.nan))
result_s = test_function(s_tokenized_with_noncontinuous_index)
pd.testing.assert_series_equal(s_true, result_s)
@parameterized.expand(test_cases_vectorization_min_df)
def test_vectorization_min_df(
self, name, test_function, correct_output_values, int_or_float
):
if int_or_float == "int":
s_true = pd.Series(
correct_output_values,
index=s_tokenized_output_min_df_index,
dtype="int",
).astype(pd.SparseDtype(np.int64, 0))
else:
s_true = pd.Series(
correct_output_values,
index=s_tokenized_output_min_df_index,
dtype="float",
).astype(pd.SparseDtype("float", np.nan))
result_s = test_function(s_tokenized, min_df=2)
pd.testing.assert_series_equal(s_true, result_s)
@parameterized.expand(test_cases_vectorization)
def test_vectorization_not_tokenized_yet_warning(self, name, test_function, *args):
with self.assertWarns(DeprecationWarning): # check raise warning
test_function(s_not_tokenized)
@parameterized.expand(test_cases_vectorization)
def test_vectorization_arguments_to_sklearn(self, name, test_function, *args):
try:
test_function(s_not_tokenized, max_features=1, min_df=1, max_df=1.0)
except TypeError:
self.fail("Sklearn arguments not handled correctly.")
"""
Individual / special tests.
"""
def test_tfidf_formula(self):
s = pd.Series(["Hi Bye", "Test Bye Bye"])
s = preprocessing.tokenize(s)
s_true_index = pd.MultiIndex.from_tuples(
[(0, "Bye"), (0, "Hi"), (1, "Bye"), (1, "Test")],
)
s_true = pd.Series(
[_tfidf(x[1], s, x[0]) for x in s_true_index], index=s_true_index
).astype("Sparse")
self.assertEqual(representation.tfidf(s), s_true)
"""
flatten.
"""
def test_flatten(self):
index = pd.MultiIndex.from_tuples(
[("doc0", "Word1"), ("doc0", "Word3"), ("doc1", "Word2")],
)
s = pd.Series([3, np.nan, 4], index=index)
s_true = pd.Series(
[[3.0, 0.0, np.nan], [0.0, 4.0, 0.0]], index=["doc0", "doc1"],
)
pd.testing.assert_series_equal(
representation.flatten(s), s_true, check_names=False
)
def test_flatten_fill_missing_with(self):
index = pd.MultiIndex.from_tuples(
[("doc0", "Word1"), ("doc0", "Word3"), ("doc1", "Word2")],
)
s = | pd.Series([3, np.nan, 4], index=index) | pandas.Series |
import re
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.logical_types import (
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
Integer,
IntegerNullable,
Ordinal,
PostalCode,
SubRegionCode,
)
from woodwork.type_sys.type_system import DEFAULT_INFERENCE_FUNCTIONS
from woodwork.type_sys.utils import (
_get_specified_ltype_params,
_is_categorical_series,
_is_numeric_series,
col_is_datetime,
list_logical_types,
list_semantic_tags,
)
from woodwork.utils import (
_convert_input_to_set,
_get_column_logical_type,
_infer_datetime_format,
_is_null_latlong,
_is_s3,
_is_url,
_is_valid_latlong_series,
_is_valid_latlong_value,
_parse_logical_type,
_reformat_to_latlong,
_to_latlong_float,
camel_to_snake,
get_valid_mi_types,
import_or_none,
import_or_raise,
)
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
def test_camel_to_snake():
test_items = {
"PostalCode": "postal_code",
"SubRegionCode": "sub_region_code",
"NaturalLanguage": "natural_language",
"Categorical": "categorical",
}
for key, value in test_items.items():
assert camel_to_snake(key) == value
def test_convert_input_to_set():
error_message = "semantic_tags must be a string, set or list"
with pytest.raises(TypeError, match=error_message):
_convert_input_to_set(int)
error_message = "test_text must be a string, set or list"
with pytest.raises(TypeError, match=error_message):
_convert_input_to_set({"index": {}, "time_index": {}}, "test_text")
error_message = "include parameter must contain only strings"
with pytest.raises(TypeError, match=error_message):
_convert_input_to_set(["index", 1], "include parameter")
semantic_tags_from_single = _convert_input_to_set("index", "include parameter")
assert semantic_tags_from_single == {"index"}
semantic_tags_from_list = _convert_input_to_set(["index", "numeric", "category"])
assert semantic_tags_from_list == {"index", "numeric", "category"}
semantic_tags_from_set = _convert_input_to_set(
{"index", "numeric", "category"}, "include parameter"
)
assert semantic_tags_from_set == {"index", "numeric", "category"}
@patch("woodwork.utils._validate_string_tags")
@patch("woodwork.utils._validate_tags_input_type")
def test_validation_methods_called(mock_validate_input_type, mock_validate_strings):
assert not mock_validate_input_type.called
assert not mock_validate_strings.called
_convert_input_to_set("test_tag", validate=False)
assert not mock_validate_input_type.called
_convert_input_to_set("test_tag", validate=True)
assert mock_validate_input_type.called
_convert_input_to_set(["test_tag", "tag2"], validate=False)
assert not mock_validate_strings.called
_convert_input_to_set(["test_tag", "tag2"], validate=True)
assert mock_validate_strings.called
def test_list_logical_types_default():
all_ltypes = ww.logical_types.LogicalType.__subclasses__()
df = list_logical_types()
assert set(df.columns) == {
"name",
"type_string",
"description",
"physical_type",
"standard_tags",
"is_default_type",
"is_registered",
"parent_type",
}
assert len(all_ltypes) == len(df)
default_types_set = {str(cls) for cls in DEFAULT_INFERENCE_FUNCTIONS.keys()}
listed_as_default = set(df[df["is_default_type"]]["name"])
assert listed_as_default == default_types_set
def test_list_logical_types_customized_type_system():
ww.type_system.remove_type("URL")
class CustomRegistered(ww.logical_types.LogicalType):
primary_dtype = "int64"
class CustomNotRegistered(ww.logical_types.LogicalType):
primary_dtype = "int64"
ww.type_system.add_type(CustomRegistered)
all_ltypes = ww.logical_types.LogicalType.__subclasses__()
df = list_logical_types()
assert len(all_ltypes) == len(df)
# Check that URL is unregistered
url = df[df.name == "URL"].iloc[0]
assert url.is_default_type
assert not url.is_registered
# Check that new registered type is present and shows as registered
index = df.name == "CustomRegistered"
assert index.any()
custom = df[index].iloc[0]
assert not custom.is_default_type
assert custom.is_registered
# Check that new unregistered type is present and shows as not registered
index = df.name == "CustomNotRegistered"
assert index.any()
custom = df[index].iloc[0]
assert not custom.is_default_type
assert not custom.is_registered
ww.type_system.reset_defaults()
def test_list_semantic_tags():
df = list_semantic_tags()
assert set(df.columns) == {"name", "is_standard_tag", "valid_logical_types"}
for name, valid_ltypes in df[["name", "valid_logical_types"]].values:
if name in ["passthrough", "ignore", "index"]:
assert valid_ltypes == "Any LogicalType"
elif name not in ["time_index", "date_of_birth"]:
assert isinstance(valid_ltypes, list)
for log_type in valid_ltypes:
assert name in log_type.standard_tags
def test_is_numeric_datetime_series(time_index_df):
assert _is_numeric_series(time_index_df["ints"], None)
assert _is_numeric_series(time_index_df["ints"], Double)
assert not _is_numeric_series(time_index_df["ints"], Categorical)
assert _is_numeric_series(time_index_df["ints"], Datetime)
assert not _is_numeric_series(time_index_df["strs"], None)
assert not _is_numeric_series(time_index_df["strs"], "Categorical")
assert not _is_numeric_series(time_index_df["strs"], Categorical)
assert _is_numeric_series(time_index_df["strs"], Double)
assert _is_numeric_series(time_index_df["strs"], "Double")
assert not _is_numeric_series(time_index_df["bools"], None)
assert not _is_numeric_series(time_index_df["bools"], "Boolean")
assert not _is_numeric_series(time_index_df["times"], None)
assert not _is_numeric_series(time_index_df["times"], Datetime)
assert not _is_numeric_series(time_index_df["letters"], None)
assert not _is_numeric_series(time_index_df["letters"], Double)
assert not _is_numeric_series(time_index_df["letters"], Categorical)
def test_get_ltype_params():
params_empty_class = _get_specified_ltype_params(Categorical)
assert params_empty_class == {}
params_empty = _get_specified_ltype_params(Categorical())
assert params_empty == {}
params_class = _get_specified_ltype_params(Datetime)
assert params_class == {}
params_null = _get_specified_ltype_params(Datetime())
assert params_null == {"datetime_format": None}
ymd = "%Y-%m-%d"
params_value = _get_specified_ltype_params(Datetime(datetime_format=ymd))
assert params_value == {"datetime_format": ymd}
def test_import_or_raise():
assert import_or_raise("pandas", "Module pandas could not be found") == pd
error = "Module nonexistent could not be found."
with pytest.raises(ImportError, match=error):
import_or_raise("nonexistent", error)
def test_import_or_none():
assert import_or_none("pandas") == pd
assert import_or_none("nonexistent") is None
def test_is_url():
assert _is_url("https://www.google.com/")
assert not _is_url("google.com")
def test_is_s3():
assert _is_s3("s3://test-bucket/test-key")
assert not _is_s3("https://woodwork-static.s3.amazonaws.com/")
def test_reformat_to_latlong_errors():
for latlong in [{1, 2, 3}, "{1, 2, 3}", "This is text"]:
error = f"LatLongs must either be a tuple, a list, or a string representation of a tuple. {latlong} does not fit the criteria."
with pytest.raises(ValueError, match=error):
_reformat_to_latlong(latlong)
error = re.escape(
"LatLongs must either be a tuple, a list, or a string representation of a tuple. (1,2) does not fit the criteria."
)
with pytest.raises(ValueError, match=error):
_reformat_to_latlong("'(1,2)'")
for latlong in [(1, 2, 3), "(1, 2, 3)"]:
error = re.escape(
"LatLong values must have exactly two values. (1, 2, 3) does not have two values."
)
with pytest.raises(ValueError, match=error):
_reformat_to_latlong(latlong)
error = re.escape(
"Latitude and Longitude values must be in decimal degrees. The latitude or longitude represented by 41deg52'54\" N cannot be converted to a float."
)
with pytest.raises(ValueError, match=error):
_reformat_to_latlong(("41deg52'54\" N", "21deg22'54\" W"))
def test_reformat_to_latlong():
simple_latlong = (1, 2)
assert _reformat_to_latlong((1, 2)) == simple_latlong
assert _reformat_to_latlong(("1", "2")) == simple_latlong
assert _reformat_to_latlong("(1,2)") == simple_latlong
# Check non-standard tuple formats
assert _reformat_to_latlong([1, 2]) == simple_latlong
assert _reformat_to_latlong(["1", "2"]) == simple_latlong
assert _reformat_to_latlong("[1, 2]") == simple_latlong
assert _reformat_to_latlong("1, 2") == simple_latlong
assert _reformat_to_latlong(None) is np.nan
assert _reformat_to_latlong((1, np.nan)) == (1, np.nan)
assert _reformat_to_latlong((np.nan, "1")) == (np.nan, 1)
# This is how csv and parquet will deserialize
assert _reformat_to_latlong("(1, nan)") == (1, np.nan)
assert _reformat_to_latlong("(NaN, 9)") == (np.nan, 9)
def test_reformat_to_latlong_list():
simple_latlong = [1, 2]
assert _reformat_to_latlong((1, 2), use_list=True) == simple_latlong
assert _reformat_to_latlong(("1", "2"), use_list=True) == simple_latlong
assert _reformat_to_latlong("(1,2)", use_list=True) == simple_latlong
assert _reformat_to_latlong([1, 2], use_list=True) == simple_latlong
assert _reformat_to_latlong(["1", "2"], use_list=True) == simple_latlong
assert _reformat_to_latlong("[1, 2]", use_list=True) == simple_latlong
assert _reformat_to_latlong("1, 2", use_list=True) == simple_latlong
assert _reformat_to_latlong((1, np.nan), use_list=True) == [1, np.nan]
assert _reformat_to_latlong((np.nan, "1"), use_list=True) == [np.nan, 1]
# This is how csv and parquet will deserialize
assert _reformat_to_latlong("[1, nan]", use_list=True) == [1, np.nan]
assert _reformat_to_latlong("[1, NaN]", use_list=True) == [1, np.nan]
def test_to_latlong_float():
assert _to_latlong_float(4) == 4.0
assert _to_latlong_float("2.2") == 2.2
assert _to_latlong_float(None) is np.nan
assert _to_latlong_float(np.nan) is np.nan
assert _to_latlong_float(pd.NA) is np.nan
error = re.escape(
"Latitude and Longitude values must be in decimal degrees. The latitude or longitude represented by [1, 2, 3] cannot be converted to a float."
)
with pytest.raises(ValueError, match=error):
_to_latlong_float([1, 2, 3])
def test_is_null_latlong():
assert _is_null_latlong(None)
assert _is_null_latlong(np.nan)
assert _is_null_latlong(pd.NA)
assert _is_null_latlong("None")
assert _is_null_latlong("nan")
assert _is_null_latlong("NaN")
assert not _is_null_latlong([None, 1, 3])
assert not _is_null_latlong("none")
assert not _is_null_latlong(0)
assert not _is_null_latlong(False)
def test_is_valid_latlong_value():
values = [
(1.0, 2.0),
np.nan,
[1.0, 2.0],
(np.nan, np.nan),
("a", 2.0),
(1.0, 2.0, 3.0),
None,
]
expected_values = [True, True, False, False, False, False, False]
for index, value in enumerate(values):
assert _is_valid_latlong_value(value) is expected_values[index]
def test_is_valid_latlong_value_koalas():
values = [
(1.0, 2.0),
np.nan,
[1.0, 2.0],
(np.nan, np.nan),
("a", 2.0),
(1.0, 2.0, 3.0),
None,
]
expected_values = [False, True, True, False, False, False, False]
for index, value in enumerate(values):
assert (
_is_valid_latlong_value(value, bracket_type=list) is expected_values[index]
)
def test_is_valid_latlong_series():
valid_series = pd.Series([(1.0, 2.0), (3.0, 4.0)])
invalid_series = pd.Series([(1.0, 2.0), (3.0, "4.0")])
assert _is_valid_latlong_series(valid_series) is True
assert _is_valid_latlong_series(invalid_series) is False
def test_get_valid_mi_types():
valid_types = get_valid_mi_types()
expected_types = [
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
Integer,
IntegerNullable,
Ordinal,
PostalCode,
SubRegionCode,
]
assert valid_types == expected_types
def test_get_column_logical_type(sample_series):
assert isinstance(
_get_column_logical_type(sample_series, None, "col_name"), Categorical
)
assert isinstance(
_get_column_logical_type(sample_series, Datetime, "col_name"), Datetime
)
def test_parse_logical_type():
assert isinstance(_parse_logical_type("Datetime", "col_name"), Datetime)
assert isinstance(_parse_logical_type(Datetime, "col_name"), Datetime)
ymd_format = Datetime(datetime_format="%Y-%m-%d")
assert _parse_logical_type(ymd_format, "col_name") == ymd_format
def test_parse_logical_type_errors():
error = "Invalid logical type specified for 'col_name'"
with pytest.raises(TypeError, match=error):
_parse_logical_type(int, "col_name")
def test_col_is_datetime():
inputs = [
pd.to_datetime(pd.Series(["2020-01-01", "2021-02-02", "2022-03-03"])),
pd.to_datetime(pd.Series([pd.NA, "2021-02-02", "2022-03-03"])),
pd.Series([1, 2, 3]),
pd.Series([pd.NA, 2, 3]),
pd.Series([1.0, 2.0, 3.0]),
pd.Series([pd.NA, 2.0, 3.0]),
pd.Series(["2020-01-01", "2021-02-02", "2022-03-03"]),
pd.Series([pd.NA, "2021-02-02", "2022-03-03"]),
pd.Series(["a", "b", "c"]),
pd.Series([pd.NA, "b", "c"]),
pd.Series([pd.NA, pd.NA, pd.NA]),
]
expected_values = [
True,
True,
False,
False,
False,
False,
True,
True,
False,
False,
False,
]
for input, expected in list(zip(inputs, expected_values)):
actual = col_is_datetime(input)
assert actual is expected
def test_infer_datetime_format(datetimes):
for series in datetimes:
fmt = _infer_datetime_format(series)
assert fmt == "%m/%d/%Y"
dt = pd.Series(
["3/11/2000 9:00", "3/11/2000 10:00", "3/11/2000 11:00", "3/11/2000 12:00"]
)
fmt = _infer_datetime_format(dt)
assert fmt == "%m/%d/%Y %H:%M"
# https://github.com/alteryx/woodwork/pull/1158
dt = pd.Series(["Tue 24 Aug 2021 01:30:48 AM"])
fmt = _infer_datetime_format(dt)
assert fmt == "%a %d %b %Y %H:%M:%S %p"
# https://github.com/alteryx/woodwork/pull/1158
dt = pd.Series(["Tuesday 24 Aug 2021 01:30:48 AM"])
fmt = _infer_datetime_format(dt)
assert fmt == "%A %d %b %Y %H:%M:%S %p"
def test_infer_datetime_format_all_null():
missing_data = [
pd.Series([None, None, None]),
| pd.Series([np.nan, np.nan, np.nan]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 16 09:15:54 2016
@author: <NAME>
"""
import pandas as pd
import numpy as np
###### Import packages needed for the make_vars functions
from scipy.interpolate import interp1d
import pywt
from skimage.filters.rank import entropy
from skimage.morphology import rectangle
from skimage.util import img_as_ubyte
def make_dwt_vars_cD(wells_df,logs,levels,wavelet):
wave= pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
cA_4, cD_4, cD_3, cD_2, cD_1 = pywt.wavedec(temp_data,wave,level=4,mode='symmetric')
dict_cD_levels = {1:cD_1, 2:cD_2, 3:cD_3, 4:cD_4}
for i in levels:
new_depth = np.linspace(min(depth),max(depth),len(dict_cD_levels[i]))
fA = interp1d(new_depth,dict_cD_levels[i],kind='nearest')
temp_df[log + '_cD_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_dwt_vars_cA(wells_df,logs,levels,wavelet):
wave= pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for i in levels:
cA_cD = pywt.wavedec(temp_data,wave,level=i,mode='symmetric')
cA = cA_cD[0]
new_depth = np.linspace(min(depth),max(depth),len(cA))
fA = interp1d(new_depth,cA,kind='nearest')
temp_df[log + '_cA_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_entropy_vars(wells_df,logs,l_foots):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
image = np.vstack((temp_data,temp_data,temp_data))
image -= np.median(image)
image /= np.max(np.abs(image))
image = img_as_ubyte(image)
for l_foot in l_foots:
footprint = rectangle(l_foot,3)
temp_df[log + '_entropy_foot' + str(l_foot)] = entropy(image,footprint)[0,:]
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_gradient_vars(wells_df,logs,dx_list):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for dx in dx_list:
temp_df[log + 'gradient_dx' + str(dx)] = np.gradient(temp_data,dx)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_av_vars(wells_df,logs,windows):
new_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import re
import datetime
def load_data_helper(path, table_names):
headers = {}
data = {}
student_sect = [name for name in table_names if "stud" in name]
# utf-8-sig: encoding with utf-8 without BOM
with open(path, "r", encoding="utf-8-sig") as f1:
tmp_header = ""
nextIsHeader = False
isStudTable = False
for line in f1.readlines():
line = line.replace("\n", "")
if(line in table_names):
tmp_header = line
data[tmp_header] = []
nextIsHeader = True
if(line in student_sect):
isStudTable = True
elif(nextIsHeader):
headers[tmp_header] = line.split(",")
nextIsHeader = False
else:
if(isStudTable):
line = _strip_en_name(line)
data[tmp_header].append(line.split(","))
tables = {}
for table_name in table_names:
tables[table_name] = (pd.DataFrame(
data=data[table_name], columns=headers[table_name]))
del data
return tables
def _strip_en_name(user_name):
regex = re.compile(".*?(\(.*?\))")
result = re.findall(regex, user_name)
try:
return user_name.replace(result[0], "").replace(" ", "").replace("\"", "")
except:
return user_name.replace("\"", "")
def to_datetime(x):
tempt = datetime.datetime.strptime(
x, '%Y-%m-%d %H:%M:%S %Z') + datetime.timedelta(hours=8)
return tempt.hour*100 + tempt.minute
def set_date_index(df, col):
df[col] = | pd.to_datetime(df[col]) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 21 21:26:14 2017
@author: skywalk
"""
#判断月份
import time
import tushare as ts
import pandas as pd
import numpy as np
dct2=pd.date_range(2010,2017)
dcmon=time.ctime()[4:7]
dc={"Jan":1,'Feb':1,'Mar':1,'Apr':0.5,'May':0.4,"Jun":0.2,'jul':1,'Aug':1,'Sep':0.8,'Oct':0.7,'Nov':0.5,"Dec":0.8}
print(dc[dcmon])
'''
一月 January (Jan)2. 二月 February (Feb)3. 三月 March (Mar)
4. 四月 April (Apr)5. 五月 May (May)
6. 六月 June (Jun)7. 七月 July (Jul)
8. 八月 August (Aug)9. 九月 September (Sep)
10. 十月 October (Oct) 11. 十一月 November (Nov)12. 十二月 December (Dec)
'''
dctime=time.ctime()
dctime=time.strftime('%Y%m%d')
print(dctime)
dcdata =ts.get_k_data('000001',index=True,start='2010-01-01')
dct=dcdata.date
dct1=pd.date_range('2010','2011')
rng=dct1
dcts= pd.Series(np.random.randn(len(rng)), index=rng)
dcts[0:10].to_period('M')
dcts[0:10].to_period('W')
'''
Series 才有to_period
'''
#dcdata.asfreq('M')
#dcdata.index=dcdata['date']
#dct3=dcdata.asfreq(freq='M')
#print(dct3)
#dctemp=dct
#for i in dct :
# print (i,)
print(dcdata.date[5:7])
#拿到月初和月末的数据
#dcdata['yuemo']=dcdata.date[5:7] !=dcdata.date.shift(-1)[5:7]
#dctemp=dcdata[dcdata.yuemo==True]
#for i in range(1,len(dcdata)):
# print (type(i),i,dcdata[i])
df = | pd.DataFrame({'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]})
print(df) | pandas.DataFrame |
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
import sys, os.path
if __name__ == '__main__':
result_path = sys.argv[1]
result_dir, filename = os.path.split(result_path)
n = int(filename.split('gram')[0][-1])
# sublex_id = int(sys.argv[2])
# list_length = int(sys.argv[3])
df_ngram = pd.read_csv(result_path)
min_frequency = 1
# df_ngram = df_ngram[df_ngram.sublex_id == sublex_id]
df_ngram = pd.read_csv(result_path, encoding='utf-8')
df_ngram = df_ngram[df_ngram.frequency >= min_frequency]
# df_ngram.loc[:,'decoded_value'] = df_ngram.decoded_value.str.replace(u'ä','a')
df_context = df_ngram.decoded_context.str.split('_', expand=True).rename(columns={0:'c1', 1:'c2'})
df_ngram = | pd.concat([df_ngram,df_context], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
##############################################################################
# simulation of earthquake catalogs using ETAS
#
# as described by Mizrahi et al., 2021
# <NAME>, <NAME>, <NAME>;
# The Effect of Declustering on the Size Distribution of Mainshocks.
# Seismological Research Letters 2021; doi: https://doi.org/10.1785/0220200231
##############################################################################
import pandas as pd
import numpy as np
import datetime as dt
import geopandas as gpd
from scipy.special import gammaincc, gammainccinv, gamma as gamma_func
from inversion import parameter_dict2array, to_days, branching_ratio, \
haversine, expected_aftershocks, upper_gamma_ext
from mc_b_est import simulate_magnitudes
from shapely.geometry import Polygon
def inverse_upper_gamma_ext(a, y):
# TODO: find a more elegant way to do this
if a > 0:
return gammainccinv(a, y/gamma_func(a))
else:
from pynverse import inversefunc
import warnings
from scipy.optimize import minimize
uge = (lambda x: upper_gamma_ext(a, x))
# numerical inverse
def num_inv(a, y):
def diff(x, xhat):
xt = upper_gamma_ext(a, x)
return (xt - xhat)**2
x = np.zeros(len(y))
for idx, y_value in enumerate(y):
res = minimize(diff, 1.0, args=(y_value), method='Nelder-Mead', tol=1e-6)
x[idx] = res.x[0]
return x
warnings.filterwarnings("ignore")
result = inversefunc(uge, y)
warnings.filterwarnings("default")
# where inversefunc was unable to calculate a result, calculate numerical approximation
nan_idxs = np.argwhere(np.isnan(result)).flatten()
if len(nan_idxs) > 0:
num_res = num_inv(a, y[nan_idxs])
result[nan_idxs] = num_res
return result
def simulate_aftershock_time(log10_c, omega, log10_tau, size=1):
# time delay in days
c = np.power(10, log10_c)
tau = np.power(10, log10_tau)
y = np.random.uniform(size=size)
return inverse_upper_gamma_ext(-omega, (1 - y) * upper_gamma_ext(-omega, c / tau)) * tau - c
def simulate_aftershock_place(log10_d, gamma, rho, mi, mc):
# x and y offset in km
d = np.power(10, log10_d)
d_g = d * np.exp(gamma * (mi - mc))
y_r = np.random.uniform(size=len(mi))
r = np.sqrt(np.power(1 - y_r, -1 / rho) * d_g - d_g)
phi = np.random.uniform(0, 2 * np.pi, size=len(mi))
x = r * np.sin(phi)
y = r * np.cos(phi)
return x, y
def simulate_aftershock_radius(log10_d, gamma, rho, mi, mc):
# x and y offset in km
d = np.power(10, log10_d)
d_g = d * np.exp(gamma * (mi - mc))
y_r = np.random.uniform(size=len(mi))
r = np.sqrt(np.power(1 - y_r, -1 / rho) * d_g - d_g)
return r
def simulate_background_location(latitudes, longitudes, background_probs, scale=0.1, n=1):
np.random.seed()
keep_idxs = background_probs >= np.random.uniform(size=len(background_probs))
sample_lats = latitudes[keep_idxs]
sample_lons = longitudes[keep_idxs]
choices = np.floor(np.random.uniform(0, len(sample_lats), size=n)).astype(int)
lats = sample_lats.iloc[choices] + np.random.normal(loc=0, scale=scale, size=n)
lons = sample_lons.iloc[choices] + np.random.normal(loc=0, scale=scale, size=n)
return lats, lons
def generate_background_events(polygon, timewindow_start, timewindow_end,
parameters, beta, mc, delta_m=0,
background_lats=None, background_lons=None,
background_probs=None, gaussian_scale=None
):
from inversion import polygon_surface, to_days
theta_without_mu = parameters["log10_k0"], parameters["a"], parameters["log10_c"], parameters["omega"], \
parameters["log10_tau"], parameters["log10_d"], parameters["gamma"], parameters["rho"]
area = polygon_surface(polygon)
timewindow_length = to_days(timewindow_end - timewindow_start)
# area of surrounding rectangle
min_lat, min_lon, max_lat, max_lon = polygon.bounds
coords = [[min_lat, min_lon],
[max_lat, min_lon],
[max_lat, max_lon],
[min_lat, max_lon]]
rectangle = Polygon(coords)
rectangle_area = polygon_surface(rectangle)
# number of background events
expected_n_background = np.power(10, parameters["log10_mu"]) * area * timewindow_length
n_background = np.random.poisson(lam=expected_n_background)
# generate too many events, afterwards filter those that are in the polygon
n_generate = int(np.round(n_background * rectangle_area / area * 1.2))
print(" number of background events needed:", n_background)
print(" generating", n_generate, "to throw away those outside the polygon")
# define dataframe with background events
catalog = | pd.DataFrame(None, columns=["latitude", "longitude", "time", "magnitude", "parent", "generation"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import glob
import HP
from multiprocessing import Pool
def merge_assessment_score(df):
new_df = pd.DataFrame()
for note in note_list:
tmp = df.loc[df['note_id'] == note]
score_list = tmp.score.unique()
if 'No score' in score_list:
if tmp.shape[0] < 2:
print("no assessment extracted")
else:
idx_list = tmp.loc[tmp['tag'] == 'INTERPRETATION'].index.values
all_idx = tmp.index.values
for idx in idx_list:
if idx == all_idx[0]:
print('INTERPRETATION is before assessment')
if idx == all_idx[-1]:
print('INTERPRETATION is the last extraction for this note')
else:
if tmp['score'][idx+1] == 'No score':
print('No score at idx: ', idx+1)
assess = tmp['assessment'][idx+1]
score = tmp['score'][idx]
tmp['score'][idx+1] = score
else:
print('Only INTERPRETATION tags. No empty assessment found')
new_df = new_df.append(tmp)
return new_df
def parallelize_dataframe(df, func, n_cores=4):
df_split = np.array_split(df, n_cores)
pool = Pool(n_cores)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
return df
if __name__ == '__main__':
print("Loading the data...")
# fnames = glob.glob(HP.output_path + 'set_proto_batch000_extracted.parquet') #TODO make name more general for github
# fnames.sort()
# full_df = pd.DataFrame()
# for f in fnames:
# df = pd.read_parquet(f)
# full_df = full_df.append(df)
full_df = | pd.read_parquet(HP.extracted_pros) | pandas.read_parquet |
#!/usr/bin/env python3
# coding: utf-8
import json
import logging
import os
import random
import arrow
import pandas as pd
import requests
from ._scrapers import (
scrape_credits,
scrape_albuminfo,
scrape_newest,
scrape_requirements,
scrape_songinfo,
scrape_stats,
scrape_top200,
SONGURL,
ALBUMURL
)
from ._utils import (
interpolate,
)
class Song(object):
"""This class represents a single song from a database.
It's not meant to be instantiated directly, but instances of it are
created on demand by the parent SongDB object.
Attributes:
id: The unique identifier of the song on Genie Music.
title: The title of the song.
artist: The artist of the song.
is_tracking: Whether the song is currently being tracked or not.
Please refer to the SongDB docstring for more information on
the tracking system.
credits: A dictionary containing the songwriting credits for the
song as reported by Genie.
minute: The minute at which the song will be fetched by the
parent database.
"""
def __init__(self, db, songid):
self.id = songid
self._info = db._songs[self.id]
self.title = self._info.get('title')
self.artist = self._info.get('artist')
self.agency = self._info.get('agency')
self.release_date = arrow.get(self._info.get('release_date'))
self._db_path = os.path.join(db.path, '{}.pkl'.format(self.id))
@property
def is_tracking(self):
return self._info['is_tracking']
@is_tracking.setter
def is_tracking(self, value):
self._info['is_tracking'] = value
@property
def credits(self):
return self._info['credits']
@credits.setter
def credits(self, value):
self._info['credits'] = value
@property
def minute(self):
if 'minute' not in self._info:
random.seed(self.id)
self._info['minute'] = random.randrange(1, 60)
return self._info['minute']
def fetch(self):
"""Fetches and stores the current total play count from Genie."""
# scraping code
try:
page = requests.get(SONGURL, {'xgnm': self.id})
except (requests.ConnectionError, requests.HTTPError):
logging.error('Request to genie.co.kr for song ID %s failed',
self.id)
return
markup = page.text
if not self.credits:
self.credits = scrape_credits(markup)
tstamp = arrow.get(page.headers.get('date'),
'ddd, DD MMM YYYY HH:mm:ss ZZZ')
stats = scrape_stats(markup)
# prepare the record to be stored
record = pd.DataFrame(stats, [pd.to_datetime(tstamp.datetime)])
# save new record in the database
self._db_append(record)
logging.info('Fetching completed: %s by %s',
self.title, self.artist)
def _get_stats(self):
data = self._db
if data.empty:
return data
else:
data = interpolate(data)
data = data.floordiv(1) # truncate the decimal part
data = (-data.diff(-1)).head(-1)
return data.tz_convert('Asia/Seoul').to_period()
def get_plays(self):
"""Returns a table of hourly plays data.
Returns:
A Pandas Series object with a hourly PeriodIndex. The values
represent the number of plays in the hour period. A record
such as
2018-09-18 11:00 3017
means that the song has been played 3017 times in the time
period from 11:00 to 11:59 of September 18, 2018. If not
enough data have been fetched to return such a table an
empty Series object will be returned. Times are given in
Korean Standard Time.
"""
return self._get_stats()['plays'].rename(self.title)
def get_listeners(self):
"""Returns a table of hourly listeners data.
Returns:
A Pandas Series object with a hourly PeriodIndex. The values
represent the number of new listeners in the hour period. A
record such as
2018-09-18 11:00 183
means that 183 people have listened to the song for the
first time in the time period from 11:00 to 11:59 of
September 18, 2018. If not enough data have been fetched to
return such a table an empty Series object will be returned.
Times are given in Korean Standard Time.
"""
return self._get_stats()['listeners'].rename(self.title)
def _db_append(self, record):
db = self._db.append(record, sort=True)
db = db.drop_duplicates()
db.to_pickle(self._db_path)
@property
def _db(self):
return pd.read_pickle(self._db_path)
class SongDB(object):
"""This class represents a single database.
It provides methods to access the streaming data and keep the
database up to date. Songs in the database, represented by Song
objects, can be accessed either by iterating on the SongDB instance
or by indexing, using their song ID.
When creating a new database, it is advised to call the init_db()
function found in this module, which will also return a SongDB
instance to access the newly created database.
The database is designed to fetch the total play count of the songs
it keeps track of every hour, while adding new songs by looking at
the hourly Genie Top 200. These operations, executed by the fetch()
and update() methods respectively, need to be automated by the user
of the package through a daemon, cron jobs or similar. A server
module might be added to the package at a later date.
To avoid overloading the Genie servers with requests, the database
isn't designed to fetch streaming data for all the songs at the same
time. Rather, a minute is assigned algorithmically to every song and
the fetch() method will only retrieve data for the songs allotted to
the minute at which the method is called. For example, the song
뚜두뚜두 (DDU-DU DDU-DU) by BLACKPINK is scheduled to be fetched at
the 17th minute of every hour, and will be only fetched if the
fetch() method of the SongDB instance is called at that time.
In order to prevent the average load to exceed one request per
second, a limited number of songs can be tracked at any given time
and when the database exceeds that size a number of songs stops
being tracked further through a call to the prune() method.
Attributes:
path: the path to the directory where the database files are stored.
quota: the maximum number of songs the database can track at any
given moment. It's currently hardcoded to 3540, but it can be
overridden at runtime and it will be configurable in a
future release.
tracking: the number of songs that are currently being tracked.
"""
def __init__(self, path):
"""Returns a new SongDB object.
Args:
path: the path to the directory where the file structure of
the database is located. Use init_db() to initialize a
new database.
Returns:
A SongDB instance pointing to the database found in the path.
Raises:
FileNotFoundError: a file or directory required by the
database has not been found.
"""
self.path = path
self.quota = 3540 # TODO make it configurable in the settings
self._json_path = os.path.join(self.path, 'songs.json')
self._blacklist_path = os.path.join(self.path, 'blacklist.json')
self._songs = {}
self.blacklist = []
self._cache = {}
self.load()
def __getitem__(self, key):
if key not in self._cache:
self._cache[key] = Song(self, key)
return self._cache[key]
def __iter__(self):
for songid in self._songs:
yield self[songid]
def __len__(self):
return len(self._songs)
def __contains__(self, item):
return item in self._songs
def is_tracking(self, songid):
"""Tells if the provided song ID is currently being tracked."""
try:
return self[songid].is_tracking
except KeyError:
return False
def count_tracking(self):
"""Returns the number of songs currently being tracked."""
return len([song for song in self if song.is_tracking])
def prune(self, n):
"""Stops n currently tracking songs from being tracked.
The songs are chosen based on their streaming performance.
Specifically, the n songs with the least average plays/hour in
the last 10 days will stop being tracked. The are not removed
from the database, and their tracking can be resumed if they are
found again in the hourly Top 200 in a future call
to SongDB.update().
Args:
n: the number of songs to be pruned.
"""
# rank the song ids by streams in the last 10 days
performance = {}
for song in self:
streams = song.get_plays().to_timestamp().last('10D').mean()
performance[song.id] = streams
for songid in sorted(performance, key=performance.get)[:n]:
self[songid].is_tracking = False
logging.info('Disabled tracking of %d songs', min(n, performance))
def add_from_songid(self, songid):
"""Fetches metadata and adds the song provided to the database."""
songinfo = scrape_songinfo(songid)
self.add_from_songinfo(songinfo)
def add_from_songinfo(self, songinfo):
"""Adds to the database the song with the metadata provided.
Args:
songinfo: a dictionary with keys 'id', 'title', 'artist',
'release_date' and 'agency'.
"""
self._songs[songinfo['id']] = {'title': songinfo['title'],
'artist': songinfo['artist'],
'release_date':
songinfo['release_date'].for_json(),
'is_tracking': True,
'credits': {},
'agency': songinfo['agency']}
db_path = os.path.join(self.path, '{}.pkl'.format(songinfo['id']))
| pd.DataFrame(columns=['plays', 'listeners'], dtype=int) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Squence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = | Index(['a', 'b', 'c']) | pandas.Index |
import pandas as pd
from fbprophet import Prophet
import json
import dataframe
import sys
import argparse
import topologia
from Utilities.utilities import discretize_data_quantile
import traceback
# MANDAR A BASE, O N_BINS, A VARIAVEL ALVO, QUATIDADE DE ANOS, TIPO DE DISCRETIZAÇÃO
def discretizacao_BDCompleta(n_bins,type_discretize, mes, base, alvo, ano_projecao):
dicano = dict()
base=pd.DataFrame(base).drop(['DATA'], axis=1)
topologia.node_list=base.columns
nodes=list(topologia.node_list)
nodes.remove(alvo)
Data,faixas = discretize_data_quantile(base,n_bins,type_discretize)
#print("faixas = ", faixas)
#print('Data = ', Data)
#--------------------------------------
#virá como parametro alvo e anos de projecao (lembrando que deverar
# ter os anos de validacao)
dicano[alvo] = str(ano_projecao)
#------------------------------------------------
k=0
for ano in range(0,len(Data),12):
dicio=dict()
m=0
for mes in range(ano,(ano+12)):
dict_local=dict()
for i in range(len(nodes)):
dict_local[nodes[i]] = int(Data[nodes[i]][mes])
dicio['MES_'+str(m)]=dict_local
m=m+1
dicano['ANO_'+str(k)]=dicio
k = k+1
return dicano
def new_data(ano,data):
meses=ano*12
data['DATA'] = pd.DatetimeIndex(data['DATA'])
nomes=list(data.columns)
dfs=[]
for column in range(1,len(nomes)):
col=data[['DATA',data.columns[column]]]
dfs.append(col)
new_dfs=[]
for data in dfs:
cols=data.columns
data=data.rename(columns={cols[0]: 'ds', cols[1]:'y'})
new_dfs.append(data)
#FAZ A PREVISÃO DE DATA A PARTIR DA PRIMEIRA VARIAVEL
my_model=Prophet(interval_width=0.95)
my_model.fit(new_dfs[0])
future_dates0 = my_model.make_future_dataframe(periods=meses, freq='MS')
forecast0 = my_model.predict(future_dates0)
forecast0=forecast0[['ds']]
forecast0=forecast0.rename(columns={'ds':'DATA'})
dt= | pd.DataFrame(forecast0['DATA']) | pandas.DataFrame |
#-*- coding: utf-8 -*-
# 阈值寻优
import numpy as np
import pandas as pd
inputfile = '../data/water_heater.xls' # 输入数据路径,需要使用Excel格式
n = 4 # 使用以后四个点的平均斜率
threshold = pd.Timedelta(minutes=5) # 专家阈值
data = pd.read_excel(inputfile)
data[u'发生时间'] = pd.to_datetime(data[u'发生时间'], format='%Y%m%d%H%M%S')
data = data[data[u'水流量'] > 0] # 只要流量大于0的记录
def event_num(ts):
d = data[u'发生时间'].diff() > ts # 相邻时间作差分,比较是否大于阈值
return d.sum() + 1 # 这样直接返回事件数
dt = [ | pd.Timedelta(minutes=i) | pandas.Timedelta |
#!/usr/bin/env python3
import pdb
import pandas as pd
from pylru import lrudecorator
import seaborn as sns
BII_URL = 'http://ipbes.s3.amazonaws.com/weighted/' \
'historical-BIIAb-npp-country-1880-2014.csv'
@lrudecorator(10)
def get_raw_bii_data():
return pd.read_csv(BII_URL)
def findt(ss):
rval = [None] * len(ss)
rval[0] = True
for i in range(1, len(ss)):
rval[i] = not | pd.isnull(ss.iloc[i]) | pandas.isnull |
import pandas as pd
selectionSundays = {'2002':'03/10/2002','2003':'03/16/2003',
'2004':'03/14/2004','2005':'03/13/2005',
'2006':'03/12/2006','2007':'03/11/2007',
'2008':'03/16/2008','2009':'03/15/2009',
'2010':'03/14/2010','2011':'03/13/2011',
'2012':'03/11/2012','2013':'03/17/2013',
'2014':'03/16/2014','2015':'03/15/2015',
'2016':'03/13/2016','2017':'03/12/2017',
'2018':'03/11/2018','2019':'3/17/2019'}
selectionSundayList = ['03/10/2002','03/16/2003','03/14/2004','03/13/2005','03/12/2006','03/11/2007','03/16/2008',
'03/15/2009','03/14/2010','03/13/2011','03/11/2012',
'03/17/2013','03/16/2014','03/15/2015','03/13/2016','03/12/2017','03/11/2018', '3/17/2019']
from datetime import timedelta
days_to_subtract=7
d = timedelta(days=days_to_subtract)
# Just a consistent way of processing files. Ignore the fact that the local variables say 2014
def read_data(teams_file,games_file,madness_teams_file):
teams_2014 = pd.read_csv(teams_file,header=None)
teams_2014.columns=["number","name"]
games_2014 = pd.read_csv(games_file,header=None)
games_2014.columns = ["notsure1","date","team1","H_A_N1","points1","team2","H_A_N2","points2"]
team1_names = teams_2014.copy()
team1_names.columns = ["team1","team1_name"]
team1_names.set_index('team1',inplace=True)
games_2014 = games_2014.set_index("team1").join(team1_names,how='inner').reset_index()
team2_names = teams_2014.copy()
team2_names.columns = ["team2","team2_name"]
team2_names.set_index('team2',inplace=True)
games_2014 = games_2014.set_index("team2").join(team2_names,how='inner').reset_index()
games_2014["date"] = pd.to_datetime(games_2014["date"],format="%Y%m%d")
games_2014["team1_name"] = games_2014["team1_name"].str.replace(" ","")
games_2014["team2_name"] = games_2014["team2_name"].str.replace(" ","")
prev_len = len(games_2014)
madness_teams = pd.read_csv(madness_teams_file,header=None)
madness_teams.columns=["name"]
games_2014["team1_madness"] = 0
games_2014["team2_madness"] = 0
mask = games_2014.team1_name.isin(list(madness_teams["name"]))
games_2014.loc[mask,"team1_madness"] = 1
mask = games_2014.team2_name.isin(list(madness_teams["name"]))
games_2014.loc[mask,"team2_madness"] = 1
games_2014.reset_index()
for selection_sunday in selectionSundayList:
games = games_2014.loc[games_2014["date"] <= pd.to_datetime(selection_sunday,format="%m/%d/%Y")-d]
remaining_games = games_2014.loc[games_2014["date"] > | pd.to_datetime(selection_sunday,format="%m/%d/%Y") | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
"""(DEPRECATED) Compares the results of a batch of wltp_db vehicles against phase-1b-alpha Heinz's tool.
* Run as Test-case to generate results for sample-vehicles.
* Run it as cmd-line to compare with Heinz's results.
"""
import functools as fnt
import glob
import logging
import math
import os
import re
import unittest
from collections import OrderedDict
from unittest.case import skipIf
import numpy as np
import numpy.testing as npt
import pandas as pd
from wltp import utils
from wltp.experiment import Experiment
from .goodvehicle import goodVehicle
overwrite_old_results = True # NOTE: Set 'False' to UPDATE sample-results or run main() (assuming they are ok).
force_rerun = False
mydir = os.path.dirname(__file__)
samples_dir = "wltp_db"
vehs_data_inp_fname = "wltp_db_vehicles.csv"
vehs_data_out_fname = "wltp_db_vehicles_out.csv"
gened_fname_regex = r".*wltp_db_vehicles-(\d+).csv"
heinz_fname_regex = r".*heinz-(\d+).csv"
gened_fname_glob = "wltp_db_vehicles-*.csv"
trans_fname_glob = "trans-wltp_db_vehicles-*.csv"
driver_weight = 70
"For calculating unladen_mass."
encoding = "UTF-8"
# desc_columns_to_print = ['mean', 'std', 'min', 'max']
def _init_logging(loglevel=logging.DEBUG):
logging.basicConfig(level=loglevel)
logging.getLogger().setLevel(level=loglevel)
log = logging.getLogger(__name__)
return log
log = _init_logging()
@fnt.lru_cache()
def _read_vehicles_inp():
df = | pd.read_csv(vehs_data_inp_fname, encoding=encoding, index_col=0) | pandas.read_csv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.