| prompt (string, length 19-1.03M) | completion (string, length 4-2.12k) | api (string, length 8-90) |
|---|---|---|
# Author: <NAME> <<EMAIL>>
from time import time
from datetime import datetime
import os, sys
import numpy as np
from scipy.stats.mstats import gmean
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as hc
import pandas as pd
import pickle
import gensim
import spacy
import scispacy
from sklearn import linear_model
from sklearn.manifold import TSNE
import glob
import re
#plotting tools
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.ticker as ticker
from matplotlib import transforms
from mpl_toolkits.mplot3d import Axes3D
from wordcloud import WordCloud
from cycler import cycler
import seaborn as sns
"""
The :mod:'model_utilities' module contains many functions that are useful for processing and graphing the topic modeling results.
It also includes the SilentPrinting and Timing classes that you use with the 'with' statement. SilentPrinting stops printing to
the terminal, but is unable to silence MALLET.
"""
def plot_model_comparison(paths, x_column, y_columns, x_label, y_label, graph_title, show=True, fig_save_path=None, csv_save_path=None):
# Use this to combine multiple CompareModels CSV outputs into one with a mean and standard deviation
# Main variables
data_dict = {}
mean_sd_dict = {}
x_data = None
# Setup data_dict y_column keys
for column in y_columns:
data_dict[column] = {}
# Read each file in paths
for path in paths:
df = pd.read_csv(path)
# Setup data_dict x keys and values if not yet done
if x_data is None:
x_data = df[x_column].tolist()
for column in y_columns:
for x in x_data:
data_dict[column][x] = []
# Add file's data to list in data_dict
for column in y_columns:
data = df[column].tolist()
for x in x_data:
data_dict[column][x].append(data.pop(0))
# Calculate mean and Standard deviation for each y value
for y_column in data_dict:
mean_sd_dict[y_column] = {'X':[], 'MEAN':[], 'STDV':[]}
for x in data_dict[y_column]:
mean_sd_dict[y_column]['X'].append(x)
mean_sd_dict[y_column]['MEAN'].append(np.mean(data_dict[y_column][x]))
mean_sd_dict[y_column]['STDV'].append(np.std(data_dict[y_column][x]))
# Plot graph of x VS y with standard deviation for error bars
plt.figure(figsize=(12, 8))
for y_column in mean_sd_dict:
plt.errorbar(mean_sd_dict[y_column]['X'], mean_sd_dict[y_column]['MEAN'],
yerr=mean_sd_dict[y_column]['STDV'], label=y_column,
marker='o', markersize=5, capsize=5, markeredgewidth=1)
plt.title(graph_title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(title='Models', loc='best')
# Saving figure if fig_save_path is entered
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
# Saving a CSV file of the means and standard deviations if csv_save_path is entered
if csv_save_path is not None:
dataframe_dict= {}
for y_column in y_columns:
dataframe_dict[x_column] = mean_sd_dict[y_column]['X']
dataframe_dict[" ".join([y_column, "MEAN"])] = mean_sd_dict[y_column]['MEAN']
dataframe_dict[" ".join([y_column, "STDV"])] = mean_sd_dict[y_column]['STDV']
data = pd.DataFrame.from_dict(dataframe_dict)
data.to_csv(csv_save_path, index=False)
if show:
plt.show()
plt.close() # Closes and deletes graph to free up memory
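# Example call (a sketch; the paths, column names, and labels below are hypothetical):
#   plot_model_comparison(
#       paths=glob.glob("results/compare_models_run*.csv"),
#       x_column="Number of Topics", y_columns=["Coherence", "Perplexity"],
#       x_label="Number of topics", y_label="Score",
#       graph_title="Topic model comparison",
#       show=False, fig_save_path="comparison.png", csv_save_path="comparison_mean_sd.csv")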
def dominant_doc_topic_df(model, nlp_data, num_keywords=10):
topics_df = pd.DataFrame()
for i, row_list in enumerate(model[nlp_data.gensim_lda_input()]):
row = row_list[0] if model.per_word_topics else row_list
row = sorted(row, key=lambda x:(x[1]), reverse=True)
for j, (topic_num, prop_topic) in enumerate(row):
if j==0:
wp = model.show_topic(topic_num, topn=num_keywords)
topic_keywords = ", ".join([word for word, prop in wp])
topics_df = topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
topics_df.columns = ["Dominant Topic", "Contribution", "Topic Keywords"]
contents = pd.Series(nlp_data.get_token_text())
topics_df = pd.concat([topics_df, contents], axis=1)
topics_df = topics_df.reset_index()
topics_df.columns = ["Document", "Dominant Topic", "Contribution", "Topic Keywords", "Document Tokens"]
topics_df["Document"] += 1
topics_df["Dominant Topic"] = 1 + topics_df["Dominant Topic"].astype(int)
return topics_df
def best_doc_for_topic(dom_top_df):
sorted_df = pd.DataFrame()
dom_top_df_grouped = dom_top_df.groupby('Dominant Topic')
for i, grp in dom_top_df_grouped:
sorted_df = pd.concat([sorted_df, grp.sort_values(['Contribution'], ascending=False).head(1)], axis=0)
sorted_df.reset_index(drop=True, inplace=True)
sorted_df.columns = ["Best Document", "Topic Number", "Contribution", "Topic Keywords", "Document Tokens"]
sorted_df = sorted_df[["Topic Number", "Contribution", "Topic Keywords", "Best Document", "Document Tokens"]]
return sorted_df
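# Example pipeline (a sketch; `lda_model` and `nlp_data` are hypothetical objects):
#   dom_df = dominant_doc_topic_df(lda_model, nlp_data, num_keywords=10)
#   best_docs = best_doc_for_topic(dom_df)
#   best_docs.to_csv("best_document_per_topic.csv", index=False)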
def plot_doc_token_counts_old(dom_top_df=None, nlp_data=None, show=True, fig_save_path=None, bins=None):
if dom_top_df is not None:
doc_lens = [len(doc) for doc in dom_top_df["Document Tokens"]]
if nlp_data is not None:
doc_lens = np.array(nlp_data.sklearn_lda_input().sum(axis=1)).flatten()
fig = plt.figure(figsize=(12,7), dpi=160)
plt.hist(doc_lens, bins = 500, color='navy')
# Prints texts on the graph at x=400
x = 400
plt.text(x, 120, "Documents")
text = plt.text(x, 110, "Total Tokens")
plt.text(x, 100, "Mean")
plt.text(x, 90, "Median")
plt.text(x, 80, "Stdev")
plt.text(x, 70, "1%ile")
plt.text(x, 60, "99%ile")
#This is for offsetting the data so it will appear even
canvas = fig.canvas
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
# This prints the statistics
plt.text(x, 120, " : " + str(len(doc_lens)), transform=t)
plt.text(x, 110, " : " + str(np.sum(doc_lens)), transform=t)
plt.text(x, 100, " : " + str(round(np.mean(doc_lens), 1)), transform=t)
plt.text(x, 90, " : " + str(round(np.median(doc_lens), 1)), transform=t)
plt.text(x, 80, " : " + str(round(np.std(doc_lens),1)), transform=t)
plt.text(x, 70, " : " + str(np.quantile(doc_lens, q=0.01)), transform=t)
plt.text(x, 60, " : " + str(np.quantile(doc_lens, q=0.99)), transform=t)
plt.gca().set(xlim=(0, 500), ylabel='Number of Documents', xlabel='Document Token Count')
plt.tick_params(size=16)
plt.xticks(np.linspace(0,500,11))
plt.title('Distribution of Document Token Counts', fontdict=dict(size=22))
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_doc_token_counts(dom_top_df=None, nlp_data=None, show=True, fig_save_path=None, bins=None):
if dom_top_df is not None:
doc_lens = [len(doc) for doc in dom_top_df["Document Tokens"]]
if nlp_data is not None:
doc_lens = np.array(nlp_data.sklearn_lda_input().sum(axis=1)).flatten()
if bins is None:
bins = 50 * math.ceil(max(doc_lens)/50)
if max(doc_lens) - np.quantile(doc_lens, q=0.99) < bins * 0.2:
bins += 50 * math.ceil((bins*0.25)/50)
bin_list = [i+1 for i in range(bins)]
fig = plt.figure(figsize=(12,7), dpi=160)
plt.hist(doc_lens, bins = bin_list, color='navy', rwidth=None)
# Prints texts on the graph at position x
x = 0.79
t = fig.transFigure
plt.text(x, 0.88, "Documents", transform=t)
text = plt.text(x, 0.85, "Total Tokens", transform=t)
plt.text(x, 0.82, "Mean", transform=t)
plt.text(x, 0.79, "Median", transform=t)
plt.text(x, 0.76, "Stdev", transform=t)
plt.text(x, 0.73, "1%ile", transform=t)
plt.text(x, 0.70, "99%ile", transform=t)
#This is for offsetting the data so it will appear even
canvas = fig.canvas
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
# This prints the statistics
plt.text(x, 0.88, " : " + str(len(doc_lens)), transform=t)
plt.text(x, 0.85, " : " + str(np.sum(doc_lens)), transform=t)
plt.text(x, 0.82, " : " + str(round(np.mean(doc_lens), 1)), transform=t)
plt.text(x, 0.79, " : " + str(round(np.median(doc_lens), 1)), transform=t)
plt.text(x, 0.76, " : " + str(round(np.std(doc_lens),1)), transform=t)
plt.text(x, 0.73, " : " + str(np.quantile(doc_lens, q=0.01)), transform=t)
plt.text(x, 0.70, " : " + str(np.quantile(doc_lens, q=0.99)), transform=t)
plt.gca().set(xlim=(0, bins), ylabel='Number of Documents', xlabel='Document Token Count')
plt.tick_params(size=16)
#plt.xticks(np.linspace(0,500,11))
plt.title('Distribution of Document Token Counts', fontdict=dict(size=22))
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def create_wordcloud(topic, model, nlp_data, seed=100, num_w=20, fig_dpi=400, topic_names=None,
show=True, fig_save_path=None, colormap='tab10', horizontal_pref=0.8):
cloud = WordCloud(background_color='white', width=1000, height=1000, max_words=num_w, colormap=colormap,
prefer_horizontal=horizontal_pref, random_state=seed)
topics = model.show_topics(num_topics=-1, num_words=num_w, formatted=False)
cloud.generate_from_frequencies(dict(topics[topic-1][1]), max_font_size=300)
plt.figure(figsize=(2,2), dpi=fig_dpi)
plt.imshow(cloud)
if topic_names is None:
plt.title('Topic {}'.format(topic+1), fontdict=dict(size=16), pad=10)
else:
plt.title(topic_names[topic+1], fontdict=dict(size=16), pad=10)
plt.axis('off')
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show: # The on-screen rendering is rough, but the saved file looks good.
plt.show()
plt.close()
def create_multi_wordclouds(n_topics, n_horiz, model, nlp_data, seed=100, num_w=20, fig_dpi=400, topic_names=None, title_font=15,
show=True, fig_save_path=None, colormap='tab10', horizontal_pref=0.8):
if isinstance(n_topics, int):
topics_list = list(range(n_topics))
else:
topics_list = [i-1 for i in n_topics]
n_topics = len(topics_list)
cloud = WordCloud(background_color='white', width=1000, height=1000, max_words=num_w, colormap=colormap,
prefer_horizontal=horizontal_pref, random_state=seed)
topics = model.show_topics(num_topics=-1, num_words=num_w, formatted=False)
x_len = n_horiz
y_len = math.ceil(n_topics/n_horiz)
fig, axes = plt.subplots(y_len, x_len, figsize=(2*x_len,2*y_len), dpi=fig_dpi,
sharex=True, sharey=True, squeeze=False, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
if i < n_topics:
fig.add_subplot(ax)
topic_words = dict(topics[topics_list[i]][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
if topic_names is None:
plt.gca().set_title('Topic {}'.format(topics_list[i]+1), fontdict=dict(size=title_font), pad=10)
else:
plt.gca().set_title(topic_names[topics_list[i]+1], fontdict=dict(size=title_font), pad=10)
plt.gca().axis('off')
else:
fig.add_subplot(ax)
plt.gca().axis('off')
#plt.suptitle('Topic Wordclouds', fontdict=dict(size=16))
plt.axis('off')
plt.margins(x=0, y=0)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
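# Example call (a sketch; `lda_model` and `nlp_data` are hypothetical objects):
#   create_multi_wordclouds(10, 5, lda_model, nlp_data, num_w=20, title_font=14,
#                           show=False, fig_save_path="topic_wordclouds.png")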
def color_doc_topics(model, doc, nlp_data, max_chars=120, dpi=150, show=True, fig_save_path=None, topics=5, min_phi=None,
topic_names=None, incl_perc=False, highlight=False, highlight_topic_names=False):
# Note: the saved output file looks better than the on-screen plot
colors = [color for name, color in mcolors.TABLEAU_COLORS.items()]
if topics > 10: # There are only 10 colors so the max is 10. Change above to add more colors for more topics
topics = 10
# This is for the lemmatization step
doc_prep = gensim.utils.simple_preprocess(str(doc), deacc=True, min_len=2, max_len=30)
#This is for processing the string while retaining the original characters since simple_preprocess removes punctuation and accents
#It splits the string by ' ' and then individually processes the chunks into tokens and finds their locations in the string
#Finally a list is made with strings that directly translate to tokens and preserves non-token strings
doc_raw_split = str(doc).split()
doc_raw_word_list = []
raw_token_dict = {}
for string_piece in doc_raw_split:
tokens = gensim.utils.simple_preprocess(str(string_piece), deacc=True, min_len=1, max_len=30)
working_string = gensim.utils.deaccent(string_piece.lower())
output_string = string_piece
for token in tokens:
if token in working_string:
start_index = working_string.find(token)
end_index = start_index + len(token)
front_part = output_string[:start_index]
token_part = output_string[start_index:end_index]
output_string = output_string[end_index:]
working_string = working_string[end_index:]
if len(front_part) > 0:
doc_raw_word_list.append(front_part)
raw_token_dict[front_part] = False
doc_raw_word_list.append(token_part)
raw_token_dict[token_part] = token
if len(output_string) > 0: # This saves strings that do not become tokens, False prevents them from being in the wordset
doc_raw_word_list.append(output_string)
raw_token_dict[output_string] = False
# This is for finding all index locations of the tokens within the original raw string list
wordset = set([raw_token_dict[word] for word in raw_token_dict.keys() if raw_token_dict[word]])
doc_index_dict = {}
for word in wordset:
word_indexes = [i for i, w in enumerate(doc_raw_word_list) if raw_token_dict[w] == word]
doc_index_dict[word] = word_indexes
token_index_dict = {}
token_list = []
# This is for lemmatization of the text and linking each lemma to its original token index locations
nlp = spacy.load(nlp_data.spacy_lib, disable=['parser','ner'])
allowed_postags = ['NOUN', 'ADJ', 'VERB','ADV']
for word in doc_prep:
if word not in nlp_data.stopwords:
token = nlp(word)[0]
if token.pos_ in allowed_postags and token.lemma_ not in ['-PRON-']:
token_list.append(token.lemma_)
if token.lemma_ in token_index_dict:
token_index_dict[token.lemma_] = list(set(token_index_dict[token.lemma_] + doc_index_dict[word]))
else:
token_index_dict[token.lemma_] = doc_index_dict[word]
for token in token_index_dict:
token_index_dict[token] = sorted(set(token_index_dict[token]))
# This processes the n-grams based on the model's n-gram settings and combines index locations for the n-gram
processed_tokens = nlp_data.process_ngrams_([token_list])[0]
final_token_dict = {}
for token in processed_tokens:
if token not in final_token_dict:
final_token_dict[token] = []
split_tokens = token.split('_')
for split_token in split_tokens:
final_token_dict[token].append(token_index_dict[split_token].pop(0))
# This is where the text is processed by the model and the top n topics are saved
topic_perc, wordid_topics, wordid_phivalues = model.get_document_topics(
nlp_data.gensim_lda_input([" ".join(processed_tokens)])[0], per_word_topics=True,
minimum_probability=0.001, minimum_phi_value=min_phi)
topic_perc_sorted = sorted(topic_perc, key=lambda x:(x[1]), reverse=True)
top_topics = [topic[0] for i, topic in enumerate(topic_perc_sorted) if i < topics]
top_topics_color = {top_topics[i]:i for i in range(len(top_topics))}
word_dom_topic = {}
# This links the individual word lemmas to its best topic within available topics
for wd, wd_topics in wordid_topics:
for topic in wd_topics:
if topic in top_topics:
word_dom_topic[model.id2word[wd]] = topic
break
# Links the index location to a color
index_color_dict = {}
for token in final_token_dict:
if token in word_dom_topic:
for i in final_token_dict[token]:
index_color_dict[i] = top_topics_color[word_dom_topic[token]]
# this is for assembling the individual lines of the graph based on character length and position of punctuation
add_lines = math.ceil(len(top_topics_color)/5)
last_index = len(doc_raw_word_list) - 1
line_len = 0
line_num = 0
doc_raw_lines = [[]]
no_space_list = [".", ",", ")", ":", "'"]
for i, word in enumerate(doc_raw_word_list):
word_len = len(word)
if line_len + word_len < max_chars or (word in no_space_list and line_len <= max_chars):
if word == '(':
if i != last_index:
if (line_len + word_len + len(doc_raw_word_list[i+1]) + 1 >= max_chars
and doc_raw_word_list[i+1] not in no_space_list):
line_num += 1
line_len = 0
doc_raw_lines.append([])
else:
line_num += 1
line_len = 0
doc_raw_lines.append([])
line_len += word_len + 1
doc_raw_lines[line_num].append(i)
line_num += 1
# This creates the figure and subplots
lines = line_num + add_lines
fig, axes = plt.subplots(lines + 1, 1, figsize=(math.ceil(max_chars/8), math.ceil(lines/2)), dpi=dpi,
squeeze=True, constrained_layout=True)
axes[0].axis('off')
plt.axis('off')
indent = 0
# This is the loop for drawing the text
for i, ax in enumerate(axes):
t = ax.transData
canvas = ax.figure.canvas
if i > add_lines:
x = 0.06
line = i - add_lines - 1
for index in doc_raw_lines[line]:
word = doc_raw_word_list[index]
if word[-1] == "(":
pass
elif index != last_index:
if doc_raw_word_list[index+1][0] not in no_space_list:
word = word + " "
if index in index_color_dict:
color = colors[index_color_dict[index]]
else:
color = 'black'
if highlight:
bbox=dict(facecolor=color, edgecolor=[0,0,0,0], pad=0, boxstyle='round')
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color='black',
transform=t, fontweight=700)
if color != 'black':
text.set_bbox(bbox)
else:
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
elif i < add_lines:
x = 0.06
if i == 0:
word = "Topics: "
color = 'black'
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
indent = ex.width
else:
color = 'black'
text = ax.text(x, 0.5, "", horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=indent, units='dots')
for num, index in enumerate(range(i*5, len(top_topics))):
if num < 5:
if topic_names is None:
word = "Topic {}, ".format(top_topics[index]+1)
else:
word = topic_names[top_topics[index]+1] + ", "
if incl_perc:
topic_perc_dict = dict(topic_perc_sorted)
word = "{:.1f}% ".format(topic_perc_dict[top_topics[index]]*100) + word
color = colors[top_topics_color[top_topics[index]]]
if highlight_topic_names:
bbox=dict(facecolor=color, edgecolor=[0,0,0,0], pad=0, boxstyle='round')
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color='black',
transform=t, fontweight=700)
if color != 'black':
text.set_bbox(bbox)
else:
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
else:
ax.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.suptitle('Document Colored by Top {} Topics'.format(topics),
fontsize=22, y=0.95, fontweight=700)
# This saves and/or shows the plot. Note: the saved file looks better than the drawn plot
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
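# Example call (a sketch; `lda_model`, `raw_doc`, and `nlp_data` are hypothetical objects):
#   color_doc_topics(lda_model, raw_doc, nlp_data, topics=5, incl_perc=True,
#                    show=False, fig_save_path="colored_document.png")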
def docs_per_topic(model, nlp_data=None, doc_list=None, corpus=None):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
num_topics = model.num_topics
dominant_topics = []
topic_percantages = []
for i, corp in enumerate(corpus):
topic_perc, wordid_topics, wordidphvalues = model.get_document_topics(
corp, per_word_topics=True)
dominant_topic = sorted(topic_perc, key = lambda x: x[1], reverse=True)[0][0]
dominant_topics.append((i, dominant_topic))
topic_percantages.append(topic_perc)
df = pd.DataFrame(dominant_topics, columns=['Document', 'Dominant Topic'])  # api: pandas.DataFrame
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
# matplotlib.use('pgf')
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
import matplotlib.lines as mlines
c = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
formatting = {
"DUN": {"color": c[0], "linestyle": "-", "marker": "o", "label": "DUN"},
"ensemble": {"color": c[2], "linestyle": "-.", "marker": "o", "label": "Ensemble"},
"dropout": {"color": c[3], "linestyle": ":", "marker": "o", "label": "Dropout"},
"SGD": {"color": c[1], "linestyle": "--", "marker": "o", "label": "SGD"},
"DUN (exact)": {"color": c[6], "linestyle": (0, [6, 2, 1, 2, 1, 2]), "marker": "o", "label": "DUN (exact)"},
"dropout (0.3)": {"color": c[7], "linestyle": ":", "marker": "p", "label": "Dropout (0.3)"},
"flow ": {"color": c[8], "linestyle": "-", "marker": "o", "label": "flow"},
}
text_width = 5.50107 # in --> Confirmed with template explanation
golden_ratio = (5**.5 - 1) / 2
show_range = 5
ylim = 3
def errorfill(x, y, yerr, color=None, alpha_fill=0.3, line_alpha=1, ax=None, lw=1, linestyle='-', fill_linewidths=0.2):
ax = ax if ax is not None else plt.gca()
# yerr *= 100
if color is None:
color = ax._get_lines.get_next_color()  # current matplotlib API; the old .color_cycle.next() is Python 2 only
if np.isscalar(yerr) or len(yerr) == len(y):
ymin = y - yerr
ymax = y + yerr
elif len(yerr) == 2:
ymin, ymax = yerr
plt_return = ax.plot(x, y, color=color, lw=lw, linestyle=linestyle, alpha=line_alpha)
ax.fill_between(x, ymax, ymin, color=color, alpha=alpha_fill, linewidths=fill_linewidths)
return plt_return
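# Example use of errorfill (a sketch with synthetic data, not part of the original script):
#   x = np.linspace(-show_range, show_range, 100)
#   errorfill(x, np.sin(x), yerr=0.2, color=c[0], ax=plt.gca())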
# Visualize the result
def visualize_uncertainty(savePath, gt_x, gt_y, xdata, mean, var):
plt.figure(dpi=200)
var = np.sqrt(var)
plt.plot(gt_x, gt_y, 'ok', ms=1)
plt.plot(xdata, mean, '-', color='g')
plt.plot(xdata, var, '-', color='r')
plt.ylim([-ylim, ylim])
plt.xlim([-show_range, show_range])
mean = np.array(mean)
var = np.array(var)
plt.fill_between(xdata, mean - var, mean + var, color='g', alpha=0.1)
plt.tight_layout()
plt.savefig(savePath, format='png', bbox_inches='tight')
def plot_err_props(df, conditions, add_cond, ax, formatting, **kwargs):
filt = (df[list(conditions)] == pd.Series(conditions))  # api: pandas.Series
import unittest
import pandas as pd
import nlu
from memory_profiler import memory_usage
import tests.test_utils as t
import tests.nlu_hc_tests.secrets as sct
class PandasTests(unittest.TestCase):
def test_memory_batching_benchmark(self):
test = PandasTests().test_data_batching
ms = []
for i in range(10):
m = memory_usage((test ))
ms.append(sum(m))
d = pd.DataFrame(m)
d = pd.DataFrame(ms)
ax = d.plot(title=f'Not Optimized, mean of mem used in {10} runs = {sum(ms)/len(ms)} and max mem usage = {max(ms)}in 10K rows').figure.savefig('10_loop-10k_bert_NOT_optimized_v2.png')
# 10k = 3min 13s optimized
# 10k = 3 min 37s NOT optimized
def test_data_batching(self):
test = PandasTests().test_py_arrow
ms = []
data = {'text':[]}
for i in range(10000):
data['text'].append("Hello WOrld I like RAM")
d = pd.DataFrame(data)
print('GONNA PREDICT!')
df = nlu.load('ner').predict(d)
for c in df.columns: print(df[c])
def test_pyarrow_memory(self):
test = PandasTests().test_py_arrow
ms = []
for i in range(10):
m = memory_usage((test ))
ms.append(sum(m))
d = pd.DataFrame(m)
d = pd.DataFrame(ms)
ax = d.plot(title=f'Not Optimized, mean of mem used in {10} runs = {sum(ms)/len(ms)} and max mem usage = {max(ms)}in 10K rows').figure.savefig('10_loop-10k_bert_NOT_optimized_v2.png')
# 10k = 3min 13s optimized
# 10k = 3 min 37s NOT optimized
def test_py_arrow(self):
pipe = nlu.load('bert', verbose=True)
# data = self.load_pandas_dataset()
data = pd.read_csv('/home/ckl/Documents/self/steuern/2021/JUL/pakistan/en_lang_filtered_sample.csv')[0:1000]
big_df = data.append(data)
for i in range(10):big_df = big_df.append(big_df)
big_df
df = pipe.predict(big_df[:10000] , output_level='document')
for c in df.columns: print(df[c])
def test_modin(self):
# ## works with RAY and DASK backends
df_path = '/home/ckl/old_home/Documents/freelance/jsl/nlu/nlu4realgit/tests/datasets/covid/covid19_tweets.csv'
pdf = pd.read_csv(df_path)  # api: pandas.read_csv
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 24 09:02:16 2017
@author: hp
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
import stats
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from sklearn import cross_validation, metrics
train_df=pd.read_csv('...train.csv')
test_df=pd.read_csv('...test.csv')
df=pd.concat([train_df.loc[:,'MSSubClass':'SaleCondition'],test_df.loc[:,'MSSubClass':'SaleCondition']])
'''
df.reset_index(inplace=True)
df.drop('index',axis=1,inplace=True)
df=df.reindex_axis(train_df.columns,axis=1)
'''
# Data transformation
p=train_df.loc[:,'SalePrice']
train_df.drop('SalePrice',axis=1,inplace=True)
for col in train_df.columns:
if train_df[col].dtype!=np.object:
if train_df[col].dropna().skew()>0.75:
train_df[col]=np.log(train_df[col]+1)
else:
pass
else:
pass
for col in test_df.columns:
if test_df[col].dtype!=np.object:
if test_df[col].dropna().skew()>0.75:
test_df[col]=np.log(test_df[col]+1)
else:
pass
else:
pass
# Initial data exploration
train_df['SalePrice'].describe()
#sns.distplot(pd.DataFrame(train_df['SalePrice']))
# Check the category value counts
def cat_num(df,columns):
print(df[columns].value_counts())
def cat_null(df,columns,value):
df.loc[df[columns].isnull(),columns]=value
#MZ
cat_num(test_df,'MSZoning')
cat_num(train_df,'MSSubClass')
test_df['MSZoning'].groupby(test_df['MSSubClass']).agg('count')
pd.crosstab(test_df['MSZoning'],test_df['MSSubClass'])
test_df.loc[test_df['MSZoning'].isnull(),'MSZoning']
print(test_df[test_df['MSZoning'].isnull() == True])
test_df.loc[(test_df['MSZoning'].isnull())&(test_df['MSSubClass']==20),'MSZoning']='RL'
test_df.loc[(test_df['MSZoning'].isnull())&(test_df['MSSubClass']==30),'MSZoning']='RM'
test_df.loc[(test_df['MSZoning'].isnull())&(test_df['MSSubClass']==70),'MSZoning']='RM'
#Utilities
cat_num(test_df,'Utilities')
cat_num(train_df,'Utilities')
test_df.drop(['Utilities'],axis=1,inplace=True)
train_df.drop(['Utilities'],axis=1,inplace=True)
#Exterior
cat_num(test_df,'Exterior1st')
cat_num(test_df,'Exterior2nd')
pd.crosstab(test_df['Exterior1st'],test_df['Exterior2nd'])
print(test_df[test_df['Exterior1st'].isnull()==True])
test_df['Exterior1st'][test_df['Exterior1st'].isnull()]='VinylSd'
test_df['Exterior2nd'][test_df['Exterior2nd'].isnull()]='VinylSd'
# MasVnrType & MasVnrArea
print(test_df[['MasVnrType','MasVnrArea']][test_df['MasVnrType'].isnull()==True])
print(train_df[['MasVnrType','MasVnrArea']][train_df['MasVnrType'].isnull()==True])
cat_num(test_df, 'MasVnrType')
cat_num(train_df, 'MasVnrType')
test_df['MasVnrType'][test_df['MasVnrType'].isnull()]='None'
train_df['MasVnrType'][train_df['MasVnrType'].isnull()]='None'
test_df['MasVnrArea'][test_df['MasVnrArea'].isnull()]=0
train_df['MasVnrArea'][train_df['MasVnrArea'].isnull()]=0
#Bsmt
columns=['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinSF1','BsmtFinType2','BsmtFinSF2','BsmtUnfSF','BsmtFullBath','BsmtHalfBath']
cat_columns=['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2']
print(test_df[columns][test_df['BsmtFinType2'].isnull()==True])
print(train_df[columns][train_df['BsmtFinType2'].isnull()==True])
cat_num(test_df,'BsmtQual')
cat_num(test_df,'BsmtCond')
cat_num(test_df,'BsmtExposure')
cat_num(test_df,'BsmtFinType1')
cat_num(test_df,'BsmtFinType2')
cat_num(train_df,'BsmtQual')
cat_num(train_df,'BsmtCond')
cat_num(train_df,'BsmtExposure')
cat_num(train_df,'BsmtFinType1')
cat_num(train_df,'BsmtFinType2')
cat_null(test_df,'BsmtFinSF1',0)
cat_null(test_df,'BsmtFinSF2',0)
cat_null(test_df,'BsmtUnfSF',0)
cat_null(test_df,'BsmtFullBath',0)
cat_null(test_df,'BsmtHalfBath',0)
for col in cat_columns:
cat_null(train_df,col,'None')
pd.crosstab(test_df['BsmtQual'],test_df['BsmtCond'])
test_df.loc[(test_df['BsmtQual'].isnull())&(test_df['BsmtCond']=='TA'),'BsmtQual']='TA'
test_df.loc[(test_df['BsmtQual'].isnull())&(test_df['BsmtCond']=='Fa'),'BsmtQual']='TA'
for col in cat_columns:
cat_null(test_df,col,'None')
test_df[test_df.columns[test_df.isnull().any()].tolist()].isnull().sum()
train_df[train_df.columns[train_df.isnull().any()].tolist()].isnull().sum()
#df['BsmtFinType2'].value_counts()
#TotalBsmtSF
TB=pd.concat([train_df.TotalBsmtSF,train_df.SalePrice],axis=1)
TB.plot.scatter(x='TotalBsmtSF',y='SalePrice',ylim=(0,800000),xlim=(0,7000))
test_df.loc[test_df['TotalBsmtSF'].isnull(),'TotalBsmtSF']=0
#KitchenQual
test_df['KitchenQual'].value_counts()
pd.crosstab(train_df['KitchenQual'],train_df['KitchenAbvGr'])
test_df.loc[test_df['KitchenQual'].isnull(),'KitchenQual']='TA'
test_df.drop(['Alley','PoolQC','Fence','MiscFeature'],axis=1,inplace=True)
train_df.drop(['Alley','PoolQC','Fence','MiscFeature'],axis=1,inplace=True)
#lotarea
test_df['SqrtLotArea'] = np.sqrt(test_df['LotArea'])
train_df['SqrtLotArea'] = np.sqrt(train_df['LotArea'])
test_df['LotFrontage'].corr(test_df['LotArea'])#0.64
train_df['LotFrontage'].corr(train_df['LotArea'])#0.42
test_df['LotFrontage'].corr(test_df['SqrtLotArea'])#0.7
train_df['LotFrontage'].corr(train_df['SqrtLotArea'])#0.6
test_df['LotFrontage'][test_df['LotFrontage'].isnull()]=test_df['SqrtLotArea'][test_df['LotFrontage'].isnull()]
train_df['LotFrontage'][train_df['LotFrontage'].isnull()]=train_df['SqrtLotArea'][train_df['LotFrontage'].isnull()]
#Functional
test_df['Functional'].value_counts()
test_df['Functional'][test_df['Functional'].isnull()]='Typ'
#FireplaceQu
train_df['GarageFinish'].value_counts()
test_df['GarageFinish'].value_counts()
pd.crosstab(test_df['FireplaceQu'],test_df['Fireplaces'])
test_df['Fireplaces'][test_df['FireplaceQu'].isnull()==True].describe()
train_df['Fireplaces'][train_df['FireplaceQu'].isnull()==True].describe()
test_df['FireplaceQu'][test_df['FireplaceQu'].isnull()]='None'
train_df['FireplaceQu'][train_df['FireplaceQu'].isnull()]='None'
#Garage
col=['GarageType','GarageYrBlt','GarageFinish','GarageCars','GarageArea','GarageQual','GarageCond']
print(test_df[col][test_df['GarageType'].isnull()==True])
for columns in col:
if test_df[columns].dtype==np.object:
test_df[columns][test_df[columns].isnull()==True]='None'
else:
test_df[columns][test_df[columns].isnull()==True]=0
for columns in col:
if train_df[columns].dtype==np.object:
train_df[columns][train_df[columns].isnull()==True]='None'
else:
train_df[columns][train_df[columns].isnull()==True]=0
#SaleType
test_df['SaleType'].value_counts()
test_df['SaleType'][test_df['SaleType'].isnull()==True]='WD'
#Electrical
train_df['Electrical'].value_counts()
train_df['Electrical'][train_df['Electrical'].isnull()==True]='SBrkr'
for col in test_df.columns:
if test_df[col].dtype!=train_df[col].dtype:
print(col,test_df[col].dtype,train_df[col].dtype)
cols=['BsmtFinSF1','BsmtFinSF2','BsmtUnfSF','TotalBsmtSF','BsmtFullBath','BsmtHalfBath','GarageCars','GarageArea']
for col in cols:
tm=test_df[col].astype(pd.np.int64)
tm=pd.DataFrame({col:tm})
test_df.drop(col,axis=1,inplace=True)
test_df=pd.concat([test_df,tm],axis=1)
for col in cols:
tm=train_df[col].astype(pd.np.int64)
tm=pd.DataFrame({col:tm})
train_df.drop(col,axis=1,inplace=True)
train_df=pd.concat([train_df,tm],axis=1)
test_df = test_df.replace({"MSSubClass": {20: "A", 30: "B", 40: "C", 45: "D", 50: "E",
60: "F", 70: "G", 75: "H", 80: "I", 85: "J",
90: "K", 120: "L", 150: "M", 160: "N", 180: "O", 190: "P"}})
train_df = train_df.replace({"MSSubClass": {20: "A", 30: "B", 40: "C", 45: "D", 50: "E",
60: "F", 70: "G", 75: "H", 80: "I", 85: "J",
90: "K", 120: "L", 150: "M", 160: "N", 180: "O", 190: "P"}})
test_df=test_df.replace({'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'GarageFinish':{'Fin':3,'RFn':2,'Unf':1, 'None':0}})
train_df=train_df.replace({'GarageFinish':{'Fin':3,'RFn':2,'Unf':1,'None':0}})
#heatingqc
test_df=test_df.replace({'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'BsmtExposure':{'Gd':5,'Av':4,'Mn':3,'No':2, 'NA': 1,'None':0}})
train_df=train_df.replace({'BsmtExposure':{'Gd':5,'Av':4,'Mn':3,'No':2, 'NA': 1,'None':0}})
test_df=test_df.replace({'BsmtFinType2':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
train_df=train_df.replace({'BsmtFinType2':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
test_df=test_df.replace({'BsmtFinType1':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
train_df=train_df.replace({'BsmtFinType1':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
# Quantitative variables
sns.distplot(test_df['SalePrice'], fit='norm')
sns.distplot(test_df['GrLivArea'], fit='norm')
sns.distplot(test_df['LotArea'], fit='norm')
sns.distplot(test_df['MasVnrArea'], fit='norm')  # drop?
sns.distplot(test_df['2ndFlrSF'], fit='norm')  # drop
sns.distplot(test_df['WoodDeckSF'], fit='norm')  # drop
sns.distplot(test_df['OpenPorchSF'], fit='norm')  # drop
sns.distplot(test_df['EnclosedPorch'], fit='norm')  # drop
sns.distplot(test_df['3SsnPorch'], fit='norm')  # drop
sns.distplot(test_df['ScreenPorch'], fit='norm')  # drop
sns.distplot(test_df['PoolArea'], fit='norm')  # drop
plt.scatter(train_df['Heating'],train_df['SalePrice'])
sns.boxplot(x=train_df['Heating'],y=train_df['SalePrice'])
train_df['Heating'].value_counts()
sns.distplot(test_df['MiscVal'], fit='norm')  # drop
sns.distplot(test_df['BsmtFinSF1'], fit='norm')  # drop
sns.distplot(test_df['BsmtFinSF2'], fit='norm')  # drop
sns.distplot(test_df['BsmtUnfSF'], fit='norm')
sns.distplot(train_df['TotalBsmtSF'], fit='norm')
sns.distplot(test_df['GarageArea'].astype(int), fit='norm')
#TotalBsmtSF
'''
for n in train_df['TotalBsmtSF'].values:
if n>0:
train_df.loc[train_df['TotalBsmtSF']==n,'Bsmt_has']=1
else:
train_df.loc[train_df['TotalBsmtSF']==n,'Bsmt_has']=0
train_df['TotalBsmtSF']=np.log(train_df['TotalBsmtSF'])
for n in test_df['TotalBsmtSF'].values:
if n>0:
test_df.loc[test_df['TotalBsmtSF']==n,'Bsmt_has']=1
else:
test_df.loc[test_df['TotalBsmtSF']==n,'Bsmt_has']=0
'''
#
var='OverallQual'
f,ax=plt.subplots(figsize=(16,8))
data=pd.concat([train_df['KitchenQual'],train_df['SalePrice']],axis=1)
fig=sns.boxplot(x='KitchenQual',y='SalePrice',data=data)
plt.xticks(rotation=90)  # rotation sets the tick-label rotation angle
#train_df['SalePrice'].skew()  # 1.88, right-skewed
#train_df['SalePrice'].kurt()  # 6.54, leptokurtic (sharp peak)
#train_df['logprice']=np.log(train_df['SalePrice'])
#data=pd.concat([train_df['GrLivArea'],train_df['logprice']],axis=1)
#data.plot.scatter(x='GrLivArea',y='logprice')
train_df[train_df.columns[train_df.isnull().any()].tolist()].isnull().sum()#Alley PoolQC Fence MiscFeature
test_df[test_df.columns[test_df.isnull().any()].tolist()].isnull().sum()#Alley PoolQC Fence MiscFeature
#sns.distplot(pd.DataFrame(train_df['logprice']))
#train_df['logprice'].skew()  # 0.12, slightly right-skewed
#train_df['logprice'].kurt()  # 0.8
# Dummy-variable and continuous-value conversion
test_df.drop(['Id','Street','LandSlope','Condition2','RoofMatl','Heating','3SsnPorch','ScreenPorch','PoolArea','MiscVal'],axis=1,inplace=True)
train_df.drop(['Id','Street','LandSlope','Condition2','RoofMatl','Heating','3SsnPorch','ScreenPorch','PoolArea','MiscVal'],axis=1,inplace=True)
test_df.drop(['Id'],axis=1,inplace=True)
train_df.drop(['Id'],axis=1,inplace=True)
n=0
for col in train_df.columns:
if train_df[col].dtype==np.object:
print(col,cat_num(train_df,col))
n+=1
m=0
for col in test_df.columns:
if test_df[col].dtype==np.object:
print(col,cat_num(test_df,col))
m+=1
# Categorical columns may have different levels in train and test, so the dummy columns may not match
df=pd.concat([train_df,test_df],axis=1)
df.reset_index(inplace=True)
# One-hot encoding
dm=pd.DataFrame()
pm=pd.DataFrame()
for col in train_df.columns:
if train_df[col].dtype==np.object:
dm=pd.get_dummies(train_df[col]).rename(columns=lambda x:col+'_'+str(x))
train_df=pd.concat([train_df,dm],axis=1)
train_df.drop(col,axis=1,inplace=True)
pm=pd.concat([pm,dm],axis=1)
dm_test=pd.DataFrame()
pm_test=pd.DataFrame()
for col in test_df.columns:
if test_df[col].dtype==np.object:
dm_test = pd.get_dummies(test_df[col])  # api: pandas.get_dummies
import os
import gc
import time
import torch
import numpy as np
import pandas as pd
from ast import literal_eval
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForTokenClassification, \
AutoConfig
from torch.utils.data import Dataset, DataLoader
from torch import cuda
from sklearn.metrics import accuracy_score
class FeedbackDataset(Dataset):
"""PyTorch Dataset Class
Standard Pytorch Dataset class can read more about it here:
https://pytorch.org/tutorials/beginner/basics/data_tutorial.html
Attributes:
data: A pandas dataframe with id, text, and NER entities.
len: The length of the dataframe.
tokenizer: Encodes text into tokens.
max_len: Maximum length of tokens.
"""
def __init__(self, dataframe, tokenizer, max_len):
"""Inits FeedbackDataset Class with data, tokenizer, and max length."""
self.data = dataframe
self.len = len(dataframe)
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
"""Simple function that returns length of data."""
return self.len
def __getitem__(self, index):
"""Gets input ids, attention mask, labels, word ids as tensors."""
text = self.data.text.iloc[index]
word_labels = self.data.entities.iloc[index]
# Tokenize text.
encoding = self.tokenizer(text.split(),
is_split_into_words = True,
padding = 'max_length',
truncation = True,
max_length = self.max_len
)
word_ids = encoding.word_ids()
# Create labels.
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
if word_idx is None:
label_ids.append(-100)
elif word_idx != previous_word_idx:
label_ids.append(labels_to_ids[word_labels[word_idx]])
else:
if LABEL_ALL_SUBTOKENS:
label_ids.append(labels_to_ids[word_labels[word_idx]])
else:
label_ids.append(-100)
previous_word_idx = word_idx
encoding['labels'] = label_ids
# Convert items to torch tensors.
item = {key: torch.as_tensor(val) for key, val in encoding.items()}
word_ids = [w if w is not None else -1 for w in word_ids]
item['wids'] = torch.as_tensor(word_ids)
return item
def train(epoch):
"""A function to train model."""
tr_loss, tr_accuracy = 0, 0
nb_tr_examples, nb_tr_steps = 0, 0
# Set model to training mode.
model.train()
# Start timer.
t0 = time.time()
for idx, batch in enumerate(training_loader):
ids = batch['input_ids'].to(config['device'], dtype = torch.long)
mask = batch['attention_mask'].to(config['device'], dtype = torch.int64)
labels = batch['labels'].to(config['device'], dtype = torch.long)
loss, tr_logits = model(input_ids = ids, attention_mask = mask,
labels = labels, return_dict = False)
tr_loss += loss.item()
nb_tr_steps += 1
nb_tr_examples += labels.size(0)
# Progress tracker printed to output.
if idx % 200 == 0:
loss_step = tr_loss/nb_tr_steps
time_step = (time.time() - t0)/nb_tr_steps
print(
f"Training loss after {idx:04d} training steps: {loss_step:.4f}",
f"\t {time_step:.4f} sec/step"
)
# Compute Accuracy.
flattened_targets = labels.view(-1) # shape (batch_size * seq_len,)
active_logits = tr_logits.view(-1, model.num_labels) # shape (batch_size * seq_len, num_labels)
flattened_predictions = torch.argmax(active_logits, axis=1) # shape (batch_size * seq_len,)
active_accuracy = labels.view(-1) != -100 # shape (batch_size, seq_len)
labels = torch.masked_select(flattened_targets, active_accuracy)
predictions = torch.masked_select(flattened_predictions, active_accuracy)
tmp_tr_accuracy = accuracy_score(labels.cpu().numpy(), predictions.cpu().numpy())
tr_accuracy += tmp_tr_accuracy
# Gradient Clipping.
torch.nn.utils.clip_grad_norm_(
parameters=model.parameters(), max_norm=config['max_grad_norm']
)
# Backwards Pass.
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_loss = tr_loss / nb_tr_steps
tr_accuracy = tr_accuracy / nb_tr_steps
print(f"Training loss epoch: {epoch_loss}")
print(f"Training accuracy epoch: {tr_accuracy}")
def inference(batch):
"""A helper function to make predictions on batches."""
# Move batch to GPU and make prediction.
ids = batch["input_ids"].to(config['device'])
mask = batch["attention_mask"].to(config['device'])
outputs = model(ids, attention_mask=mask, return_dict=False)
all_preds = torch.argmax(outputs[0], axis=-1).cpu().numpy()
# Iterate through each text and get prediction.
predictions = []
for k,text_preds in enumerate(all_preds):
token_preds = [ids_to_labels[i] for i in text_preds]
prediction = []
word_ids = batch['wids'][k].numpy()
previous_word_idx = -1
for idx,word_idx in enumerate(word_ids):
if word_idx == -1:
pass
elif word_idx != previous_word_idx:
prediction.append(token_preds[idx])
previous_word_idx = word_idx
predictions.append(prediction)
return predictions
def get_predictions(df, loader):
"""A function to get predictions on data."""
# Put model in evaluation mode.
model.eval()
# Get word label predictions.
y_pred2 = []
for batch in loader:
labels = inference(batch)
y_pred2.extend(labels)
final_preds2 = []
for i in range(len(df)):
idx = df.id.values[i]
pred = y_pred2[i] # leave "B" and "I"
preds = []
j = 0
while j < len(pred):
cls = pred[j]
if cls == 'O': j += 1
else: cls = cls.replace('B','I') # spans start with B
end = j + 1
while end < len(pred) and pred[end] == cls:
end += 1
if cls != 'O' and cls != '' and end - j > 7:
final_preds2.append((idx, cls.replace('I-',''),
' '.join(map(str, list(range(j, end))))))
j = end
# Create dataframe with Out-Of-Fold predictions (oof).
oof = pd.DataFrame(final_preds2)
oof.columns = ['id','class','predictionstring']
return oof
def calc_overlap(row):
"""A function used for evaluation.
Calculates the overlap between prediction and
ground truth and overlap percentages used for determining
true positives.
"""
set_pred = set(row.predictionstring_pred.split(' '))
set_gt = set(row.predictionstring_gt.split(' '))
# Length of each and intersection
len_gt = len(set_gt)
len_pred = len(set_pred)
inter = len(set_gt.intersection(set_pred))
overlap_1 = inter / len_gt
overlap_2 = inter/ len_pred
return [overlap_1, overlap_2]
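# Worked example (hypothetical row): prediction indices {3,4,5,6} vs. ground truth {4,5,6,7}
# give an intersection of 3, so overlap_1 = 3/4 = 0.75 and overlap_2 = 3/4 = 0.75; with both
# overlaps >= 0.5 the prediction would count as a potential true positive.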
def score_feedback_comp(pred_df, gt_df):
"""A function that scores for the kaggle Student Writing Competition.
Uses the steps in the evaluation page here:
https://www.kaggle.com/c/feedback-prize-2021/overview/evaluation
"""
gt_df = gt_df[['id','discourse_type','predictionstring']] \
.reset_index(drop=True).copy()
pred_df = pred_df[['id','class','predictionstring']] \
.reset_index(drop=True).copy()
pred_df['pred_id'] = pred_df.index
gt_df['gt_id'] = gt_df.index
# Step 1. all ground truths and predictions for a given class are compared.
joined = pred_df.merge(gt_df,
left_on=['id','class'],
right_on=['id','discourse_type'],
how='outer',
suffixes=('_pred','_gt')
)
joined['predictionstring_gt'] = joined['predictionstring_gt'].fillna(' ')
joined['predictionstring_pred'] = joined['predictionstring_pred'].fillna(' ')
joined['overlaps'] = joined.apply(calc_overlap, axis=1)
# 2. If the overlap between the ground truth and prediction is >= 0.5,
# and the overlap between the prediction and the ground truth >= 0.5,
# the prediction is a match and considered a true positive.
# If multiple matches exist, the match with the highest pair of overlaps is taken.
joined['overlap1'] = joined['overlaps'].apply(lambda x: eval(str(x))[0])
joined['overlap2'] = joined['overlaps'].apply(lambda x: eval(str(x))[1])
joined['potential_TP'] = (joined['overlap1'] >= 0.5) & (joined['overlap2'] >= 0.5)
joined['max_overlap'] = joined[['overlap1','overlap2']].max(axis=1)
tp_pred_ids = joined.query('potential_TP') \
.sort_values('max_overlap', ascending=False) \
.groupby(['id','predictionstring_gt']).first()['pred_id'].values
# 3. Any unmatched ground truths are false negatives
# and any unmatched predictions are false positives.
fp_pred_ids = [p for p in joined['pred_id'].unique() if p not in tp_pred_ids]
matched_gt_ids = joined.query('potential_TP')['gt_id'].unique()
unmatched_gt_ids = [c for c in joined['gt_id'].unique() if c not in matched_gt_ids]
# Get numbers of each type
TP = len(tp_pred_ids)
FP = len(fp_pred_ids)
FN = len(unmatched_gt_ids)
my_f1_score = TP / (TP + 0.5*(FP+FN))
return my_f1_score
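# Example evaluation call (a sketch; `oof` comes from get_predictions above and
# `valid_ids` is a hypothetical list of validation document ids):
#   valid_gt = train_df[train_df['id'].isin(valid_ids)]
#   print('Fold F1:', score_feedback_comp(oof, valid_gt))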
########## CODE THAT TRAINS THE MODEL ##########
# Gets rid of warnings in the Dataset Class when using tokenizer before forks.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
DOWNLOADED_MODEL_PATH = '../longformer-base-4096'
LABEL_ALL_SUBTOKENS = True
# Set desired version number of trained model.
VER = 1
# Describe training configurations.
config = {'model_name': 'allenai/longformer-base-4096',
'max_length': 1024,
'train_batch_size':2,
'valid_batch_size':2,
'epochs':6,
'learning_rates': [2.5e-5, 2.5e-5, 2.5e-6, 2.5e-6, 2.5e-7, 2.5e-7],
'max_grad_norm':10,
'device': torch.device("cuda" if torch.cuda.is_available() else "cpu")}
print('GPU detected') if torch.cuda.is_available() else print('GPU not detected')
# Create dictionary that map an output label to a number.
output_labels = ['O', 'B-Lead', 'I-Lead', 'B-Position', 'I-Position',
'B-Claim', 'I-Claim', 'B-Counterclaim', 'I-Counterclaim',
'B-Rebuttal', 'I-Rebuttal', 'B-Evidence', 'I-Evidence',
'B-Concluding Statement', 'I-Concluding Statement']
labels_to_ids = {v:k for k,v in enumerate(output_labels)}
ids_to_labels = {k:v for k,v in enumerate(output_labels)}
# Import data generated from set-up code.
train_df = pd.read_csv('../feedback-prize-2021/train.csv')
train_fold = pd.read_csv('../preprocessed/train_folds.csv')
test_texts = pd.read_csv('../preprocessed/test_texts.csv')
train_texts = pd.read_csv('../preprocessed/train_NER.csv')
train_fold = train_fold[['id', 'kfold']]
# Pandas stores the labels as strings; convert them back into lists.
train_texts.entities = train_texts.entities.apply(lambda x: literal_eval(x))
# Creates a list of dataframes indexed by the fold number.
folds_texts = []
for fold_num in range(len(train_fold.kfold.unique())):
fold = train_fold[train_fold.kfold == fold_num]
fold_texts = train_texts.loc[train_texts['id'].isin(fold['id'])]
folds_texts.append(fold_texts)
train_params = {'batch_size': config['train_batch_size'],
'shuffle': True,
'num_workers': 2,
'pin_memory':True
}
valid_params = {'batch_size': config['valid_batch_size'],
'shuffle': False,
'num_workers': 2,
'pin_memory':True
}
tokenizer = AutoTokenizer.from_pretrained(DOWNLOADED_MODEL_PATH)
# TRAINING LOOP WITH K-FOLD CROSS VALIDATION.
for nb_fold in range(len(folds_texts)):
print(f"### Training Fold: {nb_fold + 1} out of {len(folds_texts)} ###")
# Load blank pre-trained model for each fold.
config_model = AutoConfig.from_pretrained(DOWNLOADED_MODEL_PATH+'/config.json')
model = AutoModelForTokenClassification.from_pretrained(
DOWNLOADED_MODEL_PATH+'/pytorch_model.bin', config = config_model
)
model.to(config['device'])
optimizer = torch.optim.Adam(
params = model.parameters(), lr = config['learning_rates'][0]
)
# Create validation set and training set.
validation_set = FeedbackDataset(
folds_texts[nb_fold], tokenizer, config['max_length']
)
training_set = FeedbackDataset(
pd.concat(folds_texts[:nb_fold] + folds_texts[nb_fold+1:])  # api: pandas.concat
"""
Tabular module
"""
import os
# Conditional import
try:
import pandas as pd
PANDAS = True
except ImportError:
PANDAS = False
from ..base import Pipeline
class Tabular(Pipeline):
"""
Splits tabular data into rows and columns.
"""
def __init__(self, idcolumn=None, textcolumns=None, content=False):
"""
Creates a new Tabular pipeline.
Args:
idcolumn: column name to use for row id
textcolumns: list of columns to combine as a text field
content: if True, a dict per row is generated with all fields. If content is a list, a subset of fields
is included in the generated rows.
"""
if not PANDAS:
raise ImportError('Tabular pipeline is not available - install "pipeline" extra to enable')
self.idcolumn = idcolumn
self.textcolumns = textcolumns
self.content = content
def __call__(self, data):
"""
Splits data into rows and columns.
Args:
data: input data
Returns:
list of (id, text, tag)
"""
items = [data] if not isinstance(data, list) else data
# Combine all rows into single return element
results = []
dicts = []
for item in items:
# File path
if isinstance(item, str):
_, extension = os.path.splitext(item)
extension = extension.replace(".", "").lower()
if extension == "csv":
df = pd.read_csv(item)
results.append(self.process(df))
# Dict
if isinstance(item, dict):
dicts.append(item)
# List of dicts
elif isinstance(item, list):
df = pd.DataFrame(item)
results.append(self.process(df))
if dicts:
df = pd.DataFrame(dicts)  # api: pandas.DataFrame
#
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import decimal
from decimal import Decimal
from boto3.dynamodb.conditions import Key, Attr
import pandas as pd
TABLE_NAME = 'Tokens'
TOKEN_LIST = ["<PASSWORD>", "<PASSWORD>", "<PASSWORD>", "<PASSWORD>", "<PASSWORD>"]
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
# Helper function to convert decimal object to float.
def decimal_to_float(obj):
if isinstance(obj, Decimal):
return float(obj)
raise TypeError
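# Example (illustrative only): serializing a DynamoDB item that contains Decimal values.
#   item = {'name': 'token', 'tvl': Decimal('123.45'), 'count': Decimal('7')}
#   json.dumps(item, cls=DecimalEncoder)   # '{"name": "token", "tvl": 123.45, "count": 7}'
#   decimal_to_float(Decimal('123.45'))    # 123.45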
# dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
dynamodb = boto3.resource('dynamodb', region_name='ap-northeast-1')
table = dynamodb.Table(TABLE_NAME)
df = pd.DataFrame()
def main():
i = 0
for token in TOKEN_LIST:
try:
datetime, tvl = [], []
response = table.query(
KeyConditionExpression=Key('name').eq(token)
)
print("Token {} has been extracted successfully.".format(token))
# print(response['Items'])
for item in response['Items']:
datetime.append(item["datetime"])
tvl.append(decimal_to_float(item["tvl"]["USD"]["value"]))
if i == 0:
df_temp = pd.DataFrame(tvl, columns=[token], index=datetime)  # api: pandas.DataFrame
# import packages
from finrl.apps import config
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
import datetime
from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader
from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split
from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.drl_agents.stablebaselines3.models import DRLAgent
from finrl.finrl_meta.data_processor import DataProcessor
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline
from pprint import pprint
import sys
sys.path.append("../FinRL-Library")
import itertools
import os
if not os.path.exists("./" + config.DATA_SAVE_DIR):
os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
# read data
df = pd.read_csv("new.csv")  # api: pandas.read_csv
from typing import List
import albumentations as A
import pandas as pd
import pytest
from torch.utils.data import DataLoader, Dataset
from fulmo.datasets import MultiDomainCsvDataset
from fulmo.readers import Augmentor, ReaderCompose
from ..utils import NpyGenerator
@pytest.fixture
def reader() -> ReaderCompose:
"""Create a new instance of `ReaderCompose`"""
open_fn = ReaderCompose(
[
NpyGenerator(output_key="feature_1", shape=(256, 256, 3)),
NpyGenerator(output_key="feature_2", shape=(512, 512, 3)),
]
)
return open_fn
@pytest.fixture
def x1_transforms() -> Augmentor:
"""Create a new instance of `Augmentor`"""
train = Augmentor("feature_1", "transformed_feature_1", augment_fn=lambda x: x)
return train
@pytest.fixture
def x2_transforms() -> Augmentor:
"""Create a new instance of `Augmentor`"""
compose_transforms = A.Compose([A.Resize(128, 128, always_apply=True, p=1.0)])
train = Augmentor("feature_2", "transformed_feature_2", augment_fn=lambda x: compose_transforms(image=x)["image"])
return train
@pytest.fixture
def dataset(reader: ReaderCompose, x1_transforms: Augmentor, x2_transforms: Augmentor) -> Dataset:
"""Create a new instance of `Dataset`"""
train_df = pd.DataFrame([{"feature_1": "", "feature_2": ""}] * 32)  # api: pandas.DataFrame
import os
import click
import numpy as np
import pandas as pd
import os.path as op
import nibabel as nib
from tqdm import tqdm
from glob import glob
from joblib import Parallel, delayed
from nilearn import masking, image
from nistats.design_matrix import make_first_level_design_matrix
from nistats.first_level_model import run_glm
from nistats.contrasts import compute_contrast, expression_to_contrast_vector
from nistats.second_level_model import SecondLevelModel
def fit_firstlevel(bids_dir, sub, acq, space, out_dir):
sub_base = op.basename(sub)
ext = 'func.gii' if 'fs' in space else 'desc-preproc_bold.nii.gz'
funcs = sorted(glob(op.join(
sub, 'func', f'*{acq}*_space-{space}*{ext}'
)))
to_save = ['resp', 'cardiac', 'interaction', 'hrv', 'rvt']
cons = {}
for func in funcs:
f_base = op.basename(func).split('space')[0]
ricor = op.join(bids_dir, 'derivatives', 'physiology', sub_base, 'physio', f_base + 'recording-respcardiac_desc-retroicor_regressors.tsv')
if not op.isfile(ricor):
continue
ricor = pd.read_csv(ricor, sep='\t')
conf = op.join(bids_dir, 'derivatives', 'fmriprep', sub_base, 'func', f_base + 'desc-confounds_regressors.tsv')
conf = pd.read_csv(conf, sep='\t')
cols = [col for col in conf.columns if 'cosine' in col]
cols += ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
conf = conf.loc[:, cols]
dm = | pd.concat((conf, ricor), axis=1) | pandas.concat |
#!/usr/bin/env python
import os
import sys
import pandas as pd
import argparse
import configparser
import multiprocessing
import time
import datetime
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import tqdm
import statsmodels.stats.multitest as multitest
import snps
import genes
import interactions
import summary
import eqtls
import aFC
def parse_tissues(user_tissues, match_tissues, eqtl_project, db):
if eqtl_project:
sql = '''SELECT * FROM meta_eqtls WHERE project = '{}' '''.format(
eqtl_project)
else:
sql = '''SELECT * FROM meta_eqtls'''
df = pd.DataFrame()
with db.connect() as con:
df = pd.read_sql(sql, con=con)
db.dispose()
tissues = []
if match_tissues:
user_tissues = match_tissues[0]
if user_tissues:
matched_df = []
matched_tissues = []
to_omit = []
not_tissues = []
for u_tissue in user_tissues:
u_df = df[
(df['name'] == u_tissue) |
(df['tags'].str.contains(
r'\b{}\b'.format(u_tissue), case=False))
]
if u_df.empty:
if u_tissue.startswith('-'):
to_omit.append(u_tissue)
else:
not_tissues.append(u_tissue)
else:
matched_df.append(u_df)
matched_tissues.append(u_tissue)
error_msg = 'Program aborting:\n\t{}\nnot found in database.'
if (len(matched_df) == 0 or len(not_tissues) > 0) and len(to_omit) == 0:
print(error_msg.format('\n\t'.join(not_tissues)))
print('\nPlease use one of the following. ' +
'Tissue names are case sensitive:')
list_eqtl_tissues(db)
sys.exit()
user_df = pd.DataFrame()
if len(to_omit) > 0 and len(matched_tissues) == 0:
user_df = df
else:
user_df = pd.concat(matched_df)
if match_tissues:
for i in range(len(matched_tissues)):
user_df = user_df[
user_df['tags'].str.contains(
r'\b{}\b'.format(matched_tissues[i]), case=False)]
user_df = user_df.drop_duplicates()
for i in range(len(to_omit)):
user_df = user_df[
~user_df['tags'].str.contains(
r'\b{}\b'.format(to_omit[i][1:]), case=False)]
if len(user_df['project'].drop_duplicates()) > 1 and not eqtl_project:
# Ensure tissues are from same eQTL project
print('FATAL: eQTL tissues are from different projects. ',
'Add another tag to fine-tune match',
'or use \'--eqtl-project\' to specify project.')
print(user_df[['name', 'project']].to_string(index=False))
sys.exit()
tissues = user_df[['name', 'project']]
else: # Use GTEx database as default
tissues = df[df['project'] == 'GTEx'][[
'name', 'project']]
return tissues
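# Illustrative usage sketch (not part of the original pipeline). `db` is assumed
# to be a SQLAlchemy engine (see create_engine/NullPool imported above) pointing
# at a database containing the `meta_eqtls` table; the 'adipose' tag is only an
# example and must exist in that table.
def _example_parse_tissues(db):
    # Match every GTEx tissue tagged 'adipose'. match_tissues is doubly nested
    # because it normally comes from argparse with action='append'.
    return parse_tissues(user_tissues=None, match_tissues=[['adipose']],
                         eqtl_project='GTEx', db=db)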
def parse_hic(
match_tissues,
include_cell_line,
exclude_cell_line,
restriction_enzymes,
db):
''' user parameters -r, -n and -x.
Args:
restriction_enzymes: space-delimited list of restriction enzymes from
user. Limits program to Hic libraries restricted by specified enzyme.
include_cell_line: space-delimited list of cell_lines from -n.
exclude_cell_line: space-delimited list of cell_lines from -x
Returns:
hic_df: a dataframe columns(library, enzyme, rep_count)
'''
sql = '''SELECT library, tags, enzyme, rep_count FROM meta_hic'''
df = pd.DataFrame()
with db.connect() as con:
df = pd.read_sql(sql, con=con)
db.dispose()
hic_df = pd.DataFrame()
if match_tissues:
matched_df = []
matched_tissues = []
to_omit = []
not_tissues = []
for u_tissue in match_tissues[0]:
u_df = df[
(df['library'] == u_tissue) |
(df['tags'].str.contains(
r'\b{}\b'.format(u_tissue), case=False))
]
if u_df.empty:
                if u_tissue.startswith('-'):
to_omit.append(u_tissue)
else:
not_tissues.append(u_tissue)
else:
matched_df.append(u_df)
matched_tissues.append(u_tissue)
error_msg = 'Program aborting:\n\t{}\ndid not match any Hi-C library.'
if (len(matched_df) == 0 or len(not_tissues) > 0) and len(to_omit) == 0:
print(error_msg.format('\n\t'.join(not_tissues)))
print(('Use -t and -n to include specific eQTL tissues'
' and Hi-C libraries. Library names are case sensitive:'))
sys.exit('\n\t{}'.format('\n\t'.join(df['library'].tolist())))
if len(matched_df) == 0 and len(to_omit) > 0:
hic_df = df
else:
hic_df = pd.concat(matched_df)
if match_tissues:
for i in range(len(matched_tissues)):
hic_df = hic_df[
hic_df['tags'].str.contains(
r'\b{}\b'.format(matched_tissues[i]), case=False)]
for i in range(len(to_omit)):
hic_df = hic_df[
~hic_df['tags'].str.contains(
r'\b{}\b'.format(to_omit[i][1:]), case=False)]
hic_df = hic_df.drop_duplicates()
elif include_cell_line and len(include_cell_line) > 0:
validate_input(include_cell_line, df['library'])
hic_df = df[df['library'].str.upper().isin(
[c.upper() for c in include_cell_line])]
elif exclude_cell_line and len(exclude_cell_line) > 0:
validate_input(exclude_cell_line, df['library'])
hic_df = df[~df['library'].str.upper().isin(
[c.upper() for c in exclude_cell_line])]
else:
hic_df = df
if restriction_enzymes and len(restriction_enzymes) > 0:
validate_input(restriction_enzymes, df['enzyme'].drop_duplicates())
hic_df = hic_df[hic_df['enzyme'].str.upper().isin(
[c.upper() for c in restriction_enzymes])]
if not hic_df.empty:
return hic_df.drop_duplicates()
else:
return df.drop_duplicates()
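# Illustrative usage sketch (not part of the original pipeline); the cell line
# and enzyme names are examples and must exist in the `meta_hic` table.
def _example_parse_hic(db):
    # Restrict to one Hi-C library digested with MboI.
    return parse_hic(match_tissues=None,
                     include_cell_line=['GM12878'],
                     exclude_cell_line=None,
                     restriction_enzymes=['MboI'],
                     db=db)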
def validate_input(input_params, params):
input_params_upper = [c.upper() for c in input_params]
not_found = set(input_params_upper).difference(
set(params.str.upper().tolist()))
if len(not_found) > 0:
print('FATAL: The following parameters are not recognized:')
for param in not_found:
print('\t{}'.format(input_params[input_params_upper.index(param)]))
sys.exit(
'Please ensure parameter is from the following:\n\t{}'.format(
'\n\t'.join(params.tolist())))
def calc_afc(eqtl_df, genotypes_fp, expression_dir, covariates_dir,
eqtl_project, output_dir, fdr_threshold, bootstrap, num_processes):
if 'adj_pval' in eqtl_df.columns: # Exclude non-significant eQTLs.
eqtl_df = eqtl_df[eqtl_df['adj_pval'] <= fdr_threshold]
if eqtl_df.empty:
print('Warning: No significant eQTL associations found.\nExiting.')
sys.exit()
eqtl_df = aFC.main(
eqtl_df, genotypes_fp, expression_dir, covariates_dir,
eqtl_project, output_dir, bootstrap, num_processes)
return eqtl_df
def list_eqtl_databases(db):
sql = '''SELECT * FROM meta_eqtls'''
with db.connect() as con:
df = pd.read_sql(sql, con=con)
df = df[['project']].drop_duplicates().reset_index()
for idx, row in df.iterrows():
print('{}\t{}'.format(idx + 1, row['project']))
db.dispose()
def list_eqtl_tissues(db):
sql = '''SELECT * FROM meta_eqtls'''
with db.connect() as con:
        df = pd.read_sql(sql, con=con)
for idx, row in df.iterrows():
print('{}\t{}'.format(idx + 1, row['name']))
db.dispose()
def list_hic_libraries(db):
sql = '''SELECT library, tissue FROM meta_hic'''
with db.connect() as con:
        df = pd.read_sql(sql, con=con)
for idx, row in df.drop_duplicates().iterrows():
print('{}\t{}'.format(idx + 1, row['library']))
db.dispose()
def list_enzymes(db):
sql = '''SELECT DISTINCT enzyme FROM meta_hic'''
with db.connect() as con:
        df = pd.read_sql(sql, con=con)
for idx, row in df.drop_duplicates().iterrows():
print('{}\t{}'.format(idx + 1, row['enzyme']))
db.dispose()
def list_tissue_tags(db):
sql = '''SELECT tags FROM {}'''
hic_df = pd.DataFrame()
eqtl_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
# @Author : <NAME>
# @Desc :
# @Time : 2020/1/17 19:36
from nlp_base.clean_str import *
import pandas as pd
import gc
from sklearn.model_selection import train_test_split
class SampleSplit():
def __init__(self, clean_pth):
self.SEED = 10
self.CLS = CleanStr()
self.CLEAN_DATA_PTH = clean_pth
def shuffle_sample(self, sample, sample_per):
np.random.seed(self.SEED)
shuffle_indices = np.random.permutation(np.arange(len(sample)))
shuffle_sample = sample[shuffle_indices]
dev_sample_index = -1 * int(sample_per * float(len(sample)))
sample_train, sample_test = shuffle_sample[:dev_sample_index], shuffle_sample[dev_sample_index:]
return sample_train, sample_test
def labels_to_vector(self, labels_list, labels_class=None):
        '''
        :param labels_list: raw labels, list
        :param labels_class: label class names, list
        :return: one-hot label matrix and the array of label classes
        '''
labels_vec = []
if set(labels_list) == set(labels_class):
labels_class = np.array(labels_class)
for li in labels_list:
tmp_vec = np.zeros((len(labels_class)), dtype=int)
tmp_vec[np.where(labels_class == li.strip())] = 1
labels_vec.append(list(tmp_vec))
        else:
            print('Label classes do not match labels_class; falling back to the classes found in labels_list')
            labels_class = np.array(sorted(set(labels_list)))  # materialise the set so np.where works
for li in labels_list:
tmp_vec = np.zeros((len(labels_class)), dtype=int)
tmp_vec[np.where(labels_class == li.strip())] = 1
labels_vec.append(list(tmp_vec))
return np.array(labels_vec), np.array(labels_class)
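    # Illustrative usage (kept as a comment so nothing runs at import time):
    #   splitter = SampleSplit('clean.csv')
    #   classes = ['make up', 'skin care', 'bodycare', 'fashion', 'others']
    #   vecs, names = splitter.labels_to_vector(classes, classes)
    #   # vecs is a 5x5 one-hot (identity) matrix; the happy path expects
    #   # set(labels_list) == set(labels_class).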
def split_dataset_(self, id_text):
        # Group records by class
        print('Splitting into training and test sets ---------')
real_label_list = list(map(lambda x: x.lower(), self.LABELS))
train_dataset, test_dataset = [], []
counti = 0
for l1 in real_label_list:
labels_list = []
for indxi, line in enumerate(id_text):
labels = line[-1].strip()
if labels == l1:
labels_list.append(line)
elif labels not in real_label_list:
counti += 1
                    print('Record %d is invalid, %d invalid so far ----' % (indxi, counti))
                    print('Content: {}'.format(line))
            # 0.2 is an assumed hold-out fraction; shuffle_sample() needs an explicit split ratio.
            labels_train, labels_test = self.shuffle_sample(np.array(labels_list, dtype=object), 0.2)
            print("Class %s train/test split: %d/%d, totals match: %s" % (l1, len(labels_train), len(labels_test),
                                                                          (len(labels_list) == len(labels_train) + len(labels_test))))
train_dataset.append(labels_train)
test_dataset.append(labels_test)
return train_dataset, test_dataset
def write_txt(self, split_text):
counti = 0
for indxi, line in enumerate(split_text):
labels = line[-1].strip()
real_label_list = list(map(lambda x: x.lower(), self.LABELS))
if labels in real_label_list:
                # Group by class
label_name = '_'.join(labels.split(' '))
label_pth = os.path.join(self.INPUT_DATA_PTH, label_name)
txt_pth = os.path.join(label_pth, '_'.join([label_name, str(indxi)]))
                # Extract the text content
con_text = " ".join(line[:-1])
con_text = re.sub(r"\s{2,}", " ", con_text)
con_text = con_text.strip()
                # Write to file
with open(txt_pth, 'w') as f:
f.write(con_text)
else:
counti += 1
                print('Record %d is invalid, %d invalid so far ----' % (indxi, counti))
                print('Content: {}'.format(line))
return 0
def split_sample_a1(self, split_rate=0.2, rand_seed=10):
        '''
        Stratified split: each class is divided evenly into training and test sets
        '''
labels_target = ['make up', 'skin care', 'bodycare', 'fashion']
labels_cl = ['make up', 'skin care', 'bodycare', 'fashion', 'others']
id_pd = pd.read_csv(self.CLEAN_DATA_PTH)
id_pd.dropna(axis=0, how='any', inplace=True)
id_pd['labels'].replace(list(id_pd['labels'][~id_pd['labels'].isin(labels_target)]), 'others', inplace=True)
X_train, X_test, y_train, y_test = train_test_split(id_pd['content'], id_pd['labels'], test_size=split_rate,
stratify=id_pd['labels'], random_state=rand_seed)
        print('Dataset split complete')
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
print('----------')
print(pd.value_counts(y_train))
print('----------')
print(pd.value_counts(y_test))
y_train, labels_name = self.labels_to_vector(list(y_train), labels_cl)
y_test, labels_name = self.labels_to_vector(list(y_test), labels_cl)
dataset = {'data': [np.array(X_train), np.array(X_test), y_train, y_test],
'labels_name': labels_name}
del id_pd
gc.collect()
return dataset
def split_sample_a2(self, split_rate=0.2, rand_seed=10):
        '''
        Undersample the large classes; leave the small classes unchanged
        '''
labels_target = ['make up', 'skin care', 'bodycare', 'fashion']
labels_cl = ['make up', 'skin care', 'bodycare', 'fashion', 'others']
id_pd = | pd.read_csv(self.CLEAN_DATA_PTH) | pandas.read_csv |
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets.mnist import MNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import os
from src.data import MNISTDatasetLeNet, RotationDatasetLeNet
from src.metrics import se
import src.config as cfg
from src.generative_model.models import LeNet5, LeNet5Regressor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
from collections import OrderedDict
def train(epoch, net, trainloader, optimizer, criterion):
net.train()
loss_list, batch_list = [], []
for i, (images, labels) in enumerate(trainloader):
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
output = net(images)
loss = criterion(output, labels)
loss_list.append(loss.detach().cpu().item())
batch_list.append(i+1)
if i % 10 == 0:
print('Train - Epoch %d, Batch: %d, Loss: %f' % (epoch, i, loss.detach().cpu().item()))
loss.backward()
optimizer.step()
return loss_list, batch_list
def test(net, testloader, criterion, test_size):
net.eval()
total_correct = 0
avg_loss = 0.0
for i, (images, labels) in enumerate(testloader):
images = images.to(device)
labels = labels.to(device)
output = net(images)
avg_loss += criterion(output, labels).sum()
pred = output.detach().max(1)[1]
total_correct += pred.eq(labels.view_as(pred)).sum()
avg_loss /= test_size
acc = float(total_correct) / test_size
loss_avg = avg_loss.detach().cpu().item()
print('Test Avg. Loss: %f, Accuracy: %f' % (loss_avg, acc))
return loss_avg, acc
def test_regressor(net, testloader, criterion, test_size):
net.eval()
res_se = []
avg_loss = 0.0
for i, (images, labels) in enumerate(testloader):
images = images.to(device)
labels = labels.to(device)
output = net(images)
avg_loss += criterion(output, labels).sum()
pred = output.detach()
res_se.extend(se(pred.cpu().squeeze(1), labels.cpu()))
avg_loss /= test_size
acc = np.mean(res_se)
loss_avg = avg_loss.detach().cpu().item()
print('Test Avg. Loss: %f, Accuracy: %f' % (loss_avg, acc))
return loss_avg, acc
def train_and_test(epoch, net, trainloader, testloader, optimizer, criterion, test_size):
train_loss, train_batch = train(epoch, net, trainloader, optimizer, criterion)
test_loss, test_acc = test(net, testloader, criterion, test_size)
return test_loss, test_acc, net
def train_and_test_regressor(epoch, net, trainloader, testloader, optimizer, criterion, test_size):
train_loss, train_batch = train(epoch, net, trainloader, optimizer, criterion)
test_loss, test_acc = test_regressor(net, testloader, criterion, test_size)
return test_loss, test_acc, net
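# Illustrative training-loop sketch (not part of the original script); it assumes
# net/loaders/optimizer/criterion/test_size were built as in create_model_fid_kid()
# below, and simply collects the per-epoch test metrics.
def _example_training_loop(net, trainloader, testloader, optimizer, criterion, test_size, n_epochs=3):
    history = []
    for epoch in range(1, n_epochs + 1):
        test_loss, test_acc, net = train_and_test(
            epoch, net, trainloader, testloader, optimizer, criterion, test_size)
        history.append({'loss': test_loss, 'accuracy': test_acc})
    return pd.DataFrame(history), net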
def create_model_fid_kid():
path_lenet = cfg.model_fidkid_path
os.makedirs(path_lenet, exist_ok=True)
batch_size = 128
if (cfg.experiment == 'min_mnist')|(cfg.experiment == 'max_mnist'):
model_name = 'lenet_mnist'
trainloader = DataLoader(MNISTDatasetLeNet('train',
folder=cfg.data_folder,
data_path=cfg.data_path),
batch_size=batch_size,
shuffle=True,
num_workers=8)
testloader = DataLoader(MNISTDatasetLeNet('test',
folder=cfg.data_folder,
data_path=cfg.data_path),
batch_size=batch_size,
shuffle=True,
num_workers=8)
test_size = len(MNISTDatasetLeNet('test',
folder=cfg.data_folder,
data_path=cfg.data_path))
criterion = nn.CrossEntropyLoss().to(device)
net = LeNet5().to(device)
optimizer = optim.Adam(net.parameters(), lr=2e-3)
test_fct = train_and_test
elif cfg.experiment == 'rotation_dataset':
model_name = 'lenet_rot'
trainloader = DataLoader(RotationDatasetLeNet('train',
folder=cfg.data_folder,
data_path=cfg.data_path),
batch_size=batch_size,
shuffle=True,
num_workers=8)
testloader = DataLoader(RotationDatasetLeNet('test',
folder=cfg.data_folder,
data_path=cfg.data_path),
batch_size=batch_size,
shuffle=True,
num_workers=8)
test_size = len(RotationDatasetLeNet('test',
folder=cfg.data_folder,
data_path=cfg.data_path))
criterion = nn.MSELoss().to(device)
net = LeNet5Regressor().to(device)
optimizer = optim.Adam(net.parameters(), lr=2e-3)
test_fct = train_and_test_regressor
res = | pd.DataFrame(columns=['loss', 'accuracy']) | pandas.DataFrame |
from wonambi import Dataset
from wonambi.detect import DetectSpindle, DetectSlowWave
from mednickdb_pysleep import pysleep_utils
from mednickdb_pysleep import pysleep_defaults
from mednickdb_pysleep.error_handling import EEGError
from typing import List, Tuple, Dict, Union
import pandas as pd
import numpy as np
import warnings
import datetime
import os
import contextlib
import sys
import logging
import inspect
try:
logger = inspect.currentframe().f_back.f_globals['logger']
except KeyError:
logger = logging.getLogger('errorlog')
    logger.info = print
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = DummyFile()
yield
sys.stdout = save_stdout
if 'skip_rem' not in os.environ:
if pysleep_defaults.load_matlab_detectors:
try:
import yetton_rem_detector
yetton_rem_detector.initialize_runtime(['-nojvm', '-nodisplay'])
rem_detector = yetton_rem_detector.initialize()
except ModuleNotFoundError:
pysleep_defaults.load_matlab_detectors = False
def extract_features(edf_filepath: str,
epochstages: List[str],
epochoffset_secs: Union[float, None]=None,
end_offset: Union[float, None]=None,
do_slow_osc: bool=True,
do_spindles: bool=True,
chans_for_spindles: List[str]=None,
chans_for_slow_osc: List[str]=None,
epochs_with_artifacts: List[int]=None,
do_rem: bool=False,
spindle_algo: str='Wamsley2012',
do_overlap=True,
timeit=False):
"""
Run full feature extraction (rem, spindles and SO) on an edf
:param edf_filepath: path to edf file
:param epochstages: epochstages list e.g. ['waso', 'waso', 'n1', 'n2', 'n2', etc]
:param epochoffset_secs: difference between the start of the epochstages and the edf in seconds
:param end_offset: end time to stop extraction for (seconds since start of edf)
:param do_slow_osc: whether to extract slow oscillations or not
:param do_spindles: whether to extract spindles or not
:param do_rem: whether to extract rem or not, note that matlab detectors must be turned on in pysleep_defaults
:param chans_for_spindles: which channels to extract for, can be empty list (no channels), None or all (all channels) or a list of channel names
:param chans_for_slow_osc: which channels to extract for, can be empty list (no channels), None or all (all channels) or a list of channel names
:param epochs_with_artifacts: idx of epochstages that are bad or should be skipped
:param spindle_algo: which spindle algo to run, see the list in Wonambi docs
:return: dataframe of all events, with description, stage, onset (seconds since epochstages start), duration, and feature properties
"""
features_detected = []
start_offset = epochoffset_secs
chans_for_slow_osc = None if chans_for_slow_osc == 'all' else chans_for_slow_osc
chans_for_spindles = None if chans_for_spindles == 'all' else chans_for_spindles
if do_spindles:
if timeit:
starttime = datetime.datetime.now()
logger.info('Spindle Extraction starting for ' + edf_filepath)
data = load_and_slice_data_for_feature_extraction(edf_filepath=edf_filepath,
epochstages=epochstages,
epochoffset_secs=start_offset,
bad_segments=epochs_with_artifacts,
end_offset=end_offset,
chans_to_consider=chans_for_spindles)
spindles = detect_spindles(data, start_offset=start_offset, algo=spindle_algo)
if spindles is None or spindles.shape[0]==0:
logger.warning('No Spindles detected for ' + edf_filepath)
else:
n_spindles = spindles.shape[0]
logger.info('Detected '+ str(n_spindles) + ' spindles on ' + edf_filepath)
if timeit:
logger.info('Spindle extraction took '+str(datetime.datetime.now()-starttime))
donetime = datetime.datetime.now()
spindles = assign_stage_to_feature_events(spindles, epochstages)
assert all(spindles['stage'].isin(pysleep_defaults.nrem_stages)), "All stages must be nrem. If missmatch maybe epochoffset is incorrect?"
if spindles.shape[0]:
features_detected.append(spindles)
if timeit:
logger.info('Bundeling extraction took '+str(datetime.datetime.now()-donetime))
if do_slow_osc:
if timeit:
starttime = datetime.datetime.now()
logger.info('Slow Osc Extraction starting for '+edf_filepath)
if not do_spindles or chans_for_slow_osc != chans_for_spindles:
data = load_and_slice_data_for_feature_extraction(edf_filepath=edf_filepath,
epochstages=epochstages,
epochoffset_secs=start_offset,
bad_segments=epochs_with_artifacts,
end_offset=end_offset,
chans_to_consider=chans_for_slow_osc)
sos = detect_slow_oscillation(data, start_offset=start_offset)
if sos is None:
logger.warning('No SO detected for ' + edf_filepath)
else:
n_sos = sos.shape[0]
logger.info('Detected '+str(n_sos)+ ' slow osc for ' + edf_filepath)
sos = assign_stage_to_feature_events(sos, epochstages)
assert all(sos['stage'].isin(pysleep_defaults.nrem_stages)), "All stages must be nrem. If missmatch maybe epochoffset is incorrect?"
if sos.shape[0]:
features_detected.append(sos)
if timeit:
logger.info('Slow Osc extraction took '+str(datetime.datetime.now()-starttime))
if do_rem:
if not pysleep_defaults.load_matlab_detectors:
warnings.warn('Requested REM, but matlab detectors are turned off. Turn on in pysleep defaults.')
else:
if timeit:
starttime = datetime.datetime.now()
try:
logger.info('REM Extraction starting for '+ edf_filepath)
data = load_and_slice_data_for_feature_extraction(edf_filepath=edf_filepath,
epochstages=epochstages,
epochoffset_secs=start_offset,
bad_segments = epochs_with_artifacts,
end_offset=end_offset,
chans_to_consider=['LOC','ROC'],
stages_to_consider=['rem'])
except ValueError:
warnings.warn('LOC and ROC must be present in the record. Cannot do REM')
rems = None
else:
rems = detect_rems(edf_filepath=edf_filepath, data=data, start_time=start_offset)
if rems is None:
logger.warning('No REM detected for ' + edf_filepath)
else:
rems = assign_stage_to_feature_events(rems, epochstages)
assert all(rems['stage'] == 'rem'), "All stages for rem must be rem. If missmatch maybe epochoffset is incorrect?"
logger.info('Detected '+ str(rems.shape[0]) + ' REMs for ' + edf_filepath)
if rems.shape[0]:
features_detected.append(rems)
if timeit:
logger.info('REM extraction took'+ str(datetime.datetime.now() - starttime))
if features_detected:
sleep_features_df = pd.concat(features_detected, axis=0, sort=False)
if do_spindles and do_slow_osc and do_overlap:
sleep_features_df = detect_slow_osc_spindle_overlap(sleep_features_df,
coupling_secs=pysleep_defaults.so_spindle_overlap,
as_bool=True)
return sleep_features_df
else:
return None
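# Illustrative call sketch (not part of the original module). The EDF path,
# channel names and hypnogram below are placeholders; epochstages must contain
# one label per scored epoch, aligned to the recording via epochoffset_secs.
def _example_extract_features():
    epochstages = ['waso'] * 10 + ['n1'] * 4 + ['n2'] * 40 + ['n3'] * 20
    return extract_features(edf_filepath='/path/to/recording.edf',
                            epochstages=epochstages,
                            epochoffset_secs=0.0,
                            chans_for_spindles=['C3', 'C4'],
                            chans_for_slow_osc=['F3', 'F4'],
                            do_rem=False)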
def load_and_slice_data_for_feature_extraction(edf_filepath: str,
epochstages: List[str],
bad_segments: List[int]=None,
epochoffset_secs: float = None,
end_offset: float = None,
chans_to_consider: List[str] = None,
epoch_len=pysleep_defaults.epoch_len,
stages_to_consider=pysleep_defaults.nrem_stages):
if epochoffset_secs is None:
epochoffset_secs = 0
if end_offset is not None:
last_good_epoch = int((end_offset - epochoffset_secs) / epoch_len)
epochstages = epochstages[0:last_good_epoch]
d = Dataset(edf_filepath)
eeg_data = d.read_data().data[0]
if not (1 < np.sum(np.abs(eeg_data))/eeg_data.size < 200):
raise EEGError("edf data should be in mV, please rescale units in edf file")
if bad_segments is not None:
for bad_epoch in bad_segments:
epochstages[bad_epoch]='artifact'
epochstages = pysleep_utils.convert_epochstages_to_eegevents(epochstages, start_offset=epochoffset_secs)
epochstages_to_consider = epochstages.loc[epochstages['description'].isin(stages_to_consider), :]
starts = epochstages_to_consider['onset'].tolist()
ends = (epochstages_to_consider['onset'] + epochstages_to_consider['duration']).tolist()
for i in range(len(starts)-1,0,-1):
if starts[i] == ends[i-1]:
del starts[i]
del ends[i-1]
data = d.read_data(begtime=starts, endtime=ends, chan=chans_to_consider)
data.starts = starts
data.ends = ends
return data
def detect_spindles(data: Dataset, algo: str = 'Wamsley2012',
start_offset: float = None) ->pd.DataFrame:
"""
Detect spindles locations in an edf file for each channel.
:param edf_filepath: path of edf file to load. Will maybe work with other filetypes. untested.
:param algo: which algorithm to use to detect spindles. See wonambi methods: https://wonambi-python.github.io/gui/methods.html
:param chans_to_consider: which channels to detect spindles on, must match edf channel names
:param bad_segments:
:param start_offset: offset between first epoch and edf - onset is measured from this
:return: returns dataframe of spindle locations, with columns for chan, start, duration and other spindle properties, sorted by onset
"""
detection = DetectSpindle(algo)
spindles_detected = detection(data)
spindles_df = pd.DataFrame(spindles_detected.events, dtype=float)
col_map = {'start': 'onset',
'end': None,
'peak_time': 'peak_time',
'peak_val_det': 'peak_uV', #peak in the band of interest (removing DC, and other signal components)
'peak_val_orig': None,
'dur': 'duration',
'auc_det': None,
'auc_orig': None,
'rms_det': None,
'rms_orig': None,
'power_orig': None,
'peak_freq': 'freq_peak',
'ptp_det': None,
'ptp_orig': None,
'chan': 'chan'}
cols_to_keep = set(spindles_df.columns) - set([k for k, v in col_map.items() if v is None])
spindles_df = spindles_df.loc[:, cols_to_keep]
spindles_df.columns = [col_map[k] for k in spindles_df.columns]
if spindles_df.shape[0] == 0:
return None #empty df
spindles_df['peak_time'] = spindles_df['peak_time'] - spindles_df['onset']
spindles_df['description'] = 'spindle'
if start_offset is not None:
spindles_df['onset'] = spindles_df['onset'] - start_offset
spindles_df = spindles_df.loc[spindles_df['onset'] >= 0, :]
return spindles_df.sort_values('onset')
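# Illustrative sketch (not part of the original module): slice out the NREM data
# first, then run the detector on it; the channel names are placeholders.
def _example_detect_spindles(edf_filepath, epochstages):
    data = load_and_slice_data_for_feature_extraction(edf_filepath=edf_filepath,
                                                      epochstages=epochstages,
                                                      epochoffset_secs=0.0,
                                                      chans_to_consider=['C3', 'C4'])
    return detect_spindles(data, algo='Wamsley2012', start_offset=0.0)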
def detect_slow_oscillation(data: Dataset, algo: str = 'AASM/Massimini2004', start_offset: float = None) -> pd.DataFrame:
"""
Detect slow waves (slow oscillations) locations in an edf file for each channel
:param edf_filepath: path of edf file to load. Will maybe work with other filetypes. untested.
:param algo: which algorithm to use to detect spindles. See wonambi methods: https://wonambi-python.github.io/gui/methods.html
:param chans_to_consider: which channels to detect spindles on, must match edf channel names
:param bad_segments:
:param start_offset: offset between first epoch and edf - onset is measured from this
:return: returns dataframe of spindle locations, with columns for chan, start, duration and other spindle properties, sorted by onset
"""
detection = DetectSlowWave(algo)
sos_detected = detection(data)
sos_df = pd.DataFrame(sos_detected.events, dtype=float)
col_map = {'start': 'onset',
'end': None,
'trough_time': 'trough_time',
'zero_time': 'zero_time',
'peak_time': 'peak_time',
'trough_val': 'trough_uV',
'peak_val': 'peak_uV',
'dur': 'duration',
'ptp': None,
'chan': 'chan'}
cols_to_keep = set(sos_df.columns) - set([k for k, v in col_map.items() if v is None])
sos_df = sos_df.loc[:, cols_to_keep]
sos_df.columns = [col_map[k] for k in sos_df.columns]
if sos_df.shape[0] == 0:
return None #empty df
sos_df['peak_time'] = sos_df['peak_time'] - sos_df['onset']
sos_df['trough_time'] = sos_df['trough_time'] - sos_df['onset']
sos_df['zero_time'] = sos_df['zero_time'] - sos_df['onset']
sos_df['description'] = 'slow_osc'
if start_offset is not None:
sos_df['onset'] = sos_df['onset'] - start_offset
sos_df = sos_df.loc[sos_df['onset']>=0,:]
return sos_df.sort_values('onset')
def detect_rems(edf_filepath: str,
data: Dataset,
loc_chan: str = 'LOC',
roc_chan: str = 'ROC',
start_time: float=0,
std_rem_width: float = 0.1,
algo: str='HatzilabrouEtAl'):
"""
Detect rapid eye movement events in an edf file from loc and roc channels (only REM stage considered). Sample Freq must be converted to 256Hz!
:param edf_filepath: path of edf file to load. Will maybe work with other filetypes. untested.
:param algo: which algorithm to use to detect spindles. See wonambi methods: https://wonambi-python.github.io/gui/methods.html
:param chans_to_consider: which channels to detect spindles on, must match edf channel names
:param epochstages: list of stages for each epoch
:param start_offset: offset between first epoch and edf - onset is measured from this
    :return: returns dataframe of detected REM events, with columns for chan, onset and duration
"""
# if data.header['s_freq'] != 256:
# raise EEGError("edf should be 256Hz. Please resample.")
rem_starts = [int(d*256) for d in data.starts] #must be in samples, must be 256Hz.
rem_ends = [int(d*256) for d in data.ends]
if len(rem_starts) > 0:
with nostdout():
onsets = rem_detector.runDetectorCommandLine(edf_filepath, [rem_starts, rem_ends], algo, loc_chan, roc_chan, 0)
else:
return None
if isinstance(onsets, float): #one rem?
warnings.warn('Only a single rem was found, this may be an error')
pass
if len(onsets) > 0:
onsets = onsets[0]
else:
return None
rem_df = pd.DataFrame({'onset': onsets}, dtype=float)
rem_df['description'] = 'rem_event'
if start_time is not None:
rem_df['onset'] = rem_df['onset'] - start_time
rem_df = rem_df.loc[rem_df['onset'] >= 0, :]
rem_df['duration'] = std_rem_width
rem_df['chan'] = 'LOC'
return rem_df
def assign_stage_to_feature_events(feature_events: pd.DataFrame,
epochstages: list,
epoch_stage_offset: int = 0,
epoch_len=pysleep_defaults.epoch_len,
) -> pd.DataFrame:
"""
:param feature_events: events dataframe, with start and duration columns
:param epochstages: stages, where the first stage starts at 0 seconds on the events timeline
:param epoch_stage_offset: the offset in seconds between the epoch stages and the features events
(i.e. when epoch stages start compared to the time where feature_events['onset']==0)
:return: the modified events df, with a stage column
"""
feature_events['onset'] -= epoch_stage_offset
if isinstance(epochstages, list):
stage_events = pd.DataFrame({'onset':np.arange(0, len(epochstages))*epoch_len,
'stage_idx':np.arange(0, len(epochstages)),
'stage':epochstages})
stage_events['duration'] = epoch_len
else:
raise ValueError('epochstages is of unknown type. Should be list.')
labels = [(s,i) for s,i in zip(stage_events['stage'], stage_events['stage_idx'])]
end_point = stage_events['onset'].iloc[-1] + stage_events['duration'].iloc[-1]
stages = pd.DataFrame(pd.cut(feature_events['onset'],
stage_events['onset'].to_list()+[end_point],
right=False,
labels=labels).to_list(),
columns=['stage','stage_idx'])
feature_events = pd.concat([feature_events.reset_index(drop=True), stages.reset_index(drop=True)], axis=1)
return feature_events
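# Illustrative sketch (not part of the original module): tag two toy events with
# the stage of the scored epoch they fall into (assuming the default 30-s epochs).
def _example_assign_stage():
    events = pd.DataFrame({'onset': [12.0, 95.0],
                           'duration': [1.0, 1.0],
                           'description': ['spindle', 'spindle']})
    return assign_stage_to_feature_events(events, ['n1', 'n2', 'n2', 'n3'])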
def detect_slow_osc_spindle_overlap(features_df, coupling_secs=None, as_bool=False) -> pd.DataFrame:
"""
Detect if a set of features (i.e. spindles) are close (with coupling_secs) to a set of base features (i.e. Slow Osc)
:param base_onsets: the onsets of features to search around (Slow Osc)
:param candidate_onsets:
:param offset_secs:
:return: closest infront of onset, closest behind onset, Nan if nothing within "coupling secs"
"""
if as_bool:
        assert coupling_secs is not None, 'if a yes/no coupling flag is requested, coupling_secs must be provided'
SO_df = features_df.loc[features_df['description'] == 'slow_osc', :]
spindle_df = features_df.loc[features_df['description'] == 'spindle', :]
def overlap_func(base, spindle_onsets, coupling_secs=None, as_bool=False):
if spindle_onsets.shape[0] == 0:
return pd.Series({'before':np.nan, 'after':np.nan})
spindle_diff = spindle_onsets - base
closest_before = spindle_diff[spindle_diff<0].iloc[-1] if spindle_diff[spindle_diff<0].shape[0]!=0 else np.nan
closest_after = spindle_diff[spindle_diff>0].iloc[0] if spindle_diff[spindle_diff>0].shape[0]!=0 else np.nan
if coupling_secs:
if abs(closest_before) < coupling_secs:
if as_bool:
closest_before = True
else:
closest_before = np.nan
if as_bool:
closest_before = False
if closest_after < coupling_secs:
if as_bool:
closest_after = True
else:
closest_after = np.nan
if as_bool:
closest_after = False
return pd.Series({'before':closest_before, 'after':closest_after})
for chan, chan_data in SO_df.groupby('chan'):
overlap = chan_data.apply(lambda x: overlap_func(x['onset']+x['zero_time'],
spindle_df.loc[spindle_df['chan']==x['chan'],'onset'],
coupling_secs,
as_bool), axis=1)
features_df.loc[(features_df['description'] == 'slow_osc') & (features_df['chan'] == chan), 'coupled_before'] = overlap['before']
features_df.loc[(features_df['description'] == 'slow_osc') & (features_df['chan'] == chan), 'coupled_after'] = overlap['after']
return features_df
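# Illustrative sketch (not part of the original module): flag slow oscillations
# that have a spindle starting within 1.5 s (an arbitrary example window) of them.
def _example_overlap(sleep_features_df):
    return detect_slow_osc_spindle_overlap(sleep_features_df,
                                           coupling_secs=1.5,
                                           as_bool=True)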
def sleep_feature_variables_per_stage_old(feature_events: pd.DataFrame,
mins_in_stage_df: pd.DataFrame=None,
stages_to_consider: List[str]=pysleep_defaults.stages_to_consider,
channels: List[str]=None,
av_across_channels: bool=True):
"""
Calculate the density, and mean of other important sleep feature variables (amp, power, peak freq, etc)
:param feature_events: dataframe of a single event type (spindle, slow osc, rem, etc)
:param stages_to_consider: The stages to extract for, i.e. you probably want to leave out REM when doing spindles
:param channels: if None consider all channels THAT HAVE DETECTED SPINDLES, to include 0 density and count for
channels that have no spindles, make sure to include this channel list argument.
:param av_across_channels: whether to average across channels, or return separate for each channel
:return: dataframe of with len(stage)*len(chan) or len(stage) rows with density + mean of each feature as columns
"""
if 'stage_idx' in feature_events.columns:
feature_events = feature_events.drop('stage_idx', axis=1)
if 'quartile' in feature_events.columns:
by_quart = True
index_vars = ['stage','description', 'quartile']
else:
index_vars = ['stage','description']
by_quart = False
pos_non_var_cols = ['stage', 'onset', 'description', 'chan'] + index_vars
non_var_cols = [col for col in feature_events.columns if col in pos_non_var_cols]
features_per_stage_cont = []
for stage_and_other_idx, feature_data_per_stage in feature_events.groupby(index_vars):
stage = stage_and_other_idx[0]
if by_quart:
quart = stage_and_other_idx[-1]
mins_in_stage = mins_in_stage_df.loc[(quart,stage),'minutes_in_stage']
else:
mins_in_stage = mins_in_stage_df.loc[stage, 'minutes_in_stage']
if stage in stages_to_consider:
per_chan_cont = []
channels_without_events = set(feature_data_per_stage['chan'].unique() if channels is None else channels)
for chan, feature_data_per_stage_chan in feature_data_per_stage.groupby('chan'):
if channels is None or chan in channels:
channels_without_events = channels_without_events - {chan}
features_per_chan = feature_data_per_stage_chan.drop(non_var_cols, axis=1).agg(np.nanmean)
features_per_chan.index = ['av_'+col for col in features_per_chan.index]
features_per_chan['density'] = feature_data_per_stage_chan.shape[0] / mins_in_stage
features_per_chan['count'] = feature_data_per_stage_chan.shape[0]
features_per_chan['chan'] = chan
per_chan_cont.append(features_per_chan)
if len(channels_without_events) > 0: #if there were channels that didnt have any spindles
for chan in channels_without_events:
per_chan_cont.append( | pd.Series({'chan': chan, 'density': 0, 'count': 0}) | pandas.Series |
# import app components
from app import app, data
from flask_cors import CORS
CORS(app) # enable CORS for all routes
# import libraries
from flask import request
import pandas as pd
import re
from datetime import datetime
from functools import reduce
# define functions
## process date args
def date_arg(arg):
try:
arg = datetime.strptime(arg, '%d-%m-%Y')
except:
try:
arg = datetime.strptime(arg, '%Y-%m-%d')
except:
arg = None
return arg
## process missing arg
def missing_arg(missing):
if missing == 'na':
missing_val = 'NA'
elif missing == 'empty':
missing_val = ''
elif missing == 'nan':
missing_val = 'NaN'
else:
missing_val = 'NULL'
return(missing_val)
## get date column
def get_date_col(df):
return list(filter(re.compile('^date_.*').search, df.columns.values))[0]
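# Illustrative sketch (not part of the route code): how the helpers above combine
# when parsing query-string arguments.
def _example_parse_args():
    after = date_arg('01-03-2021')    # accepts DD-MM-YYYY ...
    before = date_arg('2021-03-31')   # ... or YYYY-MM-DD
    missing_val = missing_arg('nan')  # -> 'NaN'
    return after, before, missing_val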
# list of dataset by location
data_canada = ['cases_timeseries_canada',
'mortality_timeseries_canada',
'recovered_timeseries_canada',
'testing_timeseries_canada',
'active_timeseries_canada',
'vaccine_administration_timeseries_canada',
'vaccine_distribution_timeseries_canada',
'vaccine_completion_timeseries_canada']
data_prov = ['cases_timeseries_prov',
'mortality_timeseries_prov',
'recovered_timeseries_prov',
'testing_timeseries_prov',
'active_timeseries_prov',
'vaccine_administration_timeseries_prov',
'vaccine_distribution_timeseries_prov',
'vaccine_completion_timeseries_prov']
data_hr = ['cases_timeseries_hr',
'mortality_timeseries_hr']
data_names = ['cases',
'mortality',
'recovered',
'testing',
'active',
'avaccine',
'dvaccine',
'cvaccine']
data_sknew = ['sk_new_cases_timeseries_hr_combined',
'sk_new_mortality_timeseries_hr_combined']
data_names_dates = {
'date_report': 'cases',
'date_death_report': 'mortality',
'date_recovered': 'recovered',
'date_testing': 'testing',
'date_active': 'active',
'date_vaccine_administered': 'avaccine',
'date_vaccine_distributed': 'dvaccine',
'date_vaccine_completed': 'cvaccine'
}
data_other = {
'prov': 'prov_map',
'hr': 'hr_map',
'age_cases': 'age_map_cases',
'age_mortality': 'age_map_mortality'
}
@app.route('/')
@app.route('/index')
def index():
# initialize response
response = {}
# subset dataframes
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
# rename date columns
for df in dfs.values():
df.columns = df.columns.str.replace('^date_.*', 'date', regex = True)
# subset active dataframe to avoid duplicate columns
dfs['active_timeseries_canada'] = dfs['active_timeseries_canada'].drop(columns=['cumulative_cases',
'cumulative_recovered',
'cumulative_deaths'])
# merge dataframes
df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province'], how='outer'), dfs.values())
# convert date column and filter to most recent date
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
df = df.loc[df['date'] == data.version['date']]
# format output
df['date'] = df['date'].dt.strftime('%d-%m-%Y')
df = df.fillna('NULL')
response['summary'] = df.to_dict(orient='records')
# add version to response
response['version'] = data.version['version']
# return response
return response
@app.route('/timeseries')
def timeseries():
# initialize response
response = {}
# read arguments
stat = request.args.get('stat')
loc = request.args.get('loc')
date = request.args.get('date')
after = request.args.get('after')
before = request.args.get('before')
ymd = request.args.get('ymd')
missing = request.args.get('missing')
version = request.args.get('version')
# process date arguments
if date:
date = date_arg(date)
if after:
after = date_arg(after)
if before:
before = date_arg(before)
# process other arguments
missing_val = missing_arg(missing)
if not loc:
loc = 'prov'
# get dataframes
if loc == 'canada':
if stat == 'cases':
data_name = data_canada[0]
dfs = [ | pd.read_csv(data.ccodwg[data_name]) | pandas.read_csv |
# PERISCOPE KPI CHART TEMPLATE
# SQL output should have 2 columns:
# 1) ds_aggregation: the date or datetime. name should be ds_hour, ds_day, ds_week, ds_month, ds_quarter, or ds_year. if you are using Periscope's aggregation filter, you can name it ds_[aggregation]
# 2) y_value: the value to forecast. name it whatever makes sense, e.g. y_signups, y$_revenue, etc. add the dollar sign ($) to format in dollars.
import pandas as pd
import plotly.plotly as py
import plotly.graph_objs as go
import datetime
import numpy as np
# Generating dummy data if SQL output is not in the correct format
community_post = ''
dummy_df = pd.DataFrame()
dummy_df['ds_day'] = pd.date_range(start='12/1/2018', end='1/1/2019')
dummy_df['row_num'] = range(1, dummy_df.shape[0] + 1)
dummy_df['multiplier'] = np.random.randint(10,50, dummy_df.shape[0])
dummy_df['y$_revenue'] = dummy_df['row_num'] * dummy_df['multiplier']
# gets the formatter for the column ($ or %)
def get_formatter(column):
if '$' in column:
return '$'
elif '%' in column:
return '%'
else:
return None
# returns the plotly formatter for the column
def tickformat(column):
if column.startswith('y$'):
return '$.3s'
elif column.startswith('y%'):
return '.0%'
else:
return '.3s'
# formats the column name
def column_name(column):
return column.split('_', 1)[1].replace('_',' ').title()
# identifies the aggregation level
def aggregation(ds_col):
return ds_col.split('_', 1)[1].lower()
# formats a percent value
def percent(pct):
return str(int(round(pct*100))) + '%'
# formats a number based on its formatter ($ or %)
def format(num, formatter = None):
if formatter is None:
return abbrev(num)
elif formatter == '$':
return dollars(num)
elif formatter == '%':
return percent(num)
# formats a dollar value
def dollars(num):
num = float('{:.3g}'.format(float(num)))
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '${}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude])
# makes a large number human-readable, e.g. 100000 -> 100K
def abbrev(num):
num = float('{:.3g}'.format(float(num)))
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude])
# returns the css styling for text
def style_text(text, **settings):
style = ';'.join([f'{key.replace("_","-")}:{settings[key]}' for key in settings])
return f'<span style="{style}">{text}</span>'
def style_link(text, link, **settings):
style = ';'.join([f'{key.replace("_","-")}:{settings[key]}' for key in settings])
return f'<a href="{link}" style="{style}">{text}</a>'
# displays text in the center of the plot
def number_overlay(text, annotation_msg=None):
axis_setting = dict(range=[-1,1], showline=False, ticks='', showticklabels=False, showgrid=False, zeroline=False, fixedrange=True)
annotation = dict(x=0, y=0, ax=0, ay=0, text=text)
margin = dict(t=60)
if annotation_msg is not None:
annotations = [annotation, annotation_msg]
else:
annotations = [annotation]
layout = go.Layout(xaxis=axis_setting,yaxis=axis_setting,annotations=annotations,margin=margin,)
fig=go.Figure(data=[], layout=layout)
periscope.plotly(fig, config={'displayModeBar':False})
# translates a hex code to RGB values
def rgb_from_hex(hex):
h = hex.lstrip('#')
return tuple(((int(h[i:i+2], 16))) for i in (0, 2 ,4))
# translates RGB values to hex code
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
# calculates the gradient value between two colors, based on a scale
def gradient(value, scale, colors):
min = scale[0]
max = scale[1]
if value < min:
value = min
elif value > max:
value = max
pct = 1.0 * (value - min) / (max - min)
min_color = rgb_from_hex(colors[0])
max_color = rgb_from_hex(colors[1])
rgb = tuple(((int(round(pct * (max_color[i] - min_color[i]) + min_color[i])))) for i in range(0,3))
return rgb_to_hex(rgb)
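# Illustrative sketch (not part of the chart template): the formatting and colour
# helpers above in action.
def _example_formatting():
    kpi = format(1234000, '$')                               # -> '$1.23M'
    pct = format(0.127, '%')                                 # -> '13%'
    colour = gradient(50, [0, 100], ['#ff0000', '#00ff00'])  # hex colour halfway along the scale
    return kpi, pct, colour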
# prints a message indicating there was no data
def no_data():
msg = 'No data to display.'
number_overlay(style_text(msg, font_size="18px"))
def plot(df, annotation=None):
df.columns = [c.lower() for c in df.columns]
y_col = [c for c in df.columns if c.startswith('y')][0]
ds_col = [c for c in df.columns if c.startswith('ds')][0]
df[y_col] = | pd.to_numeric(df[y_col]) | pandas.to_numeric |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
| Period("2011-01", freq="M") | pandas.Period |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ~~aliGater~~
# (semi)automated gating software
#
# /^^\
# /^^\_______/0 \_
# ( `~+++,,_________,,++~^^^^^^^
#..V^V^V^V^V^V^\.................................
#
#
# Parsing flow data with fcsparser from Eugene Yurtsevs FlowCytometryTools (very slightly modified)
# Check out his excellent toolkit for flow cytometry analysis:
# http://eyurtsev.github.io/FlowCytometryTools/
#
# <NAME> & <NAME> 2016~
# https://www.med.lu.se/labmed/hematologi_och_transfusionsmedicin/forskning/bjoern_nilsson
# Distributed under the MIT License
import numpy as np
import pandas as pd
import os
import sys
import copy #For deep copies of lists
import h5py
import json #for importing json with machine-specific scaling factors
#AliGater imports
from aligater.fscparser_api import parse
import aligater.AGConfig as agconf
#Dummy object for arguments
sentinel=object
#Some custom errors from template
class ExceptionTemplate(Exception):
def __call__(self, *args):
return self.__class__(*(args + self.args))
def __str__(self):
return ' '.join(self.args)
#General AliGater name to create errors
class AliGaterError(ExceptionTemplate): pass
#Some reoccuring, common ones
invalidAGgateError=AliGaterError('passed gate is not a valid AGgate object')
invalidAGgateParentError=AliGaterError('Invalid AGgate object passed as parentGate')
invalidSampleError=AliGaterError('Invalid AGsample object')
filePlotError=AliGaterError("If plotting to file is requested filePlot must be string filename")
markerError=AliGaterError("not present in sample, check spelling or control your dataframe.columns labels")
def compensateDF(fcsDF, metaDict, fsc_ssc_count, *args, **kwargs):
if 'spill_col_name' in kwargs:
spill_keyword = kwargs['spill_col_name']
elif 'SPILL' in metaDict.keys():
spill_keyword='SPILL'
elif '$SPILL' in metaDict.keys():
spill_keyword='$SPILL'
elif 'SPILLOVER' in metaDict.keys():
spill_keyword='SPILLOVER'
elif '$SPILLOVER' in metaDict.keys():
spill_keyword='$SPILLOVER'
else:
raise ValueError("Unknown label of spillover in metadata, pass correct alias with spill_col_name\nYou can load the fcs using ag.loadFCS with compensate=False, metadata=True and inspect the metadata for corect label.")
spill_matrix=metaDict[spill_keyword].split(',')
n = int(spill_matrix[0]) #number of colors
if not (n == len(fcsDF.columns)-fsc_ssc_count):
raise AliGaterError("in LoadFCS, compensateDF: ", "unexpected number of channels. If your FCS are exported with Area+width+height values for each flourochrome the flourochrome_area_filter needs to be set to True.")
#Depending on version FCS different number of cols-per-flourochrome will be reported(A-W-H vs just A) , and different number of columns preceding and subsequent the flourchrome columns
#NOTE: AliGater assumes first columns are FSC/SSC
colNames=fcsDF.columns[fsc_ssc_count:(n+fsc_ssc_count)]
fcsArray = fcsDF[colNames]
comp_matrix = np.array(spill_matrix[n+1:]).reshape(n, n).astype(float)
#Sanity check that the compensation matrix is non-zero if compensation was requested
tmp_identity = np.identity(n)
if np.array_equal(comp_matrix, tmp_identity):
reportStr="WARNING: No compensation data available in sample!\n"
sys.stderr.write(reportStr)
inv_comp_matrix = np.linalg.inv(comp_matrix)
compensatedArray=np.dot(fcsArray, inv_comp_matrix)
fcsDF.update(pd.DataFrame(compensatedArray,columns=colNames))
return fcsDF
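# Illustrative sketch (not part of the module): a minimal, made-up two-colour
# spillover keyword and frame, to show the matrix inversion in compensateDF.
def _example_compensation():
    meta = {'SPILL': '2,FL1-A,FL2-A,1.0,0.1,0.05,1.0'}
    df = pd.DataFrame({'FSC-A': [1.0], 'SSC-A': [1.0],
                       'FL1-A': [110.0], 'FL2-A': [60.0]})
    return compensateDF(df, meta, fsc_ssc_count=2)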
def reportGateResults(vI, vOutput):
if agconf.ag_verbose:
reportString="After gating, "+str(len(vOutput))+" out of "+str(len(vI))+" events remain.\n"
sys.stderr.write(reportString)
return
def compensate_manual(fcsDF, comp_matrix, fsc_ssc_count):
n=len(comp_matrix)
if not (n == len(fcsDF.columns)-fsc_ssc_count):
raise AliGaterError("in LoadFCS, compensate_manual: ", "unexpected number of channels. If your FCS are exported with Area+width+height values for each flourochrome the flourochrome_area_filter needs to be set to True.")
#Depending on version FCS different number of cols-per-flourochrome will be reported(A-W-H vs just A) , and different number of columns preceding and subsequent the flourchrome columns
#NOTE: AliGater assumes first columns are FSC/SSC
colNames=fcsDF.columns[fsc_ssc_count:(n+fsc_ssc_count)]
fcsArray = fcsDF[colNames]
#Sanity check that the compensation matrix is non-zero if compensation was requested
#Raise here instead of warning?
tmp_identity = np.identity(n)
if np.array_equal(comp_matrix, tmp_identity):
reportStr="WARNING: passed compensation matrix is 0, compensation NOT applied!\n"
sys.stderr.write(reportStr)
inv_comp_matrix = np.linalg.inv(comp_matrix)
compensatedArray=np.dot(fcsArray, inv_comp_matrix)
fcsDF.update(pd.DataFrame(compensatedArray,columns=colNames))
reportStr="Applied passed compensation matrix\n"
sys.stderr.write(reportStr)
return fcsDF
def getCompensationMatrix(fcsDF, metaDict):
spill_matrix=metaDict['SPILL'].split(',')
n = int(spill_matrix[0]) #number of colors
colNames=fcsDF.columns[4:(n+4)]
comp_matrix = np.array(spill_matrix[n+1:]).reshape(n, n).astype(float)
#Sanity check that the compensation matrix is non-zero if compensation was requested
#Raise here instead of warning?
tmp_identity = np.identity(n)
if np.array_equal(comp_matrix, tmp_identity):
reportStr="WARNING: This samples has no compensation data available\n"
sys.stderr.write(reportStr)
return colNames, comp_matrix
def getGatedVector(fcsDF, gate, vI=None, return_type="pdseries", dtype=np.float64):
"""
Collects list-like of measured intensities for a population.
Also see getGatedVectors
**Parameters**
fcsDF : pandas.DataFrame
Flow data loaded in a pandas DataFrame. \n
If data is stored in an AGSample object this can be retrieved by
calling the sample, i.e. mysample().
gate : str
Marker label.
vI : list-like or AGgate object
Population to collect.
return_type : str, optional, default: "pdseries"
Format of returned list-like, options are: "pdseries" "nparray"
dtype : Type, optional, default: numpy.float64
Data type of values in the returned listlike
**Returns**
List-like
**Examples**
None currently.
"""
if return_type.lower() not in ["pdseries","nparray"]:
raise TypeError("Specify return type as 'pdseries' or 'nparray'")
if return_type.lower()=="pdseries" and not (dtype is np.float64):
sys.stderr.write("dtype specification not supported for pdseries return type, returning default dtype")
if vI is None:
vI=fcsDF.index
if return_type.lower()=="pdseries":
gated_vector=fcsDF[gate].loc[vI]
else:
gated_vector=fcsDF[gate].loc[vI].values.astype(dtype)
return gated_vector
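# Illustrative usage (not part of the module); 'CD3-APC' is a placeholder marker
# name and fcsDF is the DataFrame obtained by calling an AGsample, e.g. mysample().
def _example_getGatedVector(fcsDF):
    # All events for one marker, as a float64 numpy array.
    return getGatedVector(fcsDF, 'CD3-APC', vI=None, return_type='nparray')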
def getGatedVectors(fcsDF, gate1, gate2, vI=None, return_type="pdseries"):
"""
Collects list-like of measured intensities for a population. \n
Useful to collect both intensity coordinates for events in a view.
**Parameters**
fcsDF : pandas.DataFrame
Flow data loaded in a pandas DataFrame.
If data is stored in an AGSample object this can be retrieved by
calling the sample, i.e. mysample().
gate1, gate2 : str
Marker labels.
vI : list-like or AGgate object
Population to collect.
return_type : str, optional, default: "pdseries"
        Format of the returned list-like; options are: "pdseries", "nparray", "matrix"
**Returns**
numpy.array[numpy.array, numpy.array]
If return type is 'nparray'
numpy array of arrays in order: x-array, y-array
numpy.array[list]
If return_type is 'matrix' returns a numpy array with list-likes of two; x-coord, y-coord
list-like, list-like
if return_type is 'pdseries' returns two pandas.Series objects in order; X, Y
**Examples**
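    A minimal, hypothetical sketch (marker labels are placeholders, not guaranteed to exist in your data):

    >>> vX, vY = getGatedVectors(mysample(), "CD3", "CD4", return_type="pdseries")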
"""
if return_type.lower() not in ["pdseries","nparray","matrix"]:
raise TypeError("Specify return type as 'pdseries', 'nparray', 'matrix'")
if vI is None:
vI=fcsDF.index
if return_type.lower()=="pdseries":
gated_vector1=fcsDF[gate1].loc[vI]
gated_vector2=fcsDF[gate2].loc[vI]
return gated_vector1, gated_vector2
elif return_type.lower()=="matrix":
vX=fcsDF[gate1].loc[vI].values
vY=fcsDF[gate2].loc[vI].values
return np.array(list(zip(vX,vY)))
else:
vX=fcsDF[gate1].loc[vI].values
vY=fcsDF[gate2].loc[vI].values
return np.array([vX, vY])
def loadHDF5sample(path, sampling_resolution=32):
"""
    Loads an HDF5-compressed fcs file; these are created with aligater.AGClasses.AGExperiment.create_HDF5_files
This function is mainly intended for internal use in the aligater.AGClasses.AGExperiment.apply function.
**Parameters**
path : str
Absolute path to .fcs file.
sampling_resolution : int, default: 32
To-be-deprecated.
Used to specify downsampling dimensions for batch runs through aligater.AGClasses.AGExperiments, this parameter is just passed through.\n
Will be moved into a ``*args`` ``**kwargs`` style parameter.\n
Should always be ignored if loading single files - it does nothing.
**Returns**
aligater.AGClasses.AGSample
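    **Examples**
    A minimal, hypothetical call (the path is a placeholder):

    >>> sample = loadHDF5sample("/path/to/sample.h5")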
"""
    #********Lazy loading of AGClasses*************
    #Could be moved into AGClasses to avoid this local import.
from aligater.AGClasses import AGsample
#************************************
    fcsDF = pd.read_hdf(path, key='fcs')
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
from uuid import uuid4
import numpy as np
import pandas as pd
import pytest
import v3io_frames as v3f
from conftest import has_go, test_backends, protocols
tsdb_span = 5 # hours
integ_params = [(p, b) for p in protocols for b in test_backends]
def csv_df(size):
data = {
'icol': np.random.randint(-17, 99, size),
'fcol': np.random.rand(size),
'scol': ['val-{}'.format(i) for i in range(size)],
'bcol': np.random.choice([True, False], size=size),
        'tcol': pd.date_range('2018-01-01', '2018-10-10', periods=size),
    }
    return pd.DataFrame(data)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/17 20:04
Desc: China Foreign Exchange Trade System & National Interbank Funding Center - repo fixing rate - historical data
"""
import pandas as pd
import requests
def repo_rate_hist(start_date: str = "20200930", end_date: str = "20201029") -> pd.DataFrame:
"""
    China Foreign Exchange Trade System & National Interbank Funding Center - repo fixing rate - historical data
    http://www.chinamoney.com.cn/chinese/bkfrr/
    :param start_date: start date; the start and end dates must fall within the same month
    :type start_date: str
    :param end_date: end date; the start and end dates must fall within the same month
    :type end_date: str
    :return: historical repo fixing rates
    :rtype: pandas.DataFrame
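    Example (dates shown are the defaults and purely illustrative)::

        repo_df = repo_rate_hist(start_date="20200930", end_date="20201029")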
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bk-currency/FrrHis"
params = {
"lang": "CN",
"startDate": start_date,
"endDate": end_date,
"pageSize": "5000",
}
r = requests.post(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df = pd.DataFrame([item for item in temp_df["frValueMap"].to_list()])
temp_df = temp_df[[
"date",
"FR001",
"FR007",
"FR014",
"FDR001",
"FDR007",
"FDR014",
]]
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
temp_df['FR001'] = pd.to_numeric(temp_df['FR001'])
temp_df['FR007'] = pd.to_numeric(temp_df['FR007'])
temp_df['FR014'] = pd.to_numeric(temp_df['FR014'])
temp_df['FDR001'] = pd.to_numeric(temp_df['FDR001'])
temp_df['FDR007'] = pd.to_numeric(temp_df['FDR007'])
    temp_df['FDR014'] = pd.to_numeric(temp_df['FDR014'])
    return temp_df
#################### Creating new columns by combining a DataFrame's string columns, etc. ####################
# Function used: apply
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
# df = pd.DataFrame({'id' : [1,2,10,20,100,200],
# "name":['aaa','bbb','ccc','ddd','eee','fff']})
# print(df)
#
# #Create a new column by transforming an existing column
# #Build a new id_2 column from the original id, padded to a uniform 5 digits (missing digits are filled with leading zeros)
# df['id_2']=df['id'].apply(lambda x:"{:0>5d}".format(x))
# print(df)
# # id name id_2
# # 0 1 aaa 00001
# # 1 2 bbb 00002
# # 2 10 ccc 00010
# # 3 20 ddd 00020
# # 4 100 eee 00100
# # 5 200 fff 00200
#
# # #format(): formats the argument in () according to the format spec that precedes it.
# #
# # x=3.141592
# # print("{:.2f}".format(x))
# # # 3.14
# #
# # print("{:+.2f}".format(x))
# # # +3.14
# #
# # x=-3.141592
# # print("{:+.2f}".format(x))
# # # -3.14
# #
# # x=2.718
# # print("{:.0f}".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림)
# # # 3
# #
# # x=3.147592
# # print("{:.2f}".format(x)) # .2f(소수 점 셋째자리에서 반올림)
# # # 3.15
# #
# # x=5
# # print("{:0>2d}".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 )
# # # 05
# #
# # x=7777777777
# # print("{:0>5d}".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지)
# # # 7777777777
# # print("{:,}".format(x))
# # # 7,777,777,777
# #
# # x=0.25
# # print("{:.2%}".format(x))
# # # 25.00%
# #
#
# #name + id_2: recombine => the two columns are joined (apply now operates on 2 columns)
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #the axis must be specified
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 NaN
# # 1 2 bbb 00002 NaN
# # 2 10 ccc 00010 NaN
# # 3 20 ddd 00020 NaN
# # 4 100 eee 00100 NaN
# # 5 200 fff 00200 NaN
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #axis set to 1
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1)
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
#
# #Add a new column that shows id with decimal places
# df['id_3']=df['id'].apply(lambda x: "{:.2f}".format(x))
# print(df)
# # id name id_2 id_name id_3
# # 0 1 aaa 00001 00001_aaa 1.00
# # 1 2 bbb 00002 00002_bbb 2.00
# # 2 10 ccc 00010 00010_ccc 10.00
# # 3 20 ddd 00020 00020_ddd 20.00
# # 4 100 eee 00100 00100_eee 100.00
# # 5 200 fff 00200 00200_fff 200.00
#
# df['name_3']=df['name'].apply(lambda x:x.upper()) #upper(): converts to uppercase
# print(df)
# # id name id_2 id_name id_3 name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF
#
#
# # Add an id_name_3 column
# # id_name_3 => 1.00:AAA
#
# df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1)
# print(df)
# # id name id_2 id_name id_3 name_3 id_name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF
#
###################################################################################################################
#groupby aggregation functions
# 1. Grouping using a dictionary
#As above: expressed as a dictionary, with row labels as keys and group names as values
# data= : supply the data; columns and index identify the values.
df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4'])
print(df)
# c1 c2 c3 c4 c5
# r1 0 1 2 3 4
# r2 5 6 7 8 9
# r3 10 11 12 13 14
# r4 15 16 17 18 19
# row_g1 = r1+r2 : create new rows by aggregating rows (elements in the same column are added: sum())
# row_g2 = r3+r4
mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 0 1 2 3 4
# row_g2 5 6 7 8 9
# row_g3 10 11 12 13 14
# row_g4 15 16 17 18 19
mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 5 7 9 11 13
# row_g2 25 27 29 31 33
print(gbr.mean())
# c1 c2 c3 c4 c5
# row_g1 2.5 3.5 4.5 5.5 6.5
# row_g2 12.5 13.5 14.5 15.5 16.5
print(gbr.std())
# c1 c2 c3 c4 c5
# row_g1 3.535534 3.535534 3.535534 3.535534 3.535534
# row_g2 3.535534 3.535534 3.535534 3.535534 3.535534
# col_g1 = c1+c2 : create new columns by aggregating columns (elements in the same row are added: sum()) : axis=1 must be given.
# col_g2 = c3+c4+c5
mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'}
gbc = df.groupby(mdc,axis=1) #axis=1 must be given
print(gbc.sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54
print(type(mdr))
# <class 'dict'>
print(mdr)
# {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'}
# dic -> Series
# Grouping using a Series
msr = Series(mdr)
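# Likely continuation (a sketch, not from the original tutorial): grouping by the Series
# behaves exactly like grouping by the dict above, since the Series maps index labels to group names.
# gbr_s = df.groupby(msr)
# print(gbr_s.sum())
# # c1 c2 c3 c4 c5
# # row_g1 5 7 9 11 13
# # row_g2 25 27 29 31 33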
import os
from os.path import join as pjoin
import re
from Bio.Seq import Seq
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import warnings
import time
import sqlite3 as sql
from collections import defaultdict
import gc
import sys
from gtr_utils import change_ToWorkingDirectory, make_OutputDirectory, merge_ManyFiles, multiprocesssing_Submission, generate_AssemblyId, chunks
from output_RegionResults import subset_SimilarityMatrix
import subprocess
from subprocess import DEVNULL, STDOUT, check_call
def convert_ToGGGenesInput(df_m,filter_on):
'''
'''
    df_gggenes = pd.DataFrame()
"""
Loading results, formatting and adding columns
result is the raw result metric computed from predictions at the end the benchmark. For classification problems, it is usually auc for binomial classification and logloss for multinomial classification.
score ensures a standard comparison between tasks: higher is always better.
norm_score is a normalization of score on a [0, 1] scale, with {{zero_one_refs[0]}} score as 0 and {{zero_one_refs[1]}} score as 1.
imp_result and imp_score for imputed results/scores. Given a task and a framework:
if all folds results/scores are missing, then no imputation occurs, and the result is nan for each fold.
if only some folds results/scores are missing, then the missing result is imputed by the {{impute_missing_with}} result for this fold.
"""
import numpy as np
import pandas as pd
import report.config as config
from .metadata import load_dataset_metadata
from .util import Namespace, display
def load_results(files):
return pd.concat([pd.read_csv(file) for file in files], ignore_index=True)
def task_prop(row, metadata, prop):
return getattr(metadata.get(row.task), prop)
def impute_result(row, results_df, res_col='result', imp_framework=None, imp_value=None):
if pd.notna(row[res_col]):
return row[res_col]
# if all folds are failed or missing, don't impute
if pd.isna(results_df.loc[(results_df.task==row.task)
&(results_df.framework==row.framework)][res_col]).all():
return np.nan
if imp_framework is not None:
# impute with ref framework corresponding value
return (results_df.loc[(results_df.framework==imp_framework)
&(results_df.task==row.task)
&(results_df.fold==row.fold)][res_col]
.item())
return imp_value
def imputed(row):
    return pd.isna(row.result) and pd.notna(row.imp_result)
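# Illustrative use of impute_result (the framework name is a placeholder, not from this module):
#     results_df['imp_result'] = results_df.apply(
#         impute_result, axis=1,
#         results_df=results_df, res_col='result', imp_framework='constantpredictor'
#     )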
import pandas as pd
import sqlite3
data = [
{'nome': 'Qui', 'eta': 23, 'sesso': 'M'},
{'nome': 'Quo', 'eta': 19, 'sesso': 'M'},
{'nome': 'Qua', 'eta': 20, 'sesso': 'M'},
{'nome': 'Paperino', 'eta': 35, 'sesso': 'M'},
{'nome': 'Gastone', 'eta': 28, 'sesso': 'M'},
{'nome': '<NAME>', 'eta': 55, 'sesso': 'M'},
{'nome': 'Paperina', 'eta': 32, 'sesso': 'F'},
{'nome': '<NAME>', 'eta': 60, 'sesso': 'F'},
]
df = pd.DataFrame(data)
import pandas as pd
import numpy as np
def make_submission(time, preds, filename_head=''):
print('time len, preds len : {0}, {1}'.format(len(time), len(preds)))
sub_df = pd.concat([pd.Series(time), pd.Series(preds)], axis=1)
sub_df.columns = ['time', 'open_channels']
sub_df['time'] = sub_df['time'].map(lambda x: '%.4f' % x)
sub_df.to_csv(filename_head + 'submission.csv', index=False)
print(sub_df.head())
return
def make_prediction_result(time, signal, open_channels, preds, filename_head=''):
if open_channels is None:
open_channels = np.ones_like(preds) * (-1)
print('len times, signal, open_channels, preds : {0}, {1}, {2}, {3}'.format(len(time), len(signal), len(open_channels), len(preds)))
    sub_df = pd.concat([pd.Series(time), pd.Series(signal), pd.Series(open_channels), pd.Series(preds)], axis=1)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interprested as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
        assert_series_equal(result, expected)
import asyncio
import queue
import uuid
from datetime import datetime
import pandas as pd
from storey import build_flow, Source, Map, Filter, FlatMap, Reduce, FlowError, MapWithState, ReadCSV, Complete, AsyncSource, Choice, \
Event, Batch, Table, NoopDriver, WriteToCSV, DataframeSource, MapClass, JoinWithTable, ReduceToDataFrame, ToDataFrame, WriteToParquet, \
WriteToTSDB, Extend
class ATestException(Exception):
pass
class RaiseEx:
_counter = 0
def __init__(self, raise_after):
self._raise_after = raise_after
def raise_ex(self, element):
if self._counter == self._raise_after:
raise ATestException("test")
self._counter += 1
return element
def test_functional_flow():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Filter(lambda x: x < 3),
FlatMap(lambda x: [x, x * 10]),
Reduce(0, lambda acc, x: acc + x),
]).run()
for _ in range(100):
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 3300
def test_csv_reader():
controller = build_flow([
ReadCSV('tests/test.csv', header=True),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def test_csv_reader_error_on_file_not_found():
controller = build_flow([
ReadCSV('tests/idontexist.csv', header=True),
]).run()
try:
controller.await_termination()
assert False
except FlowError as ex:
assert isinstance(ex.__cause__, FileNotFoundError)
def test_csv_reader_as_dict():
controller = build_flow([
ReadCSV('tests/test.csv', header=True, build_dict=True),
FlatMap(lambda x: [x['n1'], x['n2'], x['n3']]),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def append_and_return(lst, x):
lst.append(x)
return lst
def test_csv_reader_as_dict_with_key_and_timestamp():
controller = build_flow([
ReadCSV('tests/test-with-timestamp.csv', header=True, build_dict=True, key_field='k',
timestamp_field='t', timestamp_format='%d/%m/%Y %H:%M:%S'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
assert len(termination_result) == 2
assert termination_result[0].key == 'm1'
assert termination_result[0].time == datetime(2020, 2, 15, 2, 0)
assert termination_result[0].body == {'k': 'm1', 't': datetime(2020, 2, 15, 2, 0), 'v': 8, 'b': True}
assert termination_result[1].key == 'm2'
assert termination_result[1].time == datetime(2020, 2, 16, 2, 0)
assert termination_result[1].body == {'k': 'm2', 't': datetime(2020, 2, 16, 2, 0), 'v': 14, 'b': False}
def test_csv_reader_with_key_and_timestamp():
controller = build_flow([
ReadCSV('tests/test-with-timestamp.csv', header=True, key_field='k',
timestamp_field='t', timestamp_format='%d/%m/%Y %H:%M:%S'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
assert len(termination_result) == 2
assert termination_result[0].key == 'm1'
assert termination_result[0].time == datetime(2020, 2, 15, 2, 0)
assert termination_result[0].body == ['m1', datetime(2020, 2, 15, 2, 0), 8, True]
assert termination_result[1].key == 'm2'
assert termination_result[1].time == datetime(2020, 2, 16, 2, 0)
assert termination_result[1].body == ['m2', datetime(2020, 2, 16, 2, 0), 14, False]
def test_csv_reader_as_dict_no_header():
controller = build_flow([
ReadCSV('tests/test-no-header.csv', header=False, build_dict=True),
FlatMap(lambda x: [x[0], x[1], x[2]]),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def test_dataframe_source():
df = pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float'])
controller = build_flow([
DataframeSource(df),
Reduce([], append_and_return),
]).run()
termination_result = controller.await_termination()
expected = [{'string': 'hello', 'int': 1, 'float': 1.5}, {'string': 'world', 'int': 2, 'float': 2.5}]
assert termination_result == expected
def test_indexed_dataframe_source():
df = pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float'])
df.set_index(['string', 'int'], inplace=True)
controller = build_flow([
DataframeSource(df),
Reduce([], append_and_return),
]).run()
termination_result = controller.await_termination()
expected = [{'string': 'hello', 'int': 1, 'float': 1.5}, {'string': 'world', 'int': 2, 'float': 2.5}]
assert termination_result == expected
def test_dataframe_source_with_metadata():
t1 = datetime(2020, 2, 15)
t2 = datetime(2020, 2, 16)
df = pd.DataFrame([['key1', t1, 'id1', 1.1], ['key2', t2, 'id2', 2.2]],
columns=['my_key', 'my_time', 'my_id', 'my_value'])
controller = build_flow([
DataframeSource(df, key_column='my_key', time_column='my_time', id_column='my_id'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
expected = [Event({'my_key': 'key1', 'my_time': t1, 'my_id': 'id1', 'my_value': 1.1}, key='key1', time=t1, id='id1'),
Event({'my_key': 'key2', 'my_time': t2, 'my_id': 'id2', 'my_value': 2.2}, key='key2', time=t2, id='id2')]
assert termination_result == expected
async def async_dataframe_source():
df = pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float'])
controller = await build_flow([
DataframeSource(df),
Reduce([], append_and_return),
]).run_async()
termination_result = await controller.await_termination()
expected = [{'string': 'hello', 'int': 1, 'float': 1.5}, {'string': 'world', 'int': 2, 'float': 2.5}]
assert termination_result == expected
def test_async_dataframe_source():
    asyncio.run(async_dataframe_source())
def test_error_flow():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Map(RaiseEx(500).raise_ex),
Reduce(0, lambda acc, x: acc + x),
]).run()
try:
for i in range(1000):
controller.emit(i)
except FlowError as flow_ex:
assert isinstance(flow_ex.__cause__, ATestException)
def test_broadcast():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Filter(lambda x: x < 3, termination_result_fn=lambda x, y: x + y),
[
Reduce(0, lambda acc, x: acc + x)
],
[
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 6
def test_broadcast_complex():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Filter(lambda x: x < 3, termination_result_fn=lambda x, y: x + y),
[
Reduce(0, lambda acc, x: acc + x),
],
[
Map(lambda x: x * 100),
Reduce(0, lambda acc, x: acc + x)
],
[
Map(lambda x: x * 1000),
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 3303
# Same as test_broadcast_complex but without using build_flow
def test_broadcast_complex_no_sugar():
source = Source()
filter = Filter(lambda x: x < 3, termination_result_fn=lambda x, y: x + y)
source.to(Map(lambda x: x + 1)).to(filter)
filter.to(Reduce(0, lambda acc, x: acc + x), )
filter.to(Map(lambda x: x * 100)).to(Reduce(0, lambda acc, x: acc + x))
filter.to(Map(lambda x: x * 1000)).to(Reduce(0, lambda acc, x: acc + x))
controller = source.run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 3303
def test_map_with_state_flow():
controller = build_flow([
Source(),
MapWithState(1000, lambda x, state: (state, x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 1036
def test_map_with_cache_state_flow():
table_object = Table("table", NoopDriver())
table_object._cache['tal'] = {'color': 'blue'}
table_object._cache['dina'] = {'color': 'red'}
def enrich(event, state):
event['color'] = state['color']
state['counter'] = state.get('counter', 0) + 1
return event, state
controller = build_flow([
Source(),
MapWithState(table_object, lambda x, state: enrich(x, state), group_by_key=True),
Reduce([], append_and_return),
]).run()
for i in range(10):
key = 'tal'
if i % 3 == 0:
key = 'dina'
controller.emit(Event(body={'col1': i}, key=key))
controller.terminate()
termination_result = controller.await_termination()
expected = [{'col1': 0, 'color': 'red'},
{'col1': 1, 'color': 'blue'},
{'col1': 2, 'color': 'blue'},
{'col1': 3, 'color': 'red'},
{'col1': 4, 'color': 'blue'},
{'col1': 5, 'color': 'blue'},
{'col1': 6, 'color': 'red'},
{'col1': 7, 'color': 'blue'},
{'col1': 8, 'color': 'blue'},
{'col1': 9, 'color': 'red'}]
expected_cache = {'tal': {'color': 'blue', 'counter': 6}, 'dina': {'color': 'red', 'counter': 4}}
assert termination_result == expected
assert table_object._cache == expected_cache
def test_map_with_empty_cache_state_flow():
table_object = Table("table", NoopDriver())
def enrich(event, state):
if 'first_value' not in state:
state['first_value'] = event['col1']
event['diff_from_first'] = event['col1'] - state['first_value']
state['counter'] = state.get('counter', 0) + 1
return event, state
controller = build_flow([
Source(),
MapWithState(table_object, lambda x, state: enrich(x, state), group_by_key=True),
Reduce([], append_and_return),
]).run()
for i in range(10):
key = 'tal'
if i % 3 == 0:
key = 'dina'
controller.emit(Event(body={'col1': i}, key=key))
controller.terminate()
termination_result = controller.await_termination()
expected = [{'col1': 0, 'diff_from_first': 0},
{'col1': 1, 'diff_from_first': 0},
{'col1': 2, 'diff_from_first': 1},
{'col1': 3, 'diff_from_first': 3},
{'col1': 4, 'diff_from_first': 3},
{'col1': 5, 'diff_from_first': 4},
{'col1': 6, 'diff_from_first': 6},
{'col1': 7, 'diff_from_first': 6},
{'col1': 8, 'diff_from_first': 7},
{'col1': 9, 'diff_from_first': 9}]
expected_cache = {'dina': {'first_value': 0, 'counter': 4}, 'tal': {'first_value': 1, 'counter': 6}}
assert termination_result == expected
assert table_object._cache == expected_cache
def test_awaitable_result():
controller = build_flow([
Source(),
Map(lambda x: x + 1, termination_result_fn=lambda _, x: x),
[
Complete()
],
[
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
awaitable_result = controller.emit(i, return_awaitable_result=True)
assert awaitable_result.await_result() == i + 1
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 55
async def async_test_async_source():
controller = await build_flow([
AsyncSource(),
Map(lambda x: x + 1, termination_result_fn=lambda _, x: x),
[
Complete()
],
[
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
result = await controller.emit(i, await_result=True)
assert result == i + 1
await controller.terminate()
termination_result = await controller.await_termination()
assert termination_result == 55
def test_async_source():
loop = asyncio.new_event_loop()
loop.run_until_complete(async_test_async_source())
async def async_test_error_async_flow():
controller = await build_flow([
AsyncSource(),
Map(lambda x: x + 1),
Map(RaiseEx(500).raise_ex),
Reduce(0, lambda acc, x: acc + x),
]).run()
try:
for i in range(1000):
await controller.emit(i)
except FlowError as flow_ex:
assert isinstance(flow_ex.__cause__, ATestException)
def test_error_async_flow():
loop = asyncio.new_event_loop()
loop.run_until_complete(async_test_error_async_flow())
def test_choice():
small_reduce = Reduce(0, lambda acc, x: acc + x)
big_reduce = build_flow([
Map(lambda x: x * 100),
Reduce(0, lambda acc, x: acc + x)
])
controller = build_flow([
Source(),
Choice([(big_reduce, lambda x: x % 2 == 0)],
default=small_reduce,
termination_result_fn=lambda x, y: x + y)
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 2025
def test_metadata():
def mapf(x):
x.key = x.key + 1
return x
def redf(acc, x):
if x.key not in acc:
acc[x.key] = []
acc[x.key].append(x.body)
return acc
controller = build_flow([
Source(),
Map(mapf, full_event=True),
Reduce({}, redf, full_event=True)
]).run()
for i in range(10):
controller.emit(Event(i, key=i % 3))
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == {1: [0, 3, 6, 9], 2: [1, 4, 7], 3: [2, 5, 8]}
def test_metadata_immutability():
def mapf(x):
x.key = 'new key'
return x
controller = build_flow([
Source(),
Map(lambda x: 'new body'),
Map(mapf, full_event=True),
Complete(full_event=True)
]).run()
event = Event('original body', key='original key')
result = controller.emit(event, return_awaitable_result=True).await_result()
controller.terminate()
controller.await_termination()
assert event.key == 'original key'
assert event.body == 'original body'
assert result.key == 'new key'
assert result.body == 'new body'
def test_batch():
controller = build_flow([
Source(),
Batch(4, 100),
Reduce([], lambda acc, x: append_and_return(acc, x)),
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
def test_batch_full_event():
def append_body_and_return(lst, x):
ll = []
for item in x:
ll.append(item.body)
lst.append(ll)
return lst
controller = build_flow([
Source(),
Batch(4, 100, full_event=True),
Reduce([], lambda acc, x: append_body_and_return(acc, x)),
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
def test_batch_with_timeout():
q = queue.Queue(1)
def reduce_fn(acc, x):
if x[0] == 0:
q.put(None)
acc.append(x)
return acc
controller = build_flow([
Source(),
Batch(4, 1),
Reduce([], reduce_fn),
]).run()
for i in range(10):
if i == 3:
q.get()
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9]]
async def async_test_write_csv(tmpdir):
file_path = f'{tmpdir}/test_write_csv/out.csv'
controller = await build_flow([
AsyncSource(),
WriteToCSV(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv(tmpdir):
asyncio.run(async_test_write_csv(tmpdir))
async def async_test_write_csv_error(tmpdir):
file_path = f'{tmpdir}/test_write_csv_error.csv'
write_csv = WriteToCSV(file_path)
controller = await build_flow([
AsyncSource(),
write_csv
]).run()
try:
for i in range(10):
await controller.emit(i)
await controller.terminate()
await controller.await_termination()
assert False
except FlowError as ex:
assert isinstance(ex.__cause__, TypeError)
def test_write_csv_error(tmpdir):
asyncio.run(async_test_write_csv_error(tmpdir))
def test_write_csv_with_dict(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_dict.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_infer_columns(tmpdir):
file_path = f'{tmpdir}/test_write_csv_infer_columns.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_infer_columns_without_header(tmpdir):
file_path = f'{tmpdir}/test_write_csv_infer_columns_without_header.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_with_metadata(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_metadata.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key', 'n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"event_key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_with_metadata_no_rename(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_metadata_no_rename.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['$key', 'n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_with_rename(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_rename.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['n', 'n x 10=n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n x 10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_from_lists_with_metadata(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_metadata.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key', 'n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit([i, 10 * i], key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"event_key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_from_lists_with_metadata_and_column_pruning(tmpdir):
file_path = f'{tmpdir}/test_write_csv_from_lists_with_metadata_and_column_pruning.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert result == expected
def test_write_csv_infer_with_metadata_columns(tmpdir):
file_path = f'{tmpdir}/test_write_csv_infer_with_metadata_columns.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key'], header=True, infer_columns_from_data=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"event_key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_fail_to_infer_columns(tmpdir):
file_path = f'{tmpdir}/test_write_csv_fail_to_infer_columns.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, header=True)
]).run()
try:
controller.emit([0])
controller.terminate()
controller.await_termination()
assert False
except FlowError as flow_ex:
assert isinstance(flow_ex.__cause__, TypeError)
def test_reduce_to_dataframe():
controller = build_flow([
Source(),
ReduceToDataFrame()
]).run()
expected = []
for i in range(10):
controller.emit({'my_int': i, 'my_string': f'this is {i}'})
expected.append({'my_int': i, 'my_string': f'this is {i}'})
expected = pd.DataFrame(expected)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_reduce_to_dataframe_with_index():
index = 'my_int'
controller = build_flow([
Source(),
ReduceToDataFrame(index=index)
]).run()
expected = []
for i in range(10):
controller.emit({'my_int': i, 'my_string': f'this is {i}'})
expected.append({'my_int': i, 'my_string': f'this is {i}'})
expected = pd.DataFrame(expected)
expected.set_index(index, inplace=True)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_reduce_to_dataframe_with_index_from_lists():
index = 'my_int'
controller = build_flow([
Source(),
ReduceToDataFrame(index=index, columns=['my_int', 'my_string'])
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append({'my_int': i, 'my_string': f'this is {i}'})
expected = pd.DataFrame(expected)
expected.set_index(index, inplace=True)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_reduce_to_dataframe_indexed_by_key():
index = 'my_key'
controller = build_flow([
Source(),
ReduceToDataFrame(index=index, insert_key_column_as=index)
]).run()
expected = []
for i in range(10):
controller.emit({'my_int': i, 'my_string': f'this is {i}'}, key=f'key{i}')
expected.append({'my_int': i, 'my_string': f'this is {i}', 'my_key': f'key{i}'})
expected = pd.DataFrame(expected)
expected.set_index(index, inplace=True)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_to_dataframe_with_index():
index = 'my_int'
controller = build_flow([
Source(),
Batch(5),
ToDataFrame(index=index),
Reduce([], append_and_return, full_event=True)
]).run()
expected1 = []
for i in range(5):
data = {'my_int': i, 'my_string': f'this is {i}'}
controller.emit(data)
expected1.append(data)
expected2 = []
for i in range(5, 10):
data = {'my_int': i, 'my_string': f'this is {i}'}
controller.emit(data)
expected2.append(data)
expected1 = pd.DataFrame(expected1)
expected2 = pd.DataFrame(expected2)
expected1.set_index(index, inplace=True)
expected2.set_index(index, inplace=True)
controller.terminate()
termination_result = controller.await_termination()
assert len(termination_result) == 2
assert termination_result[0].body.equals(expected1), f"{termination_result[0]}\n!=\n{expected1}"
assert termination_result[1].body.equals(expected2), f"{termination_result[1]}\n!=\n{expected2}"
def test_map_class():
class MyMap(MapClass):
def __init__(self, mul=1, **kwargs):
super().__init__(**kwargs)
self._mul = mul
def do(self, event):
if event['bid'] > 700:
return self.filter()
event['xx'] = event['bid'] * self._mul
return event
controller = build_flow([
Source(),
MyMap(2),
Reduce(0, lambda acc, x: acc + x['xx']),
]).run()
controller.emit({'bid': 600})
controller.emit({'bid': 700})
controller.emit({'bid': 1000})
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 2600
def test_extend():
controller = build_flow([
Source(),
Extend(lambda x: {'bid2': x['bid'] + 1}),
Reduce([], append_and_return),
]).run()
controller.emit({'bid': 1})
controller.emit({'bid': 11})
controller.emit({'bid': 111})
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == [{'bid': 1, 'bid2': 2}, {'bid': 11, 'bid2': 12}, {'bid': 111, 'bid2': 112}]
def test_write_to_parquet(tmpdir):
out_dir = f'{tmpdir}/test_write_to_parquet/{uuid.uuid4().hex}/'
columns = ['my_int', 'my_string']
controller = build_flow([
Source(),
WriteToParquet(out_dir, partition_cols='my_int', columns=columns, max_events=1)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
    expected = pd.DataFrame(expected, columns=columns, dtype='int32')
import os
import pandas as pd
from functools import partial
"""
Feb 28, 2022
Assumption. Each T Cell has one physiologically
relevant A:B TCR receptor.
We consider the frequency of each unique chain (nucleotide-level), and
the frequency of alpha-beta pairings within a pool of single cells.
We attempt to use that information to distinguish 'true' pairings from
possible cell-free DNA artifacts.
In those cases, if a cell has multiple alphas/betas pairings,
and no obvious contaminating receptor is found, the procedure
then is to select, per single cell barcode, the A:B chains
with the highest umi counts.
V1. This script was written without option for multiple batches
TODO: add a multibatch option ['batch_id', 'pool_id']
"""
def _generate_clone_stats_from_contig_annotation_file(
f,
p = None,
sample_index_cols = ['arm', 'ptid', 'visit', 'stim', 'sortid'],
cell_index_cols = ['barcode', 'raw_clonotype_id'],
tcra_cols = ['v_a_gene', 'j_a_gene', 'cdr3_a_aa'],
tcrb_cols = ['v_b_gene', 'j_b_gene', 'cdr3_b_aa'],
pairs_cols = ['cdr3_a_nt', 'cdr3_b_nt'],
chains_cols = ['cdr3_nt', 'v_gene'],
tcr_c_cols = ['v_a_gene', 'j_a_gene', 'cdr3_a_aa', 'v_b_gene', 'j_b_gene', 'cdr3_b_aa', 'cdr3_a_nt', 'cdr3_b_nt']):
"""
Parameters
----------
p : str
path to file (optional)
f : str
        filename for contig annotations file
    cell_index_cols : list
        list of columns that index each single cell (i.e. barcode)
    tcra_cols : list
        list of columns that index the TCR alpha chain
    tcrb_cols : list
        list of columns that index the TCR beta chain
    pairs_cols : list
        list of columns used in a groupby to count how often a particular alpha/beta pair occurs
chains_cols : list
list of columns used in groupby to count how often a particular chain appears
Returns
-------
tr : pd.DataFrame
Dataframe with TRA and TRB joined together on <cell_index_cols>.
ct_chains : pd.DataFrame
        Dataframe with the number of times each single chain occurred
    ct_pairs : pd.DataFrame
        Dataframe with the number of times each A/B pairing occurred
    Notes:
        In effect, we join all possible A and B receptors per cell barcode. Subsequently
        we append two columns:
            'cts_a' : number of unique alpha (TRA) nucleotide sequences seen for the cell barcode
            'cts_b' : number of unique beta (TRB) nucleotide sequences seen for the cell barcode
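    Examples
    --------
    Illustrative call (the filename is a placeholder)::

        tr, ct_chains, ct_pairs = _generate_clone_stats_from_contig_annotation_file(
            f='filtered_contig_annotations.csv')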
"""
if p is not None:
filename = os.path.join(p, f)
else:
filename = f
tcr_raw = pd.read_csv(filename, sep = ",")
tra = tcr_raw.loc[tcr_raw['chain'] == 'TRA']
trb = tcr_raw.loc[tcr_raw['chain'] == 'TRB']
tr = pd.merge(tra, trb, on=cell_index_cols, how='outer', suffixes=('_a', '_b'))
ren = {'v_gene_b':'v_b_gene',
'v_gene_a':'v_a_gene',
'j_gene_b':'j_b_gene',
'j_gene_a':'j_a_gene',
'cdr3_b':'cdr3_b_aa',
'cdr3_a':'cdr3_a_aa',
'cdr3_nt_b':'cdr3_b_nt',
'cdr3_nt_a':'cdr3_a_nt'}
tr = tr.rename(ren, axis=1)
multis = tr.groupby(cell_index_cols)[['cdr3_a_nt', 'cdr3_b_nt']].nunique()
multis.columns = ['cts_a', 'cts_b']
tr = pd.merge(tr, multis, left_on=multis.index.names, right_index=True)
ct_pairs = tr.groupby(pairs_cols)['barcode'].nunique().sort_values(ascending=False)
ct_chains = tcr_raw.groupby(chains_cols)['barcode'].nunique().sort_values(ascending=False)
ct_chains.name = 'ct_chains'
ct_pairs.name = 'ct_pairs'
return (tr, ct_chains, ct_pairs)
def _clean_multi_inframe(
gby,
chain,
ct_chains,
ct_pairs,
threshold_chains = 10,
threshold_pairs = 5):
"""
gby : slice of DataFrame
        defined by a groupby operation, e.g. on ['barcode', 'raw_clonotype_id']
    chain : str
        'a' or 'b'
    ct_chains : pd.DataFrame (names=['cdr3_nt', 'v_gene'])
        This DataFrame provides info on how often each individual chain occurs
    ct_pairs : pd.DataFrame (names=['cdr3_a_nt', 'cdr3_b_nt'])
        This DataFrame provides info on how often each alpha/beta pair occurs
    threshold_chains : int
        A chain seen fewer than <threshold_chains> times in the ct_chains lookup is treated as
        plausibly real; a more common chain must also be supported by the pair counts (see threshold_pairs).
    threshold_pairs : int
        For a chain seen n (>= threshold_chains) times overall, the pairing is kept only if it is
        seen at least n - <threshold_pairs> times as a pair. For example, a chain seen 40 times
        overall but paired with this partner only 3 times (< 40 - 5) is discarded as a likely artifact.
"""
assert chain in ['a','b']
# <gby> is a DataFrame slice with all possible TRB:TRA combinations
# <tmp> is left merge to ct_chains, adding the number of times the primary chain, receptor appears in the full batch
tmp = pd.merge(gby, ct_chains, left_on=[f'cdr3_{chain}_nt', f'v_{chain}_gene'], right_index=True, how='left')
# <tmp> a sceond left join to pairs adds counts of the specific pair
tmp = pd.merge(tmp, ct_pairs, left_on=['cdr3_a_nt', 'cdr3_b_nt'], right_index=True, how='left')
if ((tmp['ct_chains'] < threshold_chains ) & ((tmp['ct_chains'] < threshold_chains ) | tmp['ct_chains'].isnull())).all():
"""If all chains are seen <10 times as a chain then just pick the one with more UMIs, reads"""
return tmp.sort_values(by=[f'umis_{chain}', f'reads_{chain}'], ascending=False).iloc[0]
else:
"""If one of them is seen more than n > 10 times as a chain, if its seen at least n - 5 times
as a pair then its a real pair."""
ind = (tmp['ct_chains'] < threshold_chains ) | (tmp['ct_pairs'] >= (tmp['ct_chains'] - threshold_pairs))
if ind.sum() >= 1:
return tmp.loc[ind].sort_values(by=[f'umis_{chain}', f'reads_{chain}'], ascending=False).iloc[0]
else:
print(tmp[['cdr3_b_aa', 'cdr3_a_aa', 'reads_a', 'umis_a', 'ct_chains', 'ct_pairs']])
return None
def _clean_multi_ab_inframe(gby,
ct_chains,
ct_pairs,
threshold_chains = 10,
threshold_pairs = 5):
"""
gby : slice of DataFrame
        defined by a groupby operation, e.g. on ['barcode', 'raw_clonotype_id']
    ct_chains : pd.DataFrame (names=['cdr3_nt', 'v_gene'])
        This DataFrame provides info on how often each individual chain occurs
    ct_pairs : pd.DataFrame (names=['cdr3_a_nt', 'cdr3_b_nt'])
        This DataFrame provides info on how often each alpha/beta pair occurs
"""
tmp = pd.merge(gby, ct_chains, left_on=['cdr3_a_nt', 'v_a_gene'], right_index=True, how='left')
tmp = tmp.rename({'ct_chains':'ct_chains_a'}, axis=1)
tmp = pd.merge(tmp, ct_chains, left_on=['cdr3_b_nt', 'v_b_gene'], right_index=True, how='left')
tmp = tmp.rename({'ct_chains':'ct_chains_b'}, axis=1)
tmp = pd.merge(tmp, ct_pairs, left_on=['cdr3_a_nt', 'cdr3_b_nt'], right_index=True, how='left')
"""If either chain is very common and the pair is uncommon then kick it out"""
ind = ((tmp['ct_chains_a'] >= threshold_chains) | (tmp['ct_chains_b'] >= threshold_chains)) & ((tmp['ct_pairs'] < (tmp['ct_chains_a'] - threshold_pairs)) | (tmp['ct_pairs'] < (tmp['ct_chains_b'] - 5)))
tmp = tmp.loc[~ind]
if (~ind).sum() >= 1:
return tmp.loc[~ind].sort_values(by=['umis_a', 'umis_b', 'reads_a', 'reads_b'], ascending=False).iloc[0]
else:
return None
def _all_multi_ab_inframe(gby,
ct_chains,
ct_pairs):
"""
gby : slice of DataFrame
        defined by a groupby operation, e.g. on ['barcode', 'raw_clonotype_id']
    ct_chains : pd.DataFrame (names=['cdr3_nt', 'v_gene'])
        This DataFrame provides info on how often each individual chain occurs
    ct_pairs : pd.DataFrame (names=['cdr3_a_nt', 'cdr3_b_nt'])
        This DataFrame provides info on how often each alpha/beta pair occurs
"""
tmp = pd.merge(gby, ct_chains, left_on=['cdr3_a_nt', 'v_a_gene'], right_index=True, how='left')
tmp = tmp.rename({'ct_chains':'ct_chains_a'}, axis=1)
tmp = pd.merge(tmp, ct_chains, left_on=['cdr3_b_nt', 'v_b_gene'], right_index=True, how='left')
tmp = tmp.rename({'ct_chains':'ct_chains_b'}, axis=1)
tmp = pd.merge(tmp, ct_pairs, left_on=['cdr3_a_nt', 'cdr3_b_nt'], right_index=True, how='left')
return tmp
def _all_multi_inframe(
gby,
chain,
ct_chains,
ct_pairs):
"""
gby : slice of DataFrame
        defined by a groupby operation, e.g. on ['barcode', 'raw_clonotype_id']
    chain : str
        'a' or 'b'
    ct_chains : pd.DataFrame (names=['cdr3_nt', 'v_gene'])
        This DataFrame provides info on how often each individual chain occurs
    ct_pairs : pd.DataFrame (names=['cdr3_a_nt', 'cdr3_b_nt'])
        This DataFrame provides info on how often each alpha/beta pair occurs
"""
# <gby> is a DataFrame slice with all possible TRB:TRA combinations
# <tmp> is left merge to ct_chains, adding the number of times the primary chain, receptor appears in the full batch
    tmp = pd.merge(gby, ct_chains, left_on=[f'cdr3_{chain}_nt', f'v_{chain}_gene'], right_index=True, how='left')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 21:02:46 2019
@author: shayan
"""
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from collections import OrderedDict
from sklearn.metrics import mean_squared_error
import copy
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--filename", type=str, default="Trace")
parser.add_argument("--seed1", type=int, default=4)
parser.add_argument("--seed2", type=int, default=3)
args = parser.parse_args()
filename=args.filename
seed1=args.seed1
seed2=args.seed2
sys.stdout=open("clf_bb_"+str(seed1)+"_"+str(seed2)+"_"+filename+".log","w")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class cNet(torch.nn.Module):
def __init__(self):
super(cNet, self).__init__()
self.conv1 = nn.Conv1d(x_train.shape[1], 128, 9, padding=(9 // 2))
self.bnorm1 = nn.BatchNorm1d(128)
self.conv2 = nn.Conv1d(128, 256, 5, padding=(5 // 2))
self.bnorm2 = nn.BatchNorm1d(256)
self.conv3 = nn.Conv1d(256, 128, 3, padding=(3 // 2))
self.bnorm3 = nn.BatchNorm1d(128)
self.classification_head = nn.Linear(128, nb_classes)
def forward(self, x_class):
# print("x_class.size--->",x_class.size())
b1 = F.relu(self.bnorm1(self.conv1(x_class)))
# print("b1.size--->",b1.size())
b2 = F.relu(self.bnorm2(self.conv2(b1)))
# print("b2.size--->",b2.size())
b3 = F.relu(self.bnorm3(self.conv3(b2)))
# print("b3.size--->",b3.size())
classification_features = torch.mean(b3, 2)#(64,128)#that is now we have global avg pooling, 1 feature from each conv channel
# print("classification_features.size--->",classification_features.size())
classification_out=self.classification_head(classification_features)
# print("classification_out.size()--->",classification_out.size())
return classification_out
def optimize_network(x_batch_class, y_batch_class):
y_hat_classification = c_model(x_batch_class.float())
loss_classification = criterion_classification(y_hat_classification, y_batch_class.long())
optimizer.zero_grad()
loss_classification.backward()
optimizer.step()
return loss_classification.item()
train=pd.read_csv("/home/shayan/ts/UCRArchive_2018/"+filename+"/"+filename+"_TRAIN.tsv",sep="\t",header=None)
test=pd.read_csv("/home/shayan/ts/UCRArchive_2018/"+filename+"/"+filename+"_TEST.tsv",sep="\t",header=None)
df = pd.concat((train,test))
y_s=df.values[:,0]
nb_classes = len(np.unique(y_s))
y_s = (y_s - y_s.min())/(y_s.max()-y_s.min())*(nb_classes-1)
df[df.columns[0]]=y_s
train, test = train_test_split(df, test_size=0.2, random_state=seed1)
train_labeled, train_unlabeled = train_test_split(train, test_size=1-0.1, random_state=seed2)
train_unlabeled[train_unlabeled.columns[0]]=-1#Explicitly set all the instance's labels to -1
train_1=pd.concat((train_labeled,train_unlabeled))
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename)
##
# Check Study
##
study = pandas.Series(['Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test1',
'Test1'],
name='Study',
dtype='str')
assert_series_equal(msData.sampleMetadata['Study'], study)
##
#
##
chromatography = pandas.Series(['H',
'R',
'R',
'L',
'L',
'H',
'H',
'R',
'R',
'R',
'L',
'L',
'H',
'H',
'H'],
name='Chromatography',
dtype='str')
assert_series_equal(msData.sampleMetadata['Chromatography'], chromatography)
##
#
##
ionisation = pandas.Series(['POS',
'POS',
'NEG',
'POS',
'NEG',
'POS',
'POS',
'POS',
'NEG',
'POS',
'POS',
'NEG',
'POS',
'POS',
'POS'],
name='Ionisation',
dtype='str')
assert_series_equal(msData.sampleMetadata['Ionisation'], ionisation)
##
# Check Instrument
##
instrument = pandas.Series(['ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF06',
'ToF06',
'ToF06'],
name='Instrument',
dtype='str')
assert_series_equal(msData.sampleMetadata['Instrument'], instrument)
##
# Check Re-Run
##
reRun = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'b',
'',
'q',
'',
'',
'',
'',
''],
name='Re-Run',
dtype='str')
assert_series_equal(msData.sampleMetadata['Re-Run'], reRun)
##
# Check Suplemental Injections
##
suplemental = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'',
'2',
'',
'',
'9',
'',
'',
''],
name='Suplemental Injections',
dtype='str')
assert_series_equal(msData.sampleMetadata['Suplemental Injections'], suplemental)
##
# Check Skipped
##
skipped = pandas.Series([False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False],
name='Skipped',
dtype='bool')
assert_series_equal(msData.sampleMetadata['Skipped'], skipped)
##
# Check Matrix
##
matrix = pandas.Series(['P',
'U',
'S',
'P',
'U',
'S',
'P',
'U',
'S',
'',
'',
'',
'',
'',
''],
name='Matrix',
dtype='str')
assert_series_equal(msData.sampleMetadata['Matrix'], matrix)
##
# Check Well
##
well = pandas.Series([2,
3,
4,
5,
6,
5,
2,
3,
4,
1,
2,
1,
-1,
-1,
-1],
name='Well',
dtype='int')
assert_series_equal(msData.sampleMetadata['Well'], well, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Well'].dtype.kind, well.dtype.kind)
##
# Check Plate
##
plate = pandas.Series([1,
2,
3,
4,
5,
4,
1,
2,
3,
1,
2,
3,
1,
2,
21],
name='Plate',
dtype='int')
assert_series_equal(msData.sampleMetadata['Plate'], plate, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Plate'].dtype.kind, plate.dtype.kind)
##
# Check Batch
##
batch = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
2.0,
3.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Batch',
dtype='float')
assert_series_equal(msData.sampleMetadata['Batch'], batch)
##
# Check Dilution
##
dilution = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(msData.sampleMetadata['Dilution'], dilution)
##
# Check AssayRole
##
assayRole = pandas.Series([AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.Assay,
AssayRole.Assay],
name='AssayRole',
dtype=object)
assert_series_equal(msData.sampleMetadata['AssayRole'], assayRole)
##
# Check SampleType
##
sampleType = pandas.Series([SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.StudyPool,
SampleType.MethodReference,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.ProceduralBlank,
SampleType.StudyPool,
SampleType.StudyPool],
name='SampleType',
dtype=object)
assert_series_equal(msData.sampleMetadata['SampleType'], sampleType)
def test_updateMasks_features(self):
msData = nPYc.MSDataset('', fileType='empty')
msData.Attributes['artifactualFilter'] = True
##
# Variables:
# Good Corr, Good RSD
# Poor Corr, Good RSD
# Good Corr, Poor RSD
# Poor Corr, Poor RSD
# Good Corr, Good RSD, below blank
##
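# With the default thresholds only the first feature (good correlation, good RSD,
# not removed by the blank filter) is expected to pass; the subtests below relax or
# tighten individual filters to exercise each flag in turn.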
msData.intensityData = numpy.array([[100, 23, 99, 51, 100],
[90, 54, 91, 88, 91],
[50, 34, 48, 77, 49],
[10, 66, 11, 56, 11],
[1, 12, 2, 81, 2],
[50, 51, 2, 12, 49],
[51, 47, 1, 100, 50],
[47, 50, 70, 21, 48],
[51, 49, 77, 91, 50],
[48, 49, 12, 2, 49],
[50, 48, 81, 2, 51],
[54, 53, 121, 52, 53],
[57, 49, 15, 51, 56],
[140, 41, 97, 47, 137],
[52, 60, 42, 60, 48],
[12, 48, 8, 56, 12],
[1, 2, 1, 1.21, 51],
[2, 1, 1.3, 1.3, 63]],
dtype=float)
msData.sampleMetadata = pandas.DataFrame(data=[[100, 1, 1, 1, AssayRole.LinearityReference, SampleType.StudyPool],
[90, 1, 1, 2, AssayRole.LinearityReference, SampleType.StudyPool],
[50, 1, 1, 3, AssayRole.LinearityReference, SampleType.StudyPool],
[10, 1, 1, 4, AssayRole.LinearityReference, SampleType.StudyPool],
[1, 1, 1, 5, AssayRole.LinearityReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank]],
columns=['Dilution', 'Batch', 'Correction Batch', 'Well', 'AssayRole', 'SampleType'])
msData.featureMetadata = pandas.DataFrame(data=[['Feature_1', 0.5, 100., 0.3],
['Feature_2', 0.55, 100.04, 0.3],
['Feature_3', 0.75, 200., 0.1],
['Feature_4', 0.9, 300., 0.1],
['Feature_5', 0.95, 300.08, 0.1]],
columns=['Feature Name','Retention Time','m/z','Peak Width'])
msData.featureMetadata['Exclusion Details'] = None
msData.featureMetadata['User Excluded'] = False
msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=msData.featureMetadata.index)
msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=msData.featureMetadata.index)
msData.initialiseMasks()
with self.subTest(msg='Default Parameters'):
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax RSD threshold'):
expectedFeatureMask = numpy.array([True, False, True, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=90, varianceRatio=0.1, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax correlation threshold'):
expectedFeatureMask = numpy.array([True, True, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter': True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=1.1, corrThreshold=0))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='High variance ratio'):
expectedFeatureMask = numpy.array([False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=100, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(blankThreshold=0.5))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='No blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':False})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Default withArtifactualFiltering'):
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1],[3,4]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData._tempArtifactualLinkageMatrix)
with self.subTest(msg='Altered withArtifactualFiltering parameters'):
expectedArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True}, **dict(deltaMzArtifactual=300,
overlapThresholdArtifactual=0.1,
corrThresholdArtifactual=0.2))
self.assertEqual(msData.Attributes['filterParameters']['deltaMzArtifactual'], 300)
self.assertEqual(msData.Attributes['filterParameters']['overlapThresholdArtifactual'], 0.1)
self.assertEqual(msData.Attributes['filterParameters']['corrThresholdArtifactual'], 0.2)
assert_frame_equal(expectedArtifactualLinkageMatrix, msData._artifactualLinkageMatrix)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=False'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = False
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': False, 'blankFilter': True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData2.featureMask)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=True'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = True
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0, 1], [3, 4]], columns=['node1', 'node2'])
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData2._tempArtifactualLinkageMatrix)
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
msData = nPYc.MSDataset('', fileType='empty')
msData.intensityData = numpy.zeros([18, 5],dtype=float)
msData.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
msData.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
def test_updateMasks_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='Correlation'):
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=-1.01))
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(corrThreshold='0.7'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Blanks'):
self.assertRaises(TypeError, msData.updateMasks, **dict(blankThreshold='A string'))
with self.subTest(msg='Variance Ratio'):
self.assertRaises(TypeError, msData.updateMasks, **dict(varianceRatio='1.1'))
with self.subTest(msg='ArtifactualParameters'):
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':'A string', 'rsdFilter':False, 'blankFilter': False,
'correlationToDilutionFilter':False, 'varianceRatioFilter':False}, **dict(blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=1.01, blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=-0.01, blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual='0.7', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(deltaMzArtifactual='100', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(overlapThresholdArtifactual='0.5', blankThreshold=False))
def test_applyMasks(self):
fit = numpy.random.randn(self.msData.noSamples, self.msData.noFeatures)
self.msData.fit = copy.deepcopy(fit)
deletedFeatures = numpy.random.randint(0, self.msData.noFeatures, size=2)
self.msData.featureMask[deletedFeatures] = False
fit = numpy.delete(fit, deletedFeatures, 1)
self.msData.applyMasks()
numpy.testing.assert_array_almost_equal(self.msData.fit, fit)
def test_correlationToDilution(self):
from nPYc.utilities._internal import _vcorrcoef
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset', sop='GenericMS')
dataset.sampleMetadata['SampleType'] = nPYc.enumerations.SampleType.StudyPool
dataset.sampleMetadata['AssayRole'] = nPYc.enumerations.AssayRole.LinearityReference
dataset.sampleMetadata['Well'] = 1
dataset.sampleMetadata['Dilution'] = numpy.linspace(1, noSamp, num=noSamp)
correlations = dataset.correlationToDilution
with self.subTest(msg='Checking default path'):
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
with self.subTest(msg='Checking corr exclusions'):
dataset.corrExclusions = None
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
def test_correlateToDilution_raises(self):
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset')
with self.subTest(msg='Unknown correlation type'):
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution, method='unknown')
with self.subTest(msg='No LR samples'):
dataset.sampleMetadata['AssayRole'] = AssayRole.Assay
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution)
with self.subTest(msg='No Dilution field'):
dataset.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
self.assertRaises(KeyError, dataset._MSDataset__correlateToDilution)
def test_validateObject(self):
with self.subTest(msg='validateObject successful on correct dataset'):
goodDataset = copy.deepcopy(self.msData)
self.assertEqual(goodDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True), {'Dataset': True, 'BasicMSDataset':True ,'QC':True, 'sampleMetadata':True})
with self.subTest(msg='BasicMSDataset fails on empty MSDataset'):
badDataset = nPYc.MSDataset('', fileType='empty')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset':False ,'QC':False, 'sampleMetadata':False})
with self.subTest(msg='check raise no warnings with raiseWarning=False'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject, capturing any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 0)
with self.subTest(msg='check fail and raise warnings on bad Dataset'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'featureMetadata')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject, capturing any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': False, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 5)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.featureMetadata'" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to Dataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not conform to basic MSDataset" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have QC parameters" in str(w[3].message)
assert issubclass(w[4].category, UserWarning)
assert "Does not have sample metadata information" in str(w[4].message)
with self.subTest(msg='check raise warnings BasicMSDataset'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject, capturing any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 4)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.Attributes['rtWindow']" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to basic MSDataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have QC parameters" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have sample metadata information" in str(w[3].message)
with self.subTest(msg='check raise warnings QC parameters'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject, capturing any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 3)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata['Batch']' is <class 'str'>" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have QC parameters:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[2].message)
with self.subTest(msg='check raise warnings sampleMetadata'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject, capturing any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 2)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata' lacks a 'Subject ID' column" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[1].message)
with self.subTest(msg='self.Attributes[\'rtWindow\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rtWindow\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rtWindow'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'msPrecision\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['msPrecision']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'msPrecision\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['msPrecision'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'varianceRatio\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['varianceRatio']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'varianceRatio\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['varianceRatio'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'blankThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['blankThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'blankThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['blankThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrMethod\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrMethod']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrMethod\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrMethod'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'rsdThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rsdThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rsdThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rsdThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'deltaMzArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['deltaMzArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'deltaMzArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['deltaMzArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'overlapThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['overlapThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'overlapThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['overlapThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'FeatureExtractionSoftware\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['FeatureExtractionSoftware']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'FeatureExtractionSoftware\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['FeatureExtractionSoftware'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Raw Data Path\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Raw Data Path']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Raw Data Path\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Raw Data Path'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Feature Names\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Feature Names']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Feature Names\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Feature Names'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.VariableType is not an enum VariableType'):
badDataset = copy.deepcopy(self.msData)
badDataset.VariableType = 'not an enum'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.corrExclusions does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'corrExclusions')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._correlationToDilution does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_correlationToDilution')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._correlationToDilution is not a numpy.ndarray'):
badDataset = copy.deepcopy(self.msData)
badDataset._correlationToDilution = 'not a numpy.ndarray'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._artifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_artifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._artifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._artifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._tempArtifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_tempArtifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._tempArtifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._tempArtifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.fileName does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'fileName')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.fileName is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.fileName = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.filePath does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'filePath')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.filePath is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.filePath = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample File Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample File Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'AssayRole\'] is not an enum \'AssayRole\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['AssayRole'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'SampleType\'] is not an enum \'SampleType\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['SampleType'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Dilution\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Dilution'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Correction Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Correction Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Run Order\'] is not an int'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Run Order'] = 'not an int'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Acquired Time\'] is not a datetime'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Acquired Time'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample Base Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample Base Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Matrix column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Matrix'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Matrix\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Matrix'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Subject ID column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Subject ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Subject ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not unique'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = ['Feature1','Feature1','Feature1']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a m/z column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['m/z'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'m/z\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['m/z'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a Retention Time column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['Retention Time'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Retention Time\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Retention Time'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
class test_msdataset_batch_inference(unittest.TestCase):
"""
Check batches are generated and amended correctly
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata['Sample File Name'] = ['Test_RPOS_ToF04_B1S1_SR',
'Test_RPOS_ToF04_B1S2_SR',
'Test_RPOS_ToF04_B1S3_SR',
'Test_RPOS_ToF04_B1S4_SR',
'Test_RPOS_ToF04_B1S5_SR',
'Test_RPOS_ToF04_P1W01',
'Test_RPOS_ToF04_P1W02_SR',
'Test_RPOS_ToF04_P1W03',
'Test_RPOS_ToF04_B1E1_SR',
'Test_RPOS_ToF04_B1E2_SR',
'Test_RPOS_ToF04_B1E3_SR',
'Test_RPOS_ToF04_B1E4_SR',
'Test_RPOS_ToF04_B1E5_SR',
'Test_RPOS_ToF04_B2S1_SR',
'Test_RPOS_ToF04_B2S2_SR',
'Test_RPOS_ToF04_B2S3_SR',
'Test_RPOS_ToF04_B2S4_SR',
'Test_RPOS_ToF04_B2S5_SR',
'Test_RPOS_ToF04_P2W01',
'Test_RPOS_ToF04_P2W02_SR',
'Test_RPOS_ToF04_P3W03',
'Test_RPOS_ToF04_B2S1_SR_2',
'Test_RPOS_ToF04_B2S2_SR_2',
'Test_RPOS_ToF04_B2S3_SR_2',
'Test_RPOS_ToF04_B2S4_SR_2',
'Test_RPOS_ToF04_B2S5_SR_2',
'Test_RPOS_ToF04_P3W03_b',
'Test_RPOS_ToF04_B2E1_SR',
'Test_RPOS_ToF04_B2E2_SR',
'Test_RPOS_ToF04_B2E3_SR',
'Test_RPOS_ToF04_B2E4_SR',
'Test_RPOS_ToF04_B2E5_SR',
'Test_RPOS_ToF04_B2SRD1']
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData.sampleMetadata['Run Order'] = self.msData.sampleMetadata.index + 1
def test_fillbatches_correctionbatch(self):
self.msData._fillBatches()
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_fillbatches_warns(self):
self.msData.sampleMetadata.drop('Run Order', axis=1, inplace=True)
self.assertWarnsRegex(UserWarning, r'Unable to infer batches without run order, skipping\.', self.msData._fillBatches)
def test_amendbatches(self):
"""
"""
self.msData._fillBatches()
self.msData.amendBatches(20)
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_msdataset_addsampleinfo_batches(self):
self.msData.addSampleInfo(descriptionFormat='Batches')
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
class test_msdataset_import_undefined(unittest.TestCase):
"""
Test we raise an error when passing a fileType we don't understand.
"""
def test_raise_notimplemented(self):
self.assertRaises(NotImplementedError, nPYc.MSDataset, os.path.join('nopath'), fileType='Unknown filetype')
class test_msdataset_import_QI(unittest.TestCase):
"""
Test import from QI csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (115, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR',
'UnitTest1_LPOS_ToF02_ERROR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking Peak Widths'):
peakWidth = pandas.Series([0.03931667,
0.01403333,
0.01683333,
0.01683333],
name='Peak Width',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Peak Width'], peakWidth)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485,
3.17485,
3.17485,
3.17485],
name='Retention Time',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Retention Time'], rt)
with self.subTest(msg='Checking Isotope Distribution'):
isotope = pandas.Series(['100 - 36.9',
'100 - 11.9',
'100 - 8.69',
'100 - 73.4'],
name='Isotope Distribution',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Isotope Distribution'], isotope)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_import_xcms(unittest.TestCase):
"""
Test import from XCMS csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms.csv'), fileType='XCMS', noFeatureParams=9)
self.msData_PeakTable = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms_peakTable.csv'), fileType='XCMS', noFeatureParams=8)
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData_PeakTable.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (111, 4))
self.assertEqual((self.msData_PeakTable.noSamples, self.msData_PeakTable.noFeatures), (111, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
assert_series_equal(self.msData_PeakTable.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
assert_series_equal(self.msData_PeakTable.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
assert_series_equal(self.msData_PeakTable.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485 / 60.0,
3.17485 / 60.0,
3.17485 / 60.0,
3.17485 / 60.0],
name='Retention Time',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Retention Time'], rt)
assert_series_equal(self.msData_PeakTable.featureMetadata['Retention Time'], rt)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
assert_series_equal(self.msData_PeakTable.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
self.msData_PeakTable.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData_PeakTable.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
numpy.testing.assert_array_almost_equal(self.msData_PeakTable.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
self.msData_PeakTable.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
numpy.testing.assert_array_almost_equal(self.msData_PeakTable.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
self.assertEqual(self.msData_PeakTable.VariableType, nPYc.enumerations.VariableType.Discrete)
def test_xcms_raises(self):
path = os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv')
self.assertRaises(ValueError, nPYc.MSDataset, path, fileType='XCMS', noFeatureParams=9)
class test_msdataset_import_csvimport_discrete(unittest.TestCase):
"""
Test import from NPC csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(
os.path.join('..', '..', 'npc-standard-project', 'Derived_Data', 'UnitTest1_PCSOP.069_csv_import.csv'), fileType='csv', noFeatureParams=1)
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (111, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_import_csvimport_continuum(unittest.TestCase):
"""
Test import from NPC csv files
"""
def test_csv_continuum_import_raises(self):
path = os.path.join('..', '..', 'npc-standard-project', 'Derived_Data', 'UnitTest1_PCSOP.069_csv_import.csv')
self.assertRaises(NotImplementedError, nPYc.MSDataset, path, fileType='csv', noFeatureParams=2, variableType='Continuum')
class test_msdataset_import_metaboscape(unittest.TestCase):
"""
Test import from metaboscape xlsx outputs
"""
def setUp(self):
path = os.path.join('..','..','npc-standard-project','Derived_Data', 'UnitTest1_PCSOP.069_Metaboscape.xlsx')
self.lcData = nPYc.MSDataset(path, fileType='Metaboscape', noFeatureParams=18, sheetName='Test Data')
self.lcData.addSampleInfo(descriptionFormat='Filenames')
self.diData = nPYc.MSDataset(path, fileType='Metaboscape', noFeatureParams=16, sheetName='Test Data (DI)')
self.diData.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.lcData.noSamples, self.lcData.noFeatures), (115, 4))
self.assertEqual((self.diData.noSamples, self.diData.noFeatures), (115, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR',
'UnitTest1_LPOS_ToF02_ERROR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.lcData.sampleMetadata['Sample File Name'], samples)
assert_series_equal(self.diData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.lcData.featureMetadata['Feature Name'], features)
features = pandas.Series(['262.0378339m/z',
'293.1811941m/z',
'145.0686347m/z',
'258.1033447m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.diData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378,
293.1812,
145.0686,
258.1033],
name='m/z',
dtype='float')
assert_series_equal(self.lcData.featureMetadata['m/z'], mz)
assert_series_equal(self.diData.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485,
3.17485,
3.17485,
3.17485],
name='Retention Time',
dtype='float')
assert_series_equal(self.lcData.featureMetadata['Retention Time'], rt)
with self.subTest(msg='Checking ΔRT'):
deltaRT = pandas.Series([0,
0.1,
0,
-0.1],
name='ΔRT',
dtype='object')
assert_series_equal(self.lcData.featureMetadata['ΔRT'], deltaRT)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.lcData.sampleMetadata['Dilution'], dilution)
assert_series_equal(self.diData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.lcData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.lcData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.lcData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.lcData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.lcData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.lcData.VariableType, nPYc.enumerations.VariableType.Discrete)
def test_csv_import(self):
path = os.path.join('..','..','npc-standard-project','Derived_Data', 'UnitTest1_PCSOP.069_Metaboscape_LC.csv')
lcData = nPYc.MSDataset(path, fileType='Metaboscape', noFeatureParams=18)
lcData.addSampleInfo(descriptionFormat='Filenames')
assert_frame_equal(self.lcData.sampleMetadata, lcData.sampleMetadata)
numpy.testing.assert_array_equal(self.lcData.intensityData, lcData.intensityData)
class test_msdataset_import_biocrates(unittest.TestCase):
"""
Test import of Biocrates sheets
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_Biocrates.xlsx'), fileType='Biocrates', sheetName='Master Samples')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (9, 144))
def test_samples(self):
with self.subTest(msg='Checking Sample IDs'):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample ID',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample ID'], samples)
with self.subTest(msg='Checking Sample Bar Code'):
samples = pandas.Series([1010751983, 1010751983, 1010751983, 1010751983, 1010751983, 1010751998, 1010751998, 1010751998, 1010751998],
name='Sample Bar Code',
dtype=int)
assert_series_equal(self.msData.sampleMetadata['Sample Bar Code'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['C0', 'C10', 'C10:1', 'C10:2', 'C12', 'C12-DC', 'C12:1', 'C14', 'C14:1', 'C14:1-OH', 'C14:2', 'C14:2-OH', 'C16', 'C16-OH',
'C16:1', 'C16:1-OH', 'C16:2', 'C16:2-OH', 'C18', 'C18:1', 'C18:1-OH', 'C18:2', 'C2', 'C3', 'C3-DC (C4-OH)', 'C3-OH', 'C3:1',
'C4', 'C4:1', 'C6 (C4:1-DC)', 'C5', 'C5-M-DC', 'C5-OH (C3-DC-M)', 'C5:1', 'C5:1-DC', 'C5-DC (C6-OH)', 'C6:1', 'C7-DC', 'C8',
'C9', 'lysoPC a C14:0', 'lysoPC a C16:0', 'lysoPC a C16:1', 'lysoPC a C17:0', 'lysoPC a C18:0', 'lysoPC a C18:1', 'lysoPC a C18:2',
'lysoPC a C20:3', 'lysoPC a C20:4', 'lysoPC a C24:0', 'lysoPC a C26:0', 'lysoPC a C26:1', 'lysoPC a C28:0', 'lysoPC a C28:1',
'PC aa C24:0', 'PC aa C26:0', 'PC aa C28:1', 'PC aa C30:0', 'PC aa C32:0', 'PC aa C32:1', 'PC aa C32:2', 'PC aa C32:3', 'PC aa C34:1',
'PC aa C34:2', 'PC aa C34:3', 'PC aa C34:4', 'PC aa C36:0', 'PC aa C36:1', 'PC aa C36:2', 'PC aa C36:3', 'PC aa C36:4', 'PC aa C36:5',
'PC aa C36:6', 'PC aa C38:0', 'PC aa C38:3', 'PC aa C38:4', 'PC aa C38:5', 'PC aa C38:6', 'PC aa C40:1', 'PC aa C40:2', 'PC aa C40:3',
'PC aa C40:4', 'PC aa C40:5', 'PC aa C40:6', 'PC aa C42:0', 'PC aa C42:1', 'PC aa C42:2', 'PC aa C42:4', 'PC aa C42:5', 'PC aa C42:6',
'PC ae C30:0', 'PC ae C30:1', 'PC ae C30:2', 'PC ae C32:1', 'PC ae C32:2', 'PC ae C34:0', 'PC ae C34:1', 'PC ae C34:2', 'PC ae C34:3',
'PC ae C36:0', 'PC ae C36:1', 'PC ae C36:2', 'PC ae C36:3', 'PC ae C36:4', 'PC ae C36:5', 'PC ae C38:0', 'PC ae C38:1', 'PC ae C38:2',
'PC ae C38:3', 'PC ae C38:4', 'PC ae C38:5', 'PC ae C38:6', 'PC ae C40:1', 'PC ae C40:2', 'PC ae C40:3', 'PC ae C40:4', 'PC ae C40:5',
'PC ae C40:6', 'PC ae C42:0', 'PC ae C42:1', 'PC ae C42:2', 'PC ae C42:3', 'PC ae C42:4', 'PC ae C42:5', 'PC ae C44:3', 'PC ae C44:4',
'PC ae C44:5', 'PC ae C44:6', 'SM (OH) C14:1', 'SM (OH) C16:1', 'SM (OH) C22:1', 'SM (OH) C22:2', 'SM (OH) C24:1', 'SM C16:0',
'SM C16:1', 'SM C18:0', 'SM C18:1', 'SM C20:2', 'SM C24:0', 'SM C24:1', 'SM C26:0', 'SM C26:1', 'H1', 'H1.1'],
name='Feature Name',
dtype=str)
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Class'):
classField = pandas.Series(['acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids',
'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids',
'sphingolipids', 'sugars', 'sugars'],
name='Class',
dtype=str)
assert_series_equal(self.msData.featureMetadata['Class'], classField)
with self.subTest(msg='Checking LOD'):
lod = pandas.Series([2.1, 0.08, 1.08, 0.156, 0.064, 0.151, 0.857, 0.023, 0.009, 0.015, 0.049, 0.019, 0.018, 0.009, 0.017, 0.029, 0.023, 0.035,
0.013, 0.029, 0.017, 0.01, 0.063, 0.011, 0.046, 0.02, 0., 0.027, 0.021, 0.02, 0.035, 0.05, 0.037, 0.072, 0.015, 0.014, 0.036,
0.018, 0.1, 0.017, 5.32, 0.068, 0.064, 0.035, 0.181, 0.023, 0.02, 0.088, 0., 0.038, 0.034, 0.015, 0.105, 0.007, 0.061, 1.1,
0.079, 0.139, 0.02, 0.006, 0.006, 0., 0.03, 0.015, 0.001, 0.004, 0.203, 0.012, 0.022, 0.004, 0.009, 0.004, 0.002, 0.035, 0.01,
0.008, 0.005, 0.002, 0.394, 0.058, 0.003, 0.017, 0., 0.188, 0.065, 0.019, 0.058, 0.011, 0.037, 0.248, 0.155, 0.005, 0.01,
0.002, 0.001, 0.011, 0.014, 0.004, 0.01, 0.059, 0.061, 0.029, 0., 0.084, 0.014, 0.076, 0.031, 0.012, 0.005, 0.009, 0.002,
0.003, 0.019, 0.006, 0.013, 0.08, 0.003, 0.007, 1.32, 0.119, 0.017, 0.007, 0., 0.843, 0.048, 0.116, 0.072, 0.043, 0., 0.004,
0.006, 0.001, 0., 0.032, 0.005, 0.003, 0.004, 0.013, 0.006, 0.003, 0.01, 0.003, 912., 912.],
name='LOD (μM)',
dtype=float)
assert_series_equal(self.msData.featureMetadata['LOD (μM)'], lod)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_addsampleinfo(unittest.TestCase):
"""
Test addSampleInfo() on a dataset imported from QI csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_msdataset_load_npc_lims(self):
"""
Test that we are matching sample IDs in the LIMS correctly
"""
samplingIDs = pandas.Series(['Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Procedural Blank Sample', 'Procedural Blank Sample',
'Study Pool Sample','Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample','Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'UT1_S1_s1', 'UT1_S2_s1', 'UT1_S3_s1', 'Not specified', 'UT1_S4_s2', 'UT1_S4_s3', 'UT1_S4_s4', 'UT1_S4_s5',
'External Reference Sample', 'Study Pool Sample', 'Not specified'], name='Sample ID', dtype='str')
samplingIDs = samplingIDs.astype(str)
self.msData.addSampleInfo(descriptionFormat='NPC LIMS', filePath=os.path.join('..','..','npc-standard-project','Derived_Worklists','UnitTest1_MS_serum_PCSOP.069.csv'))
assert_series_equal(self.msData.sampleMetadata['Sample ID'], samplingIDs)
def test_msdataset_load_watersraw_metadata(self):
"""
Test that we read raw data from Waters .raw files and concatenate it correctly - currently focusing on parameters of importance to the workflow.
TODO: Test all parameters
"""
# Expected data starts with the same samples
expected = copy.deepcopy(self.msData.sampleMetadata)
##
# Define a test subset of columns with one unique value
##
testSeries = ['Sampling Cone', 'Scan Time (sec)', 'Source Offset', 'Source Temperature (°C)', 'Start Mass', 'End Mass', 'Column Serial Number:', 'ColumnType:']
expected['Sampling Cone'] = 20.0
expected['Scan Time (sec)'] = 0.15
expected['Source Offset'] = 80
expected['Source Temperature (°C)'] = 120.0
expected['Start Mass'] = 50.0
expected['End Mass'] = 1200.0
expected['Column Serial Number:'] = 1573413615729.
expected['ColumnType:'] = 'ACQUITY UPLC® HSS T3 1.8µm'
##
# And a subset with multiple values
##
testSeries.append('Detector')
expected['Detector'] = [3161., 3161., 3166., 3166., 3166., 3166., 3171., 3171., 3171., 3171., 3171., 3171., 3171., 3171., 3179.,
3179., 3179., 3179., 3179., 3179., 3184., 3184., 3184., 3188., 3188., 3188., 3188., 3188., 3188., 3193.,
3193., 3193., 3193., 3193., 3197., 3197., 3197., 3197., 3197., 3203., 3203., 3203., 3203., 3203., 3208.,
3208., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407.,
3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3399., 3399., 3399., 3399., 3399., 3399., 3399.,
3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399.,
3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3212., 3212., 3217., 3217., 3217., 3293.,
3293., 3293., 3299., 3299., 3299., 3299., 3299., 3293., 3299., 3299.]
testSeries.append('Measurement Date')
expected['Measurement Date'] = ['25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014',
'26-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '24-Nov-2014', '24-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014',
'27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014']
testSeries.append('Measurement Time')
expected['Measurement Time'] = ['13:43:57', '13:59:44', '14:15:39', '14:31:26', '14:47:21', '15:03:07', '15:19:00', '15:34:46', '15:50:40', '16:06:26',
'16:22:12', '16:38:06', '16:54:01', '17:09:56', '17:25:44', '17:41:30', '17:57:16', '18:13:02', '18:28:47', '18:44:35',
'19:00:22', '19:16:10', '19:31:56', '19:47:51', '20:03:46', '20:19:33', '20:35:19', '20:51:13', '21:07:09', '21:22:55',
'21:38:50', '21:54:43', '22:10:28', '22:26:15', '22:42:09', '22:57:56', '23:13:41', '23:29:35', '23:45:29', '00:01:23',
'00:17:10', '00:32:56', '00:48:49', '01:04:33', '01:20:20', '01:36:06', '19:53:55', '19:38:18', '19:22:41', '19:07:03',
'18:51:23', '18:35:46', '18:20:06', '18:04:29', '17:48:57', '17:33:20', '17:17:42', '17:02:05', '16:46:27', '16:30:57',
'16:15:18', '15:59:40', '15:44:03', '15:28:24', '15:12:48', '14:57:10', '14:41:33', '14:25:55', '14:10:24', '13:54:46',
'13:39:08', '13:23:38', '13:08:08', '12:52:30', '12:36:50', '12:21:13', '12:05:41', '11:50:03', '11:34:25', '11:18:55',
'11:03:25', '10:47:55', '10:32:18', '10:16:40', '10:01:10', '09:45:32', '09:30:01', '09:14:25', '08:58:53', '08:43:23',
'08:27:47', '08:12:10', '08:12:47', '08:25:10', '06:52:08', '07:09:38', '07:25:16', '07:40:52', '07:56:32', '02:39:17',
'02:55:03', '03:10:49', '03:26:43', '03:42:35', '12:11:04', '12:26:51', '12:42:35', '12:58:13', '13:14:01', '13:45:26',
'14:01:05', '14:16:51', '11:53:27', '13:29:48', '13:46:48']
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..', '..', 'npc-standard-project', 'Raw_Data', 'ms', 'parameters_data'))
with self.subTest(msg='Default Path'):
for series in testSeries:
assert_series_equal(self.msData.sampleMetadata[series], expected[series], check_dtype=False)
with self.subTest(msg='No Exclusion details'):
self.msData.sampleMetadata.drop(columns='Exclusion Details', inplace=True)
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..', '..', 'npc-standard-project', 'Raw_Data', 'ms', 'parameters_data'))
def test_msdataset__getSampleMetadataFromRawData_invalidpath(self):
self.assertRaises(ValueError, self.msData.addSampleInfo, descriptionFormat='Raw Data', filePath='./NOT_A_REAL_PATH')
class test_msdataset_artifactual_filtering(unittest.TestCase):
"""
Test calculation and update of artifactual filtering results
Simulated data design:
14 features, 10 samples
normal threshold | 0.1mz, 0.9corr, 50% ov
advanced threshold | 0.05mz, 0.95 corr, 70% ov
0 and 1, Nothing (corr=0, 10. mz, ov 0%)
1 and 2, corr>0.9, 10. mz, ov 80% | fail default param
3 and 4, corr<0.3, 0.005mz, ov 60% | fail default corr, present in ._temp not in ._arti
5 and 6, corr 0.91, 0.01mz, ov 90% | ok default, advanced fails corr (present in ._temp, not in ._arti) (5 is higher intensity)
7 and 8, corr 0.97, 0.09mz, ov 90% | ok default, then fail mz (7 is higher intensity)
9 and 10, corr 0.97, 0.01mz, ov 55% | ok default, then fail overlap (9 is higher intensity)
11 and 12, corr 0.97, 0.01mz, ov 90% | ok default, then still okay (11 is higher intensity)
13, 14, 15, 16, all ov > 92.5%, close mz, corr | ok default, then still okay (13 is higher intensity)
Test:
1) compare output .artifactualLinkageMatrix, ._artifactualLinkageMatrix, ._tempArtifactualLinkageMatrix
test .artifactualFilter() results with and without a given featureMask
2) ensure that deepcopy resets ._arti, ._temp
3) change deltaMzArtifactual, overlapThresholdArtifactual, corrThresholdArtifactual (changes in .artifactualLinkageMatrix, ._artifactualLinkageMatrix and ._tempArtifactualLinkageMatrix)
4) applyMask: remove samples -> only corr, remove features -> all
"""
def setUp(self):
self.msData = nPYc.MSDataset(
os.path.join('..', '..', 'npc-standard-project', 'Derived_Data', 'UnitTest2_artifactualFiltering.csv'),
fileType='QI')
self.msData.Attributes['featureFilters']['artifactualFilter'] = True
self.msData.Attributes['filterParameters']["deltaMzArtifactual"] = self.msData.Attributes["deltaMzArtifactual"]
self.msData.Attributes['filterParameters']["overlapThresholdArtifactual"] = self.msData.Attributes["overlapThresholdArtifactual"]
self.msData.Attributes['filterParameters']["corrThresholdArtifactual"] = self.msData.Attributes["corrThresholdArtifactual"]
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_artifactualFilter_raise(self):
with self.subTest(msg='Attributes artifactualFilter is False'):
partialMsData = copy.deepcopy(self.msData)
partialMsData.Attributes['featureFilters']['artifactualFilter'] = False
self.assertRaises(ValueError,partialMsData._MSDataset__generateArtifactualLinkageMatrix)
with self.subTest(msg='Missing Feature Name'):
partialMsData1 = copy.deepcopy(self.msData)
partialMsData1.featureMetadata.drop('Feature Name', axis=1, inplace=True)
self.assertRaises(LookupError,partialMsData1._MSDataset__generateArtifactualLinkageMatrix)
with self.subTest(msg='Missing Retention Time'):
partialMsData2 = copy.deepcopy(self.msData)
partialMsData2.featureMetadata.drop('Retention Time', axis=1, inplace=True)
self.assertRaises(LookupError,partialMsData2._MSDataset__generateArtifactualLinkageMatrix)
with self.subTest(msg='Missing m/z'):
partialMsData3 = copy.deepcopy(self.msData)
partialMsData3.featureMetadata.drop('m/z', axis=1, inplace=True)
self.assertRaises(LookupError,partialMsData3._MSDataset__generateArtifactualLinkageMatrix)
with self.subTest(msg='Missing Peak Width'):
partialMsData4 = copy.deepcopy(self.msData)
partialMsData4.featureMetadata.drop('Peak Width', axis=1, inplace=True)
self.assertRaises(LookupError,partialMsData4._MSDataset__generateArtifactualLinkageMatrix)
def test_artifactualFilter(self):
"""
Test artifactualFilter() and corresponding functions and variables.
"""
##
# _artifactualLinkageMatrix, artifactualLinkageMatrix and __generateArtifactualLinkageMatrix()
##
result_artifactualLinkageMatrix = pandas.DataFrame(
[[5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [13, 15], [13, 16], [14, 15], [14, 16], [15, 16]],
index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], columns=['node1', 'node2'])
assert_frame_equal(self.msData.artifactualLinkageMatrix, result_artifactualLinkageMatrix)
assert_frame_equal(self.msData._artifactualLinkageMatrix, result_artifactualLinkageMatrix)
## _tempArtifactualLinkageMatrix (not filtered by correlation)
result_tempArtifactualLinkageMatrix = pandas.DataFrame(
[[3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [13, 15], [13, 16], [14, 15], [14, 16], [15, 16]],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], columns=['node1', 'node2'])
assert_frame_equal(self.msData._tempArtifactualLinkageMatrix, result_tempArtifactualLinkageMatrix)
import pandas as pd
import cx_Oracle
import time as tmm
import os
from datetime import date
import win32com.client
import xdttm as odt
class omdb:
def __init__(self):
self.orc_con_str = "'SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd'"
self.mssq_con_str = "'Driver={SQL Server};SERVER=192.168.88.121;DATABASE=SOC_Roster;UID=sa;PWD=<PASSWORD>456&'"
self.cdir = os.getcwd() + '\\'
self.today = date.today()
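# orc_all_active (below) pulls active alarms (TYPE=1, Severity between 1 and 5) raised
# between seven days ago and tomorrow from the given table, filters the SUMMARY field
# for site-down, mains-failure, DC-low, temperature, cell-down and RRU-theft events,
# and renames what appear to be Netcool/OMNIbus-style columns (EQUIPMENTKEY,
# CUSTOMATTR26, ...) to report-friendly names before returning the result as a dict.
# Note that the connection strings stored in __init__ are kept as plain strings; each
# query method below opens its own cx_Oracle connection with the same literals.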
def orc_all_active(self,tbl,selcol):
conn = cx_Oracle.connect('SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
print(conn.version)
tim1 = tmm.localtime()
dy_p = odt.day_minus(7)
dy_f = odt.day_plus(1)
Q1 = "FROM " + tbl + " WHERE TYPE=1 AND Severity BETWEEN 1 AND 5 "
Q2 = "AND (LASTOCCURRENCE BETWEEN TO_DATE('" + dy_p + "','DD-MM-RRRR') AND TO_DATE('" + dy_f + "','DD-MM-RRRR'))"
QF = "SELECT" + selcol + Q1 + Q2
print(tmm.strftime("%H%M", tim1))
print('----------------')
print(QF)
df = pd.read_sql(QF, con=conn)
print('----------------')
tim2 = tmm.localtime()
print(df.shape[0])
print(tmm.strftime("%H%M", tim2))
df2g = df[df['SUMMARY'].str.contains('2G SITE DOWN')]
df3g = df[df['SUMMARY'].str.contains('3G SITE DOWN')]
df4g = df[df['SUMMARY'].str.contains('4G SITE DOWN')]
dfmf = df[df['SUMMARY'].str.contains('MAIN')]
dfdl = df[df['SUMMARY'].str.contains('DC LOW')]
dftmp = df[df['SUMMARY'].str.contains('TEMP')]
dfcell = df[df['SUMMARY'].str.contains('CELL DOWN')]
dfth = df[df['SUMMARY'].str.contains('ERI-RRU THEFT')]
df_cnct = [df2g, df3g, df4g, dfmf, dfdl, dftmp, dfcell, dfth]
df_all = pd.concat(df_cnct)
df_final = df_all.rename(columns={'EQUIPMENTKEY': 'Resource', 'CUSTOMATTR26': 'AssociatedCR',
'CUSTOMATTR24': 'BCCH',
'OWNERGID': 'Incident Owner',
'EVENTID': 'Frequency',
'TTREQUESTTIME': 'TT Creation Time'})
dic = df_final.to_dict()
conn.close()
return dic
def orc_qry_on_cond(self, tbl, cond, fdt, tdt):
conn = cx_Oracle.connect('SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
print(conn.version)
Q2 = "(LASTOCCURRENCE BETWEEN TO_DATE('" + fdt + "','DD-MM-RRRR') AND TO_DATE('" + tdt + "','DD-MM-RRRR'))"
QF = "SELECT * from " + tbl + " WHERE " + cond + ' AND ' + Q2
print(QF)
tim1 = tmm.localtime()
print(tmm.strftime("%H%M", tim1))
print('----------------')
df = pd.read_sql(QF, con=conn)
tim2 = tmm.localtime()
print(tmm.strftime("%H%M", tim2))
print('----------------')
dic = df.to_dict()
conn.close()
return dic
def orc_qry_all_active(self, tbl, cond):
conn = cx_Oracle.connect('SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
print(conn.version)
qry1 = "Select * from " + tbl + " WHERE " + cond
print(qry1)
tim1 = tmm.localtime()
print(tmm.strftime("%H%M", tim1))
df = pd.read_sql(qry1, con=conn)
import random
from itertools import combinations
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, recall_score, precision_score
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore")
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
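# Hedged usage sketch: demo_encode_onehot is illustrative and not part of the original
# module. It shows that encode_onehot maps a label list to a one-hot matrix with one
# row per sample and one column per distinct class (column order follows set() order).
def demo_encode_onehot():
    demo_labels = ["A", "B", "A", "C"]  # three distinct classes
    onehot = encode_onehot(demo_labels)
    assert onehot.shape == (4, 3)  # 4 samples x 3 classes
    return onehot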
def get_mapping(Path):
nodes = []
with open(Path) as ef:
for line in ef.readlines():
nodes.extend(line.strip().split()[:2])
nodes = sorted(list(set(nodes)))
node_id_mapping = {old_id: new_id for new_id, old_id in enumerate(nodes)}
return node_id_mapping
def get_Graph(Path, node_id_mapping):
edges = []
f = open(Path)
for line in f.readlines():
node1, node2 = line.strip().split()[:2]
edges.append([node_id_mapping[node1], node_id_mapping[node2]])
f.close()
G = nx.Graph(edges)
print(nx.info(G))
print("generate graph done!")
return G
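# Hedged usage sketch: demo_edge_graph and the temporary file name are illustrative and
# not part of the original module. get_mapping assigns contiguous integer ids to the
# node labels found in a whitespace-separated edge list, and get_Graph builds the
# corresponding networkx graph from the same file.
def demo_edge_graph(edge_file="demo_edges.txt"):
    with open(edge_file, "w") as fh:
        fh.write("a b\nb c\nc a\n")
    mapping = get_mapping(edge_file)  # e.g. {'a': 0, 'b': 1, 'c': 2}
    return get_Graph(edge_file, mapping)  # 3-node cycle on ids 0, 1, 2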
def del_node(dataname):
path_original_root = r"../data/{}-original/".format(dataname)
edges = np.genfromtxt(path_original_root + "edges.txt", dtype=np.int32)
def_node = pd.read_csv(path_original_root + "del_node.csv").iloc[:, 1]
idx_list = []
for n in def_node:
idx_tmp = np.where(edges == n)[0]
idx_list.extend(list(idx_tmp))
# or edges[:, 1] in def_node
edges = np.delete(edges, idx_list, axis=0)
np.savetxt(path_original_root + "edges.txt", edges, fmt='%d')
print("done!")
# Generate node degrees to use as node attributes
def get_degrees(G, path=r"D:\zhiqiang\coding\pygcn-master\data\traces-simulated-original\traces.content"):
print("正在生成特征(度)......")
degrees = nx.degree(G)
features = pd.DataFrame(degrees)
features.columns = ['idx', 'degree']
features.to_csv(path, index=False)
# print([G.degree[i] for i in range(num_nodes)])
# return np.array([G.degree[i] for i in range(num_nodes)])
def load_data(path="../data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
print(idx_map)
edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(labels.shape[0], labels.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
adj = sparse_mx_to_torch_sparse_tensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, labels, idx_train, idx_val, idx_test
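# Hedged reference sketch: _normalize_sketch is illustrative only. The normalize() and
# sparse_mx_to_torch_sparse_tensor() helpers called above are presumably defined later
# in this module; the conventional GCN-style normalize() divides each row of a sparse
# matrix by its row sum, as sketched here.
def _normalize_sketch(mx):
    rowsum = np.array(mx.sum(1))  # per-row sums
    r_inv = np.power(rowsum, -1.0).flatten()  # 1 / rowsum
    r_inv[np.isinf(r_inv)] = 0.  # guard rows that sum to zero
    r_mat_inv = sp.diags(r_inv)
    return r_mat_inv.dot(mx)  # D^-1 * mx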
def load_data_original(feature_type, dataname="cora", test_split_rate=0.7):
print('Loading {} dataset...'.format(dataname))
path_original_root = r"../data/{}-original/".format(dataname)
idx_features_labels = pd.read_csv("{}/{}".format(path_original_root, feature_type))
features = sp.csr_matrix(idx_features_labels.iloc[:, 1:], dtype=np.float32)
# build graph
idx = np.array(idx_features_labels.iloc[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}edges.txt".format(path_original_root),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(idx_features_labels.shape[0], idx_features_labels.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
# Generate the train/val/test splits for the original network
length_edge_path_origin = edges_unordered.shape[0]
idx_pos_train = range(int(length_edge_path_origin * 0.1))
idx_pos_val = range(int(length_edge_path_origin * 0.1), int(length_edge_path_origin * 0.2))
idx_pos_test = range(int(length_edge_path_origin * 0.2), length_edge_path_origin)
idx_pos_test_train = range(int(length_edge_path_origin * 0.2),
int(length_edge_path_origin * 0.2) + int(len(idx_pos_test) * test_split_rate))
idx_pos_test_test = range(int(length_edge_path_origin * 0.2) + int(len(idx_pos_test) * test_split_rate),
length_edge_path_origin)
G = nx.Graph(edges.tolist())
neg_sets = sample_neg_sets(G, n_samples=length_edge_path_origin, set_size=2)
neg_sets = np.array(neg_sets)
idx_neg_train = range(int(length_edge_path_origin * 0.1))
idx_neg_val = range(int(length_edge_path_origin * 0.1), int(length_edge_path_origin * 0.2))
idx_neg_test = range(int(length_edge_path_origin * 0.2), int(length_edge_path_origin * 1))
idx_neg_test_train = idx_pos_test_train
idx_neg_test_test = idx_pos_test_test
idx_pos_train = torch.LongTensor(idx_pos_train)
idx_pos_val = torch.LongTensor(idx_pos_val)
idx_pos_test = torch.LongTensor(idx_pos_test)
idx_pos_test_train = torch.LongTensor(idx_pos_test_train)
idx_pos_test_test = torch.LongTensor(idx_pos_test_test)
idx_neg_train = torch.LongTensor(idx_neg_train)
idx_neg_val = torch.LongTensor(idx_neg_val)
idx_neg_test = torch.LongTensor(idx_neg_test)
idx_neg_test_train = torch.LongTensor(idx_neg_test_train)
idx_neg_test_test = torch.LongTensor(idx_neg_test_test)
features = torch.FloatTensor(np.array(features.todense()))
adj = sparse_mx_to_torch_sparse_tensor(adj)
return adj, features, edges, neg_sets, idx_pos_train, idx_pos_val, idx_pos_test, idx_pos_test_train, idx_pos_test_test, idx_neg_train, idx_neg_val, idx_neg_test, idx_neg_test_train, idx_neg_test_test
def load_hon_data(edge_path=r"..\data\traces-simulated-original\edges.txt",
content_path=r"..\data\traces-simulated-original\traces.content"):
"""Load citation network dataset (cora only for now)"""
print('Loading dataset...')
idx_features = pd.read_csv(content_path, dtype=np.dtype(str))
print(idx_features)
# features = sp.csr_matrix(idx_features.iloc[:, 1:-1], dtype=np.float32)
features = sp.csr_matrix(np.expand_dims(idx_features.iloc[:, 1], 1), dtype=np.float32)
# labels = encode_onehot(idx_features_labels[:, -1])
# build graph
    # idx = np.array([i for i in range(len(idx_features))], dtype=np.int32)  # our own higher-order data
idx = idx_features.values[:, 0]
idx_map = {j: i for i, j in enumerate(idx)}
print(idx_map)
# edges_unordered = np.genfromtxt(edge_path, dtype=np.int32)
edges_unordered = []
with open(edge_path) as file:
line = file.readline()
while line:
line = line.split()
edges_unordered.append([line[0], line[1]])
line = file.readline()
edges_unordered = np.array(edges_unordered)
# print(list(map(idx_map.get, edges_unordered.flatten())))
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(idx_features.shape[0], idx_features.shape[0]),
dtype=np.float32)
    # Used when running GCN on the cora data
# idx_features_labels = np.genfromtxt("../data/cora/cora.content", dtype=np.dtype(str))
# features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
# labels = encode_onehot(idx_features_labels[:, -1])
# # build graph
# idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
# idx_map = {j: i for i, j in enumerate(idx)}
# edges_unordered = np.genfromtxt("../data/cora/cora.cites",
# dtype=np.int32)
# edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
# dtype=np.int32).reshape(edges_unordered.shape)
# adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
# shape=(labels.shape[0], labels.shape[0]),
# dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
G = get_Graph(edge_path, idx_map)
pos_edges, neg_edges, idx_pos_train, idx_pos_val, \
idx_pos_test, idx_neg_train, idx_neg_val, idx_neg_test = sample_pos_neg_sets(G)
features = torch.FloatTensor(np.array(features.todense()))
# labels = torch.LongTensor(np.where(labels)[1])
adj = sparse_mx_to_torch_sparse_tensor(adj)
idx_pos_train = torch.LongTensor(idx_pos_train)
idx_pos_val = torch.LongTensor(idx_pos_val)
idx_pos_test = torch.LongTensor(idx_pos_test)
idx_neg_train = torch.LongTensor(idx_neg_train)
idx_neg_val = torch.LongTensor(idx_neg_val)
idx_neg_test = torch.LongTensor(idx_neg_test)
return adj, features, pos_edges, neg_edges, idx_pos_train, idx_pos_val, idx_pos_test, idx_neg_train, idx_neg_val, idx_neg_test
# Build a negative-sample graph and label it
def sample_neg_graph(G, Path_hon_root, Path_original_root, feature_type, n_samples=1000):
'''
:param G:
:param Path_hon_root:
:param Path_original_root:
:param n_samples:
    :return: ids of the corresponding higher-order nodes; each returned list is 2-D,
        and the ids in each row are the ids of the higher-order-network nodes that correspond to the original-network node
'''
neg_sets = []
n_nodes = G.number_of_nodes()
nodes = G.nodes()
nodes = list(nodes)
while len(neg_sets) < n_samples:
candid_set = [int(random.random() * n_nodes) for _ in range(2)]
node1, node2 = nodes[candid_set[0]], nodes[candid_set[1]]
if not G.has_edge(node1, node2):
neg_sets.append([node1, node2])
neg_sets = np.array(neg_sets)
neg_sets = np.unique(neg_sets, axis=0)
neg_sets = neg_sets.tolist()
neg_sets = np.array(neg_sets)
feature = pd.read_csv(Path_hon_root + feature_type)
feature = feature.values.tolist()
id_hon_map = []
for l in feature:
node1 = l[0].split("|")[0]
id_hon_map.append(int(node1))
id_hon_map = np.array(id_hon_map)
node1_id_list = []
node2_id_list = []
c = set()
for i in range(len(neg_sets)):
n1, n2 = neg_sets[i, 0], neg_sets[i, 1]
n1, n2 = int(n1), int(n2)
n1_id = np.where(id_hon_map == n1)[0].tolist()
n2_id = np.where(id_hon_map == n2)[0].tolist()
        if len(n1_id) != 0 and len(n2_id) != 0:
node1_id_list.append(n1_id)
node2_id_list.append(n2_id)
else:
print("映射有问题!")
exit(0)
# else:
# neg_sets[i, 0], neg_sets[i, 1] = tmp_n1, tmp_n2
# node1_id_list.append(node1_id_list[-1])
# node2_id_list.append(node1_id_list[-1])
# if len(n1_id)==0:
# print("n1----", n1)
# c.add(n1)
# # exit(0)
# if len(n2_id)==0:
# print("n2----", n2)
# c.add(n2)
# tmp = pd.DataFrame(c)
# tmp.to_csv('../data/click-stream-10-original/del_node.csv')
# del_node('click-stream-10')
f = open(Path_original_root + "edges_neg.txt", '+w')
for i in neg_sets:
s = [str(ss) for ss in i]
s = ' '.join(s) + '\n'
f.write(s)
f.close()
print("生成 neg graph done!")
print("获取高阶结点中对应的id done!")
node1_id_list = np.array(node1_id_list)
node2_id_list = np.array(node2_id_list)
return node1_id_list, node2_id_list
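# Illustrative usage sketch (not part of the original file); the paths and
# feature_type arguments shown here are assumptions:
#   node1_ids, node2_ids = sample_neg_graph(G, "../data/traces-simulated/",
#                                           "../data/traces-simulated-original/",
#                                           "traces.content", n_samples=1000)
#   # Each row of node1_ids / node2_ids lists the higher-order node ids that map
#   # to one endpoint of a sampled negative (non-existing) edge.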
# edge_path_hon=r"..\data\traces-simulated\edges_label.txt",
# edge_path_origin="../data/traces-simulated-original/edges_label.txt",
# content_path_hon=r"..\data\traces-simulated\traces.content",
def load_hon_data_label(test_split_rate, dataname, feature_type):
"""Load citation network dataset (cora only for now)"""
print('Loading dataset...')
path_hon_root = r"../data/{}/".format(dataname)
path_original_root = r"../data/{}-original/".format(dataname)
edge_path_hon = path_hon_root + "edges_label.txt"
edge_path_origin = path_original_root + "edges_label.txt"
content_path_hon = path_hon_root + "{}".format(feature_type)
    # Build the train/val/test splits for the original network
edge_origin = pd.read_csv(edge_path_origin)
length_edge_path_origin = edge_origin.shape[0]
idx_origin_train = range(int(length_edge_path_origin * 0.1))
idx_origin_val = range(int(length_edge_path_origin * 0.1), int(length_edge_path_origin * 0.2))
idx_origin_test = range(int(length_edge_path_origin * 0.2), int(length_edge_path_origin * 1))
    idx_origin_test_train = range(int(length_edge_path_origin * 0.2), int(length_edge_path_origin * 0.2) + int(len(idx_origin_test) * test_split_rate))
    idx_origin_test_test = range(int(length_edge_path_origin * 0.2) + int(len(idx_origin_test) * test_split_rate), int(length_edge_path_origin * 0.2) + int(len(idx_origin_test)))
origin_train_label = edge_origin.iloc[idx_origin_train]['label'].tolist()
origin_val_label = edge_origin.iloc[idx_origin_val]['label'].tolist()
origin_test_label = edge_origin.iloc[idx_origin_test]['label'].tolist()
idx_origin_test_train_label = edge_origin.iloc[idx_origin_test_train]['label'].tolist()
idx_origin_test_test_label = edge_origin.iloc[idx_origin_test_test]['label'].tolist()
origin_edge_test_test = edge_origin.iloc[idx_origin_test_test]
idx_features = pd.read_csv(content_path_hon, dtype=np.dtype(str))
features = sp.csr_matrix(idx_features.iloc[:, 1:], dtype=np.float32)
# idx_mapping
idx = idx_features['idx']
idx_map = {j: i for i, j in enumerate(idx)}
    # Read the edges of the higher-order network
edge_hon = pd.read_csv(edge_path_hon)
edges_unordered = np.array(edge_hon.iloc[:, 1:3])
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
np.savetxt(path_hon_root + "edges_mapping.txt", edges, delimiter=' ', fmt='%d')
    edges_with_labed = np.concatenate((edges, np.array(edge_hon.iloc[:, 3]).reshape(edges.shape[0], -1)), axis=1)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(idx_features.shape[0], idx_features.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
G = nx.Graph(edges[:, :2].tolist())
edges_with_labed = pd.DataFrame(edges_with_labed, columns=['node1', 'node2', 'label'])
edges_hon, neg_edges, idx_pos_train, idx_pos_val, idx_pos_test, idx_neg_train, idx_neg_val, idx_neg_test,\
id_pos_edge_test_train_hon, id_pos_edge_test_test_hon, idx_neg_test_train, idx_neg_test_test = \
sample_pos_neg_sets_label(G, edges_with_labed, origin_train_label, origin_val_label, origin_test_label,
idx_origin_test_train_label, idx_origin_test_test_label)
features = torch.FloatTensor(np.array(features.todense()))
# labels = torch.LongTensor(np.where(labels)[1])
adj = sparse_mx_to_torch_sparse_tensor(adj)
idx_pos_train = torch.LongTensor(idx_pos_train)
idx_pos_val = torch.LongTensor(idx_pos_val)
idx_pos_test = torch.LongTensor(idx_pos_test)
    id_pos_edge_test_train_hon = torch.LongTensor(id_pos_edge_test_train_hon)
id_pos_edge_test_test_hon = torch.LongTensor(id_pos_edge_test_test_hon)
idx_neg_train = torch.LongTensor(idx_neg_train)
idx_neg_val = torch.LongTensor(idx_neg_val)
idx_neg_test = torch.LongTensor(idx_neg_test)
idx_neg_test_train = torch.LongTensor(idx_neg_test_train)
idx_neg_test_test = torch.LongTensor(idx_neg_test_test)
return adj, features, edges_hon, neg_edges, idx_pos_train, idx_pos_val, idx_pos_test, idx_neg_train, idx_neg_val, idx_neg_test, \
        id_pos_edge_test_train_hon, id_pos_edge_test_test_hon, idx_neg_test_train, idx_neg_test_test, origin_edge_test_test
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
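# Worked example for normalize() (illustrative, not part of the original file):
#   m = sp.csr_matrix(np.array([[1., 1.], [2., 0.]]))
#   normalize(m).toarray()  # -> [[0.5, 0.5], [1.0, 0.0]], each row sums to 1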
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
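# Worked example for accuracy() (illustrative, not part of the original file):
#   out = torch.tensor([[0.2, 0.8], [0.9, 0.1]])  # class scores for 2 nodes
#   accuracy(out, torch.tensor([1, 0]))           # -> 1.0, both argmax predictions match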
def _link_prediction(loop, node1_emb_train_list, node2_emb_train_list, node1_emb_test_list, node2_emb_test_list,
y_test_train, y_test_test, cal_edge_method='Hadamard'):
x_train = np.zeros((len(node1_emb_train_list), node1_emb_train_list.shape[1]))
x_test = np.zeros((len(node1_emb_test_list), node2_emb_test_list.shape[1]))
for i, (node1_emb, node2_emb) in enumerate(zip(node1_emb_train_list, node2_emb_train_list)):
if cal_edge_method == 'Hadamard':
x_train[i] = node1_emb * node2_emb
elif cal_edge_method == 'Average':
            x_train[i] = np.add(node1_emb, node2_emb) * 0.5
for i, (node1_emb, node2_emb) in enumerate(zip(node1_emb_test_list, node2_emb_test_list)):
if cal_edge_method == 'Hadamard':
x_test[i] = node1_emb * node2_emb
elif cal_edge_method == 'Average':
            x_test[i] = np.add(node1_emb, node2_emb) * 0.5
y_train = y_test_train
y_test = y_test_test
clf = LogisticRegression(class_weight='balanced', solver='liblinear', max_iter=5000)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
y_score = clf.predict_proba(x_test)[:, -1]
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_score)
eval_dict = {'auc': metrics.auc(fpr, tpr),
'pr': metrics.average_precision_score(y_test, y_score),
'recall': recall_score(y_test, y_pred, average='macro'),
'precision': precision_score(y_test, y_pred, average='macro'),
'f1': metrics.f1_score(y_test, y_pred),
'f1-micro': metrics.f1_score(y_test, y_pred, average='micro'),
'f1-macro': metrics.f1_score(y_test, y_pred, average='macro')}
if loop % 10 == 0:
print(eval_dict)
return eval_dict
def link_prediction(node1_emb_train_list, node2_emb_train_list, node1_emb_test_list, node2_emb_test_list, y_test_train, y_test_test, cal_edge_method, loop=50):
print("link_prediction by {}".format("LR"))
    eval_dict = {'auc': 0.0, 'pr': 0.0, 'recall': 0.0, 'precision': 0.0, 'f1': 0.0, 'f1-micro': 0.0, 'f1-macro': 0.0}
for i in range(loop):
tmp_dict = _link_prediction(i, node1_emb_train_list, node2_emb_train_list, node1_emb_test_list, node2_emb_test_list,
y_test_train, y_test_test, cal_edge_method)
for key in tmp_dict.keys():
eval_dict[key] += tmp_dict[key]
for key in tmp_dict.keys():
eval_dict[key] = round((1.0 * eval_dict[key]) / loop, 4)
print('average performance')
print(eval_dict)
return eval_dict
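# Illustrative usage sketch (not part of the original file); the embedding
# arrays are assumed to be numpy arrays of shape (n_edges, emb_dim):
#   res = link_prediction(n1_train_emb, n2_train_emb, n1_test_emb, n2_test_emb,
#                         y_train, y_test, cal_edge_method='Hadamard', loop=10)
#   # res averages AUC / PR / recall / precision / F1 over `loop` LR fits.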
def _link_prediction_label(loop, node1_emb_train_list, node2_emb_train_list, node1_emb_test_list, node2_emb_test_list,
y_test_train, y_test_test, edge_test_test_hon_label, edge_test_test_origin_label, cal_edge_method):
ft = np.zeros((len(node1_emb_train_list) + len(node1_emb_test_list), node1_emb_train_list.shape[1]))
node1_emb_list = np.concatenate((node1_emb_train_list, node1_emb_test_list))
node2_emb_list = np.concatenate((node2_emb_train_list, node2_emb_test_list))
for i, (node1_emb, node2_emb) in enumerate(zip(node1_emb_list, node2_emb_list)):
if cal_edge_method == 'Hadamard':
ft[i] = node1_emb * node2_emb
elif cal_edge_method == 'Average':
            ft[i] = np.add(node1_emb, node2_emb) * 0.5
train_size = len(node1_emb_train_list)
x_train = ft[:train_size]
y_train = y_test_train
x_test = ft[train_size:]
y_test = y_test_test
clf = LogisticRegression(class_weight='balanced', solver='liblinear', max_iter=5000)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
y_pred = y_pred.reshape(y_pred.shape[0], -1)
    length_pos_test = len(edge_test_test_hon_label)
    y_pos_pred = y_pred[:length_pos_test]
    y_neg_pred = y_pred[length_pos_test:]
    y_score = clf.predict_proba(x_test)[:, -1]
    y_score = y_score.reshape(y_score.shape[0], -1)
    y_pos_score = y_score[:length_pos_test]
    y_neg_score = y_score[length_pos_test:]
edge_test_test_hon_label = edge_test_test_hon_label.reshape(y_pos_score.shape[0], -1)
y_score_label = np.concatenate((y_pos_score, edge_test_test_hon_label), axis=1)
y_score_label = | pd.DataFrame(y_score_label, columns=['score', 'label']) | pandas.DataFrame |
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from lux.vis.VisList import VisList
from lux.vis.Vis import Vis
from lux.core.frame import LuxDataFrame
from lux.executor.Executor import Executor
from lux.utils import utils
from lux.utils.date_utils import is_datetime_series
from lux.utils.utils import check_import_lux_widget, check_if_id_like
import warnings
import lux
class PandasExecutor(Executor):
"""
Given a Vis objects with complete specifications, fetch and process data using Pandas dataframe operations.
"""
def __init__(self):
self.name = "PandasExecutor"
warnings.formatwarning = lux.warning_format
def __repr__(self):
return f"<PandasExecutor>"
@staticmethod
def execute_sampling(ldf: LuxDataFrame):
# General Sampling for entire dataframe
SAMPLE_FLAG = lux.config.sampling
SAMPLE_START = lux.config.sampling_start
SAMPLE_CAP = lux.config.sampling_cap
SAMPLE_FRAC = 0.75
if SAMPLE_FLAG and len(ldf) > SAMPLE_CAP:
if ldf._sampled is None: # memoize unfiltered sample df
ldf._sampled = ldf.sample(n=SAMPLE_CAP, random_state=1)
ldf._message.add_unique(
f"Large dataframe detected: Lux is only visualizing a random sample capped at {SAMPLE_CAP} rows.",
priority=99,
)
elif SAMPLE_FLAG and len(ldf) > SAMPLE_START:
if ldf._sampled is None: # memoize unfiltered sample df
ldf._sampled = ldf.sample(frac=SAMPLE_FRAC, random_state=1)
ldf._message.add_unique(
f"Large dataframe detected: Lux is only visualizing a random sample of {len(ldf._sampled)} rows.",
priority=99,
)
else:
ldf._sampled = ldf
@staticmethod
def execute(vislist: VisList, ldf: LuxDataFrame):
"""
Given a VisList, fetch the data required to render the vis.
1) Apply filters
2) Retrieve relevant attribute
3) Perform vis-related processing (aggregation, binning)
4) return a DataFrame with relevant results
Parameters
----------
vislist: list[lux.Vis]
vis list that contains lux.Vis objects for visualization.
ldf : lux.core.frame
LuxDataFrame with specified intent.
Returns
-------
None
"""
PandasExecutor.execute_sampling(ldf)
for vis in vislist:
# The vis data starts off being original or sampled dataframe
vis._vis_data = ldf._sampled
filter_executed = PandasExecutor.execute_filter(vis)
# Select relevant data based on attribute information
attributes = set([])
for clause in vis._inferred_intent:
if clause.attribute != "Record":
attributes.add(clause.attribute)
# TODO: Add some type of cap size on Nrows ?
vis._vis_data = vis.data[list(attributes)]
if vis.mark == "bar" or vis.mark == "line":
PandasExecutor.execute_aggregate(vis, isFiltered=filter_executed)
elif vis.mark == "histogram":
PandasExecutor.execute_binning(vis)
elif vis.mark == "scatter":
HBIN_START = 5000
if lux.config.heatmap and len(ldf) > HBIN_START:
vis._postbin = True
ldf._message.add_unique(
f"Large scatterplots detected: Lux is automatically binning scatterplots to heatmaps.",
priority=98,
)
# vis._mark = "heatmap"
# PandasExecutor.execute_2D_binning(vis) # Lazy Evaluation (Early pruning based on interestingness)
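    # Illustrative usage sketch (not part of the original file): Lux calls this
    # executor internally, roughly as
    #   PandasExecutor.execute(vislist, ldf)
    # where `vislist` is a VisList with inferred intents and `ldf` the LuxDataFrame.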
@staticmethod
def execute_aggregate(vis: Vis, isFiltered=True):
"""
Aggregate data points on an axis for bar or line charts
Parameters
----------
vis: lux.Vis
lux.Vis object that represents a visualization
ldf : lux.core.frame
LuxDataFrame with specified intent.
Returns
-------
None
"""
import numpy as np
x_attr = vis.get_attr_by_channel("x")[0]
y_attr = vis.get_attr_by_channel("y")[0]
has_color = False
groupby_attr = ""
measure_attr = ""
if x_attr.aggregation is None or y_attr.aggregation is None:
return
if y_attr.aggregation != "":
groupby_attr = x_attr
measure_attr = y_attr
agg_func = y_attr.aggregation
if x_attr.aggregation != "":
groupby_attr = y_attr
measure_attr = x_attr
agg_func = x_attr.aggregation
if groupby_attr.attribute in vis.data.unique_values.keys():
attr_unique_vals = vis.data.unique_values[groupby_attr.attribute]
# checks if color is specified in the Vis
if len(vis.get_attr_by_channel("color")) == 1:
color_attr = vis.get_attr_by_channel("color")[0]
color_attr_vals = vis.data.unique_values[color_attr.attribute]
color_cardinality = len(color_attr_vals)
# NOTE: might want to have a check somewhere to not use categorical variables with greater than some number of categories as a Color variable----------------
has_color = True
else:
color_cardinality = 1
if measure_attr != "":
if measure_attr.attribute == "Record":
# need to get the index name so that we can rename the index column to "Record"
# if there is no index, default to "index"
index_name = vis.data.index.name
if index_name == None:
index_name = "index"
vis._vis_data = vis.data.reset_index()
# if color is specified, need to group by groupby_attr and color_attr
if has_color:
vis._vis_data = (
vis.data.groupby(
[groupby_attr.attribute, color_attr.attribute], dropna=False, history=False
)
.count()
.reset_index()
.rename(columns={index_name: "Record"})
)
vis._vis_data = vis.data[[groupby_attr.attribute, color_attr.attribute, "Record"]]
else:
vis._vis_data = (
vis.data.groupby(groupby_attr.attribute, dropna=False, history=False)
.count()
.reset_index()
.rename(columns={index_name: "Record"})
)
vis._vis_data = vis.data[[groupby_attr.attribute, "Record"]]
else:
# if color is specified, need to group by groupby_attr and color_attr
if has_color:
groupby_result = vis.data.groupby(
[groupby_attr.attribute, color_attr.attribute], dropna=False, history=False
)
else:
groupby_result = vis.data.groupby(
groupby_attr.attribute, dropna=False, history=False
)
groupby_result = groupby_result.agg(agg_func)
intermediate = groupby_result.reset_index()
vis._vis_data = intermediate.__finalize__(vis.data)
result_vals = list(vis.data[groupby_attr.attribute])
# create existing group by attribute combinations if color is specified
# this is needed to check what combinations of group_by_attr and color_attr values have a non-zero number of elements in them
if has_color:
res_color_combi_vals = []
result_color_vals = list(vis.data[color_attr.attribute])
for i in range(0, len(result_vals)):
res_color_combi_vals.append([result_vals[i], result_color_vals[i]])
# For filtered aggregation that have missing groupby-attribute values, set these aggregated value as 0, since no datapoints
if isFiltered or has_color and attr_unique_vals:
N_unique_vals = len(attr_unique_vals)
if len(result_vals) != N_unique_vals * color_cardinality:
columns = vis.data.columns
if has_color:
df = pd.DataFrame(
{
columns[0]: attr_unique_vals * color_cardinality,
columns[1]: pd.Series(color_attr_vals).repeat(N_unique_vals),
}
)
vis._vis_data = vis.data.merge(
df,
on=[columns[0], columns[1]],
how="right",
suffixes=["", "_right"],
)
for col in columns[2:]:
vis.data[col] = vis.data[col].fillna(0) # Triggers __setitem__
assert len(list(vis.data[groupby_attr.attribute])) == N_unique_vals * len(
color_attr_vals
), f"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute, color_attr.attribute}`."
# Keep only the three relevant columns not the *_right columns resulting from merge
vis._vis_data = vis.data.iloc[:, :3]
else:
df = pd.DataFrame({columns[0]: attr_unique_vals})
vis._vis_data = vis.data.merge(
df, on=columns[0], how="right", suffixes=["", "_right"]
)
for col in columns[1:]:
vis.data[col] = vis.data[col].fillna(0)
assert (
len(list(vis.data[groupby_attr.attribute])) == N_unique_vals
), f"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute}`."
vis._vis_data = vis._vis_data.dropna(subset=[measure_attr.attribute])
try:
vis._vis_data = vis._vis_data.sort_values(by=groupby_attr.attribute, ascending=True)
except TypeError:
warnings.warn(
f"\nLux detects that the attribute '{groupby_attr.attribute}' maybe contain mixed type."
+ f"\nTo visualize this attribute, you may want to convert the '{groupby_attr.attribute}' into a uniform type as follows:"
+ f"\n\tdf['{groupby_attr.attribute}'] = df['{groupby_attr.attribute}'].astype(str)"
)
vis._vis_data[groupby_attr.attribute] = vis._vis_data[groupby_attr.attribute].astype(str)
vis._vis_data = vis._vis_data.sort_values(by=groupby_attr.attribute, ascending=True)
vis._vis_data = vis._vis_data.reset_index()
vis._vis_data = vis._vis_data.drop(columns="index")
@staticmethod
def execute_binning(vis: Vis):
"""
Binning of data points for generating histograms
Parameters
----------
vis: lux.Vis
lux.Vis object that represents a visualization
ldf : lux.core.frame
LuxDataFrame with specified intent.
Returns
-------
None
"""
import numpy as np
bin_attribute = list(filter(lambda x: x.bin_size != 0, vis._inferred_intent))[0]
bin_attr = bin_attribute.attribute
if not np.isnan(vis.data[bin_attr]).all():
# np.histogram breaks if array contain NaN
series = vis.data[bin_attr].dropna()
            # TODO: binning runs for the name attribute. The name attribute has datatype quantitative, which is wrong.
counts, bin_edges = np.histogram(series, bins=bin_attribute.bin_size)
# bin_edges of size N+1, so need to compute bin_center as the bin location
bin_center = np.mean(np.vstack([bin_edges[0:-1], bin_edges[1:]]), axis=0)
# TODO: Should vis.data be a LuxDataFrame or a Pandas DataFrame?
binned_result = np.array([bin_center, counts]).T
vis._vis_data = pd.DataFrame(binned_result, columns=[bin_attr, "Number of Records"])
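            # Worked sketch of the binning above (illustrative, not part of the
            # original file): np.histogram with bins=N returns N counts and N+1 edges,
            #   counts, edges = np.histogram([1, 2, 2, 3], bins=2)  # counts=[1, 3], edges=[1., 2., 3.]
            #   centers = np.mean(np.vstack([edges[:-1], edges[1:]]), axis=0)  # [1.5, 2.5]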
@staticmethod
def execute_filter(vis: Vis):
assert (
vis.data is not None
), "execute_filter assumes input vis.data is populated (if not, populate with LuxDataFrame values)"
filters = utils.get_filter_specs(vis._inferred_intent)
if filters:
# TODO: Need to handle OR logic
for filter in filters:
vis._vis_data = PandasExecutor.apply_filter(
vis.data, filter.attribute, filter.filter_op, filter.value
)
return True
else:
return False
@staticmethod
def apply_filter(df: pd.DataFrame, attribute: str, op: str, val: object) -> pd.DataFrame:
"""
Helper function for applying filter to a dataframe
Parameters
----------
df : pandas.DataFrame
Dataframe to filter on
attribute : str
Filter attribute
op : str
Filter operation, '=', '<', '>', '<=', '>=', '!='
val : object
Filter value
Returns
-------
df: pandas.DataFrame
Dataframe resulting from the filter operation
"""
# Handling NaN filter values
if utils.like_nan(val):
if op != "=" and op != "!=":
warnings.warn("Filter on NaN must be used with equality operations (i.e., `=` or `!=`)")
else:
if op == "=":
return df[df[attribute].isna()]
elif op == "!=":
return df[~df[attribute].isna()]
# Applying filter in regular, non-NaN cases
if op == "=":
return df[df[attribute] == val]
elif op == "<":
return df[df[attribute] < val]
elif op == ">":
return df[df[attribute] > val]
elif op == "<=":
return df[df[attribute] <= val]
elif op == ">=":
return df[df[attribute] >= val]
elif op == "!=":
return df[df[attribute] != val]
return df
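        # Illustrative example (not part of the original file):
        #   PandasExecutor.apply_filter(pd.DataFrame({"a": [1, 2, 3]}), "a", ">", 1)
        #   # keeps the rows where a > 1, i.e. the values 2 and 3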
@staticmethod
def execute_2D_binning(vis: Vis):
pd.reset_option("mode.chained_assignment")
with pd.option_context("mode.chained_assignment", None):
x_attr = vis.get_attr_by_channel("x")[0].attribute
y_attr = vis.get_attr_by_channel("y")[0].attribute
vis._vis_data["xBin"] = pd.cut(vis._vis_data[x_attr], bins=lux.config.heatmap_bin_size)
vis._vis_data["yBin"] = pd.cut(vis._vis_data[y_attr], bins=lux.config.heatmap_bin_size)
color_attr = vis.get_attr_by_channel("color")
if len(color_attr) > 0:
color_attr = color_attr[0]
groups = vis._vis_data.groupby(["xBin", "yBin"], history=False)[color_attr.attribute]
if color_attr.data_type == "nominal":
# Compute mode and count. Mode aggregates each cell by taking the majority vote for the category variable. In cases where there is ties across categories, pick the first item (.iat[0])
result = groups.agg(
[
("count", "count"),
(color_attr.attribute, lambda x: pd.Series.mode(x).iat[0]),
]
).reset_index()
elif color_attr.data_type == "quantitative":
# Compute the average of all values in the bin
result = groups.agg(
[("count", "count"), (color_attr.attribute, "mean")]
).reset_index()
result = result.dropna()
else:
groups = vis._vis_data.groupby(["xBin", "yBin"], history=False)[x_attr]
result = groups.count().reset_index(name=x_attr)
result = result.rename(columns={x_attr: "count"})
result = result[result["count"] != 0]
# convert type to facilitate weighted correlation interestingess calculation
result["xBinStart"] = result["xBin"].apply(lambda x: x.left).astype("float")
result["xBinEnd"] = result["xBin"].apply(lambda x: x.right)
result["yBinStart"] = result["yBin"].apply(lambda x: x.left).astype("float")
result["yBinEnd"] = result["yBin"].apply(lambda x: x.right)
vis._vis_data = result.drop(columns=["xBin", "yBin"])
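            # Illustrative note (not part of the original file): after this step the
            # vis data is a heatmap table with one row per non-empty (xBin, yBin) cell,
            # with columns ["count", "xBinStart", "xBinEnd", "yBinStart", "yBinEnd"]
            # plus the color attribute when one is specified.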
#######################################################
############ Metadata: data type, model #############
#######################################################
def compute_dataset_metadata(self, ldf: LuxDataFrame):
ldf._data_type = {}
self.compute_data_type(ldf)
def compute_data_type(self, ldf: LuxDataFrame):
from pandas.api.types import is_datetime64_any_dtype as is_datetime
for attr in list(ldf.columns):
if attr in ldf._type_override:
ldf._data_type[attr] = ldf._type_override[attr]
else:
temporal_var_list = ["month", "year", "day", "date", "time", "weekday"]
if is_datetime(ldf[attr]):
ldf._data_type[attr] = "temporal"
elif self._is_datetime_string(ldf[attr]):
ldf._data_type[attr] = "temporal"
elif isinstance(attr, pd._libs.tslibs.timestamps.Timestamp):
ldf._data_type[attr] = "temporal"
elif str(attr).lower() in temporal_var_list:
ldf._data_type[attr] = "temporal"
elif self._is_datetime_number(ldf[attr]):
ldf._data_type[attr] = "temporal"
elif pd.api.types.is_float_dtype(ldf.dtypes[attr]):
# int columns gets coerced into floats if contain NaN
convertible2int = pd.api.types.is_integer_dtype(ldf[attr].convert_dtypes())
if (
convertible2int
and ldf.cardinality[attr] != len(ldf)
and ldf.cardinality[attr] < 20
):
ldf._data_type[attr] = "nominal"
else:
ldf._data_type[attr] = "quantitative"
elif pd.api.types.is_integer_dtype(ldf.dtypes[attr]):
# See if integer value is quantitative or nominal by checking if the ratio of cardinality/data size is less than 0.4 and if there are less than 10 unique values
if ldf.pre_aggregated:
if ldf.cardinality[attr] == len(ldf):
ldf._data_type[attr] = "nominal"
if ldf.cardinality[attr] / len(ldf) < 0.4 and ldf.cardinality[attr] < 20:
ldf._data_type[attr] = "nominal"
else:
ldf._data_type[attr] = "quantitative"
if check_if_id_like(ldf, attr):
ldf._data_type[attr] = "id"
# Eliminate this clause because a single NaN value can cause the dtype to be object
elif pd.api.types.is_string_dtype(ldf.dtypes[attr]):
if check_if_id_like(ldf, attr):
ldf._data_type[attr] = "id"
else:
ldf._data_type[attr] = "nominal"
# check if attribute is any type of datetime dtype
elif is_datetime_series(ldf.dtypes[attr]):
ldf._data_type[attr] = "temporal"
else:
ldf._data_type[attr] = "nominal"
if not | pd.api.types.is_integer_dtype(ldf.index) | pandas.api.types.is_integer_dtype |
# -*- coding: utf-8 -*-
"""
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests as possible out of this module once the C parser can accept
further arguments when parsing.
"""
import csv
import sys
import pytest
import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, u
from pandas.errors import ParserError
from pandas import DataFrame, Index
import pandas.util.testing as tm
class PythonParserTests(object):
def test_default_separator(self):
# GH17333
# csv.Sniffer in Python treats 'o' as separator.
text = 'aob\n1o2\n3o4'
expected = DataFrame({'a': [1, 3], 'b': [2, 4]})
result = self.read_csv(StringIO(text), sep=None)
tm.assert_frame_equal(result, expected)
def test_invalid_skipfooter(self):
text = "a\n1\n2"
# see gh-15925 (comment)
msg = "skipfooter must be an integer"
with pytest.raises(ValueError, match=msg):
self.read_csv(StringIO(text), skipfooter="foo")
with pytest.raises(ValueError, match=msg):
self.read_csv(StringIO(text), skipfooter=1.5)
with pytest.raises(ValueError, match=msg):
self.read_csv(StringIO(text), skipfooter=True)
msg = "skipfooter cannot be negative"
with pytest.raises(ValueError, match=msg):
self.read_csv(StringIO(text), skipfooter=-1)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
tm.assert_index_equal(data.index,
Index(['foo', 'bar', 'baz'], name='index'))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = self.read_table(data, sep="::", encoding='cp1255')
expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_single_line(self):
# see gh-6607: sniff separator
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
def test_skipfooter(self):
# see gh-6607
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import numpy as np
import pandas as pd
import sys
import torch
import torch.nn
import torch.autograd as autograd
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook, tqdm
import torch
import torch.nn as nn
from icecream import ic
import warnings
warnings.filterwarnings('ignore')
dtype = torch.FloatTensor
long_dtype = torch.LongTensor
torch.set_default_tensor_type('torch.FloatTensor')
import torch.nn.functional as F
from icecream import ic
import sys
from filenames import cdataset as data_directories
from tqdm import tqdm
NUMCHAN=8
perturb_y_data = False
TOP = 9
def gen_dens(y, bins=int(5.0/0.5*10)):
dens, bins = np.histogram(y[:, 0].ravel(), bins=bins, density=True, range=[4, TOP])
dens[dens == 0.] = np.min(dens[dens > 0.])
plt.clf()
inv_dens = 1.0/dens
inv_dens /= np.average(inv_dens)
bins = bins
return inv_dens, bins
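# Illustrative note (not part of the original file): gen_dens returns inverse-
# density weights (normalized to mean 1) over the histogram bins of y[:, 0],
# which can be used to up-weight rare target values during training, e.g.
#   inv_dens, bin_edges = gen_dens(y)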
def load_data_normalized(debug=False, downsample=10,
dataset='resonant'):
base = './data/summary_features/'
labels = []
mass_ratios = []
time_series = []
# features = []
if dataset == 'resonant':
from filenames import cdataset as data_directories
elif dataset == 'random':
from filenames import cdataset_rand as data_directories
elif dataset == 'combined':
from filenames import cdataset_rand, cdataset
data_directories = cdataset + cdataset_rand
else:
raise NotImplementedError
from icecream import ic
total_num = []
for dataset in tqdm(data_directories):
dataset_base = base + dataset + '/get_extended_tseriesNorbits10000.0Nout1000trio/'
# features_base = base + dataset + '/resparamsv5Norbits10000.0Nout1000window10/'
try:
time_series_tmp = np.load(dataset_base + 'trainingdata.npy', allow_pickle=True)[:, ::downsample]
assert time_series_tmp.shape[1] == 100
labels_tmp = pd.read_csv(dataset_base + 'labels.csv')
mass_ratios_tmp = pd.read_csv(dataset_base + 'massratios.csv')
# features_tmp = pd.read_csv(features_base + 'trainingdata.csv')
except (FileNotFoundError, IndexError):
print('Skipping', dataset)
continue
time_series.append(time_series_tmp)
labels.append(labels_tmp)
mass_ratios.append(mass_ratios_tmp)
# features.append(features_tmp)
total_num.append(len(labels_tmp))
ic(total_num[-1])
if dataset[:4] == 'only':
labels[-1]['instability_time'] = 1e9
labels[-1]['shadow_instability_time'] = 1e9
if debug:
break
time_series = np.concatenate(time_series)
mass_ratios = pd.concat(mass_ratios)
labels = | pd.concat(labels) | pandas.concat |
from urllib.request import urlretrieve
from pathlib import Path
from tqdm import tqdm
import pandas as pd
states = [
"AL",
"AK",
"AZ",
"AR",
"CA",
"CO",
"CT",
"DE",
"FL",
"GA",
"HI",
"ID",
"IL",
"IN",
"IA",
"KS",
"KY",
"LA",
"ME",
"MD",
"MA",
"MI",
"MN",
"MS",
"MO",
"MT",
"NE",
"NV",
"NH",
"NJ",
"NM",
"NY",
"NC",
"ND",
"OH",
"OK",
"OR",
"PA",
"RI",
"SC",
"SD",
"TN",
"TX",
"UT",
"VT",
"VA",
"WA",
"WV",
"WI",
"WY"
]
base_geo_top_five_industries = "https://data.epa.gov/efservice/v_tri_state_industries/st/{}/data_set/INFO/year/2017/CSV"
base_disposal_releases = "https://data.epa.gov/efservice/v_tri_state_onsite_releases/st/{}/data_set/INFO/year/%3E/2004/CSV"
base_top_five_chems = 'https://data.epa.gov/efservice/v_tri_state_chemicals/st/{}/data_set/INFO/year/2017/CSV'
base_facility_list = 'https://data.epa.gov/efservice/MV_TRI_BASIC_DOWNLOAD/st/{}/year/2017/CSV'
base_path = Path("TRI/")
base_path.mkdir(exist_ok=True)
top_five_industries = pd.DataFrame()
disposal_releases = pd.DataFrame()
top_five_chems = pd.DataFrame()
facilities = pd.DataFrame()
for state in tqdm(states):
state_path = (base_path / state)
state_path.mkdir(exist_ok=True)
urlretrieve(base_geo_top_five_industries.format(state), state_path / 'top_five_industries.csv' )
urlretrieve(base_disposal_releases.format(state), state_path / 'disposal.csv' )
urlretrieve(base_top_five_chems.format(state), state_path / 'top_five_chems.csv' )
urlretrieve(base_facility_list.format(state), state_path / 'facilities.csv' )
top_five_industries = top_five_industries.append(pd.read_csv(state_path / 'top_five_industries.csv'))
    disposal_releases = disposal_releases.append(pd.read_csv(state_path / 'disposal.csv'))
top_five_chems = top_five_chems.append( | pd.read_csv(state_path / 'top_five_chems.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 14:29:57 2020
@author: Shane
"""
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import scipy
import scipy.stats
import operator
from operator import truediv
import glob
import statsmodels.stats.api as sms
#import matplotlib for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import seaborn as sns
import math
from math import sqrt
from scipy.spatial import distance
#import os to handle operating system
import os
#=============================================================================
#Goal: Import appended datasets to generate summary plots.
#==============================================================================
#setup the data directory
datadir = "D:\\Goode_Lab\\Projects\\actin_cables\\data\\cable_trajectory_data\\"
#initalize data frame to append all data
df_t0 = pd.DataFrame()
df_t8 = pd.DataFrame()
all_df = pd.DataFrame()
#read in the summary data files to compare t0 to t8 cells
df_t0 = pd.read_csv(datadir + \
"200826_t0_all_cable_extension_rate_analysis_cutoff.csv")
df_t8 = pd.read_csv(datadir + \
"200826_t8_all_cable_extension_rate_analysis_cutoff.csv")
#combine data into a single dataframe for some of the plotting/stats
frames = [df_t0, df_t8]
all_df = pd.concat(frames)
#=============================================================================
#calculate means and std deviation for each time point
df_t0_t_mean = pd.DataFrame()
df_t0_t_mean = df_t0.groupby(['lifetime']).mean().reset_index()
df_t0_t_std = | pd.DataFrame() | pandas.DataFrame |
import argparse
import time
import os
import pandas as pd
from pytrends.request import TrendReq
class GoogleTrends(object):
def __init__(self, DATA_PATH, SAVING_PATH, GEO, TIMEFRAME, PYTREND):
self.data_path = DATA_PATH
self.saving_path = SAVING_PATH
self.geo = GEO
self.timeframe = TIMEFRAME
self.pytrend = PYTREND
def get_ticker_list(self, TICKER_PATH):
ticker = | pd.read_excel(TICKER_PATH) | pandas.read_excel |
import datetime as dt
from functools import partial
from io import BytesIO, StringIO
from fastapi import HTTPException
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather
import pytest
from solarperformanceinsight_api import utils, models
httpfail = partial(
pytest.param, marks=pytest.mark.xfail(strict=True, raises=HTTPException)
)
@pytest.mark.parametrize(
"inp,typ,exp",
(
(
"time,datas\n2020-01-01T00:00Z,8.9",
StringIO,
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:00Z")], "datas": [8.9]}),
),
(
b"time,datas\n2020-01-01T00:00Z,8.9",
BytesIO,
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:00Z")], "datas": [8.9]}),
),
(
b"time,datas\n2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
pd.DataFrame(
{
"time": [
pd.Timestamp("2020-01-01T00:00"),
pd.Timestamp("2020-01-02T00:00"),
],
"datas": [8.9, None],
}
),
),
# not valid later, but rely on dataframe validation to check dtypes
(
b"multi,header\ntime,datas\n2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
pd.DataFrame(
{
"multi": ["time", "2020-01-01T00:00", "2020-01-02T00:00"],
"header": ["datas", "8.9", np.nan],
}
),
),
# no header row
httpfail(
b"2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
None,
),
httpfail(
"",
StringIO,
None,
),
httpfail(
"empty",
StringIO,
None,
),
httpfail(
"notenoughheaders,\na,b",
StringIO,
None,
),
httpfail(
"a,b\n0,1,2\n0,1,3,4,5,6",
StringIO,
None,
),
),
)
def test_read_csv(inp, typ, exp):
out = utils.read_csv(typ(inp))
pd.testing.assert_frame_equal(out, exp)
@pytest.mark.parametrize(
"tbl,exp",
(
(
pa.Table.from_arrays([[1.0, 2, 3], [4.0, 5, 6]], ["a", "b"]),
pd.DataFrame({"a": [1, 2, 3.0], "b": [4, 5, 6.0]}),
),
# complex types to test to_pandas
(
pa.Table.from_arrays(
[pa.array([1.0, 2, 3]), pa.array([[], [5, 6], [7, 8]])], ["a", "b"]
),
pd.DataFrame({"a": [1, 2, 3.0], "b": [[], [5, 6], [7, 8]]}),
),
httpfail(
b"notanarrowfile",
None,
),
),
)
def test_read_arrow(tbl, exp):
if isinstance(tbl, bytes):
tblbytes = BytesIO(tbl)
else:
tblbytes = BytesIO(utils.dump_arrow_bytes(tbl))
out = utils.read_arrow(tblbytes)
pd.testing.assert_frame_equal(out, exp)
@pytest.mark.parametrize(
"inp,exp",
(
("text/csv", utils.read_csv),
("application/vnd.ms-excel", utils.read_csv),
("application/vnd.apache.arrow.file", utils.read_arrow),
("application/octet-stream", utils.read_arrow),
httpfail("application/json", None),
),
)
def test_verify_content_type(inp, exp):
out = utils.verify_content_type(inp)
assert out == exp
@pytest.mark.parametrize(
"inp,cols,exp",
(
(pd.DataFrame({"a": [0, 1], "b": [1, 2]}), ["a", "b"], set()),
(
pd.DataFrame(
{"time": [pd.Timestamp("2020-01-01")], "b": [0.8], "c": ["notnumeric"]}
),
["time", "b"],
{"c"},
),
httpfail(
pd.DataFrame({"time": [pd.Timestamp("2020-01-01")], "b": ["willfail"]}),
["time", "b"],
set(),
),
httpfail(pd.DataFrame({"a": [0, 1], "b": [1, 2]}), ["c"], {"a", "b"}),
httpfail(pd.DataFrame({"time": [0, 1], "b": [1, 2]}), ["time", "b"], set()),
(
pd.DataFrame(
{
"time": [
pd.Timestamp.now(),
pd.Timestamp("2020-01-01T00:00:01.09230"),
],
"b": [1, 2],
}
),
["time", "b"],
set(),
),
httpfail(
pd.DataFrame(
{
"time": [pd.Timestamp("2020-01-01"), | pd.Timestamp("2020-01-01") | pandas.Timestamp |
""" Configuration file and options
A number of globals are defined here to be available everywhere.
"""
import logging
import os
import shutil
import sys
import glob
import json
from collections import OrderedDict
from multiprocessing import Manager
from distutils.util import strtobool
import numpy as np
import pandas as pd
from scipy.signal import gaussian
from configobj import ConfigObj, ConfigObjError
try:
import geopandas as gpd
except ImportError:
pass
try:
import salem
except ImportError:
pass
from oggm.exceptions import InvalidParamsError
# Local logger
log = logging.getLogger(__name__)
# Path to the cache directory
CACHE_DIR = os.path.join(os.path.expanduser('~'), '.oggm')
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
# Path to the config file
CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.oggm_config')
# config was changed, indicates that multiprocessing needs a reset
CONFIG_MODIFIED = False
# Share state accross processes
DL_VERIFIED = Manager().dict()
# Machine epsilon
FLOAT_EPS = np.finfo(float).eps
class DocumentedDict(dict):
"""Quick "magic" to document the BASENAMES entries."""
def __init__(self):
self._doc = dict()
def _set_key(self, key, value, docstr=''):
if key in self:
raise ValueError('Cannot overwrite a key.')
dict.__setitem__(self, key, value)
self._doc[key] = docstr
def __setitem__(self, key, value):
# Overrides the original dic to separate value and documentation
global CONFIG_MODIFIED
try:
self._set_key(key, value[0], docstr=value[1])
CONFIG_MODIFIED = True
except BaseException:
raise ValueError('DocumentedDict accepts only tuple of len 2')
def info_str(self, key):
"""Info string for the documentation."""
return ' {}'.format(self[key]) + '\n' + ' ' + self._doc[key]
def doc_str(self, key):
"""Info string for the documentation."""
return ' {}'.format(self[key]) + '\n' + ' ' + \
self._doc[key]
class ResettingOrderedDict(OrderedDict):
"""OrderedDict wrapper that resets our multiprocessing on set"""
def __setitem__(self, key, value):
global CONFIG_MODIFIED
OrderedDict.__setitem__(self, key, value)
CONFIG_MODIFIED = True
class PathOrderedDict(ResettingOrderedDict):
"""Quick "magic" to be sure that paths are expanded correctly."""
def __setitem__(self, key, value):
# Overrides the original dic to expand the path
try:
value = os.path.expanduser(value)
except AttributeError:
raise InvalidParamsError('The value you are trying to set does '
'not seem to be a valid path: '
'{}'.format(value))
ResettingOrderedDict.__setitem__(self, key, value)
class ParamsLoggingDict(ResettingOrderedDict):
"""Quick "magic" to log the parameter changes by the user."""
do_log = False
def __setitem__(self, key, value):
# Overrides the original dic to log the change
if self.do_log:
self._log_param_change(key, value)
ResettingOrderedDict.__setitem__(self, key, value)
def _log_param_change(self, key, value):
prev = self.get(key)
if prev is None:
if key in ['baseline_y0', 'baseline_y1']:
raise InvalidParamsError('The `baseline_y0` and `baseline_y1` '
'parameters have been removed. '
'You now have to set them explicitly '
'in your call to '
'`process_climate_data`.')
log.warning('WARNING: adding an unknown parameter '
'`{}`:`{}` to PARAMS.'.format(key, value))
return
if prev == value:
return
if key == 'use_multiprocessing':
msg = 'ON' if value else 'OFF'
log.workflow('Multiprocessing switched {} '.format(msg) +
'after user settings.')
return
if key == 'mp_processes':
if value == -1:
import multiprocessing
value = multiprocessing.cpu_count()
log.workflow('Multiprocessing: using all available '
'processors (N={})'.format(value))
else:
log.workflow('Multiprocessing: using the requested number of '
'processors (N={})'.format(value))
return
log.workflow("PARAMS['{}'] changed from `{}` to `{}`.".format(key,
prev,
value))
# Globals
IS_INITIALIZED = False
PARAMS = ParamsLoggingDict()
PATHS = PathOrderedDict()
BASENAMES = DocumentedDict()
LRUHANDLERS = ResettingOrderedDict()
DATA = ResettingOrderedDict()
# Constants
SEC_IN_YEAR = 365*24*3600
SEC_IN_DAY = 24*3600
SEC_IN_HOUR = 3600
SEC_IN_MONTH = 2628000
DAYS_IN_MONTH = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
G = 9.80665 # gravity
GAUSSIAN_KERNEL = dict()
for ks in [5, 7, 9]:
kernel = gaussian(ks, 1)
GAUSSIAN_KERNEL[ks] = kernel / kernel.sum()
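# Illustrative note (not part of the original file): each entry is a unit-sum
# smoothing kernel, e.g. GAUSSIAN_KERNEL[5] holds 5 weights that sum to 1.0.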
_doc = ('A geotiff file containing the DEM (reprojected into the local grid). '
        'This DEM is not smoothed or gap filled, and is the closest to the '
        'original DEM source.')
BASENAMES['dem'] = ('dem.tif', _doc)
_doc = ('A glacier mask geotiff file with the same extent and projection as '
        'the `dem.tif`. This geotiff has value 1 at glaciated grid points and '
        'value 0 at unglaciated points.')
BASENAMES['glacier_mask'] = ('glacier_mask.tif', _doc)
_doc = ('The glacier outlines in the local map projection (Transverse '
'Mercator).')
BASENAMES['outlines'] = ('outlines.shp', _doc)
_doc = ('The glacier intersects in the local map projection (Transverse '
'Mercator).')
BASENAMES['intersects'] = ('intersects.shp', _doc)
_doc = ('Each flowline has a catchment area computed from flow routing '
'algorithms: this shapefile stores the catchment outlines (in the '
'local map projection (Transverse Mercator).')
BASENAMES['flowline_catchments'] = ('flowline_catchments.shp', _doc)
_doc = ('The intersections between cathments (shapefile) in the local map '
'projection (Transverse Mercator).')
BASENAMES['catchments_intersects'] = ('catchments_intersects.shp', _doc)
_doc = 'A ``salem.Grid`` handling the georeferencing of the local grid.'
BASENAMES['glacier_grid'] = ('glacier_grid.json', _doc)
_doc = 'A dictionary containing runtime diagnostics useful for debugging.'
BASENAMES['diagnostics'] = ('diagnostics.json', _doc)
_doc = ('A netcdf file containing several gridded data variables such as '
'topography, the glacier masks, the interpolated 2D glacier bed, '
'and more.')
BASENAMES['gridded_data'] = ('gridded_data.nc', _doc)
_doc = ('A dictionary containing the shapely.Polygons of a glacier. The '
'"polygon_hr" entry contains the geometry transformed to the local '
'grid in (i, j) coordinates, while the "polygon_pix" entry contains '
'the geometries transformed into the coarse grid (the i, j elements '
'are integers). The "polygon_area" entry contains the area of the '
'polygon as computed by Shapely. The "catchment_indices" entry'
'contains a list of len `n_centerlines`, each element containing '
'a numpy array of the indices in the glacier grid which represent '
'the centerlines catchment area.')
BASENAMES['geometries'] = ('geometries.pkl', _doc)
_doc = ('A dictionary containing the downsteam line geometry as well as the '
'bed shape computed from a parabolic fit.')
BASENAMES['downstream_line'] = ('downstream_line.pkl', _doc)
_doc = 'A text file with the source of the topo file (GIMP, SRTM, ...).'
BASENAMES['dem_source'] = ('dem_source.txt', _doc)
_doc = ('A hypsometry file computed by OGGM and provided in the same format '
'as the RGI (useful for diagnostics).')
BASENAMES['hypsometry'] = ('hypsometry.csv', _doc)
_doc = 'A list of :py:class:`oggm.Centerline` instances, sorted by flow order.'
BASENAMES['centerlines'] = ('centerlines.pkl', _doc)
_doc = ('A "better" version of the Centerlines, now on a regular spacing '
'i.e., not on the gridded (i, j) indices. The tails of the '
'tributaries are cut out to make more realistic junctions. '
'They are now "1.5D" i.e., with a width.')
BASENAMES['inversion_flowlines'] = ('inversion_flowlines.pkl', _doc)
_doc = 'The historical monthly climate timeseries stored in a netCDF file.'
BASENAMES['climate_historical'] = ('climate_historical.nc', _doc)
_doc = 'Deprecated: old name for `climate_historical`.'
BASENAMES['climate_monthly'] = ('climate_monthly.nc', _doc)
_doc = ('Some information (dictionary) about the mass '
'balance parameters for this glacier.')
BASENAMES['climate_info'] = ('climate_info.json', _doc)
_doc = 'The monthly GCM climate timeseries stored in a netCDF file.'
BASENAMES['gcm_data'] = ('gcm_data.nc', _doc)
_doc = "A dict containing the glacier's t*, bias, and the flowlines' mu*"
BASENAMES['local_mustar'] = ('local_mustar.json', _doc)
_doc = 'List of dicts containing the data needed for the inversion.'
BASENAMES['inversion_input'] = ('inversion_input.pkl', _doc)
_doc = 'List of dicts containing the output data from the inversion.'
BASENAMES['inversion_output'] = ('inversion_output.pkl', _doc)
_doc = 'Dict of fs and fd as computed by the inversion optimisation.'
BASENAMES['inversion_params'] = ('inversion_params.pkl', _doc)
_doc = 'List of flowlines ready to be run by the model.'
BASENAMES['model_flowlines'] = ('model_flowlines.pkl', _doc)
_doc = ('When using a linear mass-balance for the inversion, this dict stores '
'the optimal ela_h and grad.')
BASENAMES['linear_mb_params'] = ('linear_mb_params.pkl', _doc)
_doc = ('A netcdf file containing enough information to reconstruct the '
'entire flowline glacier along the run (can be data expensive).')
BASENAMES['model_run'] = ('model_run.nc', _doc)
_doc = ('A netcdf file containing the model diagnostics (volume, '
'mass-balance, length...).')
BASENAMES['model_diagnostics'] = ('model_diagnostics.nc', _doc)
_doc = ("A dict containing the glacier's t*, bias, mu*. Analogous "
"to 'local_mustar.json', but for the volume/area scaling model.")
BASENAMES['vascaling_mustar'] = ('vascaling_mustar.json', _doc)
_doc = "A table containing the Huss&Farinotti 2012 squeezed flowlines."
BASENAMES['elevation_band_flowline'] = ('elevation_band_flowline.csv', _doc)
def set_logging_config(logging_level='INFO'):
"""Set the global logger parameters.
Logging levels:
DEBUG
Print detailed information, typically of interest only when diagnosing
problems.
INFO
Print confirmation that things are working as expected, e.g. when
each task is run correctly (this is the default).
WARNING
Indication that something unexpected happened on a glacier,
but that OGGM is still working on this glacier.
WORKFLOW
Print only high level, workflow information (typically, one message
per task). Errors and warnings will still be printed.
ERROR
Print errors only, e.g. when a glacier cannot run properly.
CRITICAL
Print nothing but fatal errors.
Parameters
----------
logging_level : str or None
the logging level. See description above for a list of options. Setting
to `None` is equivalent to `'CRITICAL'`, i.e. no log output will be
generated.
"""
# Add a custom level - just for us
logging.addLevelName(25, 'WORKFLOW')
def workflow(self, message, *args, **kws):
"""Standard log message with a custom level."""
if self.isEnabledFor(25):
# Yes, logger takes its '*args' as 'args'.
self._log(25, message, args, **kws)
logging.WORKFLOW = 25
logging.Logger.workflow = workflow
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Spammers
logging.getLogger("Fiona").setLevel(logging.CRITICAL)
logging.getLogger("fiona").setLevel(logging.CRITICAL)
logging.getLogger("shapely").setLevel(logging.CRITICAL)
logging.getLogger("rasterio").setLevel(logging.CRITICAL)
logging.getLogger("matplotlib").setLevel(logging.CRITICAL)
logging.getLogger("numexpr").setLevel(logging.CRITICAL)
# Basic config
if logging_level is None:
logging_level = 'CRITICAL'
logging_level = logging_level.upper()
logging.basicConfig(format='%(asctime)s: %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=getattr(logging, logging_level))
def initialize_minimal(file=None, logging_level='INFO'):
"""Same as initialise() but without requiring any download of data.
This is useful for "flowline only" OGGM applications
Parameters
----------
file : str
path to the configuration file (default: OGGM params.cfg)
logging_level : str
set a logging level. See :func:`set_logging_config` for options.
"""
global IS_INITIALIZED
global PARAMS
global PATHS
set_logging_config(logging_level=logging_level)
is_default = False
if file is None:
file = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'params.cfg')
is_default = True
try:
cp = ConfigObj(file, file_error=True)
except (ConfigObjError, IOError) as e:
log.critical('Config file could not be parsed (%s): %s', file, e)
sys.exit()
if is_default:
log.workflow('Reading default parameters from the OGGM `params.cfg` '
'configuration file.')
else:
log.workflow('Reading parameters from the user provided '
'configuration file: %s', file)
# Paths
oggm_static_paths()
PATHS['working_dir'] = cp['working_dir']
PATHS['dem_file'] = cp['dem_file']
PATHS['climate_file'] = cp['climate_file']
# Do not spam
PARAMS.do_log = False
# Multiprocessing pool
try:
use_mp = bool(int(os.environ['OGGM_USE_MULTIPROCESSING']))
msg = 'ON' if use_mp else 'OFF'
log.workflow('Multiprocessing switched {} '.format(msg) +
'according to the ENV variable OGGM_USE_MULTIPROCESSING')
except KeyError:
use_mp = cp.as_bool('use_multiprocessing')
msg = 'ON' if use_mp else 'OFF'
log.workflow('Multiprocessing switched {} '.format(msg) +
'according to the parameter file.')
PARAMS['use_multiprocessing'] = use_mp
# Spawn
try:
use_mp_spawn = bool(int(os.environ['OGGM_USE_MP_SPAWN']))
msg = 'ON' if use_mp_spawn else 'OFF'
log.workflow('MP spawn context switched {} '.format(msg) +
'according to the ENV variable OGGM_USE_MP_SPAWN')
except KeyError:
use_mp_spawn = cp.as_bool('use_mp_spawn')
PARAMS['use_mp_spawn'] = use_mp_spawn
# Number of processes
mpp = cp.as_int('mp_processes')
if mpp == -1:
try:
mpp = int(os.environ['SLURM_JOB_CPUS_PER_NODE'])
log.workflow('Multiprocessing: using slurm allocated '
'processors (N={})'.format(mpp))
except KeyError:
import multiprocessing
mpp = multiprocessing.cpu_count()
log.workflow('Multiprocessing: using all available '
'processors (N={})'.format(mpp))
else:
log.workflow('Multiprocessing: using the requested number of '
'processors (N={})'.format(mpp))
PARAMS['mp_processes'] = mpp
# Size of LRU cache
try:
lru_maxsize = int(os.environ['LRU_MAXSIZE'])
log.workflow('Size of LRU cache set to {} '.format(lru_maxsize) +
'according to the ENV variable LRU_MAXSIZE')
except KeyError:
lru_maxsize = cp.as_int('lru_maxsize')
PARAMS['lru_maxsize'] = lru_maxsize
# Some non-trivial params
PARAMS['continue_on_error'] = cp.as_bool('continue_on_error')
PARAMS['grid_dx_method'] = cp['grid_dx_method']
PARAMS['topo_interp'] = cp['topo_interp']
PARAMS['use_intersects'] = cp.as_bool('use_intersects')
PARAMS['use_compression'] = cp.as_bool('use_compression')
PARAMS['border'] = cp.as_int('border')
PARAMS['mpi_recv_buf_size'] = cp.as_int('mpi_recv_buf_size')
PARAMS['use_multiple_flowlines'] = cp.as_bool('use_multiple_flowlines')
PARAMS['filter_min_slope'] = cp.as_bool('filter_min_slope')
PARAMS['auto_skip_task'] = cp.as_bool('auto_skip_task')
PARAMS['correct_for_neg_flux'] = cp.as_bool('correct_for_neg_flux')
PARAMS['filter_for_neg_flux'] = cp.as_bool('filter_for_neg_flux')
PARAMS['run_mb_calibration'] = cp.as_bool('run_mb_calibration')
PARAMS['rgi_version'] = cp['rgi_version']
PARAMS['use_rgi_area'] = cp.as_bool('use_rgi_area')
PARAMS['compress_climate_netcdf'] = cp.as_bool('compress_climate_netcdf')
PARAMS['use_tar_shapefiles'] = cp.as_bool('use_tar_shapefiles')
PARAMS['clip_mu_star'] = cp.as_bool('clip_mu_star')
PARAMS['clip_tidewater_border'] = cp.as_bool('clip_tidewater_border')
PARAMS['dl_verify'] = cp.as_bool('dl_verify')
PARAMS['calving_line_extension'] = cp.as_int('calving_line_extension')
k = 'use_kcalving_for_inversion'
PARAMS[k] = cp.as_bool(k)
PARAMS['use_kcalving_for_run'] = cp.as_bool('use_kcalving_for_run')
PARAMS['calving_use_limiter'] = cp.as_bool('calving_use_limiter')
k = 'error_when_glacier_reaches_boundaries'
PARAMS[k] = cp.as_bool(k)
# Climate
PARAMS['baseline_climate'] = cp['baseline_climate'].strip().upper()
PARAMS['hydro_month_nh'] = cp.as_int('hydro_month_nh')
PARAMS['hydro_month_sh'] = cp.as_int('hydro_month_sh')
PARAMS['climate_qc_months'] = cp.as_int('climate_qc_months')
PARAMS['temp_use_local_gradient'] = cp.as_bool('temp_use_local_gradient')
PARAMS['tstar_search_glacierwide'] = cp.as_bool('tstar_search_glacierwide')
k = 'temp_local_gradient_bounds'
PARAMS[k] = [float(vk) for vk in cp.as_list(k)]
k = 'tstar_search_window'
PARAMS[k] = [int(vk) for vk in cp.as_list(k)]
PARAMS['use_bias_for_run'] = cp.as_bool('use_bias_for_run')
k = 'free_board_marine_terminating'
PARAMS[k] = [float(vk) for vk in cp.as_list(k)]
# Inversion
k = 'use_shape_factor_for_inversion'
PARAMS[k] = cp[k]
# Flowline model
k = 'use_shape_factor_for_fluxbasedmodel'
PARAMS[k] = cp[k]
# Delete non-floats
ltr = ['working_dir', 'dem_file', 'climate_file', 'use_tar_shapefiles',
'grid_dx_method', 'run_mb_calibration', 'compress_climate_netcdf',
'mp_processes', 'use_multiprocessing', 'climate_qc_months',
'temp_use_local_gradient', 'temp_local_gradient_bounds',
'topo_interp', 'use_compression', 'bed_shape', 'continue_on_error',
'use_multiple_flowlines', 'tstar_search_glacierwide', 'border',
'mpi_recv_buf_size', 'hydro_month_nh', 'clip_mu_star',
'tstar_search_window', 'use_bias_for_run', 'hydro_month_sh',
'use_intersects', 'filter_min_slope', 'clip_tidewater_border',
'auto_skip_task', 'correct_for_neg_flux', 'filter_for_neg_flux',
'rgi_version', 'dl_verify', 'use_mp_spawn', 'calving_use_limiter',
'use_shape_factor_for_inversion', 'use_rgi_area',
'use_shape_factor_for_fluxbasedmodel', 'baseline_climate',
'calving_line_extension', 'use_kcalving_for_run', 'lru_maxsize',
'free_board_marine_terminating', 'use_kcalving_for_inversion',
'error_when_glacier_reaches_boundaries']
for k in ltr:
cp.pop(k, None)
# Other params are floats
for k in cp:
PARAMS[k] = cp.as_float(k)
PARAMS.do_log = True
# Empty defaults
set_intersects_db()
IS_INITIALIZED = True
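# Minimal usage sketch (illustrative only; '/tmp/oggm_run' is a hypothetical
# working directory). initialize_minimal() parses params.cfg and fills PARAMS
# and PATHS but skips the data downloads that initialize() below performs.
def _example_initialize_minimal():
    initialize_minimal(logging_level='WARNING')
    PATHS['working_dir'] = '/tmp/oggm_run'  # hypothetical path, replace with your own
    return PARAMS['use_multiprocessing']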
def initialize(file=None, logging_level='INFO'):
"""Read the configuration file containing the run's parameters.
This should be the first call, before using any of the other OGGM modules
for most (all?) OGGM simulations.
Parameters
----------
file : str
path to the configuration file (default: OGGM params.cfg)
logging_level : str
set a logging level. See :func:`set_logging_config` for options.
"""
global PARAMS
global DATA
initialize_minimal(file=file, logging_level=logging_level)
# Do not spam
PARAMS.do_log = False
# Make sure we have a proper cache dir
from oggm.utils import download_oggm_files, get_demo_file
download_oggm_files()
    # Read-in the reference t* data for all available model types (oggm, vas)
model_prefixes = ['oggm_', 'vas_']
for prefix in model_prefixes:
fns = ['ref_tstars_rgi5_cru4', 'ref_tstars_rgi6_cru4',
'ref_tstars_rgi5_histalp', 'ref_tstars_rgi6_histalp']
for fn in fns:
fpath = get_demo_file(prefix + fn + '.csv')
            PARAMS[prefix + fn] = pd.read_csv(fpath)
"""
Code snippet for producing CCN Mind Matching session 2019.
We create affinity matrix of people-people using topic modeling
then solve linear programming problem and apply networkx to solve the schedule problem
the given data includes the following columns
- RegistrantID
- NameFirst, first name of the attendee
- NameLast, last name of the attendee
- Affiliation
- Email
- mindMatchPersons, list of people attendee wants to meet (not used)
- RepresentativeWork
- mindMatchExclude
"""
import itertools
import numpy as np
import pandas as pd
import random
import networkx as nx
from itertools import chain
from fuzzywuzzy import fuzz
from paper_reviewer_matcher import (
preprocess, compute_affinity,
create_lp_matrix, linprog,
create_assignment
)
from docx import Document
def build_line_graph(people):
"""
    Build the line graph of the meeting graph. The edge coloring (Vizing's
    theorem) approach is adapted from the Stack Overflow question below.
    ref: https://stackoverflow.com/questions/51758406/creating-time-schedule-from-list-of-people-and-who-they-have-to-meet
"""
G = nx.Graph()
G.add_edges_from(((p, q) for p, L in people for q in L))
return nx.line_graph(G)
def color_graph(G):
return nx.greedy_color(G)
def format_answer(coloring):
res = {}
N = max(coloring.values()) + 1
for meeting in coloring:
time_slot = coloring[meeting]
for meeting_member in (0, 1):
if meeting[meeting_member] not in res:
res[meeting[meeting_member]] = [None] * N
res[meeting[meeting_member]][time_slot] = meeting[1-meeting_member]
return res
def nest_answer(people, formatted):
return [[p, formatted[p]] for p, v in people]
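# Illustrative sketch (toy input, hypothetical helper name) of how the four
# helpers above chain together. `people` is a list of (person, [people to meet])
# pairs; each meeting is assigned a time slot via greedy edge coloring.
def _example_schedule():
    people = [
        ('alice', ['bob', 'carol']),
        ('bob', ['carol']),
        ('carol', []),
    ]
    line_graph = build_line_graph(people)   # nodes of the line graph are meetings
    coloring = color_graph(line_graph)      # meeting -> time slot
    formatted = format_answer(coloring)     # person -> partner (or None) per slot
    return nest_answer(people, formatted)   # [[person, [partner or None, ...]], ...]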
def split_exclude_string(people):
"""
    Split a given text of person names that an attendee wants to exclude,
    where the names are comma separated, e.g. ``<NAME>``
"""
people = people.replace('Mentor: ', '').replace('Lab-mates: ', '').replace('\r\n', ',').replace(';', ',')
people_list = people.split(',')
    return [p.strip() for p in people_list if p.strip() != '']
def create_coi_dataframe(df, people_maps, threshold=85, coreferred=True):
"""
For a given dataframe of for mind-match people with
``full_name``, ``mindMatchExcludeList`` column, and
a dictionary that map ``full_name`` to person_id,
create conflict of interest dataframe
Parameters
==========
df: dataframe, original mind matching dataset
    people_maps: list, list of dictionaries mapping each person to their person_id, full_name, and affiliation
threshold: int, fuzzy string match ratio for matching name in ``mindMatchExcludeList`` and ``full_name``
coreferred: bool, if True, add extra conflict of interest for people who mentioned the same person
Output
======
coi_df: dataframe, conflict of interest
"""
coi_list = []
for i, r in df.iterrows():
if len(r['mindMatchExcludeList']) > 0:
exclude_list = []
for exclude in r['mindMatchExcludeList']:
exclude_list.extend([
p['person_id'] for p in people_maps if
exclude in p['full_name'] or
fuzz.ratio(p['full_name'], exclude) >= threshold or
fuzz.ratio(p['affiliation'], exclude) >= threshold
])
exclude_list = sorted(pd.unique(exclude_list))
if len(exclude_list) > 0:
for e in exclude_list:
coi_list.append([i, e])
coi_df = pd.DataFrame(coi_list, columns=['person_id', 'person_id_exclude'])
    # add extra co-referred COI for people who refer to the same person
if coreferred:
coi_coreferred = [[g, list(g_df.person_id)] for g, g_df in coi_df.groupby(['person_id_exclude'])
if len(list(g_df.person_id)) >= 2]
coi_coreferred_list = []
for _, exclude_list in coi_coreferred:
coi_coreferred_list.extend(list(itertools.combinations(exclude_list, 2)))
coi_coreferred_df = pd.DataFrame(coi_coreferred_list, columns=['person_id', 'person_id_exclude'])
        coi_df = pd.concat((coi_df, coi_coreferred_df))
    return coi_df
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = [[6.1101, 12.5920], [5.5277, 9.1302], [8.5186, 17.6620], [6.5186, 11.6620], [7.5186, 15.6620]]
df = pd.DataFrame(data, columns=['X', 'Y'])
from unittest import TestCase
from unittest.mock import patch
import pandas as pd
from fireant import DataSet, DataType, Field
from fireant.tests.dataset.mocks import mock_dataset, politicians_table, test_database
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
class DimensionsLatestQueryBuilderTests(TestCase):
maxDiff = None
def test_query_single_dimension(self):
query = mock_dataset.latest(mock_dataset.fields.timestamp).sql[0]
self.assertEqual('SELECT ' 'MAX("timestamp") "$timestamp" ' 'FROM "politics"."politician"', str(query))
def test_query_single_dimension_with_join(self):
query = mock_dataset.latest(mock_dataset.fields.join_timestamp).sql[0]
self.assertEqual(
'SELECT '
'MAX("voter"."timestamp") "$join_timestamp" '
'FROM "politics"."politician" '
'JOIN "politics"."voter" ON "politician"."id"="voter"."politician_id"',
str(query),
)
def test_query_multiple_dimensions(self):
query = mock_dataset.latest(mock_dataset.fields.timestamp, mock_dataset.fields.timestamp2).sql[0]
self.assertEqual(
'SELECT '
'MAX("timestamp") "$timestamp",'
'MAX("timestamp2") "$timestamp2" '
'FROM "politics"."politician"',
str(query),
)
@patch('fireant.queries.builder.dimension_latest_query_builder.fetch_data')
def test_envelopes_responses_if_return_additional_metadata_True(self, mock_fetch_data):
dataset = DataSet(
table=politicians_table,
database=test_database,
return_additional_metadata=True,
fields=[
Field(
"timestamp1",
label="timestamp1",
definition=politicians_table.timestamp1,
data_type=DataType.text,
hyperlink_template="http://example.com/{political_party}",
)
],
)
df = pd.DataFrame({'political_party': ['a', 'b', 'c']}).set_index('political_party')
mock_fetch_data.return_value = 100, df
result = dataset.latest(dataset.fields.timestamp1).fetch()
self.assertEqual(dict(max_rows_returned=100), result['metadata'])
        self.assertTrue(result['data'].equals(pd.Series(['a'], index=['political_party'])))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 11:39:33 2020
@author: cristian
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import os
import itertools
from gurobipy import *
from matplotlib import cm
from time import time
from scripts.utils import save_solution
#from shapely.geometry import Point, shape
#from numpy.random import uniform
#from collections import Counter
"""
DistanceBetweenNodes: Given a set of coordinates XY, computes the euclidean distance between all nodes
"""
def DistanceBetweenNodes(XY):
n = XY.shape[0]
# Distance between points
r = np.zeros((n, n))
for i in range(n):
for j in range(i,n):
form = np.around(np.sqrt((XY[i,0] - XY[j,0])**2 + (XY[i,1] - XY[j,1])**2))
r[i,j] = form
r[j,i] = form
return r
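# Minimal usage sketch (toy coordinates): a 3-node instance gives a symmetric
# matrix of rounded Euclidean distances, e.g. r[0, 1] == 5.0 for the 3-4-5 triangle.
def _example_distance_matrix():
    XY_toy = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
    return DistanceBetweenNodes(XY_toy)
    # -> [[ 0.,  5., 10.],
    #     [ 5.,  0.,  5.],
    #     [10.,  5.,  0.]]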
"""
ReadData: function for getting data from a .xlsx file
Returns a dictionary with all the data extracted from the .xlsx file
"""
def ReadData(datadir, file):
# Nodes
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Nodes')
XY = df[['X','Y']].values
F = df[df['Type'] == 'F'].index.tolist()
D = df[df['Type'] == 'D'].index.tolist()
S = df[df['Type'] == 'S'].index.tolist()
C = df[df['Type'] == 'C'].index.tolist()
LEZ = dict(zip(df.index.tolist(),df['LEZ'].tolist()))
city = dict(zip(df.index.tolist(),df['City'].tolist()))
# Products
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Products')
P = df['Product'].tolist()
nu = df['Volume'].tolist()
omega = df['Weight'].tolist()
omegaeff = df['Weight eff'].tolist()
P_f = {}
for f in F:
P_f[f] = df[df['Firm'] == f]['Product'].tolist()
# Demands
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Demands')
DEM = {}
for c in C:
DEM[c] = df[df['Customer'] == c]['Demand'].tolist()
# Depots cap.
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Depots cap.')
Lambd = {}
Omega = {}
epsil = {}
for i in range(df.shape[0]):
d = int(df['Depot'].iloc[i])
Lambd[d] = df['Lambd'].iloc[i]
Omega[d] = df['Omega'].iloc[i]
epsil[d] = df['epsil'].iloc[i]
# Vehicles
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Vehicles')
K = df['Vehicle'].tolist()
V_i = {}
Phi = {}
Theta = {}
rho = {}
delta = {}
gamma = {}
vehictype = {}
DcupS = D+S
for d in DcupS:
V_i[d] = df[df['Depot'] == d]['Vehicle'].tolist()
for k in V_i[d]:
Phi[k] = df[df['Vehicle'] == k]['Phi'].sum()
Theta[k] = df[df['Vehicle'] == k]['Theta'].sum()
rho[k] = df[df['Vehicle'] == k]['rho'].sum()
delta[k] = df[df['Vehicle'] == k]['delta'].sum()
gamma[k] = df[df['Vehicle'] == k]['gamma'].sum()
vehictype[k] = df[df['Vehicle'] == k]['VehicleType'].iloc[0]
r = DistanceBetweenNodes(XY)
"""
DATA DICTIONARY
"""
data = {}
data['XY'] = XY
data['F'] = F
data['D'] = D
data['S'] = S
data['C'] = C
data['P'] = P
data['P_f'] = P_f
data['K'] = K
data['V_i'] = V_i
data['DEM'] = DEM
data['Lambd'] = Lambd
data['Omega'] = Omega
data['Phi'] = Phi
data['Theta'] = Theta
data['nu'] = nu
data['omega'] = omega
data['omegaeff'] = omegaeff
data['rho'] = rho
data['delta'] = delta
data['gamma'] = gamma
data['epsil'] = epsil
data['r'] = r
data['LEZ'] = LEZ
data['vehictype'] = vehictype
data['city'] = city
A = np.ones((len(F+D+S+C), len(K)), dtype=int)
for s in S:
for k in V_i[s]:
for s1 in S:
if s1 != s:
# Bikes aren't shared between satellites
A[s1,k] = 0
for n in F+D+C:
# Bikes only visit nodes from the same city
if vehictype[k] == 'bike' and city[s] != city[n]:
A[n,k] = 0
# Non eco vehicles aren't allowed in LEZ points
if vehictype[k] != 'bike' and LEZ[n] > 0:
A[n,k] = 0
for d in D:
for k in V_i[d]:
for d1 in D:
if d1 != d:
                    # Vehicles aren't shared between delivery depots
A[d1,k] = 0
for n in F+S+C:
# Non eco vehicles aren't allowed in LEZ points
if vehictype[k] != 'bike' and LEZ[n] > 0:
A[n,k] = 0
data['A'] = A
return data
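# Minimal usage sketch (hypothetical folder/file names; the expected sheet layout
# is the one read by ReadData above: 'Nodes', 'Products', 'Demands', 'Depots cap.'
# and 'Vehicles').
def _example_read_data():
    data = ReadData('data', 'instance.xlsx')   # hypothetical paths
    # data['r'][i, j] is the rounded distance between nodes i and j, and
    # data['A'][n, k] == 1 when vehicle k is allowed to visit node n.
    return data['F'], data['D'], data['S'], data['C']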
"""
MATH HEURISTIC FUNCTIONS
Here are all the steps for solving the multi-echelon multi-vehicle problem
"""
def GreedyRoutingForServingCost(d0, W0, NodesToVisit, WeightNodes, gamma_k, rho_k, r):
# This function estimates the serving cost via greedy routing
VisitedNodes = [d0]
PendingNodes = [n for n in NodesToVisit]
TotalCost = 0
CumulatedWeight = W0
# Initial case: select the first node to visit
i = d0
ArcCost = np.inf
for j in PendingNodes:
CurrentCost = r[i,j]*(gamma_k*CumulatedWeight + rho_k)
if CurrentCost < ArcCost:
ArcCost = CurrentCost
j_ = j
TotalCost = TotalCost + ArcCost
VisitedNodes.append(j_)
CumulatedWeight = CumulatedWeight + WeightNodes[j_]
PendingNodes = [n for n in PendingNodes if n not in VisitedNodes]
i = j_
# rest of the cases
while PendingNodes:
i = j_
ArcCost = np.inf
for j in PendingNodes:
CurrentCost = r[i,j]*(gamma_k*CumulatedWeight + rho_k)
if CurrentCost < ArcCost:
ArcCost = CurrentCost
j_ = j
TotalCost = TotalCost + ArcCost
VisitedNodes.append(j_)
CumulatedWeight = CumulatedWeight + WeightNodes[j_]
PendingNodes = [n for n in PendingNodes if n not in VisitedNodes]
# return a tuple with the last node visited and the total cost
return j_, TotalCost
def GetMinimalLoadCostF1(r, i, gamma, Weight_cf, rho, FminFw, D, V_i):
loadcostf1 = 0
# for f in FminFw:
# gamma_kf = max([gamma[v] for v in V_i[f]])
# rho_kf = max([rho[v] for v in V_i[f]])
# cost_f = r[f,i]*(gamma_kf*Weight_cf[f] + rho_kf)
# cost_d = np.inf
# for d in D:
# gamma_kd = max([gamma[v] for v in V_i[d]])
# rho_kd = max([rho[v] for v in V_i[d]])
# cost_d_ = r[f,d]*(gamma_kf*Weight_cf[f] + rho_kf) + r[d,i]*(gamma_kd*Weight_cf[f] + rho_kd)
# if cost_d_ < cost_d:
# cost_d = cost_d_
# loadcostf1 = loadcostf1 + min(cost_f, cost_d)
return loadcostf1
def GetBestDeliveringCost(r, i, gamma, Weight_cf, rho, FirmsToVisit, D, V_i):
cost_d = np.inf
for d in D:
gamma_kd = max([gamma[v] for v in V_i[d]])
rho_kd = max([rho[v] for v in V_i[d]])
f0, cost_d_ = GreedyRoutingForServingCost(d, 0, FirmsToVisit, Weight_cf, gamma_kd, rho_kd, r)
cost_d_ = cost_d_ + r[f0,i]*(sum([gamma_kd*Weight_cf[f] for f in FirmsToVisit]) + rho_kd)
if cost_d_ < cost_d:
cost_d = cost_d_
return cost_d
def Inter(list1, list2):
return [i for i in list1 if i in list2]
def GetFeasibleCombinationsForVehicles(minlen, nodes, Theta, Phi, WeightClient, VolClient, banned):
result = []
for i in range(len(nodes), minlen, -1):
for seq in itertools.combinations(nodes, i):
if sum([WeightClient[c] for c in seq]) <= Theta and sum([VolClient[c] for c in seq]) <= Phi:
result.append(list(seq))
return [r for r in result if not Inter(r,banned)]
def GetBestListOfNodes(result, VolClient, WeightClient, banned):
prod = 0
bestlist = []
for l in result:
if l not in banned:
vol_l = sum([VolClient[c] for c in l])
weight_l = sum([WeightClient[c] for c in l])
if vol_l*weight_l > prod:
prod = vol_l*weight_l
bestlist = l
return bestlist
def GetRoutingList(k,d0,N,w_final):
routing = []
test = int(sum([w_final[i,j,k] for i in N for j in N]))
if test > 2:
routing_list = [d0]
j = int(sum([w_final[d0,l,k]*l for l in N]))
routing.append((d0,j))
routing_list.append(j)
while j != d0:
i = j
j = int(sum([w_final[i,l,k]*l for l in N]))
routing_list.append(j)
routing.append((i,j))
elif test == 2:
j = int(sum([w_final[d0,l,k]*l for l in N]))
routing = [(d0,j), (j,d0)]
routing_list = [d0, j]
else:
routing = []
routing_list = []
##print('empty route')
return routing, routing_list
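# Illustrative sketch (toy arc-selection values): reconstructing the route of
# vehicle k=0 that leaves depot 0, visits node 2, then node 1, and returns.
def _example_routing_list():
    N = [0, 1, 2]
    w_toy = {(i, j, 0): 0 for i in N for j in N}
    w_toy[0, 2, 0] = 1
    w_toy[2, 1, 0] = 1
    w_toy[1, 0, 0] = 1
    return GetRoutingList(0, 0, N, w_toy)
    # -> ([(0, 2), (2, 1), (1, 0)], [0, 2, 1, 0])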
def CreateSetE(DEM, F, C, P_f):
DictFirmCl = {}
for c in C:
listdem = []
for f in F:
listdem.append(min(sum([DEM[c][p] for p in P_f[f]]),1))
DictFirmCl[c] = listdem
listdem = []
for key in DictFirmCl.keys():
if DictFirmCl[key] not in listdem:
listdem.append(DictFirmCl[key])
DemVecCl = {}
for l in range(len(listdem)):
dem = listdem[l]
DemVecCl[l] = [c for c in DictFirmCl.keys() if DictFirmCl[c] == dem]
E_c = {}
for key in DemVecCl.keys():
l = DemVecCl[key]
if len(l) % 2 != 0:
l = l[:-1]
for c in l:
E_c[c] = [e for e in l if e != c]
return E_c
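# Illustrative sketch (toy demands): customers 10 and 11 order from the same set
# of firms, so each lists the other as a potential swap partner; customer 12 has
# a unique ordering pattern and therefore gets no entry.
def _example_set_e():
    F_toy = [0, 1]
    P_f_toy = {0: [0], 1: [1]}
    DEM_toy = {10: [2, 0], 11: [5, 0], 12: [1, 3]}
    return CreateSetE(DEM_toy, F_toy, [10, 11, 12], P_f_toy)
    # -> {10: [11], 11: [10]}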
def ConstructorForRouting(dictclass, d0, k, m_opt, x_opt, data):
# Unpack data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
N = F+D+S+C
N_ = []
DEM_ = {}
Q0 = []
if dictclass[d0] != 'D':
for p in P:
valid_keys = [(p,j,k) for j in N if (p,j,k) in m_opt.keys()]
q0_ = int(sum([m_opt[t] for t in valid_keys]))
Q0.append(q0_)
else:
for p in P:
valid_keys = [(p,j,k) for j in N if (p,j,k) in m_opt.keys()]
valid_keys_f = [(p,f,k) for f in F if (p,f,k) in x_opt.keys()]
q0_ = int(sum([m_opt[t] for t in valid_keys]) - sum([x_opt[t] for t in valid_keys_f]))
Q0.append(q0_)
# if dictclass[d0] == 'F':
# Q0 = [int(sum([m_opt[p,j,k] for j in N])) for p in P]
# else:
# Q0 = [int(sum([(m_opt[p,j,k])for j in N]) - sum([(x_opt[p,f,k])for f in F])) for p in P]
Q0 = [max(q,0) for q in Q0]
N_.append(d0)
Nmin_i = [n for n in N if n != d0]
DEM_[d0] = [int(m_opt.get((p,d0,k), 0)) for p in P]
for j in Nmin_i:
if dictclass[j] != 'F':
tot = sum([m_opt.get((p,j,k), 0) for p in P])
else:
tot = sum([x_opt.get((p,j,k),0) for p in P])
if tot > 0:
N_.append(j)
if dictclass[j] != 'F':
DEM_[j] = [int(m_opt.get((p,j,k),0)) for p in P]
else:
DEM_[j] = [-int(x_opt.get((p,j,k),0)) for p in P]
F_ = [f for f in F if f in N_]
D_ = [d for d in D if d in N_]
S_ = [s for s in S if s in N_]
C_ = [c for c in C if c in N_]
data_routing = {'Q0' : Q0,
'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'omegaeff': omegaeff,
'd0' : d0,
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
return data_routing
def FlowRouting(data_routing):
# Unpacking data
F = data_routing['F']
D = data_routing['D']
S = data_routing['S']
C = data_routing['C']
P = data_routing['P']
Phi = data_routing['Phi']
Theta = data_routing['Theta']
nu = data_routing['nu']
omega = data_routing['omega']
omegaeff = data_routing['omegaeff']
gamma = data_routing['gamma']
DEM = data_routing['DEM']
d0 = data_routing['d0']
Q0 = data_routing['Q0']
rho = data_routing['rho']
r = data_routing['r']
# Auxiliary sets
N = F+D+S+C
NminC = F+D+S
NminF = D+S+C
Nmind0 = [n for n in N if n != d0]
Nmind0_i = {}
ScupCmind0 = [i for i in S+C if i != d0]
for i in Nmind0:
Nmind0_i[i] = [j for j in Nmind0 if j != i]
# Consolidation of weight and volume
Weight = {}
Volume = {}
WeightE = {}
for i in N:
try:
Weight[i] = sum([DEM[i][p]*omega[p] for p in P])
except:
Weight[i] = 0
try:
WeightE[i] = sum([DEM[i][p]*omegaeff[p] for p in P])
except:
WeightE[i] = 0
try:
Volume[i] = sum([DEM[i][p]*nu[p] for p in P])
except:
Volume[i] = 0
#print(Q0)
W0 = sum([Q0[p]*omega[p] for p in P])
W0e = sum([Q0[p]*omegaeff[p] for p in P])
##print('W0 = ', W0)
V0 = sum([Q0[p]*nu[p] for p in P])
#print('V0 = ', V0, " Vol cap = ", Phi)
#print('W0 = ', W0, " Weight cap = ", Theta)
#print('W0 effective = ', W0e)
# #print('N = ', N)
# #print('Nmind0 = ', Nmind0)
# Model start
model = Model()
model.setParam('OutputFlag', 0)
# Decision variables
q = {}
for i in N:
for j in N:
q[i,j] = model.addVar(vtype = GRB.CONTINUOUS, name = 'q[%s,%s]' % (i,j))
qe = {}
for i in N:
for j in N:
qe[i,j] = model.addVar(vtype = GRB.CONTINUOUS, name = 'q[%s,%s]' % (i,j))
v = {}
for i in N:
for j in N:
v[i,j] = model.addVar(vtype = GRB.CONTINUOUS, name = 'v[%s,%s]' % (i,j))
w = {}
for i in N:
for j in N:
w[i,j] = model.addVar(vtype = GRB.BINARY, name = 'w[%s,%s]' % (i,j))
e = {}
for i in Nmind0:
e[i] = model.addVar(vtype = GRB.INTEGER, name = 'e[%s]' % i)
# Aux
fc = model.addVar(vtype = GRB.CONTINUOUS, name = 'fc')
ac = model.addVar(vtype = GRB.CONTINUOUS, name = 'ac')
# Constraints
# Flow
model.addConstrs(
quicksum(q[i,j] for i in N) - quicksum(q[j,l] for l in N) == Weight[j] for j in Nmind0
)
model.addConstrs(
quicksum(qe[i,j] for i in N) - quicksum(qe[j,l] for l in N) == WeightE[j] for j in Nmind0
)
model.addConstrs(
quicksum(v[i,j] for i in N) - quicksum(v[j,l] for l in N) == Volume[j] for j in Nmind0
)
model.addConstrs(
quicksum(w[i,j] for i in N) == quicksum(w[j,l] for l in N) for j in N
)
model.addConstrs(
q[i,j] <= Theta*w[i,j] for i in N for j in N
)
model.addConstrs(
qe[i,j] <= 2*(Theta + Phi)*w[i,j] for i in N for j in N
)
model.addConstrs(
v[i,j] <= Phi*w[i,j] for i in N for j in N
)
# Out
model.addConstr(
quicksum(q[d0,j] for j in N) == W0
)
model.addConstr(
quicksum(qe[d0,j] for j in N) == W0e
)
model.addConstr(
quicksum(v[d0,j] for j in N) == V0
)
# Back to depot OK with this one
model.addConstr(
quicksum(q[i,d0] for i in N) == Weight[d0]
)
model.addConstr(
quicksum(qe[i,d0] for i in N) == WeightE[d0]
)
model.addConstr(
quicksum(v[i,d0] for i in N) == Volume[d0]
)
# Node visiting OK with this one
model.addConstrs(
quicksum(w[i,j] for i in N) == 1 for j in N
)
# Node leaving OK with this one
model.addConstrs(
quicksum(w[i,j] for j in N) == 1 for i in N
)
# TMZ
model.addConstrs(
e[i] - e[j] + len(N)*w[i,j] <= len(N) - 1 for i in Nmind0 for j in Nmind0
)
model.addConstrs(
e[i] >= 0 for i in Nmind0
)
Fmind0 = [f for f in F if f != d0]
model.addConstrs(
e[i] >= e[f] for i in ScupCmind0 for f in Fmind0
)
# Logic
model.addConstrs(
q[i,i] == 0 for i in N
)
# Logic
model.addConstrs(
qe[i,i] == 0 for i in N
)
model.addConstrs(
v[i,i] == 0 for i in N
)
model.addConstrs(
w[i,i] == 0 for i in N
)
# Capacity and arc utilization
model.addConstr(
fc == quicksum(quicksum(qe[i,j]*gamma*r[i,j] for i in N) for j in N)
)
model.addConstr(
ac == quicksum(quicksum(w[i,j]*r[i,j]*rho for i in N) for j in N)
)
model.update()
model.__data = qe, q, w, v
model.setObjective(fc + ac,
GRB.MINIMIZE)
model.update()
return model
"""STEP 4: Vehicle routing"""
def MultiEchelonRouting(data, x_opt, y_opt, m_opt, z_opt, u_opt):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
# Auxiliary sets
N = F+D+S+C
NminC = F+D+S
# for p in P:
# for c in C:
# for k in K:
# m_opt[p,c,k] = DEM[c][p]*z_opt[k,c]
model = Model()
dictclass = {}
for f in F:
dictclass[f] = 'F'
for d in D:
dictclass[d] = 'D'
for s in S:
dictclass[s] = 'S'
for c in C:
dictclass[c] = 'C'
# DEFINITIVE DECISION VARIABLES
q_final, qe_final, w_final, v_final = {}, {} ,{}, {}
for i in N:
for j in N:
for k in K:
q_final[i,j,k] = 0
qe_final[i,j,k] = 0
w_final[i,j,k] = 0
v_final[i,j,k] = 0
# Auxiliary dictionary for routes
DictRoutes = {}
DictRoutesList = {}
# Auxiliary dictionary for subsets for each vehicle
DictNodes = {}
# Auxiliary dictionaries for remaining capacities
Phi_, Theta_ = {}, {}
Q0_, DEMS_ = {}, {}
for k in K:
DictRoutes[k] = []
DictRoutesList[k] = []
DictNodes[k] = {'F' : [], 'D' : [], 'S': [], 'C': []}
Phi_[k] = Phi[k]
Theta_[k] = Theta[k]
# for d0 in NminC:
#print('y_opt = ', y_opt)
for d0 in D+S:
##print('Node: %s, Vehicles = %s' % (d0,V_i[d0]))
for k in V_i[d0]:
data_routing = ConstructorForRouting(dictclass, d0, k, m_opt, x_opt, data)
if y_opt.get(k,0) > 0 and len(data_routing['N']) > 1:
#print('data for routing vehicle %s' % k)
#print(data_routing)
model_rou = FlowRouting(data_routing)
model_rou.optimize()
F_ = data_routing['F']
D_ = data_routing['D']
S_ = data_routing['S']
C_ = data_routing['C']
N_ = F_ + D_ + S_ + C_
Q0 = data_routing['Q0']
DEM_ = data_routing['DEM']
try:
                    # model_rou.printAttr('X')
qe, q, w, v = model_rou.__data
q_rou = model_rou.getAttr('x', q)
qe_rou = model_rou.getAttr('x', qe)
w_rou = model_rou.getAttr('x', w)
v_rou = model_rou.getAttr('x', v)
for i in N_:
for j in N_:
try:
q_final[i,j,k] = q_rou[i,j]
qe_final[i,j,k] = qe_rou[i,j]
w_final[i,j,k] = w_rou[i,j]
v_final[i,j,k] = v_rou[i,j]
except:
q_final[i,j,k] = 0
w_final[i,j,k] = 0
v_final[i,j,k] = 0
Theta_[k] = Theta[k] - max([q_rou[i,j] for i in N_ for j in N_])
Phi_[k] = Phi[k] - max([v_rou[i,j] for i in N_ for j in N_])
DictRoutes[k], DictRoutesList[k] = GetRoutingList(k,d0,N,w_final)
DictNodes[k] = {'F' : F_, 'D' : D_, 'S': S_, 'C': C_}
Q0_[k] = Q0
DEMS_[k] = DEM_
except:
pass
##print('ERROR IN VEHICLE %s' % k)
else:
for i in N:
for j in N:
q_final[i,j,k] = 0
qe_final[i,j,k] = 0
v_final[i,j,k] = 0
w_final[i,j,k] = 0
solution = {'q_final' : q_final,
'qe_final' : qe_final,
'v_final' : v_final,
'w_final' : w_final,
'DictRoutes' : DictRoutes,
'DictRoutesList' : DictRoutesList,
'DictNodes' : DictNodes,
'Theta_' : Theta_,
'Phi_' : Phi_,
'Q0_' : Q0_,
'DEMS_' : DEMS_}
return solution
"""
AUXILIARY FUNCTIONS FOR HEURISTIC LOOP
"""
# Function that computes the freight cost for the route of a certain vehicle
def ComputeRouteCost(q, routing, k, gamma, r):
return sum([q[i,j,k]*r[i,j]*gamma[k] for (i,j) in routing])
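# Illustrative sketch (toy numbers): the freight cost of a route is the sum over
# its arcs of (load on the arc) * (arc distance) * (vehicle cost per unit weight
# and distance).
def _example_route_cost():
    route = [(0, 1), (1, 0)]
    q_toy = {(0, 1, 7): 10.0, (1, 0, 7): 0.0}   # vehicle 7 carries 10 units out, returns empty
    gamma_toy = {7: 0.5}
    r_toy = np.array([[0.0, 4.0], [4.0, 0.0]])
    return ComputeRouteCost(q_toy, route, 7, gamma_toy, r_toy)   # 10*4*0.5 + 0 = 20.0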
def DictFromListofTuples(listtuples):
dic = {}
for i,j in listtuples:
dic[i] = j
return dic
# Function that computes every routing cost for every vehicle that visits clients and satellites
def GetMaxRoutingCosts(N, K, depots, DictNodes, r, gamma, w_final, q_final):
RC = {}
Kfil = [k for k in K if DictNodes[k]['S'] and DictNodes[k]['C']] # Vehicles that visit satellites and clients
for k in Kfil:
routing, routing_list = GetRoutingList(k,depots[k],N,w_final)
freightcost = ComputeRouteCost(q_final, routing, k, gamma, r)
RC[k] = freightcost
try:
RC = DictFromListofTuples(sorted(RC.items(), key=lambda x: x[1], reverse=True))
except:
pass
return RC
# Function that determines the "most expensive" destination in a route
def GetNode2Exclude(routing, q, qe, v, C, k, gamma, r, banlist):
DictRouFreight = dict(zip(routing,[qe[i,j,k]*r[i,j]*gamma[k] for (i,j) in routing]))
MaxCost = 0
ex = None
q_ex = None
v_ex = None
for t in routing:
if t[1] in C and t[1] not in banlist:
if DictRouFreight[t] > MaxCost:
ex = t[1]
MaxCost = DictRouFreight[t]
# get freight from that node
if ex != None:
ant_ex = [t for t in routing if t[1] == ex][0][0]
post_ex = [t for t in routing if t[0] == ex][0][1]
q_ex = q[ant_ex,ex,k] - q[ex,post_ex,k]
qe_ex = qe[ant_ex,ex,k] - qe[ex,post_ex,k]
v_ex = v[ant_ex,ex,k] - v[ex,post_ex,k]
return ex, q_ex, qe_ex, v_ex
def GetNode2ExcludeFromVs(routing, q, v, C, k, gamma, r, banlist):
DictRouFreight = dict(zip(routing,[q[i,j,k]*r[i,j]*gamma[k] for (i,j) in routing]))
MaxCost = 0
ex = None
q_ex = None
v_ex = None
if len(routing) > 2:
for t in routing:
if t[1] in C and t[1] not in banlist:
if DictRouFreight[t] > MaxCost:
ex = t[1]
MaxCost = DictRouFreight[t]
elif len(routing) == 2:
        ##print('Route of length 2')
ex = routing[0][1]
else:
pass
if ex != None:
ant_ex = [t for t in routing if t[1] == ex][0][0]
post_ex = [t for t in routing if t[0] == ex][0][1]
q_ex = q[ant_ex,ex,k] - q[ex,post_ex,k]
v_ex = v[ant_ex,ex,k] - v[ex,post_ex,k]
return ex, q_ex, v_ex
# Function that takes a route and adds a node to that route
def ReroutingAdd(routing, tremove, tadd):
rerouting = []
for t in routing:
if t == tremove:
rerouting = rerouting + tadd #tadd is a list of 2 tuples
else:
rerouting.append(t)
return rerouting
# Function that takes a route and removes a node from that route
def ReroutingRemove(routing, ex):
rerouting = []
t_aux = [None,None]
for t in routing:
flag = True
if t[1] == ex:
t_aux[0] = t[0]
flag = False
if t[0] == ex:
t_aux[1] = t[1]
flag = False
try:
if sum(t_aux) > 0:
rerouting.append(tuple(t_aux))
t_aux = [None,None]
except:
pass
if flag:
rerouting.append(t)
return rerouting
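# Illustrative sketch (toy route): inserting node 9 on arc (1, 2) and removing it
# again recovers the original tour, which is exactly how the swap moves below
# modify a vehicle's route.
def _example_rerouting():
    route = [(0, 1), (1, 2), (2, 0)]
    with_9 = ReroutingAdd(route, (1, 2), [(1, 9), (9, 2)])      # [(0,1), (1,9), (9,2), (2,0)]
    without_9 = ReroutingRemove(with_9, 9)                      # [(0,1), (1,2), (2,0)]
    return with_9, without_9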
# Function that decides which is the best way for adding a node to a route
def MinRouteVariation(Vsat, ex, r, DictRoutes):
MinDist = np.inf
for k in Vsat:
d0 = DictRoutes[k][0]
for t in DictRoutes[k]:
i1 = t[0]
j1 = t[1]
dist = r[i1,ex] + r[j1,t[1]]
if i1 != d0 and j1 != d0:
if dist < MinDist:
ks = k
i = t[0]
l = t[1]
tremove = (i,l)
tadd = [(i,ex), (ex,l)]
MinDist = dist
# Rerouting
rerouting = ReroutingAdd(DictRoutes[ks], tremove, tadd)
return ks, rerouting
# Function that decides which satellite will receive the freight from the excluded node
def SelectSatellite(ex, q_ex, v_ex, Sk, V_i, cdv, cdw, Phi_, Theta_, r, DictRoutes):
sat = None
ks = None
rerouting = None
MinDist = np.inf
for s in Sk:
if q_ex <= cdw[s] and v_ex <= cdv[s]:
Vsat = [k for k in V_i[s] if Theta_[k] >= q_ex and Phi_[k] >= v_ex]
if len(Vsat) > 0:
if r[s,ex] < MinDist:
sat = s
MinDist = r[s,ex]
ks, rerouting = MinRouteVariation(Vsat, ex, r, DictRoutes)
return sat, ks, rerouting
# Function for recomputing the freight for routing
def RecomputeFreightEx(q_final,w_final, N, k, ex, q_ex, sat, routing, gamma, r):
routing_list = [routing[0][0]]
for t in routing:
routing_list.append(t[1])
flag_ex = 0
q_rec = {}
for i in routing_list[:-1]:
j = int(sum([w_final[i,j,k]*j for j in N]))
if j == ex:
ex_ant = i
flag_ex = q_ex
elif j == sat:
if i == ex:
q_rec[ex_ant,j,k] = q_final[i,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
flag_ex = 0
else:
if i == ex:
q_rec[ex_ant,j,k] = q_final[i,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
rerouting = ReroutingRemove(routing, ex)
return ComputeRouteCost(q_rec, rerouting, k, gamma, r)
# Function for recomputing the freight for routing
def RecomputeFreightAdd(q_final, N, k, ex, q_ex, rerouting, gamma, r):
flag_ex = q_ex
q_rec = {}
for t in rerouting:
i = t[0]
j = t[1]
if j == ex:
ex_ant = i
else:
if i == ex:
q_rec[ex_ant,i,k] = q_final[ex_ant,j,k] + flag_ex
flag_ex = 0
q_rec[i,j,k] = q_final[ex_ant,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
return ComputeRouteCost(q_rec, rerouting, k, gamma, r)
def RecomputeFreightExFromKs(q_final,w_final, N, k, ex, q_ex, routing, gamma, r):
    ##print('Removing %s from freight of vehicle %s' %(q_ex, k))
# Function for recomputing freight
if len(routing) > 2:
routing_list = [routing[0][0]]
for t in routing:
routing_list.append(t[1])
flag_ex = q_ex
q_rec = {}
for i in routing_list[:-1]:
j = int(sum([w_final[i,j,k]*j for j in N]))
if j == ex:
ex_ant = i
flag_ex = 0
else:
if i == ex:
q_rec[ex_ant,j,k] = q_final[i,j,k] - flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] - flag_ex
rerouting = ReroutingRemove(routing, ex)
cost = ComputeRouteCost(q_rec, rerouting, k, gamma, r)
else:
cost = 0
return cost
def RecomputeFreightAddToKd(q_final, N, k, ex, q_ex, sat, rerouting, gamma, r):
# Function for recomputing the freight for routing
##print('Adding %s to freight of vehicle %s' %(q_ex, k))
##print('Rerouting: ', rerouting)
q_or = {}
for (i,j) in rerouting:
try:
q_or[i,j,k] = q_final[i,j,k]
except:
pass
routing_list = [rerouting[0][0]]
for t in rerouting:
routing_list.append(t[1])
if routing_list.index(ex) < routing_list.index(sat):
flag_ex = 0
q_rec = {}
for t in rerouting:
i = t[0]
j = t[1]
if j == ex:
ex_ant = i
else:
if i == ex:
q_rec[ex_ant,ex,k] = q_final[ex_ant,j,k]
flag_ex = q_ex
q_rec[ex,j,k] = q_final[ex_ant,j,k] - flag_ex
elif i == sat:
flag_ex = 0
q_rec[sat,j,k] = q_final[sat,j,k] - flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] - flag_ex
else:
flag_ex = 0
q_rec = {}
for t in rerouting:
i = t[0]
j = t[1]
if j == ex:
ex_ant = i
else:
if i == ex:
q_rec[ex_ant,ex,k] = q_final[ex_ant,j,k]
q_rec[ex,j,k] = q_final[ex_ant,j,k] - flag_ex
flag_ex = 0
elif i == sat:
flag_ex = q_ex
q_rec[sat,j,k] = q_final[sat,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
    ##print('New Q: ', q_rec)
return ComputeRouteCost(q_rec, rerouting, k, gamma, r)
def ImproveOptimalSwapKdKs(RCVd, data, cdv, cdw, DictRoutes, DictRoutesList, DictNodes, DEMS_, Q0_, q_final, qe_final, v_final, w_final, Phi_, Theta_, depots):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
V_i = data['V_i']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
rho = data['rho']
gamma = data['gamma']
r = data['r']
A = data['A']
N = F+D+S+C
banlist = []
for kd in RCVd.keys():
flag_feasible = True
flag_descend = True
while flag_feasible and flag_descend:
# Get node to exclude REMARK: IS ALWAYS A CLIENT!!
try:
ex, q_ex, qe_ex, v_ex = GetNode2Exclude(DictRoutes[kd], q_final, qe_final, v_final, C, kd, gamma, r, banlist)
# Get satellite
sat, ks, rerouting_ks = SelectSatellite(ex, q_ex, v_ex, DictNodes[kd]['S'], V_i, cdv, cdw, Phi_, Theta_, r, DictRoutes)
except:
sat = None
# If there is a satelite...
if sat != None:
IncumbentCost = np.inf
# PrevCost: routing cost for kd and ks without changes
PrevCost = ComputeRouteCost(qe_final, DictRoutes[kd], kd, gamma, r) + ComputeRouteCost(qe_final, DictRoutes[ks], ks, gamma, r)
Costkd = RecomputeFreightEx(qe_final,w_final, N, kd, ex, qe_ex, sat, DictRoutes[kd], gamma, r)
Costks = RecomputeFreightAdd(qe_final, N, ks, ex, qe_ex, rerouting_ks, gamma, r)
IncumbentCost = Costkd + Costks
                if A[ex,ks] < 1:
IncumbentCost = np.inf
##print('Incumbent: ', IncumbentCost, ' previous: ', PrevCost)
if IncumbentCost <= PrevCost:
# Modify nodes for kd and ks
##print('Removing %s from the route of vehicle %s' % (ex,kd))
DictNodes[kd]['C'] = [c for c in DictNodes[kd]['C'] if c != ex]
DictNodes[ks]['C'] = DictNodes[ks]['C'] + [ex]
# Create entry for exchanged node
DEMS_[ks][ex] = [0 for p in P]
# Correct demand for excluded node
for p in P:
aux = DEMS_[kd][ex][p]
DEMS_[kd][sat][p] = DEMS_[kd][sat][p] + aux
Q0_[ks][p] = Q0_[ks][p] + aux
DEMS_[ks][ex][p] = aux
cdv[sat] = cdv[sat] + aux*nu[p]
cdw[sat] = cdw[sat] + aux*omega[p]
del DEMS_[kd][ex]
# Re routing for kd
##print('RE ROUTING FOR VEHICLE %s' % kd)
F_ = DictNodes[kd]['F']
D_ = DictNodes[kd]['D']
S_ = DictNodes[kd]['S']
C_ = DictNodes[kd]['C']
N_ = F_ + D_ + S_ + C_
data_routing = {'Q0' : Q0_[kd],
'DEM': DEMS_[kd],
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[kd],
'Theta': Theta[kd],
'nu' : nu,
'omega': omega,
'd0' : depots[kd],
'gamma': gamma[kd],
'rho' : rho[kd],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
qe, q, w, v = model_rou.__data
q_rou = model_rou.getAttr('x', q)
qe_rou = model_rou.getAttr('x', qe)
w_rou = model_rou.getAttr('x', w)
v_rou = model_rou.getAttr('x', v)
for i in N_:
for j in N_:
try:
q_final[i,j,kd] = q_rou[i,j]
qe_final[i,j,kd] = qe_rou[i,j]
w_final[i,j,kd] = w_rou[i,j]
v_final[i,j,kd] = v_rou[i,j]
except:
q_final[i,j,kd] = 0
qe_final[i,j,kd] = 0
w_final[i,j,kd] = 0
v_final[i,j,kd] = 0
# Delete route for excluded node
for i in N_:
q_final[i,ex,kd] = 0
qe_final[i,ex,kd] = 0
w_final[i,ex,kd] = 0
v_final[i,ex,kd] = 0
q_final[ex,i,kd] = 0
qe_final[ex,i,kd] = 0
w_final[ex,i,kd] = 0
v_final[ex,i,kd] = 0
Theta_[kd] = Theta[kd] - max([q_rou[i,j] for i in N_ for j in N_])
Phi_[kd] = Phi[kd] - max([v_rou[i,j] for i in N_ for j in N_])
DictRoutes[kd], DictRoutesList[kd] = GetRoutingList(kd,depots[kd],N,w_final)
DictNodes[kd] = {'F' : F_, 'D' : D_, 'S': S_, 'C': C_}
banlist.append(ex)
except:
pass
##print('ERROR FOR VEHICLE %s' % kd)
##print('RE ROUTING FOR VEHICLE %s' % ks)
F_ = DictNodes[ks]['F']
D_ = DictNodes[ks]['D']
S_ = DictNodes[ks]['S']
C_ = DictNodes[ks]['C']
N_ = F_ + D_ + S_ + C_
data_routing = {'Q0' : Q0_[ks],
'DEM': DEMS_[ks],
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[ks],
'Theta': Theta[ks],
'nu' : nu,
'omega': omega,
'd0' : depots[ks],
'gamma': gamma[ks],
'rho' : rho[ks],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
qe, q, w, v = model_rou.__data
q_rou = model_rou.getAttr('x', q)
qe_rou = model_rou.getAttr('x', qe)
w_rou = model_rou.getAttr('x', w)
v_rou = model_rou.getAttr('x', v)
for i in N_:
for j in N_:
try:
q_final[i,j,ks] = q_rou[i,j]
qe_final[i,j,ks] = qe_rou[i,j]
w_final[i,j,ks] = w_rou[i,j]
v_final[i,j,ks] = v_rou[i,j]
except:
q_final[i,j,ks] = 0
qe_final[i,j,ks] = 0
w_final[i,j,ks] = 0
v_final[i,j,ks] = 0
Theta_[ks] = Theta[ks] - max([q_rou[i,j] for i in N_ for j in N_])
Phi_[ks] = Phi[ks] - max([v_rou[i,j] for i in N_ for j in N_])
DictRoutes[ks], DictRoutesList[ks] = GetRoutingList(ks,depots[ks],N,w_final)
DictNodes[ks] = {'F' : F_, 'D' : D_, 'S': S_, 'C': C_}
except:
pass
                    ##print('ERROR IN REOPTIMIZING VEHICLE %s' % ks)
else:
##print('No more feasible changes for Vehicle %s' % kd)
flag_descend = False
else:
##print('No more feasible changes for Vehicle %s' % kd)
flag_feasible = False
solution_swapkdks = {'DictRoutes' : DictRoutes,
'DictNodes' : DictNodes,
'DEMS_' : DEMS_,
'Q0_' : Q0_,
'q_final' : q_final,
'v_final' : v_final,
'w_final' : w_final,
'Phi_' : Phi_,
'Theta_' : Theta_,
'cdv' : cdv,
'cdw' : cdw,
'banlist' : banlist}
return solution_swapkdks
def AddNodeToSet(dictclass, add, F, D, S, C):
cla = dictclass[add]
if cla == 'F':
F = F + [add]
elif cla == 'D':
D = D + [add]
elif cla == 'S':
S = S + [add]
elif cla == 'C':
C = C + [add]
else:
pass
return F,D,S,C
def RemoveNodeFromSet(dictclass, rem, F, D, S, C):
cla = dictclass[rem]
if cla == 'F':
F = [f for f in F if f != rem]
elif cla == 'D':
D = [d for d in D if d != rem]
elif cla == 'S':
S = [s for s in S if s != rem]
elif cla == 'C':
C = [c for c in C if c != rem]
else:
pass
return F,D,S,C
def AddAndRemoveFromRoute(dictclass, DEMS_, P, DictNodes, k, add, DemAdd, rem, DemRem, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma):
demms = DEMS_.copy()
DEM_ = demms[k]
Q0 = Q0_[k].copy()
F_ = DictNodes[k]['F']
D_ = DictNodes[k]['D']
S_ = DictNodes[k]['S']
C_ = DictNodes[k]['C']
d0 = depots[k]
##print('AddAndRemoveFromRoute: Original demands ', DEM_)
if add != None:
##print('AddAndRemoveFromRoute: Attempting to add node %s to vehicle %s' % (add,k))
F_, D_, S_, C_ = AddNodeToSet(dictclass, add, F_, D_, S_, C_)
DEM_[add] = [0 for p in P]
for p in P:
aux = DemAdd[p]
DEM_[add][p] = aux # Demand for the new node
if rem != None:
##print('AddAndRemoveFromRoute: Attempting to remove node %s to vehicle %s' % (rem,k))
F_, D_, S_, C_ = RemoveNodeFromSet(dictclass, rem, F_, D_, S_, C_)
for p in P:
aux = DemRem[p]
DEM_[rem][p] = DEM_[rem][p] - aux # If rem is depot, it will receive less feight
# If rem is client, it will have demand 0
N_ = F_ + D_ + S_ + C_
q_rou, w_rou, v_rou = {},{},{}
for i in N_:
for j in N_:
q_rou[i,j,k] = 0
v_rou[i,j,k] = 0
w_rou[i,j,k] = 0
N_mind0 = [n for n in N_ if n != d0]
for n in N_mind0:
if max([np.absolute(DEM_[n][p]) for p in P]) < 1:
##print('AddAndRemoveFromRoute: Removing node %s from route of vehicle %s because of empty demand' % (n,k))
# ##printx = True
F_, D_, S_, C_ = RemoveNodeFromSet(dictclass, n, F_, D_, S_, C_)
Route, RouteList = [], []
NewDictNodes = {'F' : F_, 'D': D_, 'S': S_, 'C': C_}
##print('AddAndRemoveFromRoute: Vehicle %s, Nodes: ' % k, NewDictNodes)
##print('AddAndRemoveFromRoute: Vehicle %s, Demands: ' % k, DEM_)
flag_optim = True
N_ = F_ + D_ + S_ + C_
if len(N_) > 2:
data_routing = {'Q0' : Q0,
                        'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'd0' : depots[k],
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
            qe, q, w, v = model_rou.__data
q_rou2 = model_rou.getAttr('x', q)
w_rou2 = model_rou.getAttr('x', w)
v_rou2 = model_rou.getAttr('x', v)
for (i,j) in q_rou2:
q_rou[i,j,k] = q_rou2[i,j]
w_rou[i,j,k] = w_rou2[i,j]
v_rou[i,j,k] = v_rou2[i,j]
Route, RouteList = GetRoutingList(k,d0,N_,w_rou)
##print('End routing vehicle %s' % k)
except:
flag_optim = False
##print('Infeasible routing for vehicle %s' % k)
elif len(N_) == 2:
j = [n for n in N_ if n != d0][0]
w_rou[d0, j, k] = 1
q_rou[d0, j, k] = sum([Q0[p]*omega[p] for p in P])
v_rou[d0, j, k] = sum([Q0[p]*nu[p] for p in P])
w_rou[j, d0, k] = 1
q_rou[j, d0, k] = 0
v_rou[j, d0, k] = 0
Route, RouteList = [(d0,j), (j,d0)], [d0,j]
else:
pass
    CapWeiRem = Theta[k] - max([q_rou[i,j,k] for i in N_ for j in N_])
    CapVolRem = Phi[k] - max([v_rou[i,j,k] for i in N_ for j in N_])
if flag_optim:
FreightCost = sum([q_rou[i,j,k]*gamma[k]*r[i,j] for i in N_ for j in N_])
else:
FreightCost = np.infty
return FreightCost, Route, RouteList, DEM_, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou, NewDictNodes
def AuxRoutingKd(DEMS_, P, DictNodes, k, add, DemAdd, sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff):
# Kd gets a new node on its route. This new node has demand and, as before
# was served by a satellite, now that satelite receives less freight
demms = DEMS_.copy()
DEM_ = demms[k]
Q0 = Q0_[k].copy()
F_ = DictNodes[k]['F']
D_ = DictNodes[k]['D']
S_ = DictNodes[k]['S']
C_ = DictNodes[k]['C']
d0 = depots[k]
if add != None:
##print('AuxRoutingKd: adding node %s to route of vehicle %s' % (add, k))
C_ = C_ + [add]
DEM_[add] = [0 for p in P]
for p in P:
aux = DemAdd[p]
DEM_[sat][p] = DEM_[sat][p] - aux # satellite receives less freight
DEM_[add][p] = aux # Demand for the new node
N_ = F_ + D_ + S_ + C_
q_rou, w_rou, v_rou = {},{},{}
for i in N_:
for j in N_:
q_rou[i,j,k] = 0
v_rou[i,j,k] = 0
w_rou[i,j,k] = 0
N_mind0 = [n for n in N_ if n != d0]
for n in N_mind0:
if max([np.absolute(DEM_[n][p]) for p in P]) < 1:
##print('AuxRoutingKd: Removing node %s from route of vehicle %s because of empty demand' % (n,k))
F_ = [f for f in F_ if f != n]
D_ = [d for d in D_ if d != n]
S_ = [s for s in S_ if s != n]
C_ = [c for c in C_ if c != n]
N_ = F_ + D_ + S_ + C_
Route, RouteList = [], []
NewDictNodes = {'F' : F_, 'D': D_, 'S': S_, 'C': C_}
##print('AuxRoutingKd: Vehicle %s, Nodes: ' % k, NewDictNodes)
# Consolidation of weight and volume
flag_optim = True
if len(N_) > 2:
data_routing = {'Q0' : Q0,
'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'omegaeff': omegaeff,
'd0' : depots[k],
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
            qe, q, w, v = model_rou.__data
q_rou2 = model_rou.getAttr('x', q)
w_rou2 = model_rou.getAttr('x', w)
v_rou2 = model_rou.getAttr('x', v)
for (i,j) in q_rou2:
q_rou[i,j,k] = q_rou2[i,j]
w_rou[i,j,k] = w_rou2[i,j]
v_rou[i,j,k] = v_rou2[i,j]
Route, RouteList = GetRoutingList(k,d0,N_,w_rou)
##print('End routing vehicle %s' % k)
except:
flag_optim = False
##print('Infeasible routing for vehicle %s' % k)
elif len(N_) == 2:
j = [n for n in N_ if n != d0][0]
w_rou[d0, j, k] = 1
q_rou[d0, j, k] = sum([Q0[p]*omega[p] for p in P])
v_rou[d0, j, k] = sum([Q0[p]*nu[p] for p in P])
w_rou[j, d0, k] = 1
q_rou[j, d0, k] = 0
v_rou[j, d0, k] = 0
Route, RouteList = [(d0,j), (j,d0)], [d0,j]
else:
pass
    CapWeiRem = Theta[k] - max([q_rou[i,j,k] for i in N_ for j in N_])
    CapVolRem = Phi[k] - max([v_rou[i,j,k] for i in N_ for j in N_])
if flag_optim:
FreightCost = sum([q_rou[i,j,k]*gamma[k]*r[i,j] for i in N_ for j in N_])
else:
FreightCost = np.infty
return FreightCost, Route, RouteList, DEM_, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou, NewDictNodes
def AuxRoutingKs(DEMS_, P, DictNodes, k, ex, DemRem, sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff):
# Ks got a node removed. So Q0 changes
demms = DEMS_.copy()
DEM_ = demms[k]
Q0 = Q0_[k].copy()
F_ = DictNodes[k]['F']
D_ = DictNodes[k]['D']
S_ = DictNodes[k]['S']
C_ = DictNodes[k]['C']
d0 = depots[k]
N_ = F_ + D_ + S_ + C_
C_ = [c for c in C_ if c != ex]
DEM_[ex] = [0 for p in P]
for p in P:
aux = DemRem[p]
Q0[p] = Q0[p] - aux # Vehicle starts with less freight
DEM_[ex][p] = 0
N_ = F_ + D_ + S_ + C_
##print('AuxRoutingKs: Vehicle %s, Nodes: ' % k, DictNodes[k])
q_rou, w_rou, v_rou = {},{},{}
for i in N_:
for j in N_:
q_rou[i,j,k] = 0
v_rou[i,j,k] = 0
w_rou[i,j,k] = 0
Route, RouteList = [], []
# Consolidation of weight and volume
Weight = {}
Volume = {}
for i in N_:
Weight[i] = sum([DEM_[i][p]*omega[p] for p in P])
Volume[i] = sum([DEM_[i][p]*nu[p] for p in P])
flag_optim = True
if len(N_) > 2:
data_routing = {'Q0' : Q0,
                        'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'omegaeff': omegaeff,
'd0' : depots[k],
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
model_rou = FlowRouting(data_routing)
        model_rou.optimize()
try:
            qe, q, w, v = model_rou.__data
q_rou2 = model_rou.getAttr('x', q)
w_rou2 = model_rou.getAttr('x', w)
v_rou2 = model_rou.getAttr('x', v)
for (i,j) in q_rou2:
q_rou[i,j,k] = q_rou2[i,j]
w_rou[i,j,k] = w_rou2[i,j]
v_rou[i,j,k] = v_rou2[i,j]
Route, RouteList = GetRoutingList(k,d0,N_,w_rou)
##print('End routing vehicle %s' % k)
except:
flag_optim = False
##print('Infeasible routing for vehicle %s' % k)
elif len(N_) == 2:
j = [n for n in N_ if n != d0][0]
w_rou[d0, j, k] = 1
q_rou[d0, j, k] = sum([Q0[p]*omega[p] for p in P])
v_rou[d0, j, k] = sum([Q0[p]*nu[p] for p in P])
w_rou[j, d0, k] = 1
q_rou[j, d0, k] = 0
v_rou[j, d0, k] = 0
Route, RouteList = [(d0,j), (j,d0)], [d0,j]
else:
pass
    CapWeiRem = Theta[k] - max([q_rou[i,j,k] for i in N_ for j in N_])
    CapVolRem = Phi[k] - max([v_rou[i,j,k] for i in N_ for j in N_])
if flag_optim:
FreightCost = sum([q_rou[i,j,k]*gamma[k]*r[i,j] for i in N_ for j in N_])
else:
FreightCost = np.infty
NewDictNodes = {'F' : F_, 'D': D_, 'S': S_, 'C': C_}
return FreightCost, Route, RouteList, DEM_, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou, NewDictNodes
def ImproveOptimalSwapKsKd(RCVs, data, banlist, DictSatKd, cdv, cdw, DictRoutes, DictRoutesList, DictNodes, DEMS_, Q0_, q_final, v_final, w_final, Phi_, Theta_, depots):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
V_i = data['V_i']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
gamma = data['gamma']
DEM = data['DEM']
r = data['r']
A = data['A']
N = F+D+S+C
##print('starting: ImproveOptimalSwapKsKd')
# PARAMETER THAT SAYS HOW MANY VEHICLES FROM DEPOTS ARE BEING USED:
depots_list = []
for dep in depots.values():
if dep not in depots_list:
depots_list.append(dep)
VehiclesPerDepot = {}
for dep in depots_list:
VehiclesPerDepot[dep] = sum([w_final[dep,j,k] for j in N for k in V_i[dep]])
##print(VehiclesPerDepot)
for ks in RCVs.keys():
##print('Vehicle %s' % ks)
flag_feasible = True
flag_descend = True
while flag_feasible and flag_descend:
ex, q_ex, v_ex = GetNode2ExcludeFromVs(DictRoutes[ks], q_final, v_final,C, ks, gamma, r, banlist)
if ex != None:
                # patch q_ex and v_ex (the flow values sometimes carry errors)
q_ex = sum([DEM[ex][p]*omega[p] for p in P])
v_ex = sum([DEM[ex][p]*nu[p] for p in P])
##print('Original demand of node %s: ' % ex, DEM[ex])
##print('Original freight of node %s: ' % ex, q_ex)
sat = depots[ks]
kd, rerouting_kd = MinRouteVariation([DictSatKd[sat]], ex, r, DictRoutes)
# Backup for satellite demand
dem_sat = [dem for dem in DEMS_[kd][sat]]
else:
sat = None
# If there is a satelite...
if sat != None and A[ex,kd] > 0:
IncumbentCost = np.inf
# PrevCost: routing cost for kd and ks without changes
Costkd_pre = ComputeRouteCost(q_final, DictRoutes[kd], kd, gamma, r)
Costks_pre = ComputeRouteCost(q_final, DictRoutes[ks], ks, gamma, r)
PrevCost = Costkd_pre + Costks_pre
##print('Attempting to remove node %s from route of vehicle %s (sat = %s)' % (ex, ks,sat))
                # AuxPrintPreRerouting(DEMS_, Q0_, kd , nu, omega, P, DictNodes)
Sol_kd = AuxRoutingKd(DEMS_, P, DictNodes, kd, ex, DEM[ex], sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff)
Costkd_pos = Sol_kd[0]
                # AuxPrintPreRerouting(DEMS_, Q0_, ks , nu, omega, P, DictNodes)
Sol_ks = AuxRoutingKs(DEMS_, P, DictNodes, ks, ex, DEM[ex], sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff)
Costks_pos = Sol_ks[0]
# CHECK IF SATELLITE HAS EMPTY ROUTE
if Sol_ks[10]['C']:
IncumbentCost = Costkd_pos + Costks_pos
else:
##print('Vehicle %s has an empty route' % ks)
if VehiclesPerDepot[depots[ks]] - 1 == 0:
IncumbentCost = Costkd_pos + Costks_pos - 1000
                        ##print('Attempting to close satellite %s' % depots[ks])
##print('Incumbent: ', IncumbentCost, ' previous: ', PrevCost)
if IncumbentCost <= PrevCost:
##print('Updating routes for vehicles kd = %s and ks = %s' % (kd,ks))
DictSol = {kd : Sol_kd, ks: Sol_ks}
#FreightCost, Route, RouteList, DEM, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou
for k in [kd,ks]:
OldRoute = DictRoutes[k]
for (i,j) in OldRoute:
q_final[i,j,k] = 0
w_final[i,j,k] = 0
v_final[i,j,k] = 0
DictRoutes[k] = DictSol[k][1]
DictRoutesList[k] = DictSol[k][2]
DEMS_[k] = DictSol[k][3]
Q0_[k] = DictSol[k][4]
Phi_[k] = DictSol[k][5]
Theta_[k] = DictSol[k][6]
for (i,j) in DictSol[k][1]:
q_final[i,j,k] = DictSol[k][7][i,j,k]
w_final[i,j,k] = DictSol[k][8][i,j,k]
v_final[i,j,k] = DictSol[k][9][i,j,k]
# Nodes are modified
DictNodes[k] = DictSol[k][10]
                    # The new node is added to kd and removed from ks:
# Remaining capacities of depots are modified:
cdw[depots[ks]] = cdw[depots[ks]] + q_ex
cdv[depots[ks]] = cdv[depots[ks]] + v_ex
if Sol_ks[10]['C']:
pass
else:
VehiclesPerDepot[depots[ks]] = VehiclesPerDepot[depots[ks]] - 1
                    ##print('There was an exchange between kd = %s and ks = %s' % (kd,ks))
                else:
                    ##print('There was no exchange between kd = %s and ks = %s' % (kd,ks))
DEMS_[kd][sat] = dem_sat
del DEMS_[kd][ex]
flag_descend = False
else:
##print('No more feasible changes for Vehicle %s' % ks)
flag_feasible = False
solution_swapkskd = {'DictRoutes' : DictRoutes,
'DictNodes' : DictNodes,
'DEMS_' : DEMS_,
'Q0_' : Q0_,
'q_final' : q_final,
'v_final' : v_final,
'w_final' : w_final,
'Phi_' : Phi_,
'Theta_' : Theta_,
'cdv' : cdv,
'cdw' : cdw,
'banlist' : banlist}
return solution_swapkskd
def Steps1To3(data):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
epsil = data['epsil']
A = data['A']
    firmswithout = F[- max(int(len(F)*0.2), 1):]  # Assume that the last 20% of the firms (or at least 1)
    # don't have their own vehicles
Fw = [f for f in firmswithout]
FminFw = [f for f in F if f not in Fw]
Vs = [item for sublist in [V_i[s] for s in S] for item in sublist]
Vd = [item for sublist in [V_i[d] for d in D] for item in sublist]
N = F+D+S+C
DcupS = D + S
ScupC = S + C
NminC = F + D + S
# Other parameters
VolClient = {}
WeightClient = {}
for client, dem in DEM.items():
VolClient[client] = sum([dem[p]*nu[p] for p in P])
WeightClient[client] = sum([dem[p]*omega[p] for p in P])
MinVolDep = {}
MinWeightDep = {}
for i in DcupS:
MinVolDep[i] = min(Lambd[i], sum([Phi[k] for k in V_i[i]]))
MinWeightDep[i] = min(Omega[i], sum([Theta[k] for k in V_i[i]]))
ServCost = []
PfromFw = [item for sublist in [P_f[f] for f in Fw] for item in sublist]
F_p = {}
for f in F:
for p in P_f[f]:
F_p[p] = f
# Serving cost for delivering to customers
for i in D:
# gamma_ki = max([gamma[v] for v in V_i[i]])
# rho_ki = max([rho[v] for v in V_i[i]])
for c in C:
Weight_cf = {}
gamma_kf = {}
rho_kf = {}
for f in F:
Weight_cf[f] = sum([DEM[c][p]*omegaeff[p] for p in P_f[f]])
gamma_kf[f] = 0
rho_kf[f] = 0
# gamma_kf[f] = max([gamma[v] for v in V_i[f]])
# rho_kf[f] = max([rho[v] for v in V_i[f]])
# Check if customer c demanded products from firms without vehicles
if sum([DEM[c][p] for p in PfromFw]) > 0:
flag_fw = True
FirmsToVisit = [f for f in Fw if max([DEM[c][p] for p in P_f[f]]) > 0]
else:
flag_fw = False
load_cost_f1 = sum([r[f,i]*(gamma_kf[f]*Weight_cf[f] + rho_kf[f]) for f in FminFw])
for k in V_i[i]:
if A[c,k] > 0:
gamma_ki = gamma[k]
rho_ki = rho[k]
if flag_fw:
f0, load_cost_f2 = GreedyRoutingForServingCost(i,
sum([Weight_cf[f] for f in FminFw]),
FirmsToVisit,
Weight_cf,
gamma_ki,
rho_ki,
r)
del_cost = r[f0,c]*(gamma_ki*sum([Weight_cf[f] for f in Fw]) + rho_ki)
else:
load_cost_f2 = 0
del_cost = r[i,c]*(gamma_ki*sum([Weight_cf[f] for f in F]) + rho_ki)
"""HERE WE CAN ADD ADDITIONAL COSTS FOR ROUTING"""
sc = load_cost_f1 + load_cost_f2 + del_cost + delta[k]
ServCost.append([i,c,k, sc, VolClient[c], WeightClient[c]])
# serving cost for satellite
for i in S:
for c in C:
Weight_cf = {}
gamma_kf = {}
rho_kf = {}
for f in F:
Weight_cf[f] = sum([DEM[c][p]*omegaeff[p] for p in P_f[f]])
gamma_kf[f] = 0
rho_kf[f] = 0
# gamma_kf[f] = max([gamma[v] for v in V_i[f]])
# rho_kf[f] = max([rho[v] for v in V_i[f]])
# Check if customer c demanded products from firms without vehicles
if sum([DEM[c][p] for p in PfromFw]) > 0:
flag_fw = True
FirmsToVisit = [f for f in Fw if max([DEM[c][p] for p in P_f[f]]) > 0]
else:
flag_fw = False
load_cost_f1 = GetMinimalLoadCostF1(r, i, gamma, Weight_cf, rho, FminFw, D, V_i)
if flag_fw:
del_cost = GetBestDeliveringCost(r, i, gamma, Weight_cf, rho, FirmsToVisit, D, V_i)
else:
del_cost = 0
for k in V_i[i]:
if A[c,k] > 0:
gamma_ki = max([gamma[v] for v in V_i[i]])
rho_ki = max([rho[v] for v in V_i[i]])
del_cost = r[i,c]*(gamma_ki*sum([Weight_cf[f] for f in Fw]) + rho_ki) + epsil[i]
sc = load_cost_f1 + del_cost + epsil[i] + delta[k]
ServCost.append([i,c,k,sc, VolClient[c], WeightClient[c]])
df_sc = pd.DataFrame(data = ServCost, columns = ['depot','customer','vehicle','servcost','volume','weight'])
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
openedS = []
usedK = []
DEM_i = {} # Dictionary for depots demands
for i in DcupS:
DEM_i[i] = [0 for p in P]
h_opt = {}
m_opt = {}
x_opt = {}
y_opt = {}
u_opt = {}
z_opt = {}
Weight_i = {}
Volume_i = {}
for i in DcupS:
Weight_i[i] = 0
Volume_i[i] = 0
Weight_k = {}
Volume_k = {}
for k in K:
Weight_k[k] = 0
Volume_k[k] = 0
# #print(df_sc)
while df_sc.shape[0] > 0:
# Always check first element in dataframe
i = int(df_sc.loc[0]['depot'])
c = int(df_sc.loc[0]['customer'])
k = int(df_sc.loc[0]['vehicle'])
w = df_sc.loc[0]['weight']
v = df_sc.loc[0]['volume']
# #print(df_sc.head())
# #print(df_sc.shape[0])
#print('Customer %s trying to be added to depot %s' %(c,i))
# #print('Depot incumbent weight: %s of %s' % (Weight_i[i] + w, MinWeightDep[i]))
# #print('Depot incumbent Volume: %s of %s' % (Volume_i[i] + v, MinVolDep[i]))
if Weight_i[i] + w <= MinWeightDep[i] and Volume_i[i] + v <= MinVolDep[i]:
# #print('Vehicle incumbent weight: %s of %s' % (Weight_k[k] + w, Theta[k]))
# #print('Vehicle incumbent Volume: %s of %s' % (Volume_k[k] + v, Phi[k]))
if Weight_k[k] + w <= Theta[k] and Volume_k[k] + v <= Phi[k]:
# Add
for p in P:
if DEM[c][p] > 0:
h_opt[p,i,c] = DEM[c][p]
m_opt[p,c,k] = DEM[c][p]
DEM_i[i][p] = DEM_i[i][p] + DEM[c][p]
fp = F_p[p]
if fp in Fw and k in Vd:
if (p,fp,k) in x_opt.keys():
x_opt[p,fp,k] = x_opt[p,fp,k] + DEM[c][p]
else:
x_opt[p,fp,k] = DEM[c][p]
Weight_i[i] = Weight_i[i] + w
Volume_i[i] = Volume_i[i] + v
Weight_k[k] = Weight_k[k] + w
Volume_k[k] = Volume_k[k] + v
z_opt[k,c] = 1
# Delete customer from set (becasue it was assigned)
df_sc = df_sc[df_sc['customer'] != c]
if i in S and i not in openedS:
openedS.append(i)
u_opt[i] = 1
# Substract the opening cost
df_sc['servcost'] = np.where(df_sc['depot'] == i,
df_sc['servcost'] - epsil[i],
df_sc['servcost'])
if k not in usedK:
usedK.append(k)
y_opt[k] = 1
                    # Subtract the opening cost
df_sc['servcost'] = np.where(df_sc['vehicle'] == k,
df_sc['servcost'] - delta[k],
df_sc['servcost'])
# #print('Customer %s added to depot %s' %(c,i))
else:
df_sc = df_sc[1:]
else:
df_sc = df_sc[1:]
# wm = df_sc['weight'].min()
# vm = df_sc['volume'].min()
# if Weight_i[i] == MinWeightDep[i] or Volume_i[i] == MinVolDep[i]:
# df_sc = df_sc[df_sc['depot'] != i]
# if Weight_k[k] == Theta[k] or Volume_k[k] == Phi[k]:
# df_sc = df_sc[df_sc['vehicle'] != k]
# Reorder by servingcost
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
    # Now that we know each satellite's demand for products, we assign distributor(s)
    # to each satellite as if it were a customer
# Serving cost for products from firms with vehicles
# for s in openedS:
# #print(DEM_i[s])
# #print('Opened satellites = %s' % len(openedS))
ServCost = []
for f in FminFw:
for s in openedS:
for p in P_f[f]:
if DEM_i[s][p] > 0:
w = DEM_i[s][p]*omega[p]
we = DEM_i[s][p]*omegaeff[p]
v = DEM_i[s][p]*nu[p]
# for k in V_i[f]:
# gamma_kf = gamma[k]
# rho_kf = rho[k]
# sc = r[f,s]*(gamma_kf*w + rho_kf)
# ServCost.append([f, s, p, k, sc, v, w])
# gamma_kf = max([gamma[v] for v in V_i[f]])
# rho_kf = max([rho[v] for v in V_i[f]])
gamma_kf = 0
rho_kf = 0
for d in D:
for k in V_i[d]:
gamma_kd = gamma[k]
rho_kd = rho[k]
sc = r[f,d]*(gamma_kf*we + rho_kf) + r[d,s]*(gamma_kd*we + rho_kd)
ServCost.append([d, s, p, k, sc, v, w])
# Serving cost for products from firms without vehicles:
for f in Fw:
for s in openedS:
for p in P_f[f]:
if DEM_i[s][p] > 0:
w = DEM_i[s][p]*omega[p]
we = DEM_i[s][p]*omegaeff[p]
v = DEM_i[s][p]*nu[p]
for d in D:
for k in V_i[d]:
gamma_kd = gamma[k]
rho_kd = rho[k]
sc = r[f,d]*(gamma_kd*we + rho_kd) + r[d,s]*(gamma_kd*we + rho_kd)
if k not in usedK:
sc = sc + delta[k]
ServCost.append([d, s, p, k, sc, v, w])
df_sc = pd.DataFrame(data = ServCost, columns = ['depot','satellite','product','vehicle','servcost','volume','weight'])
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
df_sc['fixcostvehicle'] = [delta[v] for v in df_sc['vehicle'].tolist()]
df_sc['servcost'] = np.where(df_sc['vehicle'].isin(usedK), df_sc['servcost'], df_sc['servcost'] + df_sc['fixcostvehicle'])
while df_sc.shape[0] > 0:
# Always check first element in dataframe
i = int(df_sc.loc[0]['depot'])
s = int(df_sc.loc[0]['satellite'])
p = int(df_sc.loc[0]['product'])
k = int(df_sc.loc[0]['vehicle'])
w = int(df_sc.loc[0]['weight'])
v = int(df_sc.loc[0]['volume'])
if i in F:
condition1 = True
else:
condition1 = Weight_i[i] + w <= MinWeightDep[i] and Volume_i[i] + v <= MinVolDep[i]
# Add
condition2 = Weight_k[k] + w <= Theta[k] and Volume_k[k] + v <= Phi[k]
if condition1 and condition2:
# if DEM_i[s][p] == 0:
#print('Warning: s = %s and p = %s' % (s,p))
fp = F_p[p]
h_opt[p,i,s] = DEM_i[s][p]
# PATCH FOR MAKING PRODUCTS FROM FIRMS WITH VEHICLES APPEAR
if fp not in Fw:
m_opt[p,s,k] = 0
else:
m_opt[p,s,k] = DEM_i[s][p]
Weight_k[k] = Weight_k[k] + w
Volume_k[k] = Volume_k[k] + v
if i in D:
DEM_i[i][p] = DEM_i[i][p] + DEM_i[s][p]
Weight_i[i] = Weight_i[i] + w
Volume_i[i] = Volume_i[i] + v
if fp in Fw and k in Vd:
if (p,fp,k) in x_opt.keys():
x_opt[p,fp,k] = x_opt[p,fp,k] + DEM_i[s][p]
else:
x_opt[p,fp,k] = DEM_i[s][p]
if k not in usedK:
usedK.append(k)
y_opt[k] = 1
df_sc['servcost'] = df_sc['servcost'] - delta[k]
DEM_i[s][p] = 0
df_sc = df_sc[1:]
df_sc = df_sc[~((df_sc['satellite'] == s) & (df_sc['product'] == p))]
else:
df_sc = df_sc[1:]
wm = df_sc['weight'].min()
vm = df_sc['volume'].min()
if i in D:
if Weight_i[i] + wm > MinWeightDep[i] or Volume_i[i] + vm > MinVolDep[i]:
df_sc = df_sc[df_sc['depot'] != i]
if Weight_k[k] + wm > Theta[k] or Volume_k[k] + vm > Phi[k]:
df_sc = df_sc[df_sc['vehicle'] != k]
        # Delete satellite from set (because all of its demand was assigned)
if sum([DEM_i[s][p_] for p_ in P]) < 1:
df_sc = df_sc[df_sc['satellite'] != s]
# Reorder by servingcost
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
cdv = {}
cdw = {}
for i in DcupS:
cdv[i] = Lambd[i] - Volume_i[i]
cdw[i] = Omega[i] - Weight_i[i]
return m_opt, u_opt, x_opt, y_opt, z_opt, cdv, cdw
def ExecuteMultiEchelon(data, filename = None, preplots = False):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
epsil = data['epsil']
r = data['r']
N = F+D+S+C
dictclass = {}
for f in F:
dictclass[f] = 'F'
for d in D:
dictclass[d] = 'D'
for s in S:
dictclass[s] = 'S'
for c in C:
dictclass[c] = 'C'
m_opt, u_opt, x_opt, y_opt, z_opt, cdv, cdw = Steps1To3(data)
solution = MultiEchelonRouting(data, x_opt, y_opt, m_opt, z_opt, u_opt)
# Unpack data from solution
q_final = solution['q_final']
qe_final = solution['qe_final']
v_final = solution['v_final']
w_final = solution['w_final']
y_final = {}
for k in K:
try:
y_final[k] = min(sum([w_final[i,j,k] for i in N for j in N]), 1)
except:
y_final[k] = 0
DictRoutes = solution['DictRoutes']
DictRoutesList = solution['DictRoutesList']
DictNodes = solution['DictNodes']
Theta_ = solution['Theta_']
Phi_ = solution['Phi_']
Q0_ = solution['Q0_']
DEMS_ = solution['DEMS_']
"""RETRIEVE ORIGINAL OBJECTIVE FUNCTION VALUE"""
# Aux
m_final = {}
# Patch m_final:
for i in N:
for j in N:
for k in K:
m_final[i,j,k] = m_opt.get((i,j,k),0)
# Patch w_final, q final, v_final:
for i in N:
for j in N:
for k in K:
if (i,j,k) not in q_final:
q_final[i,j,k] = 0
if (i,j,k) not in w_final:
w_final[i,j,k] = 0
if (i,j,k) not in v_final:
v_final[i,j,k] = 0
if preplots:
AuxSubPlot(data, w_final, figsize = (5,5), save = True, filename = filename)
u_final = {}
for s in S:
u_final[s] = u_opt.get(s,0)
# Cost of Satellites
SatCost = sum([u_final[s]*epsil[s] for s in S])
VehicCost = sum([y_final[k]*delta[k] for k in K])
ArcCost = sum([w_final[i,j,k]*r[i,j]*rho[k] for i in N for j in N for k in K])
FreightCost = sum([gamma[k]*qe_final[i,j,k]*r[i,j] for i in N for j in N for k in K])
Opt = SatCost + VehicCost + ArcCost + FreightCost
depots = {}
for i in D+S:
for k in V_i[i]:
depots[k] = i
##print('LOOP: START!')
# WORKING HERE
CurrentOpt = Opt
Vd = [item for sublist in [V_i[d] for d in D] for item in sublist]
n_iters = 3
iters = 1
tries = 1
while iters <= n_iters and tries < 2:
##print('Iter: %s, Try: %s' % (iters,tries))
RCVd = GetMaxRoutingCosts(N, Vd, depots, DictNodes, r, gamma, w_final, q_final)
# ADD FUNCTION FOR SORT RCVd dictionary by value
##print('PERMUTACIONES KD A KS')
solution_swapkdks = ImproveOptimalSwapKdKs(RCVd,
data,
cdv,
cdw,
DictRoutes,
DictRoutesList,
DictNodes,
DEMS_,
Q0_,
q_final,
qe_final,
v_final,
w_final,
Phi_,
Theta_,
depots)
# Unpack data
DictRoutes = solution_swapkdks['DictRoutes']
DictNodes = solution_swapkdks['DictNodes']
DEMS_ = solution_swapkdks['DEMS_']
Q0_ = solution_swapkdks['Q0_']
q_final = solution_swapkdks['q_final']
v_final = solution_swapkdks['v_final']
w_final = solution_swapkdks['w_final']
Phi_ = solution_swapkdks['Phi_']
Theta_ = solution_swapkdks['Theta_']
cdv = solution_swapkdks['cdv']
cdw = solution_swapkdks['cdw']
banlist = solution_swapkdks['banlist']
# Patch w_final, q final, v_final:
for i in N:
for j in N:
for k in K:
if (i,j,k) not in q_final:
q_final[i,j,k] = 0
if (i,j,k) not in w_final:
w_final[i,j,k] = 0
if (i,j,k) not in v_final:
v_final[i,j,k] = 0
# Get Dictionary with this structure: key = satellite, value = vehicle from D that visits that satellite
KminV_is = {}
for s in S:
KminV_is[s] = [k for k in K if k not in V_i[s]]
Sserv1 = [s for s in S if sum([w_final[s,j,k] for j in N for k in KminV_is[s]]) == 1]
DictSatKd = {}
for kd in Vd:
if DictNodes[kd]['S']:
for s in DictNodes[kd]['S']:
if s in Sserv1:
DictSatKd[s] = kd
Vs1 = [item for sublist in [V_i[s] for s in DictSatKd.keys()] for item in sublist]
RCVs = GetMaxRoutingCosts(N, Vs1, depots, DictNodes, r, gamma, w_final, q_final)
solution_swapkskd = ImproveOptimalSwapKsKd(RCVs,
data,
banlist,
DictSatKd,
cdv,
cdw,
DictRoutes,
DictRoutesList,
DictNodes,
DEMS_,
Q0_,
q_final,
v_final,
w_final,
Phi_,
Theta_,
depots)
DictRoutes = solution_swapkskd['DictRoutes']
DictNodes = solution_swapkskd['DictNodes']
DEMS_ = solution_swapkskd['DEMS_']
Q0_ = solution_swapkskd['Q0_']
q_final = solution_swapkskd['q_final']
v_final = solution_swapkskd['v_final']
w_final = solution_swapkskd['w_final']
Phi_ = solution_swapkskd['Phi_']
Theta_ = solution_swapkskd['Theta_']
cdv = solution_swapkskd['cdv']
cdw = solution_swapkskd['cdw']
banlist = solution_swapkskd['banlist']
# Patch w_final, q final, v_final:
for i in N:
for j in N:
for k in K:
if (i,j,k) not in q_final:
q_final[i,j,k] = 0
if (i,j,k) not in w_final:
w_final[i,j,k] = 0
if (i,j,k) not in v_final:
v_final[i,j,k] = 0
for s in S:
if sum(w_final[s,j,k] for j in N for k in V_i[s]) < 1:
u_final[s] = 0
else:
u_final[s] = 1
for k in K:
y_final[k] = max(min(sum([w_final[i,j,k] for i in N for j in N]),1),0)
SatCost = sum([u_final[s]*epsil[s] for s in S])
VehicCost = sum([y_final[k]*delta[k] for k in K])
ArcCost = sum([w_final[i,j,k]*r[i,j]*rho[k] for i in N for j in N for k in K])
FreightCost = sum([gamma[k]*q_final[i,j,k]*r[i,j] for i in N for j in N for k in K])
Opt = SatCost + VehicCost + ArcCost + FreightCost
### STEP FOR VEHICLES FROM FIRMS ###
iters = iters + 1
if Opt < CurrentOpt:
##print('####################### REPORT FOR ITER %s #######################' % iters)
##print('Number of satellites open: %s at cost %s' % (sum([u_final[s] for s in S]), SatCost))
##print('Number of vehicles used: %s at cost %s' % (sum([y_final[k] for k in K]), VehicCost))
##print('Arc cost: %s' % ArcCost)
##print('Freight cost: %s' % FreightCost)
##print('Optimal value for original O.F: %s' % Opt)
CurrentOpt = Opt
tries = 1
else:
tries = tries + 1
##print('####################### FINAL REPORT #######################')
for k in K:
y_final[k] = max(min(sum([w_final[i,j,k] for i in N for j in N]),1),0)
SatCost = sum([u_final[s]*epsil[s] for s in S])
VehicCost = sum([y_final[k]*delta[k] for k in K])
ArcCost = sum([w_final[i,j,k]*r[i,j]*rho[k] for i in N for j in N for k in K])
FreightCost = sum([gamma[k]*qe_final[i,j,k]*r[i,j] for i in N for j in N for k in K])
#print('Number of satellites open: %s at cost %s' % (sum([u_final[s] for s in S]), SatCost))
#print('Number of vehicles used: %s at cost %s' % (sum([y_final[k] for k in K]), VehicCost))
#print('Arc cost: %s' % ArcCost)
#print('Freight cost: %s' % FreightCost)
Opt = SatCost + VehicCost + ArcCost + FreightCost
#print('Optimal value for original O.F: %s' % Opt)
return q_final, w_final, u_final, y_final, DictRoutes, Opt
"""
FUNCTIONS FOR PLOTTING
"""
def PlotNodes(XY, F, D, S, C, figsize = (20,20)):
fig, ax = plt.subplots(figsize= figsize)
plt.scatter(XY[F,0],XY[F,1],color='red', label= 'Goods')
plt.scatter(XY[D,0], XY[D,1],color='blue', label = 'Delivery')
plt.scatter(XY[S,0], XY[S,1],color='green', label = 'Satellites')
plt.scatter(XY[C,0], XY[C,1],color='brown', label = 'Clients')
for i in S:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in C:
ax.annotate(i, (XY[i,0], XY[i,1]))
return fig, ax
def PlotAssignsSatCli(XY,F, D, S, C, model, figsize = (20,20)):
l, u = model.__data
N = F+D+S+C
DcupS = D + S
    FcupD = F + D
NminC = F + D + S
l_opt = model.getAttr('x', l)
u_opt = model.getAttr('x', u)
colors = {}
for s in NminC:
colors[s] = tuple(np.random.rand(3))
dictveh = {}
fig, ax = plt.subplots(figsize = figsize)
S_op = []
for s in S:
if u_opt[s] > 0:
S_op.append(s)
##print(S_op)
plt.scatter(XY[F,0],XY[F,1],color='red', label= 'Goods')
plt.scatter(XY[D,0], XY[D,1],color='blue', label = 'Delivery')
plt.scatter(XY[S_op,0], XY[S_op,1],color='green', label = 'Satellites')
plt.scatter(XY[C,0], XY[C,1],color='brown', label = 'Clients')
for s in NminC:
flag_v = True
for c in C:
if l_opt[s,c] > 0:
x1, x2 = XY[s,0], XY[c,0]
y1, y2 = XY[s,1], XY[c,1]
plt.plot([x1,x2],[y1,y2],
color = colors[s],
linestyle = 'dashed',
label = 'Satelite %s' % s if flag_v else "")
flag_v = False
for i in F:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in D:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in S:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in C:
ax.annotate(i, (XY[i,0], XY[i,1]))
plt.legend()
plt.show()
def PlotAssignsVehCli(XY,F, D, S, C, V_i, model, figsize = (20,20)):
y, z = model.__data
Vs = [item for sublist in [V_i[s] for s in S] for item in sublist]
Vd = [item for sublist in [V_i[d] for d in D] for item in sublist]
Vf = [item for sublist in [V_i[f] for f in F] for item in sublist]
VdcupVs = Vd + Vs
    VfcupVs = Vf + Vs
K = Vf + Vd + Vs
N = F+D+S+C
DcupS = D + S
    FcupD = F + D
NminC = F + D + S
z_opt = model.getAttr('x', z)
colors = {}
for s in NminC:
for k in V_i[s]:
colors[k] = tuple(np.random.rand(3))
dictveh = {}
for i in NminC:
for k in V_i[i]:
dictveh[k] = i
fig, ax = plt.subplots(figsize = figsize)
plt.scatter(XY[F,0],XY[F,1],color='red', label= 'Goods')
plt.scatter(XY[D,0], XY[D,1],color='blue', label = 'Delivery')
plt.scatter(XY[S,0], XY[S,1],color='green', label = 'Satellites')
plt.scatter(XY[C,0], XY[C,1],color='brown', label = 'Clients')
for k in K:
flag_v = True
for c in C:
try:
if z_opt[k,c] > 0:
s = dictveh[k]
x1, x2 = XY[s,0], XY[c,0]
y1, y2 = XY[s,1], XY[c,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if flag_v else "")
flag_v = False
except:
pass
for i in F:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in D:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in S:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in C:
ax.annotate(i, (XY[i,0], XY[i,1]))
plt.legend()
plt.show()
def AuxSubPlot(data, w_opt, figsize = (20,20), save = False, filename = 'test'):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
X,Y = XY[:,0], XY[:,1]
label = ['Goods' for f in F] + ['Delivery' for d in D] + ['Satellite' for s in S] + ['Clients' for c in C]
n_label = [0 for f in F] + [1 for d in D] + [2 for s in S] + [3 for c in C]
colors_xy = ['red','blue','green','brown']
N = F + D + S + C
NminC = F + D + S
dictveh = {}
for i in NminC:
for k in V_i[i]:
dictveh[k] = i
K = [item for sublist in [V_i[i] for i in NminC] for item in sublist]
cmapp = cm.get_cmap('viridis', len(K))
colors = {}
for k in K:
if k % 2 == 0:
colors[k] = cmapp(k)
else:
colors[k] = cmapp(K[::-1][k])
plt.figure(figsize=figsize)
plt.scatter(X[F], Y[F], label = 'Firms', color = 'red')
plt.scatter(X[D], Y[D], label = 'Delivery', color = 'blue')
plt.scatter(X[S], Y[S], label = 'Satellites', color = 'green')
plt.scatter(X[C], Y[C], label = 'Clients', color = 'brown')
for i in range(XY.shape[0]):
x = X[i]
y = Y[i]
plt.text(x+0.3, y+0.3, i, fontsize=9)
for f in F:
for k in V_i[f]:
for i in N:
for j in N:
key = (i,j,k)
if key in w_opt:
if w_opt[key] > 0:
x1, x2 = XY[i,0], XY[j,0]
y1, y2 = XY[i,1], XY[j,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if i == dictveh[k] else "")
plt.legend()
plt.title('Vehicles from Firms')
if save:
plt.tight_layout()
plt.savefig('%s-firms.png' % filename, dpi = 250)
plt.figure(figsize=figsize)
plt.scatter(X[F], Y[F], label = 'Firms', color = 'red')
plt.scatter(X[D], Y[D], label = 'Delivery', color = 'blue')
plt.scatter(X[S], Y[S], label = 'Satellites', color = 'green')
plt.scatter(X[C], Y[C], label = 'Clients', color = 'brown')
for i in range(XY.shape[0]):
x = X[i]
y = Y[i]
plt.text(x+0.3, y+0.3, i, fontsize=9)
for d in D:
for k in V_i[d]:
for i in N:
for j in N:
key = (i,j,k)
if key in w_opt:
if w_opt[key] > 0:
x1, x2 = XY[i,0], XY[j,0]
y1, y2 = XY[i,1], XY[j,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if i == dictveh[k] else "")
plt.legend()
plt.title('Vehicles from Delivery')
if save:
plt.tight_layout()
plt.savefig('%s-delivery.png' % filename, dpi = 250)
plt.figure(figsize=figsize)
plt.scatter(X[F], Y[F], label = 'Firms', color = 'red')
plt.scatter(X[D], Y[D], label = 'Delivery', color = 'blue')
plt.scatter(X[S], Y[S], label = 'Satellites', color = 'green')
plt.scatter(X[C], Y[C], label = 'Clients', color = 'brown')
for i in range(XY.shape[0]):
x = X[i]
y = Y[i]
plt.text(x+0.3, y+0.3, i, fontsize=9)
for s in S:
for k in V_i[s]:
for i in N:
for j in N:
key = (i,j,k)
if key in w_opt:
if w_opt[key] > 0:
x1, x2 = XY[i,0], XY[j,0]
y1, y2 = XY[i,1], XY[j,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if i == dictveh[k] else "")
plt.legend()
plt.title('Vehicles from Satellite')
if save:
plt.tight_layout()
plt.savefig('%s-sat.png' % filename, dpi = 250)
plt.show()
def RecoverOriginalqValues(data, DictRoutes):
DEM = data['DEM']
V_i = data['V_i']
S = data['S']
D = data['D']
F = data['F']
P = data['P']
P_f = data['P_f']
q_subp = {}
# vehicles from satellites
for s in S:
DEM[s] = np.zeros(len(P), dtype=int)
for k in V_i[s]:
if k in DictRoutes.keys():
if DictRoutes[k]:
                    # Cumulative demand for vehicle
sumdemand = np.zeros(len(P), dtype=int)
# Last node visited
for t in DictRoutes[k][::-1]:
i,j = t
if j != s:
for p in P:
sumdemand[p] = sumdemand[p] + DEM[j][p]
q_subp[p,i,j,k] = sumdemand[p]
DEM[s] = DEM[s] + sumdemand
DEM[s] = list(DEM[s])
# vehicles from delivery
for d in D:
DEM[d] = np.zeros(len(P), dtype=int)
for k in V_i[d]:
if k in DictRoutes.keys():
if DictRoutes[k]:
                    # Cumulative demand for vehicle
sumdemand = np.zeros(len(P), dtype=int)
# Last node visited
# HERE I NEED TO DELETE THE FREIGHTS FROM PRODUCTS FROM FIRMS WITH VEHICLES
for t in DictRoutes[k][::-1]:
i,j = t
if j != d:
if j not in F:
for p in P:
sumdemand[p] = sumdemand[p] + DEM[j][p]
q_subp[p,i,j,k] = sumdemand[p]
else:
PminPf = [p for p in P if p not in P_f[j]]
for p in P_f[j]:
q_subp[p,i,j,k] = 0
aux = max([value for key, value in q_subp.items() if key[0] == p and key[3] == k])
sumdemand[p] = sumdemand[p] - aux
for p in PminPf:
aux = max([value for key, value in q_subp.items() if key[0] == p and key[3] == k])
q_subp[p,i,j,k] = aux
DEM[d] = DEM[d] + sumdemand
DEM[d] = list(DEM[d])
# vehicles from firms
# for f in F:
# for k in V_i[f]:
# if k in DictRoutes.keys():
# if DictRoutes[k]:
# # Cummulative demand for vehicle
# sumdemand = np.zeros(len(P), dtype=int)
# # Last node visited
# for t in DictRoutes[k][::-1]:
# i,j = t
# if j != f:
# for p in P:
# sumdemand[p] = sumdemand[p] + DEM[j][p]
# q_subp[p,i,j,k] = sumdemand[p]
# for p in P
return q_subp
def SaveSolHeuristic(data, file, dt, soldir, q_final, w_final, u_final, y_final, DictRoutes, Opt):
#Create Excell Writter
writer = pd.ExcelWriter(os.path.join(soldir, file), engine='xlsxwriter')
# Save solutions: q
q_final = RecoverOriginalqValues(data, DictRoutes)
dfq = []
for key, value in dict(q_final).items():
if value > 0:
# #print(key, value)
dfq.append([*key, value])
dfq = pd.DataFrame(data=dfq, columns=['p', 'i', 'j', 'k', 'q_final'])
dfq.to_excel(writer, index=False, sheet_name = "q")
# Save solutions: w
dfw = []
for key, value in dict(w_final).items():
if value > 0:
dfw.append([*key, value])
dfw = pd.DataFrame(data=dfw, columns=['i', 'j', 'k', 'w_final'])
dfw.to_excel(writer, index=False, sheet_name="w")
# Save solutions: u
dfu = []
for key, value in dict(u_final).items():
if value > 0:
dfu.append([key, value])
    dfu = pd.DataFrame(data=dfu, columns=['s', 'u_final'])
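    # Assumed continuation (truncated in the source): write the u sheet,
    # mirroring how the q and w solutions are saved above.
    dfu.to_excel(writer, index=False, sheet_name="u")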
#!/usr/bin/env python3
""" Refresh data files for the COVID-19 MTL dashboard """
import datetime as dt
# import logging
import io
import json
import os
import shutil
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from pathlib import Path
import dateparser
import numpy as np
import pandas as pd
import pytz
import requests
from bs4 import BeautifulSoup
pd.options.mode.chained_assignment = None
DATA_DIR = os.path.join(os.path.dirname(__file__), 'app', 'data')
NB_RETRIES = 3
TIMEZONE = pytz.timezone('America/Montreal')
# Data sources mapping
# {filename: url}
# Montreal
SOURCES_MTL = {
# HTML
'data_mtl.html':
'https://santemontreal.qc.ca/en/public/coronavirus-covid-19/situation-of-the-coronavirus-covid-19-in-montreal',
# CSV (Note: ";" separated; Encoding: windows-1252/cp1252)
'data_mtl_ciuss.csv':
'https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/ciusss.csv',
'data_mtl_municipal.csv':
'https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/municipal.csv',
'data_mtl_age.csv':
'https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/grage.csv',
'data_mtl_sex.csv':
'https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/sexe.csv',
'data_mtl_new_cases.csv':
'https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/courbe.csv',
# updated once a week on Tuesdays
'data_mtl_vaccination_by_age.json':
'https://services5.arcgis.com/pBN1lh7yaF4K7Tod/arcgis/rest/services/VAXparGrpAGE_CSVuploadv3_ADEQ/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*', # noqa: E501
}
# INSPQ
SOURCES_INSPQ = {
# HTML
'INSPQ_main.html': 'https://www.inspq.qc.ca/covid-19/donnees',
'INSPQ_region.html': 'https://www.inspq.qc.ca/covid-19/donnees/regions',
'INSPQ_par_region.html': 'https://www.inspq.qc.ca/covid-19/donnees/par-region',
# CSV (Note: "," separated; Encoding: UTF-8)
'data_qc.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/covid19-hist.csv',
'data_qc_regions.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/regions.csv',
'data_qc_manual_data.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/manual-data.csv',
'data_qc_cases_by_network.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/tableau-rls-new.csv',
'data_qc_death_loc_by_region.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/tableau-rpa-new.csv',
'data_qc_vaccination.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/vaccination.csv',
'data_qc_variants.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/variants-cumul.csv',
# updated once a week on Tuesdays
'data_qc_preconditions.csv': 'https://www.inspq.qc.ca/sites/default/files/covid/donnees/comorbidite.csv',
}
# Quebec.ca/coronavirus
SOURCES_QC = {
# HTML
'QC_situation.html':
'https://www.quebec.ca/en/health/health-issues/a-z/2019-coronavirus/situation-coronavirus-in-quebec/',
'QC_vaccination.html':
'https://www.quebec.ca/en/health/health-issues/a-z/2019-coronavirus/situation-coronavirus-in-quebec/covid-19-vaccination-data/', # noqa: E501
# CSV
'data_qc_outbreaks.csv':
'https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/eclosions-par-milieu.csv', # noqa: E501
'data_qc_vaccines_by_region.csv':
'https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/auto/COVID19_Qc_Vaccination_RegionAdministration.csv', # noqa: E501
'data_qc_vaccines_received.csv':
'https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/doses-vaccins-7jours.csv', # noqa: E501
'data_qc_vaccines_situation.csv':
'https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/situation-vaccination.csv', # noqa: E501
# CSVs retired on July 12th (might be added again in the future)
# 'data_qc_7days.csv':
# 'https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/synthese-7jours.csv',
# 'data_qc_cases_by_region.csv':
# 'https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/cas-region.csv', # noqa: E501
'data_qc_vaccination_by_age.csv':
'https://msss.gouv.qc.ca/professionnels/statistiques/documents/covid19/COVID19_Qc_Vaccination_CatAge.csv',
}
def fetch(url):
"""Get the data at `url`. Our data sources are unreliable, so we retry a few times.
Parameters
----------
url : str
URL of data to fetch (csv or html file).
Returns
-------
str
utf-8 or cp1252 decoded string.
Raises
------
RuntimeError
Failed to retrieve data from URL.
"""
# add "random" string to url to prevent server-side caching
unix_time = datetime.now().strftime('%s')
query_param_separator = '&' if '?' in url else '?'
url = f'{url}{query_param_separator}{unix_time}'
for _ in range(NB_RETRIES):
resp = requests.get(url)
if resp.status_code != 200:
continue
# try to decode with utf-8 first,
# otherwise assume windows-1252
try:
return resp.content.decode('utf-8')
except UnicodeDecodeError:
return resp.content.decode('cp1252')
raise RuntimeError('Failed to retrieve {}'.format(url))
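# Illustrative only -- fetch() is used by the update checks below like so:
#
#     html = fetch(SOURCES_MTL['data_mtl.html'])
#     csv_text = fetch(SOURCES_INSPQ['data_qc.csv'])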
def save_datafile(filename, data):
"""Save `data` to `filename`
Parameters
----------
filename : str
Absolute path of file where data is to be saved.
data : str
Data to be saved
"""
with open(filename, 'w', encoding='utf-8') as f:
f.write(data)
# logging.info('Saved a new version of {}'.format(filename))
# def init_logging(args):
# format = '%(asctime)s:%(levelname)s'
# level = logging.WARNING
# if args.verbose:
# level = logging.INFO
# if args.debug:
# format += ':%(name)s'
# level = logging.DEBUG
# format += ':%(message)s'
# logging.basicConfig(filename=args.log_file, format=format, level=level)
def backup_processed_dir(processed_dir, processed_backups_dir):
"""Copy all files from data/processed to data/processed_backups/YYYY-MM-DD{_v#}
Parameters
----------
    processed_dir : str
Absolute path of dir that contains processed files to backup.
processed_backups_dir : str
Absolute path of dir in which to save the backup.
"""
date_tag = datetime.now(tz=TIMEZONE).date().isoformat()
# make backup dir
current_bkp_dir = os.path.join(processed_backups_dir, date_tag)
i = 1
while os.path.isdir(current_bkp_dir):
i += 1
current_bkp_dirname = date_tag + '_v' + str(i)
current_bkp_dir = os.path.join(processed_backups_dir, current_bkp_dirname)
else:
os.mkdir(current_bkp_dir)
# Copy all files from data/processed to data/processed_backups/YYYY-MM-DD{_v#}
for file in os.listdir(processed_dir):
file_path = os.path.join(processed_dir, file)
shutil.copy(file_path, current_bkp_dir)
def download_source_files(sources, sources_dir, version=True):
"""Download files from URL
    Files will be downloaded into data/sources/YYYY-MM-DD{_v#}/
Parameters
----------
sources : dict
dict in the format {filename:source} where source is a URL and filename is the name of
the file in which to save the downloaded data.
sources_dir : str
Absolute path of dir in which to save downloaded files.
version : bool
True, if source directories should be versioned if sources for the same date already exist, False otherwise.
"""
# create data/sources/YYYY-MM-DD{_v#}/ dir, use previous day date (data is reported for previous day)
yesterday_date = datetime.now(tz=TIMEZONE) - timedelta(days=1)
date_tag = yesterday_date.date().isoformat()
current_sources_dir = Path(sources_dir, date_tag)
if version:
i = 1
while current_sources_dir.is_dir():
i += 1
current_sources_dirname = date_tag + '_v' + str(i)
current_sources_dir = current_sources_dir.parent.joinpath(current_sources_dirname)
current_sources_dir.mkdir(exist_ok=True)
# Download all source data files to sources dir
for file, url in sources.items():
data = fetch(url)
fq_path = current_sources_dir.joinpath(file)
if not fq_path.exists():
save_datafile(fq_path, data)
else:
raise TypeError(f'{fq_path} already exists')
def get_latest_source_dir(sources_dir):
"""Get the latest source dir in data/sources.
Parameters
----------
sources_dir : str
Absolute path of sources dir.
Returns
-------
str
Name of latest source dir (e.g. 2020-06-01_v2) in data/sources/
"""
source_dirs = os.listdir(sources_dir)
source_dirs.sort()
latest_source_dir = source_dirs[-1]
return latest_source_dir
def get_source_dir_for_date(sources_dir, date):
"""Get the last source dir in data/sources for the given date.
Parameters
----------
sources_dir : str
Absolute path of sources dir.
date : str
ISO-8601 formatted date string.
Returns
-------
str
Name of latest source dir (e.g. 2020-06-01_v2) in data/sources/
"""
# get all directories with starting with the date
source_dirs = [directory for directory in Path(sources_dir).glob(f'{date}*/')]
# take the last one from the sorted list
latest_source_dir = sorted(source_dirs)[-1]
return latest_source_dir.name
def is_new_inspq_data_available(expected_date: dt.date):
"""Returns whether new date provided by INSPQ is available.
Data is available if the data's last date is equal the given expected date.
Parameters
----------
expected_date : date
The date that new data needs to have to be considered new.
Returns
----------
bool
True, if new data is available, False otherwise
"""
# check the manual CSV with the date provided in it (used in the boxes on their website)
content = fetch(SOURCES_INSPQ.get('data_qc_manual_data.csv'))
# directly load file from the web
df = pd.read_csv(io.StringIO(content))
# get cell with date
date_string = df.iloc[1, 6]
# remove superscript tags (1<sup>er</sup> février)
date_string = date_string.replace('<sup>', '').replace('</sup>', '')
csv_date = dateparser.parse(date_string).date() # type: ignore[union-attr]
# since 2021-01-29 the date the data was updated is provided
updated_date = expected_date + timedelta(days=1)
# in addition, verify the date of the historic QC data in the main CSV
content = fetch(SOURCES_INSPQ.get('data_qc.csv'))
df = pd.read_csv(io.StringIO(content), usecols=[0])
# get last cell with date
date_string = df.iloc[-1, 0]
csv_date2 = dateparser.parse(date_string).date() # type: ignore[union-attr]
# also check the vaccination data
# only updated Mon-Fri
# content = fetch(SOURCES_INSPQ.get('data_qc_vaccination.csv'))
# df = pd.read_csv(io.StringIO(content))
# get last cell with date
# date_string = df.iloc[-1, 0]
# csv_date3 = dateparser.parse(date_string).date() # type: ignore[union-attr]
return csv_date == updated_date and csv_date2 == expected_date
def is_new_qc_data_available(expected_date: dt.date):
"""Returns whether new date provided by Quebec (quebec.ca/coronavirus) is available.
Data is available if the data's last date is equal the given expected date.
Parameters
----------
expected_date : date
The date that new data needs to have to be considered new.
Returns
----------
bool
True, if new data is available, False otherwise
"""
# CSV retired on July 12th (might be added again in the future)
# check the 7 day overview CSV
# content = fetch(SOURCES_QC.get('data_qc_7days.csv'))
# directly load file from the web
# df = pd.read_csv(io.StringIO(content), header=None, sep=';')
# remove NaN line at the end
# df.dropna(how='all', inplace=True)
# get cell with date
# date_string = df.iloc[-1, 0]
# csv_date = dateparser.parse(date_string).date() # type: ignore[union-attr]
# check vaccine doses received CSV to ensure vaccination CSVs have been updated
content = fetch(SOURCES_QC.get('data_qc_vaccines_received.csv'))
df = pd.read_csv(io.StringIO(content), sep=';')
# get cell with date
date_string = df.iloc[0, 0]
csv_date2 = dateparser.parse(date_string).date() # type: ignore[union-attr]
# check vaccination situation CSV
content = fetch(SOURCES_QC.get('data_qc_vaccines_situation.csv'))
df = pd.read_csv(io.StringIO(content), sep=';', header=None)
# get cell with date
date_string = df.iloc[0, 1]
csv_date3 = dateparser.parse(date_string).date() # type: ignore[union-attr]
# additionally check the date provided on the website under the last table
content = fetch(SOURCES_QC.get('QC_situation.html'))
soup: BeautifulSoup = BeautifulSoup(content, 'lxml')
date_elements = [element for element in soup.select('div.ce-textpic div.ce-bodytext p')
if 'Source: ' in element.text]
# expected format of last element: "Source: TSP, MSSS (Updated on January 11, 2021 at 4 p.m.)"
date_text = date_elements[-1].contents[0]
date_text = date_text.split('Updated on')[-1].split(')')[0]
html_date = dateparser.parse(date_text).date() # type: ignore[union-attr]
# ensure vaccination data (what we are actually interested in is available)
content = fetch(SOURCES_QC.get('QC_vaccination.html'))
soup = BeautifulSoup(content, 'lxml')
date_elements = [element for element in soup.select('div.ce-textpic div.ce-bodytext p')
if 'Source: ' in element.text]
# expected format: "Source: CIUSSSCN-CIUSSSCOMTL-MSSS, January 13, 2021, 11 a.m."
date_text = date_elements[0].contents[0]
date_text = date_text.split(', ', 1)[-1]
vacc_date = dateparser.parse(date_text).date() # type: ignore[union-attr]
# the vaccination data update is provided the day of (not yesterday)
vacc_expected_date = expected_date + timedelta(days=1)
# return csv_date == expected_date \
return csv_date2 == expected_date \
and csv_date3 == vacc_expected_date \
and html_date == expected_date \
and vacc_date == vacc_expected_date
def is_new_mtl_data_available(expected_date: dt.date):
"""Returns whether new date provided by Sante Montreal is available.
Data is available if the data's last date is equal the given expected date.
Parameters
----------
expected_date : date
The date that new data needs to have to be considered new.
Returns
----------
bool
True, if new data is available, False otherwise
"""
# check the date on the website that is provided under the last table
content = fetch(SOURCES_MTL.get('data_mtl.html'))
soup: BeautifulSoup = BeautifulSoup(content, 'lxml')
date_elements = [element for element in soup.select('div.csc-textpic-text p.bodytext')
if 'extracted on' in element.text]
# check the last date element to ensure all data has been updated
date_text = date_elements[-1].contents[0]
date_text = date_text.split('extracted on ')[-1]
html_date = dateparser.parse(date_text).date() # type: ignore[union-attr]
# get new cases reported on page
top_table = soup.select('div.csc-textpic-text table.contenttable')[0]
new_cases = top_table.select('td h3')[1].text[1:]
# convert new cases to int
new_cases = int(new_cases.replace(' ', ''))
# get new cases from municipal CSV
content = fetch(SOURCES_MTL.get('data_mtl_municipal.csv'))
df = pd.read_csv(io.StringIO(content), sep=';', na_values='')
csv_new_cases = int(df.dropna(how='all').iloc[-1, 1])
content = fetch(SOURCES_MTL.get('data_mtl_age.csv'))
df = pd.read_csv(io.StringIO(content), sep=';', na_values='')
csv_new_cases2 = int(df.dropna(how='all').iloc[-1, 1])
return html_date == expected_date and new_cases == csv_new_cases and new_cases == csv_new_cases2
def load_data_qc_csv(source_file):
"""Returns pandas DataFrame with data from QC CSV.
Performs simple cleaning and renaming of columns.
Parameters
----------
source_file : str
Absolute path of source file.
"""
qc_df = pd.read_csv(source_file, encoding='utf-8', na_values=['', ' . ', ' .', '.'])
# cut off first rows with 'Date inconnue'
qc_df = qc_df[qc_df['Date'] != 'Date inconnue']
# sometimes extra rows for INC, Inconnue show up with ' .' or '.' in the column 'cas_cum_tot_n'
# considering the occurring string as NaN and drop those rows (otherwise cannot be converted to int later)
qc_df.dropna(subset=['cas_cum_tot_n'], inplace=True)
column_mappings = {
'Date': 'date',
# 'cas_cum_lab_n': '',
# 'cas_cum_epi_n': '',
'cas_cum_tot_n': 'cases',
# 'cas_cum_tot_t': '',
# 'cas_quo_tot_t': '',
# 'cas_quo_lab_n': '',
# 'cas_quo_epi_n': '',
'cas_quo_tot_n': 'new_cases',
'act_cum_tot_n': 'active_cases',
# 'act_cum_tot_t': '',
# 'cas_quo_tot_m': '',
# 'cas_quo_tot_tm': '',
'ret_cum_tot_n': 'recovered',
'ret_quo_tot_n': 'new_recovered',
'dec_cum_tot_n': 'deaths',
# 'dec_cum_tot_t': '',
# 'dec_quo_tot_t': '',
'dec_cum_chs_n': 'deaths_chsld',
'dec_cum_rpa_n': 'deaths_psr',
'dec_cum_dom_n': 'deaths_home',
'dec_cum_aut_n': 'deaths_other',
'dec_quo_tot_n': 'new_deaths',
# 'dec_quo_chs_n': '',
# 'dec_quo_rpa_n': '',
# 'dec_quo_dom_n': '',
# 'dec_quo_aut_n': '',
# 'dec_quo_tot_m': '',
# 'dec_quo_tot_tm': '',
# 'hos_cum_reg_n': '',
# 'hos_cum_si_n': '',
# 'hos_cum_tot_n': '',
# 'hos_cum_tot_t': '',
# 'hos_quo_tot_t': '',
# 'hos_quo_reg_n': '',
# 'hos_quo_si_n': '',
# 'hos_quo_tot_n': '',
# 'hos_quo_tot_m': '',
# 'psi_cum_tes_n': '',
# 'psi_cum_pos_n': '',
'psi_cum_inf_n': 'negative_tests',
# 'psi_quo_pos_n': '',
'psi_quo_inf_n': 'new_negative_tests',
# 'psi_quo_tes_n': '',
# 'psi_quo_pos_t': '',
}
# rename columns
for (old, new) in column_mappings.items():
qc_df.columns = qc_df.columns.str.replace(old, new)
# convert columns to int
if new != 'date':
qc_df[new] = qc_df[new].astype(int)
return qc_df
def update_data_qc_csv(sources_dir, processed_dir):
"""Replace old copy of data_qc.csv in processed_dir with latest version.
data_qc.csv file will be overwritten with the new updated file.
Parameters
----------
sources_dir : str
Absolute path of sources dir.
processed_dir : str
Absolute path of processed dir.
"""
# read latest data/sources/*/data_qc.csv
    latest_source_file = os.path.join(sources_dir, get_latest_source_dir(sources_dir), 'data_qc.csv')
    qc_df = load_data_qc_csv(latest_source_file)
# filter out all rows except Régions & RS99 (Ensemble du Québec) which contains total numbers for QC
qc_df = qc_df[(qc_df['Regroupement'] == 'Région') & (qc_df['Croisement'] == 'RSS99')]
# overwrite previous data/processed/data_qc.csv
qc_df.to_csv(os.path.join(processed_dir, 'data_qc.csv'), encoding='utf-8', index=False, na_rep='na')
def update_hospitalisations_qc_csv(sources_dir, processed_dir):
"""Takes data_qc_manual_date.csv and extracts historic hospitalisation data.
data_qc_hospitalisations.csv file will be overwritten with the new updated file.
Parameters
----------
sources_dir : str
Absolute path of sources dir.
processed_dir : str
Absolute path of processed dir.
"""
    latest_source_file = os.path.join(sources_dir, get_latest_source_dir(sources_dir), 'data_qc_manual_data.csv')
    # read latest data/sources/*/data_qc_manual_data.csv
    manual_df = pd.read_csv(latest_source_file, encoding='utf-8')
# get data after "Hospits et volumétrie"
# see: https://stackoverflow.com/a/38707263
index = (manual_df["Tuiles de l'accueil"] == 'Hospits et volumétrie').idxmax()
filtered_df = manual_df[index + 1:]
# drop columns with all NaN
filtered_df.dropna(axis=1, how='all', inplace=True)
# create new dataframe with proper header
# to use existing columns: filtered_df.iloc[0]
columns = ['date', 'hospitalisations', 'icu', 'hospitalisations (old)', 'tests']
hosp_df = pd.DataFrame(filtered_df.values[1:], columns=columns)
# convert date to ISO-8601
hosp_df['date'] = pd.to_datetime(hosp_df['date'], dayfirst=True, format='%d/%m/%Y')
# add column with all hospitalisation counts (old and new methods)
hosp_df['hospitalisations_all'] = hosp_df['hospitalisations'].combine_first(hosp_df['hospitalisations (old)'])
# overwrite previous data/processed/data_qc_hospitalisations.csv
hosp_df.to_csv(os.path.join(processed_dir, 'data_qc_hospitalisations.csv'), encoding='utf-8', index=False)
def update_vaccination_csv(sources_dir, processed_dir):
"""Replace old copies of vaccination data in processed_dir with latest version.
    data_qc_vaccination.csv and data_mtl_vaccination.csv will be overwritten with the new updated files.
Parameters
----------
sources_dir : str
Absolute path of sources dir.
processed_dir : str
Absolute path of processed dir.
"""
# read latest data/sources/*/data_qc_vaccination.csv
source_file = os.path.join(sources_dir, get_latest_source_dir(sources_dir), 'data_qc_vaccination.csv')
df = pd.read_csv(source_file, encoding='utf-8', index_col=0)
column_mappings = {
'Date': 'date',
'vac_quo_1_n': 'new_doses_1d',
'vac_quo_2_n': 'new_doses_2d',
'vac_cum_1_n': 'total_doses_1d',
'vac_cum_2_n': 'total_doses_2d',
'vac_quo_tot_n': 'new_doses',
'vac_cum_tot_n': 'total_doses',
'cvac_cum_tot_1_p': 'perc_1d',
'cvac_cum_tot_2_p': 'perc_2d',
}
# rename columns
for (old, new) in column_mappings.items():
df.columns = df.columns.str.replace(old, new)
# rename index
df.index.name = 'date'
# filter out rows
qc_df = df[(df['Regroupement'] == 'Région') & (df['Croisement'] == 'RSS99')]
mtl_df = df[(df['Regroupement'] == 'Région') & (df['Croisement'] == 'RSS06')]
# age_df = df[(df['Regroupement'] == "Groupe d'âge 1")]
# just keep the last date
# age_df = age_df.loc[age_df.index[-1]]
# overwrite previous files
qc_df.to_csv(os.path.join(processed_dir, 'data_qc_vaccination.csv'))
mtl_df.to_csv(os.path.join(processed_dir, 'data_mtl_vaccination.csv'))
# age_df.to_csv(os.path.join(processed_dir, 'data_qc_vaccination_by_age.csv'))
def append_mtl_cases_csv(sources_dir, processed_dir, date):
"""Append daily MTL borough data to cases.csv file.
cases.csv file will be overwritten with the new updated file.
Parameters
----------
sources_dir : str
Absolute path of sources dir.
processed_dir : str
Absolute path of processed dir.
date : str
ISO-8601 formatted date string to use as column name in cases_csv
"""
# Load csv files
day_csv = os.path.join(sources_dir, get_source_dir_for_date(sources_dir, date), 'data_mtl_municipal.csv')
cases_csv = os.path.join(processed_dir, 'cases.csv')
day_df = pd.read_csv(day_csv, sep=';', index_col=0, encoding='utf-8')
cases_df = pd.read_csv(cases_csv, index_col=0, encoding='utf-8', na_values='na')
# Select column to append. Select by expected name of column to ensure the column is there.
new_data_col = day_df.loc[:, 'Nombre de cas cumulatif, depuis le début de la pandémie']
# convert string to int if present
if not pd.api.types.is_numeric_dtype(new_data_col.dtype):
# Cleanup new_data_col
# Remove '.' thousands separator and any space
        new_data_col = new_data_col.str.replace('.', '', regex=False).str.replace(' ', '', regex=False)
# Remove '<' char. Note: this is somewhat inaccurate, as <5 counts
# will be reported as 5, but we cannot have any strings in the data.
new_data_col = new_data_col.str.replace('<', '')
# Enforce int type (will raise ValueError if any unexpected chars remain)
new_data_col = new_data_col.astype(int)
# Remove last row (total count)
new_data_col = new_data_col[:-1]
# check if date already exists in cases_csv, append if it doesn't
if date in cases_df.index:
print(f'MTL cases: {date} has already been appended to {cases_csv}')
else:
# check whether this is actually new data and not data from a previous day (see issue #34)
# data is not updated on Sat (for Fri) and Sun (for Sat) and still shows previous days data
# replace it with 'na' if it is the same
already_exists = False
# check with last row that has actual values
if pd.Series.all(cases_df.dropna().iloc[-1] == list(new_data_col)):
already_exists = True
# Append new row of data
cases_df.loc[date] = list(new_data_col)
if already_exists:
print(f'MTL cases: the data shown on {date} already exists for a previous day, replacing with "na"')
cases_df.iloc[-1] = 'na'
# Overwrite cases.csv
cases_df.to_csv(cases_csv, encoding='utf-8', na_rep='na')
def update_mtl_boroughs_csv(processed_dir):
"""Build MTL borough data based on cases.csv.
Parameters
----------
processed_dir : str
Absolute path to processed data dir.
"""
cases_csv = os.path.join(processed_dir, 'cases.csv')
cases_per100k_csv = os.path.join(processed_dir, 'data_mtl_boroughs.csv')
cases_df = pd.read_csv(cases_csv, index_col=0, encoding='utf-8', na_values='na', parse_dates=True)
# drop to be confirmed (TBC) cases
cases_df = cases_df.iloc[:, :-1]
# and drop rows with only NA
cases_df = cases_df.dropna(how='all').astype(int)
# convert index to DateTimeIndex
    cases_df.index = pd.to_datetime(cases_df.index)
"""This module contains the primary benchmarking class in MaterialsCoord."""
import re
import warnings
from collections import Counter, defaultdict
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from pkg_resources import resource_filename
from pymatgen.analysis.local_env import NearNeighbors
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from materialscoord.einstein_crystal_perturbation import perturb_einstein_crystal
_resource_dir = resource_filename("materialscoord", "structures")
_vire_re = re.compile("[^a-zA-Z]+")
_el_re = re.compile(r"[\d+-.]*")
CN_dict = Dict[str, float] # define coordination dictionary type
class Benchmark:
"""
Class for performing coordination number benchmarks on a set of structures
using different nearest neighbor methods.
Args:
structures: A set of structures. Should be given as a dictionary of
``{"name": Structure}``. The structures should be decorated
with a "coordination" property on each site specifying the correct
coordination environment. For example, if a site is bonded to
two oxygen and three chlorine atoms, the coordination property
should be a dictionary of ``{"O": 2, "Cl": 3}``. If multiple
coordination numbers are deemed correct these should be provided
as a list. For example, if the coordination to gallium could be
4 or 12 coordinate, the coordination property should a dictionary
of ``{"Ga": [4, 12]}``.
symprec: If not None, the benchmark will use symmetry to reduce
the number of sites for which the coordinate number is calculated.
symprec is the symmetry precision in Angstrom.
perturb_sigma: If not None, this will enable the
Einstein crystal test rig mode. Each site will be displaced
according a normal distribution with the width equal to
perturb_sigma. The perturbation complies thus with the expectation
for an Einstein crystal, in which the potential is given by
V(dr) = 1/2 * kspring * (dr)^2. kspring denotes the spring constant
with which the sites are tethered to their equilibrium position, and
dr is the distance of the site under consideration from its
equilibrium position.
remove_oxidation_states: Remove oxidation states from structures before
performing the benchmark. As oxidation states will not always be known,
any near neighbor methods that require oxidation states (i.e.,
MinimumVIRENN) should include a method to assign oxidation states
automatically. Setting this option to False allows testing whether the
automatic assignment of oxidation states will itself impact performance.
Furthermore, some methods, such as CrystalNN, have slightly different
behaviour if oxidation states are present. Again this option enables
testing this behaviour.
reciprocal_coordination: Several near neighbor methods are not reciprocal. I.e.,
if site A is bonded to site B, it is not guaranteed that site B is bonded
to site A. Enabling this option ensures that coordination is reciprocal by
evaluating the coordination of all sites and including all bonds. This
behaviour is the same as that provided by NearNeighbor.get_bonded_structure.
"""
all_structure_groups: List[str] = [
filename.stem for filename in Path(_resource_dir).iterdir() if filename.is_dir()
]
def __init__(
self,
structures: Dict[str, Structure],
symprec: Optional[float] = 0.01,
perturb_sigma: Optional[float] = None,
remove_oxidation_states: bool = True,
reciprocal_coordination: bool = True,
):
# make a deep copy to avoid modifying structures in place
self.structures = deepcopy(structures)
self.symprec = symprec
self.reciprocal_coordination = reciprocal_coordination
# use this to cache benchmark results
self._benchmark: Dict[NearNeighbors, Dict[str, List]] = defaultdict(dict)
for name, structure in structures.items():
if "coordination" not in structure.site_properties:
raise AttributeError(
"{} structure does not have the 'coordination' site "
"property".format(name)
)
if perturb_sigma:
for name, structure in self.structures.items():
self.structures[name] = perturb_einstein_crystal(
structure, perturb_sigma
)
# precompute the symmetrized structures to save time during the benchmark. Also,
# determine the total number of unique cations/anions each structure.
self.site_information: Dict[str, Dict[str, Any]] = {}
self.max_nsites = 0
n_structures_with_oxi = 0
for name, structure in self.structures.items():
if self.symprec:
sga = SpacegroupAnalyzer(structure, symprec=self.symprec)
equiv_sites = sga.get_symmetrized_structure().equivalent_sites
unique_site_idxs = np.unique(
sga.get_symmetry_dataset()["equivalent_atoms"]
)
else:
equiv_sites = [[s] for s in structure]
unique_site_idxs = list(range(len(structure)))
# cation_idxs and anion_idxs are the indexes of the cations/anions
# in the list of unique sites (not the list of ALL sites).
cation_degens = []
cation_idxs = []
anion_degens = []
anion_idxs = []
cations = set()
anions = set()
for i, sites in enumerate(equiv_sites):
# Specie check needed to see if site has an oxidation state at all.
if (
hasattr(sites[0].specie, "oxi_state")
and sites[0].specie.oxi_state >= 0
):
# based on previous implementation, neutral ions will be scored as
# cations, however, neutral to neutral bonding is allowed, hence
# don't add the element to the cations set as this will be used to
# prevent cation-cation bonding later on.
cation_degens.append(len(sites))
cation_idxs.append(i)
if sites[0].specie.oxi_state > 0:
cations.add(sites[0].specie.name)
elif (
hasattr(sites[0].specie, "oxi_state")
and sites[0].specie.oxi_state < 0
):
anion_degens.append(len(sites))
anion_idxs.append(i)
anions.add(sites[0].specie.name)
all_degens = [len(x) for x in equiv_sites]
total_all = sum(all_degens)
total_cations = sum(cation_degens)
total_anions = sum(anion_degens)
self.site_information[name] = {
"unique_idxs": unique_site_idxs,
"all_idxs": list(range(len(unique_site_idxs))),
"cation_idxs": cation_idxs,
"anion_idxs": anion_idxs,
"all_degens": all_degens,
"cation_degens": cation_degens,
"anion_degens": anion_degens,
"all_total": total_all,
"cation_total": total_cations,
"anion_total": total_anions,
"cations": cations,
"anions": anions,
}
self.max_nsites = max(self.max_nsites, len(unique_site_idxs))
if cation_idxs or anion_idxs:
n_structures_with_oxi += 1
# useful to know if all structures have oxidation states when calculating scores
self.all_structures_have_oxi = n_structures_with_oxi == len(structures)
if remove_oxidation_states:
for structure in self.structures.values():
structure.remove_oxidation_states()
@classmethod
def from_structure_group(
cls, structure_groups: Union[str, List[str]], **kwargs
) -> "Benchmark":
"""
Initialises the benchmark from a list of test structure classes.
Args:
structure_groups: One or more test structure groups. Options
include: "elemental", "common_binaries", "ABX3", "ABX4",
"A2BX4", "laves". See the "test_structures" folder for
the full list of options. Defaults to "elemental".
**kwargs: Additional keyword arguments that will be passed
to the Benchmark constructor.
"""
if isinstance(structure_groups, str):
structure_groups = [structure_groups]
filenames: List[Path] = []
for structure_group in structure_groups:
if structure_group not in Benchmark.all_structure_groups:
raise ValueError(f'"{structure_group}" is not a valid structure group')
filenames.extend(Path(_resource_dir, structure_group).glob("*.json"))
structures = {}
for filename in filenames:
name = Path(filename).stem
structures[name] = Structure.from_file(str(filename))
return cls(structures, **kwargs)
def benchmark(
self, methods: List[NearNeighbors], return_dataframe: bool = True
) -> Union[pd.DataFrame, Dict[NearNeighbors, Dict[str, List[CN_dict]]]]:
"""
Calculates the coordination numbers for all sites in all structures
using each nearest neighbor method.
Args:
methods: A list of NearNeighbors methods. E.g., from
``pymatgen.analysis.local_env``.
return_dataframe: Whether to return the results as a pandas
dataframe.
Returns:
If ``return_dataframe``. The benchmark results as a pandas
DataFrame, else a dictionary formatted as::
{method: {structure_name: List[cn_dicts]}}
Where the cn_dicts are given for each unique site in the structure.
See the docstring for `NearNeighbors.get_cn_dict` for the format of
cn_dict.
"""
for method in methods:
for name in self.structures:
if method not in self._benchmark or name not in self._benchmark[method]:
self._benchmark[method][name] = self._benchmark_structure(
name, method
)
if not return_dataframe:
return self._benchmark
method_names = _get_method_names(methods)
df_data: Dict[str, Dict[str, float]] = defaultdict(dict)
for method_name, method in zip(method_names, methods):
for name in self.structures:
for site_idx in range(self.max_nsites):
column = method_name + str(site_idx)
if site_idx < len(self.site_information[name]["unique_idxs"]):
val = self._benchmark[method][name][site_idx]
else:
val = None
df_data[column][name] = val
        return pd.DataFrame(data=df_data)
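# Minimal usage sketch (illustrative only; assumes pymatgen's CrystalNN is installed
# alongside MaterialsCoord -- adapt the structure group and methods as needed):
#
#     from pymatgen.analysis.local_env import CrystalNN
#
#     bm = Benchmark.from_structure_group("elemental")
#     results = bm.benchmark([CrystalNN()])  # pandas DataFrame: one CN dict per unique site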
""" Parsing source data into simple tsv datasets.
To parse Bgl3 and GB1, ENRICH2 MUST BE INSTALLED IN A SEPARATE CONDA ENVIRONMENT NAMED 'enrich2' """
from os.path import isfile, join
import collections
import numpy as np
import pandas as pd
import enrich2
import utils
def parse_avgfp():
""" create the gfp dataset from raw source data """
source_fn = "source_data/avgfp/amino_acid_genotypes_to_brightness.tsv"
out_fn = "data/avgfp/avgfp.tsv"
if isfile(out_fn):
print("err: parsed avgfp dataset already exists: {}".format(out_fn))
return
# load the source data
data = pd.read_csv(source_fn, sep="\t")
# remove the wild-type entry
data = data.loc[1:]
# create columns for variants, number of mutations, and score
variants = data["aaMutations"].apply(lambda x: ",".join([x[1:] for x in x.split(":")]))
num_mutations = variants.apply(lambda x: len(x.split(",")))
score = data["medianBrightness"]
# create the dataframe
cols = ["variant", "num_mutations", "score"]
data_dict = {"variant": variants.values, "num_mutations": num_mutations.values, "score": score.values}
    df = pd.DataFrame(data_dict, columns=cols)
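    # Assumed final step (truncated in the source): write the parsed dataset
    # to the tab-separated output path defined above.
    df.to_csv(out_fn, sep="\t", index=False)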
# -*- coding: utf-8 -*-
"""
Utils
==========================
Utility functions for the Distance Closure package
"""
# Copyright (C) 2015 by
# <NAME> <<EMAIL>>
# <NAME> <@.>
# <NAME> <<EMAIL>>
# All rights reserved.
# MIT license.
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
__author__ = """\n""".join([
'<NAME> <<EMAIL>>',
'<NAME> <@.>',
'<NAME> <<EMAIL>>',
])
__all__ = [
'dist2prox',
'prox2dist',
'dict2matrix',
'matrix2dict',
'dict2sparse'
]
#
# Proximity and Distance Conversions
#
def prox2dist(P):
"""
Transforms a matrix of non-negative ``[0,1]`` proximities P to distance weights in the ``[0,inf]`` interval:
.. math::
d = \\frac{1}{p} - 1
Args:
P (matrix): Proximity matrix
Returns:
D (matrix): Distance matrix
See Also:
:attr:`dist2prox`
"""
if (type(P).__module__.split('.')[0] == 'numpy'):
return _prox2dist_numpy(P)
elif (type(P).__module__.split('.')[1] == 'sparse'):
return _prox2dist_sparse(P)
else:
raise ("Format not accepted: try numpy or scipy.sparse formats")
def _prox2dist_sparse(A):
    A.data = _prox2dist_numpy(A.data)
return A
def _prox2dist_numpy(A):
f = np.vectorize(_prox2dist)
return f(A)
def _prox2dist(x):
if x == 0:
return np.inf
else:
return (1/float(x)) - 1
def dist2prox(D):
"""
Transforms a matrix of non-negative integer distances ``D`` to proximity/similarity weights in the ``[0,1]`` interval:
.. math::
p = \\frac{1}{(d+1)}
It accepts both dense and sparse matrices.
Args:
D (matrix): Distance matrix
Returns:
P (matrix): Proximity matrix
See Also:
:attr:`prox2dist`
"""
if (type(D).__module__.split('.')[0] == 'numpy'):
return _dist2prox_numpy(D)
elif (type(D).__module__.split('.')[1] == 'sparse'):
        return _dist2prox_sparse(D)
else:
raise ValueError("Format not accepted: try numpy or scipy.sparse formats")
def _dist2prox_sparse(A):
A.data = _dist2prox_numpy(A.data)
return A
def _dist2prox_numpy(A):
f = np.vectorize(_dist2prox)
return f(A)
def _dist2prox(x):
if x == np.inf:
return 0
else:
return (x + 1) ** -1
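# Quick round-trip sanity check for the two conversions (illustrative only):
#
#     >>> P = np.array([[1.0, 0.5], [0.5, 1.0]])
#     >>> prox2dist(P)
#     array([[0., 1.],
#            [1., 0.]])
#     >>> np.allclose(dist2prox(prox2dist(P)), P)
#     True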
#
# Data format Conversions
#
def dict2matrix(d):
"""
    Transforms a 2D dictionary into a numpy matrix. Useful when converting Dijkstra results.
Args:
d (dict): 2D dictionary
Returns:
m (matrix): numpy matrix
Warning:
        If your nodes have names instead of numbers assigned to them, make sure to keep a mapping.
Usage:
>>> d = {0: {0: 0, 1: 1, 2:3}, 1: {0: 1, 1: 0, 2:2}, 2: {0: 3, 1:2, 2:0}}
>>> dict2matrix(d)
[[ 0 1 3]
[ 1 0 2]
[ 3 2 0]]
See Also:
:attr:`matrix2dict`
Note:
Uses pandas to accomplish this in a one liner.
"""
return pd.DataFrame.from_dict(d).values
def matrix2dict(m):
"""
    Transforms a Numpy matrix into a 2D dictionary. Useful when comparing dense metric and Dijkstra results.
Args:
m (matrix): numpy matrix
Returns:
d (dict): 2D dictionary
Usage:
>>> m = [[0, 1, 3], [1, 0, 2], [3, 2, 0]]
>>> matrix2dict(m)
{0: {0: 0, 1: 1, 2:3}, 1: {0: 1, 1: 0, 2:2}, 2: {0: 3, 1:2, 2:0}}
See Also:
:attr:`dict2matrix`
Note:
Uses pandas to accomplish this in a one liner.
"""
df = | pd.DataFrame(m) | pandas.DataFrame |
import tensorflow as tf
import numpy as np
import pandas as pd
from collections import defaultdict
from datetime import datetime
from pathlib import Path
import tophat.callbacks as cbks
from tophat.data import FeatureSource, InteractionsSource
from tophat.constants import FType, FGroup
from tophat.tasks.wrapper import FactorizationTaskWrapper
from tophat.core import TophatModel
from tophat.evaluation import Validator
from tophat.utils.io import load_vocab
from tophat.datasets.movielens import fetch_movielens # ref: lightfm
SEED = 322
EMB_DIM = 30
# Get movielens data via lightfm
data = fetch_movielens(
indicator_features=False,
genre_features=True,
min_rating=4.0, # Pretend 5-star is an implicit 'like'
download_if_missing=True,
)
# Labels for tensorboard projector
item_lbls_df = | pd.DataFrame(data['item_labels']) | pandas.DataFrame |
"""
List of relations
Schema | Name | Type | Owner
--------+-----------+-------+----------
public | friday | table | postgres
public | monday | table | postgres
public | thursday | table | postgres
public | tuesday | table | postgres
public | wednesday | table | postgres
public | week | table | postgres
<day> Tables Columns
index | timestamp | customer_no | location | duration | no_locations | cust_path
-------+---------------------+-------------+----------+---------------------+--------------+------------------------------------
0 | 2019-09-06 07:00:00 | 1 | da | 1970-01-01 00:04:00 | 3 | daspch
1 | 2019-09-06 07:00:00 | 2 | dr | 1970-01-01 00:01:00 | 2 | drch
2 | 2019-09-06 07:00:00 | 3 | fr | 1970-01-01 00:03:00 | 4 | frspdach
'week' Table Columns
index | timestamp | customer_no | location | duration | no_locations | cust_path | day
-------+---------------------+-------------+----------+---------------------+--------------+--------------------------------------+-----------
0 | 2019-09-02 07:03:00 | 1 | da | 1970-01-01 00:02:00 | 2 | dach | monday
1 | 2019-09-02 07:03:00 | 2 | da | 1970-01-01 00:03:00 | 2 | dach | monday
2 | 2019-09-02 07:04:00 | 3 | da | 1970-01-01 00:02:00 | 2 | dach | monday
"""
from sqlalchemy import create_engine
import psycopg2
import pandas as pd
engine = create_engine(...)
week = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday']
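# Illustrative sketch (added): read the tables documented in the docstring above into pandas.
# Assumes `engine` points at the database that holds the <day> and week tables.
week_df = pd.read_sql("SELECT * FROM week", engine, parse_dates=["timestamp"])
daily_dfs = {day: pd.read_sql(f"SELECT * FROM {day}", engine, parse_dates=["timestamp"]) for day in week}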
def replace_location_names(location):
return location[0:2]
def make_neg_val_null(timestamp):
if timestamp.value < 0:
return ( | pd.Timedelta(0) | pandas.Timedelta |
"""Postprocesses data across dates and simulation runs before aggregating at geographic levels (ADM0, ADM1, or ADM2)."""
import concurrent.futures
import gc
import queue
import shutil
import threading
import numpy as np
import pandas as pd
import tqdm
from fastparquet import ParquetFile
from loguru import logger
from .numerical_libs import enable_cupy, reimport_numerical_libs, xp
from .util.util import _banner
# TODO switch to cupy.quantile instead of percentile (they didn't have that when we first wrote this)
# also double check but the api might be consistent by now so we don't have to handle numpy/cupy differently
def main(cfg):
"""Main method for postprocessing the raw outputs from an MC run."""
_banner("Postprocessing Quantiles")
# verbose = cfg["runtime.verbose"]
# use_gpu = cfg["runtime.use_cupy"]
run_dir = cfg["postprocessing.run_dir"]
data_dir = run_dir / "data"
metadata_dir = run_dir / "metadata"
# if verbose:
# logger.info(cfg)
output_dir = cfg["postprocessing.output_dir"]
if not output_dir.exists():
output_dir.mkdir(parents=True)
# Copy metadata
output_metadata_dir = output_dir / "metadata"
output_metadata_dir.mkdir(exist_ok=True)
# TODO this should probably recurse directories too...
for md_file in metadata_dir.iterdir():
shutil.copy2(md_file, output_metadata_dir / md_file.name)
adm_mapping = | pd.read_csv(metadata_dir / "adm_mapping.csv") | pandas.read_csv |
from scipy import stats
import numpy as np
import pandas as pd
import re
class PWR(object):
def __init__(self, weight=1, regress_to=None, values=None):
self.weight = weight
self.regress_to = regress_to
if values is None:
self.values = None
else:
self.values = values.copy()
def calculate(self, **kwargs):
self.pwrcol = [x for x in list(self.values) if x not in ['Player']][0]
return self
def regress(self, df):
self.values[self.pwrcol] = self.regress_to.regress(df, self.pwrcol)
class SRS(PWR):
def __init__(self, weight=1, regress_to=None):
PWR.__init__(self, weight, regress_to)
def calculate(self, **kwargs):
self.pwrcol = 'SRS'
if kwargs['season'] == 1:
df = kwargs['gamelog'].groupby('Player').agg({'Pts':'mean'})
df = df.rename(columns={'Pts':'SRS'}).reset_index()
self.values = df[['Player','SRS']]
else:
grouped = kwargs['gamelog'].groupby('Player').agg({'Difference':'sum','Opponent':lambda x: list(x)})
grouped['Games Played'] = grouped['Opponent'].str.len()
grouped['Margin'] = grouped['Difference'].values / grouped['Games Played'].values
grouped['SRS'] = grouped['Margin']
grouped['OldSRS'] = grouped['Margin']
players = grouped.to_dict('index')
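            # Fixed-point iteration: each player's SRS is their average scoring margin
            # plus the mean SRS of their opponents; stop once the largest per-player
            # update falls below 0.001 (or after 10000 sweeps), then center to mean zero.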
for i in range(10000):
delta = 0.0
for name, player in players.items():
sos = 0.0
for opponent in player['Opponent']:
sos += players[opponent]['SRS']
players[name]['OldSRS'] = player['SRS']
players[name]['SRS'] = player['Margin'] + (sos / player['Games Played'])
delta = max(delta, abs(players[name]['SRS'] - players[name]['OldSRS']))
if delta < 0.001:
break
srs_sum = 0.0
for name, player in players.items():
srs_sum += players[name]['SRS']
srs_avg = srs_sum / len(players)
for name, player in players.items():
players[name]['SRS'] = player['SRS'] - srs_avg
df = | pd.DataFrame.from_dict(players, orient='index') | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 08:32:32 2021
@author: Ramakrishnamekala
"""
import pandas as pd
import numpy as np
import ccxt
import talib.abstract as ta
from database import db
from technicalta import *
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import svm
import joblib
def texterconversion(text):
tex=text.replace('/','').replace('-','_').replace(' ','_').replace('(','').replace(')','')
return tex
df=list(db['BNBUSDT_1h'].find())
def Dataset(df):
global listof1
df= | pd.DataFrame(df) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
import pandas_datareader as pdr
import quandl
import csv
import yfinance as yf
import investpy
con_df = investpy.get_etf_historical_data(
etf='Consumer Discretionary Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
con_df = con_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(con_df.tail())
fin_df = investpy.get_etf_historical_data(
etf='Financial Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
fin_df = fin_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(fin_df.tail())
health_df = investpy.get_etf_historical_data(
etf='Health Care Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
health_df = health_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(health_df.tail())
tech_df = investpy.get_etf_historical_data(
etf='Technology Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
tech_df = tech_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(tech_df.tail())
consumerstaples_df = investpy.get_etf_historical_data(
etf='Consumer Staples Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
consumerstaples_df = consumerstaples_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(consumerstaples_df.tail())
industrial_df = investpy.get_etf_historical_data(
etf='Industrial Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
industrial_df = industrial_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(industrial_df.tail())
material_df = investpy.get_etf_historical_data(
etf='Materials Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
material_df = material_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(material_df.tail())
energy_df = investpy.get_etf_historical_data(
etf='Energy Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
energy_df = energy_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(energy_df.tail())
utilities_df = investpy.get_etf_historical_data(
etf='Utilities Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
utilities_df = utilities_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(utilities_df.tail())
realestate_df = investpy.get_etf_historical_data(
etf='Real Estate Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
realestate_df = realestate_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(realestate_df.tail())
commun_df = investpy.get_etf_historical_data(
etf='Communication Services Select Sector SPDR',
country='United States',
from_date='01/01/2012',
to_date='12/01/2030')
commun_df = commun_df.drop(['Open', 'High', 'Low', 'Volume', 'Currency', 'Exchange'], axis=1)
# print(commun_df.tail())
sp_df = pdr.get_data_yahoo(
"SPY",
start='2012-01-01',
end='2030-12-01')
sp_df = sp_df.drop(['Open', 'High', 'Low', 'Volume', 'Adj Close'], axis=1)
# Merging all the dataframes
etf_df = pd.merge(con_df, fin_df, left_index=True, right_index=True)
etf_df = pd.merge(etf_df, health_df, left_index=True, right_index=True)
etf_df = pd.merge(etf_df, tech_df, left_index=True, right_index=True)
etf_df = pd.merge(etf_df, consumerstaples_df, left_index=True, right_index=True)
etf_df = | pd.merge(etf_df, industrial_df, left_index=True, right_index=True) | pandas.merge |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import os.path
import sys
import importlib
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
import py_eegepe
import numpy as np
importlib.reload(py_eegepe)
import py_eegepe.paradigm as paradigm
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
"""wr: within run!"""
script_type = 'wr'
#%%
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
| pd.set_option('display.width', 1000) | pandas.set_option |
from typing import Union
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm_notebook as tqdm
class WRMSSEEvaluator(object):
def __init__(self, train_df: pd.DataFrame, valid_df: pd.DataFrame, calendar: pd.DataFrame, prices: pd.DataFrame):
train_y = train_df.loc[:, train_df.columns.str.startswith('d_')]
train_target_columns = train_y.columns.tolist()
weight_columns = train_y.iloc[:, -28:].columns.tolist()
train_df['all_id'] = 0 # for lv1 aggregation
id_columns = train_df.loc[:, ~train_df.columns.str.startswith('d_')].columns.tolist()
valid_target_columns = valid_df.loc[:, valid_df.columns.str.startswith('d_')].columns.tolist()
if not all([c in valid_df.columns for c in id_columns]):
valid_df = pd.concat([train_df[id_columns], valid_df], axis=1, sort=False)
self.train_df = train_df
self.valid_df = valid_df
self.calendar = calendar
self.prices = prices
self.weight_columns = weight_columns
self.id_columns = id_columns
self.valid_target_columns = valid_target_columns
weight_df = self.get_weight_df()
self.group_ids = (
'all_id',
'state_id',
'store_id',
'cat_id',
'dept_id',
['state_id', 'cat_id'],
['state_id', 'dept_id'],
['store_id', 'cat_id'],
['store_id', 'dept_id'],
'item_id',
['item_id', 'state_id'],
['item_id', 'store_id']
)
for i, group_id in enumerate(tqdm(self.group_ids)):
train_y = train_df.groupby(group_id)[train_target_columns].sum()
scale = []
for _, row in train_y.iterrows():
series = row.values[np.argmax(row.values != 0):]
scale.append(((series[1:] - series[:-1]) ** 2).mean())
setattr(self, f'lv{i + 1}_scale', np.array(scale))
setattr(self, f'lv{i + 1}_train_df', train_y)
setattr(self, f'lv{i + 1}_valid_df', valid_df.groupby(group_id)[valid_target_columns].sum())
lv_weight = weight_df.groupby(group_id)[weight_columns].sum().sum(axis=1)
setattr(self, f'lv{i + 1}_weight', lv_weight / lv_weight.sum())
def get_weight_df(self) -> pd.DataFrame:
day_to_week = self.calendar.set_index('d')['wm_yr_wk'].to_dict()
weight_df = self.train_df[['item_id', 'store_id'] + self.weight_columns].set_index(['item_id', 'store_id'])
weight_df = weight_df.stack().reset_index().rename(columns={'level_2': 'd', 0: 'value'})
weight_df['wm_yr_wk'] = weight_df['d'].map(day_to_week)
weight_df = weight_df.merge(self.prices, how='left', on=['item_id', 'store_id', 'wm_yr_wk'])
weight_df['value'] = weight_df['value'] * weight_df['sell_price']
weight_df = weight_df.set_index(['item_id', 'store_id', 'd']).unstack(level=2)['value']
weight_df = weight_df.loc[zip(self.train_df.item_id, self.train_df.store_id), :].reset_index(drop=True)
weight_df = pd.concat([self.train_df[self.id_columns], weight_df], axis=1, sort=False)
return weight_df
def rmsse(self, valid_preds: pd.DataFrame, lv: int) -> pd.Series:
valid_y = getattr(self, f'lv{lv}_valid_df')
score = ((valid_y - valid_preds) ** 2).mean(axis=1)
scale = getattr(self, f'lv{lv}_scale')
return (score / scale).map(np.sqrt)
def score(self, valid_preds: Union[pd.DataFrame, np.ndarray]) -> float:
assert self.valid_df[self.valid_target_columns].shape == valid_preds.shape
if isinstance(valid_preds, np.ndarray):
valid_preds = pd.DataFrame(valid_preds, columns=self.valid_target_columns)
valid_preds = | pd.concat([self.valid_df[self.id_columns], valid_preds], axis=1, sort=False) | pandas.concat |
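# --- Illustrative usage sketch (added for clarity; not part of the original class). ---
# File names and column layout follow the public M5 Forecasting data and are assumptions here.
if __name__ == "__main__":
    sales = pd.read_csv("sales_train_validation.csv")
    calendar = pd.read_csv("calendar.csv")
    prices = pd.read_csv("sell_prices.csv")
    d_cols = [c for c in sales.columns if c.startswith("d_")]
    train_df = sales.drop(columns=d_cols[-28:])      # history plus id columns
    valid_df = sales[d_cols[-28:]].copy()            # ground truth for the 28-day horizon
    evaluator = WRMSSEEvaluator(train_df, valid_df, calendar, prices)
    naive_preds = train_df[d_cols[-56:-28]].values   # "repeat the previous 28 days" baseline
    print("WRMSSE:", evaluator.score(naive_preds))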
import pandas as pd
import numpy as np
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import uic
import glob, sys, datetime, os, openpyxl, threading, timeit, time, shutil
from pyqtspinner.spinner import WaitingSpinner
from utils import *
from msnparse import *
from LatLon23 import string2latlon
from pyproj import _datadir, datadir
import config
import json
import csv
import resource
from pathlib import Path
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True) #enable highdpi scaling
QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True) #use highdpi icons
class External(QThread):
countChanged = pyqtSignal(int)
def run(self):
config.ProgressMsnEvent = 0
while config.ProgressMsnEvent < 100:
time.sleep(1)
self.countChanged.emit(config.ProgressMsnEvent)
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent) # Call the inherited classes __init__ method
uic.loadUi('maingui.ui', self) # Load the .ui file
self.TXTload = self.findChild(QPushButton, 'pushButton_TXTLoad') # Find the button
self.TXTload.clicked.connect(self.TXTLoadPressed) # Remember to pass the definition/method, not the return value!
self.progressbar = self.findChild(QProgressBar, 'progressBar')
self.progressbar.setValue(0)
self.benametext = self.findChild(QLineEdit, 'line_BEName')
self.belattext = self.findChild(QLineEdit, 'line_BELat')
self.belongtext = self.findChild(QLineEdit, 'line_BELong')
self.gpsfile = self.findChild(QCheckBox,'cbGPS')
self.turbo = self.findChild(QCheckBox, 'cbTurbo')
self.csname = self.findChild(QLineEdit, 'line_CS')
self.manualjassmmatch = self.findChild(QAction,'actionJASSM_Report_Match')
self.manualjassmmatch.triggered.connect(self.jassmmatch)
self.bullupdatebutton = self.findChild(QAction,'actionUpdate_Bullseyes')
self.bullupdatebutton.triggered.connect(self.bullupdate)
self.howtoguide = self.findChild(QAction,'actionHow_To_Guide')
self.howtoguide.triggered.connect(self.guideopen)
self.changelog = self.findChild(QAction,'actionChangelog')
self.changelog.triggered.connect(self.changelogopen)
self.BIDDS = self.findChild(QPushButton, 'pushButton_BIDDS')
self.BIDDS.clicked.connect(self.BIDDSopen)
self.Folder = self.findChild(QPushButton, 'pushButton_Folder')
self.Folder.clicked.connect(self.FolderCreate)
self.BIDDSDRD = self.findChild(QAction,'actionCopy_DRD_to_BIDDS')
self.BIDDSDRD.triggered.connect(self.BIDDSfolderopen)
try:
with open('defaults.json') as f:
config.defaults = json.load(f)
self.benametext.setText(config.defaults['BEname'])
self.belattext.setText(config.defaults['BElat'])
self.belongtext.setText(config.defaults['BElon'])
self.csname.setText(config.defaults['CS'])
if os.path.isdir(config.defaults['defaultpath']):
config.outputpath = config.defaults['defaultpath']
else:
config.outputpath = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
except:
pass
self.show() # Show the GUI
def guideopen(self):
prefixed = [filename for filename in os.listdir('.') if filename.startswith("BIDDS Debrief Card Guide") and filename.endswith("pdf")]
try:
os.startfile(prefixed[-1])
except:
QMessageBox.question(self, 'Error',
"Guide not found.",
QMessageBox.Ok)
return
def changelogopen(self):
prefixed = [filename for filename in os.listdir('.') if filename.startswith("Changelog") and filename.endswith("pdf")]
try:
os.startfile(prefixed[-1])
except:
QMessageBox.question(self, 'Error',
"Changelog not found.",
QMessageBox.Ok)
return
def jassmmatch(self):
config.bename = self.benametext.text()
config.belat = self.belattext.text()
config.belong = self.belongtext.text()
config.csname = '' #Doesnt overwrite callsign
if len(config.bename)>0 or len(config.belat)>0 or len(config.belong)>0:
try:
config.becoord = string2latlon(config.belat, config.belong, 'H% %d% %M')
except:
config.becoord = QMessageBox.question(self, 'Bullseye Error',
"Coordinate format not recognized.\n N/S DD MM.MMMM, E/W DDD MM.MMMM",
QMessageBox.Ok)
print('Bullseye Lat/Long Error: Input DD MM.MMMM format.')
return
else:
config.becoord = None
self.matchpick = UiJassmMatch(self)
self.matchpick.show()
def bullupdate(self):
config.bename = self.benametext.text()
config.belat = self.belattext.text()
config.belong = self.belongtext.text()
if len(config.bename)>0 or len(config.belat)>0 or len(config.belong)>0:
try:
config.becoord = string2latlon(config.belat, config.belong, 'H% %d% %M')
except:
config.becoord = QMessageBox.question(self, 'Bullseye Error',
"Coordinate format not recognized.\n N/S DD MM.MMMM, E/W DDD MM.MMMM",
QMessageBox.Ok)
print('Bullseye Lat/Long Error: Input DD MM.MMMM format.')
return
else:
config.becoord = None
selectedfile = QFileDialog.getOpenFileName(self, 'Select Debrief Card Excel File',
config.outputpath,
"Excel File (*.xlsx)")
if len(selectedfile[0])> 0:
debriefcard = pd.ExcelFile(selectedfile[0])
for sheet in debriefcard.sheet_names:
if 'Combined' in sheet:
wpns = pd.read_excel(debriefcard, sheet_name='Combined', index_col=None, na_filter=False)
wpns.astype({'TGT LAT': str, 'TGT LONG': str, 'TGT ELEV': str, 'BULL': str,'Release LAT': str, 'Release LONG': str,'Release Bull': str}).dtypes
wpns['TGT LAT'] = wpns['TGT LAT'].astype(str)
wpns['TGT LONG'] = wpns['TGT LONG'].astype(str)
                    wpns['BULL'] = wpns['BULL'].astype(str)
wpns['Release LAT'] = wpns['Release LAT'].astype(str)
wpns['Release LONG'] = wpns['Release LONG'].astype(str)
wpns['Release Bull'] = wpns['Release Bull'].astype(str)
for i, row in wpns.iterrows():
try:
wpns.at[i, 'BULL'] = bullcalculate(wpns.at[i, 'TGT LAT'], wpns.at[i, 'TGT LONG'])
except:
wpns.at[i, 'BULL'] = ''
#try:
wpns.at[i, 'Release Bull'] = bullcalculate(wpns.at[i, 'Release LAT'], wpns.at[i, 'Release LONG'])
#except:
# wpns.at[i, 'Release Bull'] = ''
wpns = wpns.fillna('')
wpns = wpns.replace('nan', '', regex=True)
append_df_to_excel(debriefcard, wpns, sheet_name="Combined", startrow=0,
index=False)
                    os.startfile(selectedfile[0])
def onCountChanged(self, value):
self.progressbar.setValue(value)
if value == 95:
config.ProgressMsnEvent = 96
config.jassmmatch = QMessageBox.question(self, 'JASSM Match',
"JASSM releases were found for your mission:\n\nWould you like to match these releases to a JMPS DTC JASSM TARGET SUMMARY for JDPI Data to show properly?\n\nYou can also complete this later by selecting Tools>JASSM Report Match.",
QMessageBox.Yes | QMessageBox.No)
if config.jassmmatch == QMessageBox.Yes:
selectedfile = QFileDialog.getOpenFileName(self, 'Open JMPS JASSM Report Excel File',
config.outputpath,
"Excel File (*.xlsm)")
if len(selectedfile[0]) > 0:
config.jassmreport_filename = selectedfile[0]
else:
config.jassmreport_filename = False
else:
config.jassmreport_filename = False
def TXTLoadPressed(self):
config.bename = self.benametext.text()
config.belat = self.belattext.text()
config.belong = self.belongtext.text()
config.csname = self.csname.text()
config.defaults['gpsfile'] = self.gpsfile.isChecked()
config.turbocharge = not self.turbo.isChecked()
if len(config.bename)>0 or len(config.belat)>0 or len(config.belong)>0:
try:
config.becoord = string2latlon(config.belat, config.belong, 'H% %d% %M')
except:
config.becoord = QMessageBox.question(self, 'Bullseye Error',
"Coordinate format not recognized.\n N/S DD MM.MMMM, E/W DDD MM.MMMM",
QMessageBox.Ok)
print('Bullseye Lat/Long Error: Input DD MM.MMMM format.')
return
else:
config.becoord = None
if len(config.csname)<1:
config.csname = QMessageBox.question(self, 'Callsign Error',
"Please enter a Callsign",
QMessageBox.Ok)
return
config.filename = QFileDialog.getOpenFileName(self, 'Open TXT file',
config.outputpath, "Text File (*.txt)")
#config.filename = QFileDialog.getOpenFileName(self, 'Open TXT file',
# os.path.join(os.getcwd(),'textfiles'), "Text File (*.txt)")
if len(config.filename[0]) > 0:
config.outputpath = os.path.dirname(config.filename[0])
try:
with open('defaults.json', 'w') as f:
config.defaults['BEname'] = config.bename
config.defaults['BElat'] = config.belat
config.defaults['BElon'] = config.belong
config.defaults['CS'] = ''.join([i for i in config.csname if not i.isdigit()])
config.defaults['defaultpath'] = config.outputpath
json.dump(config.defaults, f)
except:
pass
self.msnpicker = UiMsnPicker(self)
self.msnpicker.show()
self.calc = External()
self.calc.countChanged.connect(self.onCountChanged)
self.calc.start()
def BIDDSopen(self):
try:
os.startfile(config.defaults['bidds'])
except:
QMessageBox.question(self, 'File Error',
"Unable to find BIDDS Program located at " + str(config.defaults['bidds']) ,
QMessageBox.Ok)
def FolderCreate(self):
config.csname = self.csname.text()
basefolder = config.defaults['savepath']
foldername = datetime.datetime.now().strftime("%Y%m%d") + ' ' + config.csname
try:
if not os.path.isdir(basefolder):
basefolder = 'Desktop'
if basefolder == 'Desktop':
basefolder = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
basefolder = os.path.join(basefolder, config.defaults['savefolder'])
sortiefolder = os.path.join(basefolder, foldername)
Path(sortiefolder).mkdir(parents=True, exist_ok=True)
if os.path.isdir(sortiefolder):
os.startfile(sortiefolder)
config.outputpath = sortiefolder
except:
QMessageBox.question(self, 'Folder Error',
"Unable to create folder \nBase Folder: " + str(basefolder)+"\nSortie Folder: "+ str(sortiefolder),
QMessageBox.Ok)
def BIDDSfolderopen(self):
try:
biddspath, tail = os.path.split(config.defaults['bidds'])
if os.path.isdir(biddspath):
os.startfile(biddspath)
except:
QMessageBox.question(self, 'File Error',
"Unable to find BIDDS Program located at " + str(config.defaults['bidds']) ,
QMessageBox.Ok)
"""
CopyConfirm = QMessageBox.question(self, 'Copy DRD Files',
"Would you like to update the BIDDS folder DRD files with new ones?" ,
QMessageBox.Yes|QMessageBox.No)
if CopyConfirm ==QMessageBox.Yes:
selectedFolder = QFileDialog.getExistingDirectory(self, 'Select Folder with new DRD files',
config.outputpath)
count =0
if len(selectedFolder[0]) > 0:
for basename in os.listdir(selectedFolder):
if basename.endswith('.drd'):
pathname = os.path.join(selectedFolder, basename)
destfile = os.path.join(biddspath, basename)
if os.path.isfile(pathname):
if os.path.exists(destfile):
try:
os.remove(destfile)
except PermissionError as exc:
os.chmod(destfile, 0o777)
os.remove(destfile)
shutil.copy2(pathname, biddspath)
count += 1
QMessageBox.question(self, 'DRD Copy',
f"DRD Files copied: {count}.",
QMessageBox.Ok)
else:
QMessageBox.question(self, 'File Error',
"Unable to find BIDDS Program located at " + str(config.defaults['bidds']) ,
QMessageBox.Ok)
"""
class Worker(QObject):
"""
Must derive from QObject in order to emit signals, connect slots to other signals, and operate in a QThread.
"""
sig_done = pyqtSignal(int) # worker id: emitted at end of work()
def __init__(self, id: int):
super().__init__()
self.__id = id
self.__abort = False
@pyqtSlot()
def work(self):
thread_name = QThread.currentThread().objectName()
thread_id = int(QThread.currentThreadId()) # cast to int() is necessary
msnsplit()
self.sig_done.emit(self.__id)
def abort(self):
self.__abort = True
class UiJassmMatch(QDialog):
def __init__(self,parent=None):
super(UiJassmMatch, self).__init__(parent) # Call the inherited classes __init__ method
uic.loadUi('jassmmatch.ui', self) # Load the .ui file
self.debriefload = self.findChild(QPushButton, 'loaddebrief') # Find the button
self.debriefload.clicked.connect(self.debriefpicker)
self.sumload = self.findChild(QPushButton, 'loadjassmreport')
self.sumload.clicked.connect(self.jassmreportpicker)
self.match = self.findChild(QPushButton, 'match') # Find the button
self.match.clicked.connect(self.match_releases) # Remember to pass the definition/method, not the return value!
config.debriefcard_filename = ''
config.jassmreport_filename = ''
self.show() # Show the GUI
def match_releases(self):
self.match.setText('Matching...')
self.repaint()
newfilename = config.debriefcard_filename.replace('.xlsx', ' (matched).xlsx')
try:
shutil.copy(config.debriefcard_filename, newfilename)
except:
QMessageBox.question(self, 'File Error',
"File copy error, please close " + str(newfilename) + ' and try again.',
QMessageBox.Ok)
self.match.setText('Match')
self.repaint()
return
config.debriefcard_filename = newfilename
updatecombined = jassm_report_match(config.debriefcard_filename, config.jassmreport_filename)
if not updatecombined.empty:
updatecombined = updatecombined.fillna('')
updatecombined = updatecombined.replace('nan', '', regex=True)
append_df_to_excel(config.debriefcard_filename, updatecombined, sheet_name="Combined", startrow=0, index=False)
updatefillins(config.debriefcard_filename)
os.startfile(config.debriefcard_filename)
self.match.setText('Match')
self.repaint()
def debriefpicker(self):
selectedfile = QFileDialog.getOpenFileName(self, 'Open Debrief Card to Pair',
config.outputpath,
"Excel File (*.xlsx)")
if len(selectedfile[0]) > 0:
config.debriefcard_filename = selectedfile[0]
if len(config.debriefcard_filename) > 0 and len(config.jassmreport_filename) > 0:
self.match.setEnabled(True)
def jassmreportpicker(self):
selectedfile = QFileDialog.getOpenFileName(self, 'Open JMPS JASSM Report Excel File',
config.outputpath,
"Excel File (*.xlsm)")
if len(selectedfile[0]) > 0:
config.jassmreport_filename = selectedfile[0]
if len(config.debriefcard_filename) > 0 and len(config.jassmreport_filename) > 0:
self.match.setEnabled(True)
class UiMsnPicker(QDialog):
def __init__(self,parent=None):
super(UiMsnPicker, self).__init__(parent) # Call the inherited classes __init__ method
uic.loadUi('fdrsplit.ui', self) # Load the .ui file
self.cbFlights = self.findChild(QComboBox, 'cbFlights') # Find the button
self.SelectFlight = self.findChild(QPushButton, 'pushButton') # Find the button
self.SelectFlight.clicked.connect(self.SelectFlightPressed) # Remember to pass the definition/method, not the return value!
self.FlightCount = self.findChild(QLabel, 'labelFlightCount')
self.spinner = WaitingSpinner(self, True, True, Qt.ApplicationModal)
self.spinner.start()
self.__threads = None
self.start_threads()
self.show() # Show the GUI
def SelectFlightPressed(self):
#self.FlightCount.setText(print(self.cbFlights.currentIndex()))
idx = self.cbFlights.currentIndex()
i = int(config.ranges[idx][0])
j = int(config.ranges[idx][1])
config.msnData = config.msnData[i:j]
if config.turbocharge == True:
larmald = [m.start() for m in re.finditer('Change of IR IZ LAR', config.msnData)]
larjassm = [m.start() for m in re.finditer('Change of In-Range/In-Zone Status', config.msnData)]
rel = [m.start() for m in re.finditer('Weapon Scoring', config.msnData)]
config.larparse = len(larmald + larjassm) >0
releases = rel + larmald + larjassm
releases.sort()
records = [m.start() for m in re.finditer('Mission Event', config.msnData)]
print('Partial Sortie Search')
try:
if min(releases)> 30000:
K = min(releases)-30000
else:
K = 1
start = records[min(range(len(records)), key = lambda i: abs(records[i]-K))]
K = max(releases)+30000
stop = records[min(range(len(records)), key=lambda i: abs(records[i] - K))]
config.msnData = config.msnData[start:stop]
except:
print('Partial Sortie Search Error, searching full sortie...')
else:
larmald = [m.start() for m in re.finditer('Change of IR IZ LAR', config.msnData)]
larjassm = [m.start() for m in re.finditer('Change of In-Range/In-Zone Status', config.msnData)]
config.larparse = len(larmald + larjassm) >0
config.parse_pending = threading.Event()
config.ProgressMsnEvent = 2.3
threadParse = threading.Thread(target=Parse)
threadParse.start()
self.close()
def spinner_start(self):
self.spinner.start()
def spinner_stop(self):
self.spinner.stop()
def start_threads(self):
self.SelectFlight.setDisabled(True)
self.__threads = []
for idx in range(1):
worker = Worker(idx)
thread = QThread()
thread.setObjectName('thread_' + str(idx))
self.__threads.append((thread, worker)) # need to store worker too otherwise will be gc'd
worker.moveToThread(thread)
worker.sig_done.connect(self.on_worker_done)
thread.started.connect(worker.work)
thread.start() # this will emit 'started' and start thread's event loop
@pyqtSlot(int)
def on_worker_done(self, worker_id):
for thread, worker in self.__threads: # note nice unpacking by Python, avoids indexing
thread.quit() # this will quit **as soon as thread event loop unblocks**
thread.wait() # <- so you need to wait for it to *actually* quit
if config.msndates:
self.spinner_stop()
self.FlightCount.setText(str(len(config.msndates)))
self.cbFlights.clear()
for i in config.msndates:
self.cbFlights.addItem(i)
self.SelectFlight.setDisabled(False)
else:
self.close()
def Parse():
start_time = timeit.default_timer()
config.events_available = threading.Event()
config.releases_available = threading.Event()
config.lars_available = threading.Event()
config.ProgressMsnEvent = 0
threadEvents = threading.Thread(target=parsemsnevn)
threadEvents.start()
threadReleases = threading.Thread(target=parserelease)
threadReleases.start()
if config.larparse:
        threadLars = threading.Thread(target=parselar)
threadLars.start()
allWPNs = []
dfAllWPNS = []
allLARs = []
reader = csv.reader(open('wpncodes.csv', 'r'))
config.wpncodes = {}
for row in reader:
k, v = row
config.wpncodes[k] = v
while not config.events_available.wait(timeout=1) :
print('\r{}% done...'.format(config.ProgressMsnEvent), end='', flush=True)
config.ProgressMsnEvent = 80
if config.larparse:
config.lars_available.wait()
config.ProgressMsnEvent = 81
print('\r{}% done...'.format(config.ProgressMsnEvent), end='', flush=True)
config.releases_available.wait()
config.dfMsnEvents['Time (UTC)'] = pd.to_datetime(config.dfMsnEvents['Date'] + ' ' + config.dfMsnEvents['Time (UTC)'])
#dfMsnEvents.to_csv('msnevents.csv')
config.dfMsnEvents.sort_values(by=['Time (UTC)'], inplace=True)
filter_mask = config.dfMsnEvents['Time (UTC)'] > pd.Timestamp(2000, 1, 2)
config.dfMsnEvents = config.dfMsnEvents[filter_mask]
config.dfMsnEvents.drop_duplicates(subset="Time (UTC)", keep=False, inplace=True)
if len(config.jdam) > 0:
dfJDAM = | pd.DataFrame(config.jdam) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 29 13:49:20 2021
@author: alber
"""
import re
import os
import pandas as pd
import numpy as np
import spacy
import pickle
import lightgbm as lgb
import imblearn
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from sklearn import metrics
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
from nltk import ngrams
from nltk.stem.snowball import SnowballStemmer
from sklearn.preprocessing import minmax_scale
# from sentence_transformers import SentenceTransformer, util
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, ADASYN
from statsmodels.stats.inter_rater import cohens_kappa
from common.tools import get_files, file_presistance
from sklearn.metrics import roc_auc_score, r2_score, mean_absolute_error
from sklearn.multioutput import MultiOutputClassifier
from common.config import (
PATH_POEMS, PATH_RESULTS, PATH_AFF_LEXICON, PATH_GROUND_TRUTH
)
from itertools import product
nlp = spacy.load("es_core_news_md")
stemmer = SnowballStemmer("spanish")
def renormalize(n, range1, range2):
delta1 = range1[1] - range1[0]
delta2 = range2[1] - range2[0]
return (delta2 * (n - range1[0]) / delta1) + range2[0]
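# Quick illustrative check (added): mapping 5 from the range (0, 10) onto (100, 200)
# gives renormalize(5, (0, 10), (100, 200)) == 150.0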
# MAPE metric
def mape(y_real, y_pred):
"""
    Mean absolute percentage error (MAPE).
    Parameters
    ----------
    y_real : array-like
        Ground-truth values; must be non-zero (they appear in the denominator).
    y_pred : array-like
        Predicted values.
    Returns
    -------
    float
        Mean of the element-wise |(y_real - y_pred) / y_real|.
"""
y_real, y_pred = np.array(y_real), np.array(y_pred)
return np.mean(np.abs((y_real - y_pred) / y_real))
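# Quick illustrative check (added): mape([100, 200], [110, 180]) averages
# abs(-10/100) and abs(20/200) and returns 0.10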
def _getReport(
y_test, y_pred, y_pred_proba, target_names, using_affective = "yes",
semantic_model = "", used_model = ""
):
"""
    Build a one-row metrics DataFrame for a single category/model combination:
    Cohen's kappa, Pearson correlation, R2, MAPE and MAE are always computed;
    weighted precision/recall/F1 and multiclass AUC are added when class
    probabilities are provided.
    Parameters
    ----------
    y_test : array-like
        Ground-truth labels (1-4 scale).
    y_pred : array-like
        Predicted scores (rounded to the same scale for the kappa computation).
    y_pred_proba : array-like or DataFrame
        Per-class predicted probabilities; pass an empty sequence to skip the
        classification metrics.
    target_names : list
        Class names.
    using_affective : str, optional
        Whether affective features were used. The default is "yes".
    semantic_model : str, optional
        Name of the sentence-embedding model. The default is "".
    used_model : str, optional
        Name of the regression model. The default is "".
    Returns
    -------
    df_metrics_iter : pd.DataFrame
        Single-row DataFrame with the metrics above.
"""
### 1. Base Dataframe
df_metrics_iter = pd.DataFrame(
{
'category': [category],
'using_affective': [using_affective],
'semantic_model': [semantic_model],
'regression_model': [used_model]
}
)
### 2. Cohen's Kappa
# Make Dataframe
y_pred_cat = np.round(y_pred).astype(int)
y_pred_cat = np.array([x if x>0 else 1 for x in y_pred_cat])
df = pd.DataFrame({"A": y_test.astype(int), "B": y_pred_cat})
# Switch it to three columns A's answer, B's answer and count of that combination
df = df.value_counts().reset_index()
l1 = [1,2,3,4]
l2 = [1,2,3,4]
df_aux = pd.DataFrame(list(product(l1, l2)), columns=['A', 'B'])
# Check compliance
if len(df) < len(df_aux):
df = df.merge(df_aux, how="outer").fillna(0)
# Make square
square = df.pivot(columns="A",index="B").fillna(0).values
# Get Kappa
dct_kappa = cohens_kappa(square)
kappa_max = dct_kappa['kappa_max']
kappa = dct_kappa['kappa']
df_metrics_iter['kappa'] = [kappa]
df_metrics_iter['kappa_max'] = [kappa_max]
### Get Correlations
df_corr = pd.DataFrame({
'y_pred': y_pred,
'y_test': y_test
})
df_corr = df_corr.corr().reset_index()
df_corr = df_corr.round(2)
corr = df_corr.values[0][2]
df_metrics_iter['corr'] = [corr]
### 3. R2 & MAPE
r2_result = r2_score(y_test, y_pred)
mape_result = mape(y_test, y_pred)
mae_result = mean_absolute_error(y_test, y_pred)
df_metrics_iter['r2_result'] = [r2_result]
df_metrics_iter['mape_result'] = [mape_result]
df_metrics_iter['mae'] = [mae_result]
### 3. AUC
if len(y_pred_proba)>0:
## P, R, F1 metrics
report = classification_report(
y_test, y_pred, output_dict = True
)
df_metrics_iter['precision_weighted'] = [report['weighted avg']['precision']]
df_metrics_iter['recall_weighted'] = [report['weighted avg']['recall']]
df_metrics_iter['f1_weighted'] = [report['weighted avg']['f1-score']]
## Multiclass AUC
# Encoding
y_test_c = pd.get_dummies(y_test)
y_test_c = y_test_c.astype(int)
y_test_c.columns = [int(x)-1 for x in list(y_test_c.columns)]
y_pred_proba_c = y_pred_proba[y_test_c.columns].copy()
# y_test_c = pd.get_dummies(y_test)
# y_pred_c = pd.get_dummies(y_pred)
# cols_ref = list(set(list(y_test_c.columns)+list(y_pred_c.columns)))
# cols_ref = [1.0, 2.0, 3.0, 4.0]
# for column in cols_ref:
# if column not in list(y_test_c.columns):
# y_test_c[column] = 0
# if column not in list(y_pred_c.columns):
# y_pred_c[column] = 0
# y_test_c = y_test_c[cols_ref].astype(int)
# y_pred_c = y_pred_c[cols_ref].astype(int)
# Get Multiclass AUC
auc = roc_auc_score(y_test_c, y_pred_proba_c, average='weighted')
df_metrics_iter['auc'] = auc
# y_pred_proba = np.asarray([x if str(x) != 'nan' else 0.0 for x in y_pred_proba])
# fpr, tpr, thresholds = metrics.roc_curve(
# y_test, y_pred_proba, pos_label=1
# )
# auc = metrics.auc(fpr, tpr)
# df_metrics_iter['auc'] = [auc]
return df_metrics_iter
# =============================================================================
# 1. Prepare Data
# =============================================================================
### Load Sonnets Features
# Load Data
file_to_read = open(f"{PATH_RESULTS}/dct_sonnets_input_v5", "rb")
dct_sonnets = pickle.load(file_to_read)
file_to_read.close()
# Only DISCO
if False:
dct_sonnets = {x:y for x,y in dct_sonnets.items() if x <= 4085}
# Sonnet Matrix
list_original_sentence = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5'
]
list_semantic_models = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5',
# 'enc_text_model_hg_bert_max',
# 'enc_text_model_hg_bert_span',
# 'enc_text_model_hg_bert_median',
'enc_text_model_hg_bert_avg_w',
# 'enc_text_model_hg_bert_sp_max',
# 'enc_text_model_hg_bert_sp_span',
# 'enc_text_model_hg_bert_sp_median',
'enc_text_model_hg_bert_sp_avg_w',
# 'enc_text_model_hg_ro_max',
# 'enc_text_model_hg_ro_span',
# 'enc_text_model_hg_ro_median',
# 'enc_text_model_hg_ro_avg_w'
]
# General Variables
dct_metrics_all_models = {}
df_meta = pd.concat(
[
| pd.DataFrame({"index": [item["index"]], "text": [item["text"]]}) | pandas.DataFrame |
from myutils.utils import getConnection, cronlog
import pandas as pd
import numpy as np
import datetime
import requests
class TestRequest:
def __init__(self, url, method='GET', META=None, postdata=None):
self.method = method
u = url.split('?')
self.path_info = u[0]
self.META = META or {}
self.GET = {}
if len(u)>1:
for x in u[1].split('&'):
y = x.split('=')
if len(y)==1:
self.GET[x] = ''
else:
self.GET[y[0]] = y[1]
self.PUT = postdata
def get_full_path(self):
return url
conn, cur = getConnection()
if False:
s = """
DROP TABLE IF EXISTS price_function;
CREATE TABLE price_function (
id smallserial PRIMARY KEY
, date DATE NOT NULL
, slope FLOAT(8) NOT NULL
, intercept FLOAT(8) NOT NULL
, r FLOAT(8) NOT NULL
, created_on TIMESTAMP NOT NULL
);
"""
cur.execute(s)
conn.commit()
if False:
s = """
INSERT INTO price_function (date, slope, intercept, r, created_on)
VALUES
('2020-07-05', 3, 2.8, 0.9, CURRENT_TIMESTAMP),
('2020-07-04', 2., 2.9, 0.7, CURRENT_TIMESTAMP);
"""
cur.execute(s)
conn.commit()
s = 'select * from price_function;'
cur.execute(s)
list_tables = cur.fetchall()
print(list_tables)
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS price_forecast;
CREATE TABLE price_forecast (
id serial PRIMARY KEY
, datetime TIMESTAMP NOT NULL
, demand Float(8) NOT NULL
, solar Float(8) NOT NULL
, wind Float(8) NOT NULL
, price Float(4) NOT NULL
, created_on TIMESTAMP NOT NULL
);
"""
cur.execute(s)
conn.commit()
if False:
s = """
DROP TABLE IF EXISTS testing;
CREATE TABLE testing (
id serial PRIMARY KEY
, created_on TIMESTAMP NOT NULL
); """
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_periods;
CREATE TABLE sm_periods (
period_id serial PRIMARY KEY
, period Char(16) not null
, local_date Date not null
, local_time char(5) not null
, timezone_adj smallint not null
);
"""
cur.execute(s)
conn.commit()
df_idx = pd.date_range(datetime.datetime(2019,1,1), datetime.datetime(2020,10,1), freq='30min')
df_idx_local = df_idx.tz_localize('UTC').tz_convert('Europe/London')
df = pd.DataFrame(index=df_idx)
df['period'] = df_idx.strftime('%Y-%m-%d %H:%M')
df['local_date'] = df_idx_local.strftime('%Y-%m-%d')
df['local_time'] = df_idx_local.strftime('%H:%M')
df['timezone_adj'] = df_idx_local.strftime('%z').str[0:3].astype(int)
df.reset_index(inplace=True)
start = """
INSERT INTO sm_periods (period_id, period, local_date, local_time, timezone_adj)
VALUES
"""
s=""
for i, j in df.iterrows():
s+= "({},'{}', '{}', '{}', {}),".format(i, j['period'], j['local_date'],j['local_time'], j['timezone_adj'])
if (i+1)%1000==0:
print('done: {}'.format(i+1))
cur.execute(start + s[:-1] + ';')
conn.commit()
s=""
print('done: {}'.format(i+1))
cur.execute(start + s[:-1] + ';')
conn.commit()
s=""
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_accounts;
CREATE TABLE sm_accounts (
account_id serial PRIMARY KEY
, type_id smallint not null
, first_period varChar(16) not null
, last_period varChar(16) not null
, last_updated TIMESTAMP not null
, hash varChar(64) not null
, region varChar(1)
, source_id smallint not null
);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_quantity;
CREATE TABLE sm_quantity (
id serial PRIMARY KEY
, account_id integer not null
, period_id integer not null
, quantity float(8) not null
);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_hh_variables;
CREATE TABLE sm_hh_variables (
var_id serial PRIMARY KEY
, var_name varchar(32) not null
, var_type varchar(32));
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_d_variables;
CREATE TABLE sm_d_variables (
var_id serial PRIMARY KEY
, var_name varchar(32) not null
, var_type varchar(32));
"""
cur.execute(s)
conn.commit()
if False: # Creates new hh tariff variables in sm_hh_variables and sm_tariffs
product = 'AGILE-OUTGOING-19-05-13'
type_id=2
s = f"""
delete from sm_hh_variables where var_name like '{product}%';
delete from sm_tariffs where product='{product}';
"""
cur.execute(s)
conn.commit()
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"""
INSERT INTO sm_hh_variables (var_name) values ('{product}-{region}');
"""
cur.execute(s)
conn.commit()
s = f"select var_id from sm_hh_variables where var_name='{product}-{region}';"
cur.execute(s)
var_id = cur.fetchone()[0]
conn.commit()
s = f"""
INSERT INTO sm_tariffs (type_id, product, region, granularity_id, var_id) values
({type_id}, '{product}', '{region}', 0, {var_id});
"""
cur.execute(s)
conn.commit()
START='201901010000'
if False: #Inserts initial prices into hh tariff variables
import requests
idx = pd.date_range(START, '202101010000', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = pd.DataFrame(idx, columns=['timestamp'])
for region in ['B','C','D','E','F','G','H','J','K','L','M','N','P']:
tariff = 'AGILE-OUTGOING-19-05-13'
url = ('https://api.octopus.energy/v1/products/{}/' +
'electricity-tariffs/E-1R-{}-{}/standard-unit-rates/' +
'?period_from={}Z&period_to={}Z&page_size=15000')
url = url.format(tariff, tariff, region,
df.timestamp.iloc[0].strftime('%Y-%m-%dT%H:%M'),
df.timestamp.iloc[-1].strftime('%Y-%m-%dT%H:%M'))
r = requests.get(url)
dfs = []
dfs.append(pd.DataFrame(r.json()['results'])[['valid_from','value_exc_vat']])
while r.json()['next'] is not None:
r = requests.get(r.json()['next'])
dfs.append(pd.DataFrame(r.json()['results'])[['valid_from','value_exc_vat']])
if len(dfs)>30:
raise Exception
dfs = pd.concat(dfs)
dfs['timestamp'] = pd.DatetimeIndex(dfs.valid_from.str[:-1])
dfs = df.merge(right=dfs, how='left', on='timestamp')
dfs = dfs[dfs.value_exc_vat.notna()]
s = f"select var_id from sm_hh_variables where var_name='{tariff}-{region}';"
cur.execute(s)
var_id = cur.fetchone()[0]
conn.commit()
print(f'{var_id} {tariff} {region}' )
s = """
delete from sm_hh_variable_vals where var_id={};
"""
s = s.format(var_id)
cur.execute(s)
conn.commit()
s = """
INSERT INTO sm_hh_variable_vals (var_id, period_id, value) values
"""
s = s.format(var_id)
for i, j in dfs.iterrows():
s+= " ({}, {}, {}),".format(var_id, i, j.value_exc_vat)
s = s[:-1] + ';'
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_hh_variable_vals;
CREATE TABLE sm_hh_variable_vals (
id serial primary key
, var_id integer not null
, period_id integer not null
, value float(8) not null);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_d_variable_vals;
CREATE TABLE sm_d_variable_vals (
id serial primary key
, var_id integer not null
, local_date date not null
, value float(8) not null);
"""
cur.execute(s)
conn.commit()
from myutils.utils import loadDataFromDb
if False: #Creates daily tracker variables
product = 'SILVER-2017-1'
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"""
insert into sm_d_variables (var_name) values ('{product}-{region}') returning var_id; """
var_id = loadDataFromDb(s)[0][0]
print(var_id)
s = f"""
insert into sm_tariffs (product, region, var_id, type_id, granularity_id) values
('{product}', '{region}', {var_id}, 1, 1); """
loadDataFromDb(s)
if False:
product = 'SILVER-2017-1'
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"select var_id from sm_variables where product='{product}' and region='{region}' ;"
var_id = loadDataFromDb(s)[0][0]
r = requests.get(f'https://octopus.energy/api/v1/tracker/G-1R-SILVER-2017-1-{region}/daily/past/540/1/')
dates = [x['date'] for x in r.json()['periods']]
prices = [x['unit_rate'] for x in r.json()['periods']]
d = pd.Series(prices, index=dates)
d = d[:datetime.date.today().strftime('%Y-%m-%d')]
d = d/1.05
d = d.round(2)
s = 'insert into sm_d_variable_vals (var_id, local_date, value) values '
for i, j in d.iteritems():
s+= f"({var_id}, '{i}', {j}),"
s = s[:-1]+';'
loadDataFromDb(s)
print(region)
if False:
conn.commit()
import requests
idx = pd.date_range(START, '202101010000', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = pd.DataFrame(idx, columns=['timestamp'])
s = """
select sm_hh_variables.var_id, var_name, min(sm_periods.period_id) as period_id, min(period) as period
from sm_periods cross join sm_hh_variables
left outer join sm_hh_variable_vals on sm_periods.period_id=sm_hh_variable_vals.period_id
and sm_hh_variable_vals.var_id=sm_hh_variables.var_id
where sm_hh_variable_vals.id is null
group by sm_hh_variables.var_id, var_name;
"""
mins = loadDataFromDb(s, returndf=True)
for i, j in mins.iterrows():
tariff = j.var_name
if 'AGILE-18-02-21' not in tariff:
continue
print(tariff)
start = j.period.replace(' ','T')
end = '2021-01-01T00:00'
url = ('https://api.octopus.energy/v1/products/AGILE-18-02-21/' +
'electricity-tariffs/E-1R-{}/standard-unit-rates/' +
'?period_from={}Z&period_to={}Z&page_size=15000')
url = url.format(tariff, start, end)
r = requests.get(url)
r = r.json()['results']
if len(r)==0:
continue
dfs = pd.DataFrame(r)[['valid_from','value_exc_vat']]
dfs['timestamp'] = pd.DatetimeIndex(dfs.valid_from.str[:-1])
dfs = df.merge(right=dfs, how='left', on='timestamp')
dfs = dfs[dfs.valid_from.notna()]
print(dfs)
s = """
INSERT INTO sm_hh_variable_vals (var_id, period_id, value) values
"""
for a, b in dfs.iterrows():
s+= " ({}, {}, {}),".format(j.var_id, a, b.value_exc_vat)
s = s[:-1] + ';'
cur.execute(s)
print(cur.statusmessage)
conn.commit()
if False:
s = 'select account_id, code from sm_accounts;'
a = loadDataFromDb(s, returndf=True)
s = ''
for i, j in a.iterrows():
s+= "update sm_accounts set hash='{}' where account_id={};\n".format(encode(j.code), j.account_id)
loadDataFromDb(s)
if False:
# Checks for gaps
s = """
select sm_accounts.account_id, sm_accounts.type_id, sm_periods.period from sm_accounts
left join sm_periods on sm_periods.period between sm_accounts.first_period and sm_accounts.last_period
left join sm_quantity on sm_quantity.period_id=sm_periods.period_id and sm_quantity.account_id= sm_accounts.account_id
where sm_quantity.quantity is null
"""
df = loadDataFromDb(s, returndf=True)
print(df)
if False:
s = """
DROP TABLE IF EXISTS sm_tariffs;
CREATE TABLE sm_tariffs (
tariff_id serial primary key
, type_id integer not null
, product varchar not null
, region char(1) not null
, granularity_id integer not null
, var_id integer not null);
"""
loadDataFromDb(s)
if False:
s = """
insert into sm_tariffs (type_id, product, region, granularity_id, var_id)
select
0 as type_id, left(var_name, 14) as product, right(var_name, 1) as region, 0 as granularity_id, var_id
from sm_hh_variables;
"""
loadDataFromDb(s)
print(loadDataFromDb('select * from sm_tariffs', returndf=True))
if False:
s = """
select period from sm_periods
left join sm_quantity on sm_quantity.period_id=sm_periods.period_id
where sm_quantity.quantity is null and sm_periods.local_date between '2020/07/01' and '2020/07/30'
and sm_quantity.account_id in (select account_id from sm_accounts where hash LIKE
'c6e81194c0aa3d65d0522d41171e4d07301457dc1cb26f7b05f60a70227be1f3%' and type_id=0);
"""
s = """
with p as (select period, period_id from sm_periods where local_date between '2020/07/01' and '2020/07/30' ),
q as (select quantity, period_id from sm_quantity where sm_quantity.account_id in (select account_id from sm_accounts where hash LIKE
'c6e81194c0aa3d65d0522d41171e4d07301457dc1cb26f7b05f60a70227be1f3%' and type_id=0))
select count(p.period) from p
left join q on q.period_id=p.period_id
where q.quantity is null;
"""
print(loadDataFromDb(s, returndf=True))
if False:
s = "insert into sm_hh_variables (var_name) Values ('Profile_1'), ('Profile_2');"
#loadDataFromDb(s)
#
for pc in [1,2]:
idx = pd.date_range(START, '202203312300', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = pd.DataFrame(idx, columns=['timestamp'])
df = df.iloc[:-1].copy()
f = '/home/django/django_project/scripts/Default_Period_Profile_Class_Coefficient_309.csv'
d = pd.read_csv(f)
d.columns = ['class','d1','period','coeff']
d = d[d['class']==pc]
d['date'] = d.d1.str[6:] + d.d1.str[2:6] + d.d1.str[:2]
d = d[d.date>=(START[:4] + '/' + START[4:6] + '/' + START[6:8])]
df = df[df.timestamp>='2021-03-31 23:00']
#d = d[d.date<'2021/04/01']
d = d.iloc[:len(df)]
assert(len(d)==len(df))
df['coeff'] = d.coeff.tolist()
s = "select var_id from sm_hh_variables where var_name='{}';".format('Profile_{}'.format(pc))
var_id = loadDataFromDb(s)[0][0]
s = "insert into sm_hh_variable_vals (var_id, period_id, value) values "
for i, j in df.iterrows():
s+= " ({}, {}, {}),".format(var_id, i, j.coeff)
s = s[:-1] + ';'
loadDataFromDb(s)
if False: #Gets latest carbon intensity
s = "insert into sm_hh_variables (var_name) Values ('CO2_National');"
#loadDataFromDb(s)
s = """
select s.var_id, s.var_name, max(sm_periods.period_id) as period_id, max(period) as period
from sm_hh_variables s
left join sm_hh_variable_vals on s.var_id=sm_hh_variable_vals.var_id
left join sm_periods on sm_periods.period_id=sm_hh_variable_vals.period_id
where s.var_name='CO2_National'
group by s.var_id, s.var_name;
"""
data = loadDataFromDb(s)[0]
latest = data[3]
var_id = data[0]
print(latest)
idx = pd.date_range(START, '202101010000', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = pd.DataFrame(idx, columns=['timestamp'])
df = df.iloc[:-1].copy()
def getintensity(dt):
url = "https://api.carbonintensity.org.uk/intensity/"
r = requests.get(url + dt + "/fw48h")
j = r.json()['data']
return j[-1]['to'], pd.DataFrame([x['intensity']['actual'] for x in j], index=[x['from'] for x in j])
dt = latest.replace(' ', 'T')+ 'Z'
intensities = []
for d in range(500):
dt, intensity = getintensity(dt)
intensities.append( intensity)
if intensity[0].isna()[-1]:
break
d = pd.concat(intensities)
d.columns=['intensity']
last = d[d.intensity.notna()].index.max()
d = d.loc[:last].copy()
for i in range(len(d)):
if np.isnan(d.intensity.iloc[i]):
if i<48:
d.intensity.iloc[i] = d.intensity.iloc[i-1]
else:
d.intensity.iloc[i] = d.intensity.iloc[i-48]
d['timestamp'] = pd.DatetimeIndex(d.index.str[:16])
d = d.iloc[2:]
d = df.merge(d, how='left', on='timestamp' )
d = d[d.intensity.notna()]
print(d)
s = "insert into sm_hh_variable_vals (var_id, period_id, value) values "
for i, j in d.iterrows():
s+= " ({}, {}, {}),".format(var_id, i, j.intensity)
s = s[:-1] + ';'
loadDataFromDb(s)
if False:
s = """
select sm_periods.local_date, sm_periods.local_time, emis.value emissions, prof.value profile
from sm_hh_variable_vals emis
inner join sm_hh_variable_vals prof on emis.period_id=prof.period_id and emis.var_id=31 and prof.var_id=29
inner join sm_periods on sm_periods.period_id=emis.period_id;
"""
s = """
select sm_periods.local_date, sm_periods.local_time, emis.value emissions, prof.value profile, COALESCE(qty.quantity,0) quantity
from sm_hh_variable_vals emis
inner join sm_hh_variable_vals prof on emis.period_id=prof.period_id and emis.var_id=31 and prof.var_id=29
inner join sm_periods on sm_periods.period_id=emis.period_id
left outer join sm_quantity qty on qty.period_id=emis.period_id and qty.account_id=21
;
"""
s = """
select sm_periods.local_date, sm_periods.local_time, emis.value emissions, prof.value profile, COALESCE(qty.quantity,0) quantity
from sm_hh_variable_vals emis
inner join sm_hh_variable_vals prof on emis.period_id=prof.period_id and emis.var_id=31 and prof.var_id=29
inner join sm_periods on sm_periods.period_id=emis.period_id
left outer join sm_quantity qty on qty.period_id=emis.period_id and qty.account_id=21
where local_date='2020-07-25'
;
"""
df = loadDataFromDb(s, returndf=True)
print(df)
if False:
s = """
DROP TABLE IF EXISTS sm_log;
CREATE TABLE sm_log (
id serial primary key
, datetime timestamp not null
, mode integer not null
, url varchar(124) not null
, hash varchar(64) not null);
"""
loadDataFromDb(s)
if False:
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
tariff = 'GO-18-06-12'
s = '''
insert into sm_hh_variables (var_name) values ('{t}-{r}');
insert into sm_tariffs (type_id, product, region, granularity_id, var_id)
select 0 as type_id, '{t}' as product, '{r}' as region, 0 as granularity_id, var_id
from sm_hh_variables where var_name='{t}-{r}';
'''
loadDataFromDb(s.format(t=tariff, r=region))
if False:
idx = pd.date_range(START, '202101010000', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = pd.DataFrame(idx, columns=['timestamp'])
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
tariff = 'GO-18-06-12'
print(region)
url = ('https://api.octopus.energy/v1/products/{}/' +
'electricity-tariffs/E-1R-{}-{}/standard-unit-rates/' +
'?period_from={}Z&period_to={}Z&page_size=15000')
url = url.format(tariff, tariff, region,
df.timestamp.iloc[0].strftime('%Y-%m-%dT%H:%M'),
df.timestamp.iloc[-1].strftime('%Y-%m-%dT%H:%M'))
r = requests.get(url)
dfs = pd.DataFrame(r.json()['results'])
dfs.index = pd.DatetimeIndex(dfs.valid_from.str[:16])
dfs.sort_index(inplace=True)
dfs.loc[pd.Timestamp(dfs.valid_to[-1][:16])] = dfs.iloc[-1]
dfs = dfs['value_exc_vat']
dfs = dfs.resample('30T').ffill()
dfs = | pd.merge(left=df, right=dfs, left_on='timestamp', right_index=True, how='left') | pandas.merge |
from __future__ import print_function
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optimizer
from get_dataset import *
import models
import utils
from sklearn.metrics import confusion_matrix, roc_curve,auc
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import numpy as np
import itertools
import pandas as pd
from sklearn import datasets
from sklearn.metrics import roc_curve,auc
from scipy import interp
from itertools import cycle
import time
NUM_CLASSES = 7
# Training settings
parser = argparse.ArgumentParser(description='Openset-DA SVHN -> MNIST Example')
parser.add_argument('--task', choices=['s2sa'], default='s2sa',
help='type of task')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=500, metavar='N',
                    help='number of epochs to train (default: 500)')
parser.add_argument('--lr', type=float, default=0.0000001, metavar='LR',
                    help='learning rate (default: 1e-7)')
parser.add_argument('--lr-rampdown-epochs', default=501, type=int, metavar='EPOCHS',
help='length of learning rate cosine rampdown (>= length of training)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--grl-rampup-epochs', default=20, type=int, metavar='EPOCHS',
help='length of grl rampup')
parser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,
metavar='W', help='weight decay (default: 1e-3)')
parser.add_argument('--th', type=float, default=0.5, metavar='TH',
help='threshold (default: 0.5)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
args = parser.parse_args()
# C:/Users/Nirmal/Desktop/PYNB/project/GAN/checkpoint.pth.tar
torch.backends.cudnn.benchmark = True
source_dataset, target_dataset = get_dataset(args.task)
source_loader = torch.utils.data.DataLoader(source_dataset,
batch_size=args.batch_size, shuffle=True, num_workers=0)
target_loader = torch.utils.data.DataLoader(target_dataset,
batch_size=args.batch_size, shuffle=True, num_workers=0)
model = models.Net(task=args.task).cuda()
repr(model)
# if args.task=='s2sa':
# optimizer = torch.optim.SGD(model.parameters(), args.lr,
# momentum=args.momentum,
# weight_decay=args.weight_decay,
# nesterov=True)
if args.task=='s2sa':
optimizer = torch.optim.Adam(model.parameters(), args.lr,
weight_decay=args.weight_decay)
if args.resume:
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
criterion_bce = nn.BCELoss()
criterion_cel = nn.CrossEntropyLoss()
best_prec1 = 0
best_pred_y = []
best_gt_y = []
global_step = 0
total_steps = args.grl_rampup_epochs * len(source_loader)
acc11=[]
epoch11=[]
def train(epoch):
model.train()
global global_step
for batch_idx, (batch_s, batch_t) in enumerate(zip(source_loader, target_loader)):
adjust_learning_rate(optimizer, epoch, batch_idx, len(source_loader)) if args.task=='s2m' else None
p = global_step / total_steps
constant = 2. / (1. + np.exp(-10 * p)) - 1
data_s, target_s = batch_s
data_t, target_t = batch_t
data_s, target_s = data_s.cuda(), target_s.cuda(non_blocking=True)
data_t, target_t = data_t.cuda(), target_t.cuda(non_blocking=True)
batch_size_s = len(target_s)
batch_size_t = len(target_t)
optimizer.zero_grad()
data_s = data_s.unsqueeze(1)
data_t = data_t.unsqueeze(1)
output_s = model(data_s)
output_t = model(data_t, constant = constant, adaption = True)
target_s = target_s.long()
loss_cel = criterion_cel(output_s, target_s)
output_t_prob_unk = F.softmax(output_t, dim=1)[:,-1]
loss_adv = criterion_bce(output_t_prob_unk, torch.tensor([args.th]*batch_size_t).cuda())
loss = loss_cel + loss_adv
loss.backward()
optimizer.step()
global_step += 1
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tConstant: {:.4f}'.format(
epoch, batch_idx * args.batch_size, len(source_loader.dataset),
100. * batch_idx / len(source_loader), loss.item(), constant))
def test(epoch):
global acc11
global epoch11
global best_prec1
model.eval()
loss = 0
pred_y = []
true_y = []
correct = 0
ema_correct = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(target_loader):
data, target = data.cuda(), target.cuda(non_blocking=True)
data = data.unsqueeze(1)
output = model(data)
target = target.long()
loss += criterion_cel(output, target).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
for i in range(len(pred)):
pred_y.append(pred[i].item())
true_y.append(target[i].item())
correct += pred.eq(target.view_as(pred)).sum().item()
loss /= len(target_loader.dataset)
utils.cal_acc(true_y, pred_y, NUM_CLASSES)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
loss, correct, len(target_loader.dataset),
100. * correct / len(target_loader.dataset)))
prec1 = 100. * correct / len(target_loader.dataset)
if epoch % 1 == 0:
acc11.append(prec1)
epoch11.append(epoch)
#print(acc11)
#print(epoch11)
acc=np.round_(acc11)
np.savetxt('csvfile.csv', acc)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
utils.save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
if is_best:
global best_gt_y
global best_pred_y
best_gt_y = true_y
best_pred_y = pred_y
def adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch):
lr = args.lr
epoch = epoch + step_in_epoch / total_steps_in_epoch
lr *= utils.cosine_rampdown(epoch, args.lr_rampdown_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
start = time.time()
try:
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
print ("------Best Result-------")
utils.cal_acc(best_gt_y, best_pred_y, NUM_CLASSES)
except KeyboardInterrupt:
print ("------Best Result-------")
utils.cal_acc(best_gt_y, best_pred_y, NUM_CLASSES)
stop = time.time()
print('time taken = ' + str(stop - start) + 'secs')
# ########################################################################################################################
def plot_confusion_matrix(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize= False):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
    Citation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(9, 7))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.savefig('Confusion Matrix', dpi=200, format='png', bbox_inches='tight')
    plt.show()
plt.close()
# Compute confusion matrix
cm = confusion_matrix(best_gt_y, best_pred_y)
print(cm)
# Show confusion matrix in a separate window
plt.matshow(cm)
plot_confusion_matrix(cm,
target_names= ['a', 'b', 'c', 'd', 'e', 'u'],
title='Confusion matrix',
cmap=None, normalize= False)
#print(classification_report(best_gt_y, best_pred_y, labels=['0', '1', '2', '3', '4', '5'], target_names=['a', 'b', 'c', 'd', 'e', 'u']))
#a=classification_report(best_gt_y, best_pred_y, labels=['0', '1', '2', '3', '4', '5'], target_names=['a', 'b', 'c', 'd', 'e', 'u'])
plt.show()
#plt.savefig('Classification Report.png', dpi=200, format='png', bbox_inches='tight')
plt.close()
#############################################################################################################################
# Compute ROC curve and ROC area for each class
### MACRO
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(NUM_CLASSES):
    fpr[i], tpr[i], _ = roc_curve(np.array(pd.get_dummies(best_gt_y))[:, i], np.array(pd.get_dummies(best_pred_y))[:, i])
import os
import geopandas as gpd
import pandas as pd
import pytest
import trackintel as ti
from geopandas.testing import assert_geodataframe_equal
from pandas.testing import assert_frame_equal, assert_index_equal
from shapely.geometry import Point, Polygon, MultiPoint
from trackintel.io.from_geopandas import (
_trackintel_model,
read_locations_gpd,
read_positionfixes_gpd,
read_staypoints_gpd,
read_tours_gpd,
read_triplegs_gpd,
read_trips_gpd,
)
@pytest.fixture()
def example_positionfixes():
"""Model conform positionfixes to test with."""
p1 = Point(8.5067847, 47.4)
p2 = Point(8.5067847, 47.5)
p3 = Point(8.5067847, 47.6)
t1 = pd.Timestamp("1971-01-01 04:00:00", tz="utc")
t2 = pd.Timestamp("1971-01-01 05:00:00", tz="utc")
t3 = pd.Timestamp("1971-01-02 07:00:00", tz="utc")
list_dict = [
{"user_id": 0, "tracked_at": t1, "geom": p1},
{"user_id": 0, "tracked_at": t2, "geom": p2},
{"user_id": 1, "tracked_at": t3, "geom": p3},
]
pfs = gpd.GeoDataFrame(data=list_dict, geometry="geom", crs="EPSG:4326")
pfs.index.name = "id"
assert pfs.as_positionfixes
return pfs
class Test_Trackintel_Model:
"""Test `_trackintel_model()` function."""
def test_renaming(self, example_positionfixes):
"""Test renaming of columns."""
example_positionfixes["additional_col"] = [11, 22, 33]
pfs = example_positionfixes.copy()
# create new column mapping and revert it
columns = {"user_id": "_user_id", "tracked_at": "_tracked_at", "additional_col": "_additional_col"}
columns_rev = {val: key for key, val in columns.items()}
# check if columns get renamed correctly
pfs.rename(columns=columns, inplace=True)
pfs = _trackintel_model(pfs, columns_rev)
assert_geodataframe_equal(example_positionfixes, pfs)
def test_setting_geometry(self, example_positionfixes):
"""Test the setting of the geometry."""
# create pfs as dataframe
pfs = pd.DataFrame(example_positionfixes[["user_id", "tracked_at"]], copy=True)
pfs["geom"] = example_positionfixes.geometry
# check if geom column gets assigned to geometry
pfs = _trackintel_model(pfs, geom_col="geom")
assert_geodataframe_equal(example_positionfixes, pfs)
def test_set_crs(self, example_positionfixes):
"""Test if crs will be set."""
pfs = example_positionfixes.copy()
example_positionfixes.crs = "EPSG:2056"
# check if the crs is correctly set
pfs.crs = None
pfs = _trackintel_model(pfs, crs="EPSG:2056")
assert_geodataframe_equal(example_positionfixes, pfs)
def test_already_set_geometry(self, example_positionfixes):
"""Test if default checks if GeoDataFrame already has a geometry."""
pfs = _trackintel_model(example_positionfixes)
assert_geodataframe_equal(pfs, example_positionfixes)
def test_error_no_set_geometry(self, example_positionfixes):
"""Test if AttributeError will be raised if no geom_col is provided and GeoDataFrame has no geometry."""
pfs = gpd.GeoDataFrame(example_positionfixes[["user_id", "tracked_at"]])
with pytest.raises(AttributeError):
_trackintel_model(pfs)
def test_tz_cols(self, example_positionfixes):
"""Test if columns get casted to datetimes."""
pfs = example_positionfixes.copy()
pfs["tracked_at"] = ["1971-01-01 04:00:00", "1971-01-01 05:00:00", "1971-01-02 07:00:00"]
pfs = _trackintel_model(pfs, tz_cols=["tracked_at"], tz="UTC")
assert_geodataframe_equal(pfs, example_positionfixes)
def test_multiple_timezones_in_col(self, example_positionfixes):
"""Test if datetimes in column don't have the same timezone get casted to UTC."""
example_positionfixes["tracked_at"] = [
pd.Timestamp("2021-08-01 16:00:00", tz="Europe/Amsterdam"),
pd.Timestamp("2021-08-01 16:00:00", tz="Asia/Muscat"),
pd.Timestamp("2021-08-01 16:00:00", tz="Pacific/Niue"),
]
pfs = _trackintel_model(example_positionfixes, tz_cols=["tracked_at"])
example_positionfixes["tracked_at"] = pd.to_datetime(example_positionfixes["tracked_at"], utc=True)
assert_geodataframe_equal(pfs, example_positionfixes)
class TestRead_Positionfixes_Gpd:
"""Test `read_positionfixes_gpd()` function."""
def test_csv(self):
"""Test if the results of reading from gpd and csv agrees."""
# read from file and transform to trackintel format
gdf = gpd.read_file(os.path.join("tests", "data", "positionfixes.geojson"))
gdf.set_index("id", inplace=True)
pfs_from_gpd = read_positionfixes_gpd(gdf, user_id="User", geom_col="geometry", crs="EPSG:4326", tz="utc")
# read from csv file
pfs_file = os.path.join("tests", "data", "positionfixes.csv")
pfs_from_csv = ti.read_positionfixes_csv(pfs_file, sep=";", tz="utc", index_col="id", crs="EPSG:4326")
pfs_from_csv = pfs_from_csv.rename(columns={"geom": "geometry"})
assert_frame_equal(pfs_from_gpd, pfs_from_csv, check_exact=False)
def test_mapper(self, example_positionfixes):
"""Test if mapper argument allows for additional renaming."""
example_positionfixes["additional_col"] = [11, 22, 33]
mapper = {"additional_col": "additional_col_renamed"}
pfs = read_positionfixes_gpd(example_positionfixes, mapper=mapper)
example_positionfixes.rename(columns=mapper, inplace=True)
assert_geodataframe_equal(example_positionfixes, pfs)
class TestRead_Triplegs_Gpd:
"""Test `read_triplegs_gpd()` function."""
def test_csv(self):
"""Test if the results of reading from gpd and csv agrees."""
# read from file and transform to trackintel format
gdf = gpd.read_file(os.path.join("tests", "data", "triplegs.geojson"))
gdf.set_index("id", inplace=True)
tpls_from_gpd = read_triplegs_gpd(gdf, user_id="User", geom_col="geometry", crs="EPSG:4326", tz="utc")
# read from csv file
tpls_file = os.path.join("tests", "data", "triplegs.csv")
tpls_from_csv = ti.read_triplegs_csv(tpls_file, sep=";", tz="utc", index_col="id")
tpls_from_csv = tpls_from_csv.rename(columns={"geom": "geometry"})
assert_frame_equal(tpls_from_gpd, tpls_from_csv, check_exact=False)
def test_mapper(self):
"""Test if mapper argument allows for additional renaming."""
gdf = gpd.read_file(os.path.join("tests", "data", "triplegs.geojson"))
gdf["additional_col"] = [11, 22]
gdf.rename(columns={"User": "user_id"}, inplace=True)
mapper = {"additional_col": "additional_col_renamed"}
tpls = read_triplegs_gpd(gdf, mapper=mapper, tz="utc")
gdf.rename(columns=mapper, inplace=True)
assert_index_equal(tpls.columns, gdf.columns)
class TestRead_Staypoints_Gpd:
"""Test `read_staypoints_gpd()` function."""
def test_csv(self):
"""Test if the results of reading from gpd and csv agrees."""
# read from file and transform to trackintel format
gdf = gpd.read_file(os.path.join("tests", "data", "staypoints.geojson"))
gdf.set_index("id", inplace=True)
sp_from_gpd = read_staypoints_gpd(gdf, "start_time", "end_time", geom_col="geometry", crs="EPSG:4326", tz="utc")
# read from csv file
sp_file = os.path.join("tests", "data", "staypoints.csv")
sp_from_csv = ti.read_staypoints_csv(sp_file, sep=";", tz="utc", index_col="id")
sp_from_csv = sp_from_csv.rename(columns={"geom": "geometry"})
assert_frame_equal(sp_from_gpd, sp_from_csv, check_exact=False)
def test_mapper(self):
"""Test if mapper argument allows for additional renaming."""
gdf = gpd.read_file(os.path.join("tests", "data", "staypoints.geojson"))
gdf["additional_col"] = [11, 22]
gdf.rename(columns={"start_time": "started_at", "end_time": "finished_at"}, inplace=True)
mapper = {"additional_col": "additional_col_renamed"}
sp = read_staypoints_gpd(gdf, mapper=mapper, tz="utc")
gdf.rename(columns=mapper, inplace=True)
assert_index_equal(gdf.columns, sp.columns)
@pytest.fixture()
def example_locations():
"""Model conform locations to test with."""
p1 = Point(8.5067847, 47.4)
p2 = Point(8.5067847, 47.5)
list_dict = [
{"user_id": 0, "center": p1},
{"user_id": 0, "center": p2},
{"user_id": 1, "center": p2},
]
locs = gpd.GeoDataFrame(data=list_dict, geometry="center", crs="EPSG:4326")
locs.index.name = "id"
coords = [[8.45, 47.6], [8.45, 47.4], [8.55, 47.4], [8.55, 47.6], [8.45, 47.6]]
extent = Polygon(coords)
locs["extent"] = extent # broadcasting
locs["extent"] = gpd.GeoSeries(locs["extent"]) # dtype
assert locs.as_locations
return locs
class TestRead_Locations_Gpd:
"""Test `read_locations_gpd()` function."""
def test_csv(self):
"""Test if the results of reading from gpd and csv agrees."""
# TODO: Problem with multiple geometry columns and csv format
gdf = gpd.read_file(os.path.join("tests", "data", "locations.geojson"))
gdf.set_index("id", inplace=True)
locs_from_gpd = read_locations_gpd(gdf, user_id="User", center="geometry", crs="EPSG:4326")
locs_file = os.path.join("tests", "data", "locations.csv")
locs_from_csv = ti.read_locations_csv(locs_file, sep=";", index_col="id")
# drop the second geometry column manually because not storable in GeoJSON (from Geopandas)
locs_from_csv = locs_from_csv.drop(columns="extent")
assert_frame_equal(locs_from_csv, locs_from_gpd, check_exact=False)
def test_extent_col(self, example_locations):
"""Test function with optional geom-column "extent"."""
locs = example_locations.copy()
del locs["extent"]
coords = [[8.45, 47.6], [8.45, 47.4], [8.55, 47.4], [8.55, 47.6], [8.45, 47.6]]
locs["extent_wrongname"] = Polygon(coords)
locs = read_locations_gpd(locs, extent="extent_wrongname")
assert_geodataframe_equal(locs, example_locations)
def test_mapper(self, example_locations):
"""Test if mapper argument allows for additional renaming."""
example_locations["additional_col"] = [11, 22, 33]
mapper = {"additional_col": "additional_col_renamed"}
locs = read_locations_gpd(example_locations, mapper=mapper)
example_locations.rename(columns=mapper, inplace=True)
assert_geodataframe_equal(locs, example_locations)
@pytest.fixture
def example_trips():
"""Model conform trips to test with."""
start = pd.Timestamp("1971-01-01 00:00:00", tz="utc")
h = pd.Timedelta("1h")
mp1 = MultiPoint([(0.0, 0.0), (1.0, 1.0)])
mp2 = MultiPoint([(2.0, 2.0), (3.0, 3.0)])
list_dict = [
{"user_id": 0, "origin_staypoint_id": 0, "destination_staypoint_id": 1, "geom": mp1},
{"user_id": 0, "origin_staypoint_id": 1, "destination_staypoint_id": 2, "geom": mp2},
{"user_id": 1, "origin_staypoint_id": 0, "destination_staypoint_id": 1, "geom": mp2},
]
for n, d in enumerate(list_dict):
d["started_at"] = start + 4 * n * h
d["finished_at"] = d["started_at"] + h
trips = gpd.GeoDataFrame(data=list_dict, geometry="geom", crs="EPSG:2056")
trips.index.name = "id"
assert trips.as_trips
return trips
class TestRead_Trips_Gpd:
"""Test `read_trips_gpd()` function."""
def test_csv(self):
"""Test if the results of reading from gpd and csv agrees."""
df = pd.read_csv(os.path.join("tests", "data", "trips.csv"), sep=";")
df.set_index("id", inplace=True)
trips_from_gpd = read_trips_gpd(df, tz="utc")
trips_file = os.path.join("tests", "data", "trips.csv")
trips_from_csv = ti.read_trips_csv(trips_file, sep=";", tz="utc", index_col="id")
assert_frame_equal(trips_from_gpd, trips_from_csv, check_exact=False)
def test_with_geometry(self, example_trips):
"""Test if optional geometry gets read."""
trips = example_trips.copy()
del trips["geom"]
mp1 = MultiPoint([(0.0, 0.0), (1.0, 1.0)])
mp2 = MultiPoint([(2.0, 2.0), (3.0, 3.0)])
trips["geom"] = [mp1, mp2, mp2]
trips = read_trips_gpd(trips, geom_col="geom", crs="EPSG:2056", tz="utc")
example_trips = example_trips[trips.columns] # copy changed column order
assert_geodataframe_equal(trips, example_trips)
def test_without_geometry(self, example_trips):
"""Test if DataFrame without geometry stays the same."""
columns_without_geom = example_trips.columns.difference(["geom"])
        trips = pd.DataFrame(example_trips[columns_without_geom], copy=True)
import numpy as np
import torch
import torch.nn as nn
from src.models.loss import RMSELoss, RMSLELoss
from sklearn.metrics import r2_score
import pandas as pd
#########################
# EARLY STOPPING
#########################
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(
self,
patience=7,
verbose=False,
delta=0.005,
path="checkpoint.pt",
trace_func=print,
early_stop_delay=20,
):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
path (str): Path for the checkpoint to be saved to.
Default: 'checkpoint.pt'
trace_func (function): trace print function.
Default: print
From https://github.com/Bjarten/early-stopping-pytorch
License: MIT
"""
self.patience = patience
self.verbose = verbose
self.early_stop_delay = early_stop_delay
self.counter = 0
self.epoch = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
self.path = path
self.trace_func = trace_func
def __call__(self, val_loss, model):
score = -val_loss
# print(type(score), 'SCORE ####,', score)
if self.epoch < self.early_stop_delay:
self.epoch += 1
pass
else:
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
elif score < self.best_score + self.delta:
self.counter += 1
if self.verbose:
self.trace_func(
f"EarlyStopping counter: {self.counter} out of {self.patience}"
)
if self.counter >= self.patience:
self.early_stop = True
elif torch.isnan(score).item():
self.counter += 1
if self.counter >= self.patience:
self.early_stop = True
# print('########## IS NAN #######')
else:
self.best_score = score
self.save_checkpoint(val_loss, model)
self.counter = 0
self.epoch += 1
def save_checkpoint(self, val_loss, model):
"""Saves model when validation loss decrease."""
if self.verbose:
self.trace_func(
f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ..."
)
torch.save(model, self.path)
# torch.save(model.state_dict(), self.path)
self.val_loss_min = val_loss
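# A minimal usage sketch for EarlyStopping (illustrative only -- `net`, the data loaders
# and the `train_one_epoch`/`evaluate` helpers are placeholders, not part of this module):
#
#     early_stopping = EarlyStopping(patience=10, verbose=True, path="checkpoint.pt")
#     for epoch in range(max_epochs):
#         train_one_epoch(net, train_loader)
#         val_loss = evaluate(net, val_loader)
#         early_stopping(val_loss, net)
#         if early_stopping.early_stop:
#             break
#     net = torch.load("checkpoint.pt")  # reload the best checkpoint saved above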
#########################
# TESTING
#########################
def test(net, x_test, device, batch_size=100, ):
with torch.no_grad():
y_hats = []
for i in range(0, len(x_test), batch_size):
batch_x = x_test[i:i+batch_size].to(device)
outputs = net(batch_x)
y_hats.append(np.array(outputs.cpu()).reshape(-1,1))
return torch.tensor(np.concatenate(y_hats))
def calc_r2_avg(y_hats, y_val, index_sorted, window_size):
y_hats_rolling_avg = np.convolve(np.array(y_hats[index_sorted]).reshape(-1), np.ones(window_size), 'valid') / window_size
r2_val_avg = r2_score(np.array(y_val)[index_sorted][window_size-1:], y_hats_rolling_avg)
return r2_val_avg, y_hats_rolling_avg
# function to create metrics from the test set on an already trained model
def model_metrics_test(net, model_path, x_test, y_test, device, window_size=12):
net.eval()
criterion_mae = nn.L1Loss()
criterion_rmse = RMSELoss()
criterion_rmsle = RMSLELoss()
results_dict = {}
try:
y_hats = test(net, x_test, device, 100)
index_sorted = np.array(np.argsort(y_test, 0).reshape(-1))
r2_test = r2_score(y_test, y_hats)
results_dict['r2_test'] = r2_test
r2_test_avg, y_hats_rolling_avg = calc_r2_avg(y_hats, y_test, index_sorted, window_size)
results_dict['r2_test_avg'] = r2_test_avg
loss_mae_test = criterion_mae(y_hats, y_test)
results_dict['loss_mae_test'] = loss_mae_test.item()
loss_rmse_test = criterion_rmse(y_hats, y_test)
results_dict['loss_rmse_test'] = loss_rmse_test.item()
loss_rmsle_test = criterion_rmsle(y_hats, y_test)
results_dict['loss_rmsle_test'] = loss_rmsle_test.item()
except:
results_dict['r2_test'] = 99999
results_dict['r2_test_avg'] = 99999
results_dict['loss_mae_test'] = 99999
results_dict['loss_rmse_test'] = 99999
results_dict['loss_rmsle_test'] = 99999
return results_dict
def test_metrics_to_results_df(model_folder, df_results, x_test, y_test, ):
'''Function that takes the results datafram and appends
the results from the test data to it.
Parameters
===========
model_folder : pathlib position
Folder holding all the saved checkpoint files of saved models
'''
# select device to run neural net on
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("Running on GPU")
else:
device = torch.device("cpu")
print("Running on CPU")
df_temp = pd.DataFrame()
for i, r in df_results.iterrows():
model_name = r['model_checkpoint_name']
if i % 200 == 0:
print('model no. ', i)
# load model
net = torch.load(model_folder / model_name, map_location=device)
results_dict = model_metrics_test(net, model_folder / model_name, x_test, y_test, device)
results_dict['model_name'] = model_name
        df_temp = df_temp.append(pd.DataFrame.from_dict(results_dict, orient='index'))
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
        x = Series([v1, v1, v1, v2, v2, v1])
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import pandas as pd
def main(input_filepath, output_filepath, lagged_list, number_of_customers):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
data = pd.read_csv(input_filepath)
logger.info("convert datetime to pd.datetime")
    data.datetime = pd.to_datetime(data.datetime)
import pandas as pd
import logging
import logzero
from logzero import logger
def get_pseudo_labelled_data(sure_traders, X_train, y_train, X_test):
X_test.set_index(['Trader', 'Share', 'Day'], inplace=True)
sure_traders_line = X_test.loc[sure_traders.keys()]
logger.info(f"Pseudo labelling {len(sure_traders_line)} lines out of {len(X_test)}")
X_test.drop(sure_traders_line.index, axis=0, inplace=True)
X_train = pd.concat([X_train, sure_traders_line.drop(['Prediction'], axis=1)])
y_train = pd.concat([y_train, sure_traders_line['Prediction']])
X_test.drop(['Prediction'], axis=1, inplace=True)
return X_train, y_train, X_test
def aggregate_traders(X_test, y_pred):
X_test.reset_index(inplace=True)
    y_pred = pd.DataFrame(y_pred, columns=['HFT', 'MIX', 'NON HFT'])
import timeit
from typing import Union
import numpy as np
import pandas as pd
import copy
from carla.evaluation.distances import get_distances
from carla.evaluation.nearest_neighbours import yNN, yNN_prob, yNN_dist
from carla.evaluation.manifold import yNN_manifold, sphere_manifold
from carla.evaluation.process_nans import remove_nans
from carla.evaluation.redundancy import redundancy
from carla.evaluation.success_rate import success_rate, individual_success_rate
from carla.evaluation.diversity import individual_diversity, avg_diversity
from carla.evaluation.violations import constraint_violation
from carla.evaluation.recourse_time import recourse_time_taken
from carla.models.api import MLModel
from carla.models.catalog import MLModelCatalog
from carla.recourse_methods.api import RecourseMethod
from carla.recourse_methods.processing import get_drop_columns_binary
class Benchmark:
"""
The benchmarking class contains all measurements.
It is possible to run only individual evaluation metrics or all via one single call.
For every given factual, the benchmark object will generate one counterfactual example with
the given recourse method.
Parameters
----------
mlmodel: carla.models.MLModel
Black Box model we want to explain
recmodel: carla.recourse_methods.RecourseMethod
Recourse method we want to benchmark
factuals: pd.DataFrame
Instances we want to find counterfactuals
Methods
-------
compute_ynn:
Computes y-Nearest-Neighbours for generated counterfactuals
compute_average_time:
Computes average time for generated counterfactual
compute_distances:
Calculates the distance measure and returns it as dataframe
compute_constraint_violation:
Computes the constraint violation per factual as dataframe
compute_redundancy:
Computes redundancy for each counterfactual
compute_success_rate:
Computes success rate for the whole recourse method.
run_benchmark:
Runs every measurement and returns every value as dict.
"""
def __init__(
self,
mlmodel: Union[MLModel, MLModelCatalog],
recourse_method: RecourseMethod,
factuals: pd.DataFrame,
dataset: pd.DataFrame = None
) -> None:
self._mlmodel = mlmodel
self._recourse_method = recourse_method
self._full_dataset = dataset
start = timeit.default_timer()
self._counterfactuals = recourse_method.get_counterfactuals(factuals)
stop = timeit.default_timer()
self._timer = stop - start
# Avoid using scaling and normalizing more than once
if isinstance(mlmodel, MLModelCatalog):
self._mlmodel.use_pipeline = False # type: ignore
self._factuals = copy.deepcopy(factuals)
# Normalizing and encoding factual for later use
self._enc_norm_factuals = recourse_method.encode_normalize_order_factuals(
factuals, with_target=True
)
def compute_ynn(self) -> pd.DataFrame:
"""
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours"]
return pd.DataFrame([[ynn]], columns=columns)
def compute_average_time(self) -> pd.DataFrame:
"""
Computes average time for generated counterfactual
Returns
-------
pd.DataFrame
"""
avg_time = self._timer / self._counterfactuals.shape[0]
columns = ["Average_Time"]
return pd.DataFrame([[avg_time]], columns=columns)
def compute_distances(self) -> pd.DataFrame:
"""
Calculates the distance measure and returns it as dataframe
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._enc_norm_factuals, self._counterfactuals
)
columns = ["Distance_1", "Distance_2", "Distance_3", "Distance_4"]
if counterfactuals_without_nans.empty:
return pd.DataFrame(columns=columns)
if self._mlmodel.encoder.drop is None:
# To prevent double count of encoded features without drop if_binary
binary_columns_to_drop = get_drop_columns_binary(
self._mlmodel.data.categoricals,
counterfactuals_without_nans.columns.tolist(),
)
counterfactuals_without_nans = counterfactuals_without_nans.drop(
binary_columns_to_drop, axis=1
)
factual_without_nans = factual_without_nans.drop(
binary_columns_to_drop, axis=1
)
arr_f = factual_without_nans.to_numpy()
arr_cf = counterfactuals_without_nans.to_numpy()
distances = get_distances(arr_f, arr_cf)
output = pd.DataFrame(distances, columns=columns)
return output
def compute_constraint_violation(self) -> pd.DataFrame:
"""
Computes the constraint violation per factual as dataframe
Returns
-------
pd.Dataframe
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
violations = []
else:
violations = constraint_violation(
self._mlmodel, counterfactuals_without_nans, factual_without_nans
)
columns = ["Constraint_Violation"]
return pd.DataFrame(violations, columns=columns)
def compute_time_taken(self) -> pd.DataFrame:
"""
TODO
Computes time taken for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
time_taken = []
else:
time_taken = recourse_time_taken(
self._recourse_method, self._factuals
)
columns = ["Time_taken"]
return pd.DataFrame(time_taken, columns=columns)
def compute_individual_diversity(self) -> pd.DataFrame:
"""
TODO
        Computes instance-wise diversity for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
            diversity = []
else:
            diversity = individual_diversity(
counterfactuals_without_nans, factual_without_nans
)
columns = ["Individual_Diversity"]
        return pd.DataFrame(diversity, columns=columns)
def compute_avg_diversity(self) -> pd.DataFrame:
"""
TODO
Computes average diversity for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
diversity = []
else:
diversity = avg_diversity(
counterfactuals_without_nans, factual_without_nans
)
columns = ["Average_Diversity"]
return pd.DataFrame(diversity, columns=columns)
def compute_ynn_dist(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_dist(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_manifold_ynn(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals with respect to positive class
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_manifold(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours-Manifold-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_manifold_sphere(self) -> pd.DataFrame:
"""
TODO
Computes neighbor distance for generated counterfactuals with respect to positive class within sphere
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = sphere_manifold(
counterfactuals_without_nans, self._recourse_method, self._mlmodel
)
columns = ["Sphere-Manifold-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_ynn_prob(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_prob(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
print(ynn)
columns = ["y-Nearest-Neighbours-Probability"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_redundancy(self) -> pd.DataFrame:
"""
Computes redundancy for each counterfactual
Returns
-------
pd.Dataframe
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._enc_norm_factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
redundancies = []
else:
redundancies = redundancy(
factual_without_nans, counterfactuals_without_nans, self._mlmodel
)
columns = ["Redundancy"]
        return pd.DataFrame(redundancies, columns=columns)
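    # A minimal usage sketch (illustrative; `ml_model`, `recourse_method` and `factuals`
    # are assumed to be created elsewhere, e.g. from the carla model/data catalogs):
    #
    #     benchmark = Benchmark(ml_model, recourse_method, factuals)
    #     results = pd.concat(
    #         [
    #             benchmark.compute_average_time(),
    #             benchmark.compute_distances(),
    #             benchmark.compute_constraint_violation(),
    #             benchmark.compute_redundancy(),
    #         ],
    #         axis=1,
    #     )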
import logging
from os.path import join
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import json
import matplotlib.image as image
import matplotlib.dates as mdates
from .base import PAGE_WIDTH, ROW_HEIGHT, COLORS, heatmap, add_watermark
from gridemissions.eia_api import SRC
HEATMAP_BAS = [
"MISO",
"PJM",
"ERCO",
"SWPP",
"SOCO",
"CISO",
"FPL",
"TVA",
"NYIS",
"FPC",
"ISNE",
"LGEE",
"PACE",
"DUK",
"PSCO",
"NEVP",
"CPLE",
"AECI",
"WACM",
"SC",
"TEC",
"SRP",
"FMPP",
"LDWP",
"AZPS",
"SCEG",
"JEA",
"TEPC",
"PNM",
"WALC",
"PACW",
"NWMT",
"PSEI",
"EPE",
"IPCO",
"BANC",
"PGE",
"AEC",
"SEC",
"BPAT",
]
def separate_imp_exp(data, ba):
imp = 0.0
exp = 0.0
for ba2 in data.get_trade_partners(ba):
imp += data.df.loc[:, data.KEY["ID"] % (ba, ba2)].apply(lambda x: min(x, 0)).fillna(0.)
exp += data.df.loc[:, data.KEY["ID"] % (ba, ba2)].apply(lambda x: max(x, 0)).fillna(0.)
return imp, exp
def annual_plot_hourly(elec, co2, ba, save=False, fig_folder=None):
scaling_elec = 1e-3
scaling_co2 = 1e-6
f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, figsize=(10, 12.5))
for ax, data, scale in zip((ax1, ax2), [elec, co2], [scaling_elec, scaling_co2]):
df_plot = data.df
ax.plot(df_plot.loc[:, data.get_cols(r=ba, field="D")] * scale, label="D")
ax.plot(
df_plot.loc[:, data.get_cols(r=ba, field="NG")] * scale,
label="G",
alpha=0.8,
)
# CO2i plot
co2iD = (
co2.df.loc[:, co2.get_cols(r=ba, field="D")].values.flatten()
/ elec.df.loc[:, elec.get_cols(r=ba, field="D")].values.flatten()
)
co2iG = (
co2.df.loc[:, co2.get_cols(r=ba, field="NG")].values.flatten()
/ elec.df.loc[:, elec.get_cols(r=ba, field="NG")].values.flatten()
)
co2iD[co2iD > 2000] = np.nan
co2iG[co2iG > 2000] = np.nan
impC, expC = separate_imp_exp(co2, ba)
impE, expE = separate_imp_exp(elec, ba)
co2i_imp = impC / impE
ax1.plot(impE * scaling_elec, label="Imp", alpha=0.7)
ax1.plot(expE * scaling_elec, label="Exp", alpha=0.7)
ax2.plot(impC * scaling_co2, label="Imp", alpha=0.7)
ax2.plot(expC * scaling_co2, label="Exp", alpha=0.7)
ax3.plot(co2.df.index, co2iD, label="D")
ax3.plot(co2.df.index, co2iG, label="G", alpha=0.8)
ax3.plot(co2.df.index, co2i_imp, label="Imp", alpha=0.7)
ax3.set_ylim(bottom=0.0)
for ax, data, scale in zip((ax4, ax5), [elec, co2], [scaling_elec, scaling_co2]):
partners = data.get_trade_partners(ba)
df_plot = data.df
for ba2 in partners:
ax.plot(
df_plot.loc[:, data.KEY["ID"] % (ba, ba2)] * scale, label=ba2, alpha=0.7
)
f.autofmt_xdate()
ax1.set_title(ba)
ax1.set_ylabel("Electricity (GWh)")
ax2.set_ylabel("Carbon (ktons)")
ax3.set_ylabel("Carbon intensity (kg/MWh)")
ax4.set_ylabel("Electricity trade (MWh)")
ax5.set_ylabel("Carbon trade (ktons)")
for ax in [ax1, ax2, ax3, ax4, ax5]:
ax.legend(loc=7)
f.tight_layout()
if save and (fig_folder is not None):
f.savefig(join(fig_folder, "%s.pdf" % ba))
plt.close(f)
return (f, (ax1, ax2, ax3, ax4, ax5))
def summ_stats(s, ax, color, label, q_up=0.9, q_down=0.1):
s1 = s.groupby(s.index.weekofyear).mean()
s1_up = s.groupby(s.index.weekofyear).quantile(q_up)
s1_down = s.groupby(s.index.weekofyear).quantile(q_down)
ax.plot(s1, label=label, color=color)
ax.plot(s1_up, color=color, ls="--", lw=0.5)
ax.plot(s1_down, color=color, ls="--", lw=0.5)
ax.fill_between(
s1_up.index,
s1_down.values.flatten(),
s1_up.values.flatten(),
color=color,
alpha=0.1,
)
def annual_plot_weekly(elec, co2, ba, save=False, fig_folder=None):
scaling_elec = 1e-3
scaling_co2 = 1e-6
df_co2 = co2.df
df_elec = elec.df
f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, figsize=(10, 12.5))
for ifield, field in enumerate(["D", "NG"]):
summ_stats(
df_elec.loc[:, elec.get_cols(r=ba, field=field)] * scaling_elec,
ax1,
COLORS[ifield],
field,
)
summ_stats(
df_co2.loc[:, co2.get_cols(r=ba, field=field)] * scaling_co2,
ax2,
COLORS[ifield],
field,
)
# CO2i plot
co2iD = (
df_co2.loc[:, co2.get_cols(r=ba, field="D")].values.flatten()
/ df_elec.loc[:, elec.get_cols(r=ba, field="D")].values.flatten()
)
co2iG = (
df_co2.loc[:, co2.get_cols(r=ba, field="NG")].values.flatten()
/ df_elec.loc[:, elec.get_cols(r=ba, field="NG")].values.flatten()
)
co2iD[co2iD > 2000] = np.nan
co2iG[co2iG > 2000] = np.nan
summ_stats(pd.DataFrame(co2iD, index=df_co2.index), ax3, COLORS[0], "D")
    summ_stats(pd.DataFrame(co2iG, index=df_co2.index), ax3, COLORS[1], "G")
'''
A suite of utilities to help working with DataFrames
'''
import pandas
import numpy as np
# Function to calculate missing values by column
def missing_values_table(df):
# total missing values
mis_val = df.isnull().sum()
# Percentage of missing values
mis_val_percent = 100 * df.isnull().sum() / len(df)
# Make a table with the results
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
# Rename the columns
mis_val_table_ren_columns = mis_val_table.rename(
columns = {0 : 'Missing Values', 1 : '% of Total Values'})
# Sort the table by percentage of missing descending
mis_val_table_ren_columns = mis_val_table_ren_columns[
mis_val_table_ren_columns.iloc[:,1] != 0].sort_values(
'% of Total Values', ascending=False).round(2)
# Print some summary information
print("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are " + str(mis_val_table_ren_columns.shape[0]) +
" columns that have missing values.")
# Return the dataframe with missing information
return mis_val_table_ren_columns
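# Quick usage sketch (illustrative; `example` is a throwaway frame with some NaNs):
#
#     example = pandas.DataFrame({'a': [1, None, 3], 'b': [None, None, 6]})
#     print(missing_values_table(example))
#     # -> one row per column that has missing values, with counts and percentages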
class DF_Processor:
"""
Working with pandas DataFrames before analysis.
"""
def __init__(self, df):
self.df = df
def train_val_test_split(self, **options):
"""
Split arrays or matrices into random train, validation, and test
subsets
Parameters
----------
df : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
train_size : float
Should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the train split. Defaults to 0.6.
test_size : float
Should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split. Defaults to 0.2.
val_size : float
Should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split. Defaults to 0.2.
Returns train, val, test and a statement indicating each shape.
"""
        test_size = options.pop('test_size', None)
        if test_size is None:
            test_size = .2
        val_size = options.pop('val_size', None)
        if val_size is None:
            val_size = .2
        train_size = options.pop('train_size', None)
        if train_size is None:
            train_size = .6
if test_size + val_size + train_size != 1:
raise ValueError("Size floats must be positive and sum to 1")
train, val, test = np.split(self.df.sample(frac=1),
[int(train_size*len(self.df)),
int(train_size*len(self.df) +
int(val_size*len(self.df)))])
print('Shape of train, val, and test dataframes:')
print(train.shape,val.shape,test.shape)
return train, val, test
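    # Example (illustrative): a 60/20/20 split of an already-loaded DataFrame `df`.
    #
    #     processor = DF_Processor(df)
    #     train, val, test = processor.train_val_test_split(
    #         train_size=0.6, val_size=0.2, test_size=0.2)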
def add_column(self, df, list_to_add):
"""
Takes a list, converts to a pandas.Series(), and adds as a column
to DataFrame, with name of list as name of column.
Parameters
----------
df : an existing DataFrame
list_to_add : a list of length equal to number of rows of DataFrame.
"""
if df.shape[0] != len(list_to_add):
raise ValueError("Length of list must equal number of rows of df")
        new_column = pandas.Series(data=list_to_add)
from bs4 import BeautifulSoup
import requests
import csv
import time
import pandas as pd
for n in range(100, 200):
url = f'https://www.goodreads.com/list/show/1.Best_Books_Ever?page={n}'
source = requests.get(url)
data = source.text
soup = BeautifulSoup(data, 'lxml')
links = []
for title in soup.find_all('a', class_='bookTitle'):
links.append(title['href'])
    link_ser = pd.Series(links)
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_url_tls() -> str:
conn = os.environ["POSTGRES_URL_TLS"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_rootcert() -> str:
cert = os.environ["POSTGRES_ROOTCERT"]
return cert
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": | pd.Series([10.9, 5.2, -10.0], dtype="float64") | pandas.Series |
import numpy as np
import pandas as pd
from utilities.date_util import lookup_dates, end_quarter
# Download the CRSP Names file
# - this links the permno to gvkey (COMPUSTAT) and CUSIP
# - Fix the date ending only for last date within the group
def get_names(db):
return fix_ending_dates(clean_wrds(db.get_table(
'crsp', 'stocknames')), 'nameenddt', ['permno'])
# Get the Compustat-CRSP Link table
# - fix the dates and use todays date as empty end_date
# - filter on permnos
def get_crosswalk(db, permno_list):
crosswalk = clean_wrds(db.get_table('crsp', 'Ccmxpf_linktable'))
crosswalk = clean_wrds(crosswalk[~(
crosswalk.linkenddt < '1980-01-01') & crosswalk.lpermno.isin(permno_list)])
crosswalk['linkenddt'].fillna(pd.Timestamp("today").date(), inplace=True)
return crosswalk
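# Example wiring (illustrative; requires WRDS credentials and the `wrds` package):
#
#     import wrds
#     db = wrds.Connection()
#     names = get_names(db)
#     crosswalk = get_crosswalk(db, names.permno.unique())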
# DB Queries
# Get the Compustat Fundamentals
# Match to names file to get permno instead of gvkey
# Make sure they are unique observations by permno,quarter (this is a pain)
def get_fundamentals(db, crosswalk):
fields = [
'gvkey',
'datadate',
'fyearq',
'fqtr',
'fyr',
'datafqtr',
'indfmt',
'cusip',
'oibdpq',
'atq',
'niq',
'saleq',
'cogsq']
query = "select " + \
', '.join(fields) + " from comp.fundq where fyearq> 1979 and gvkey in %s" % repr(
tuple(crosswalk.gvkey.unique()))
df_fundq = clean_wrds(db.raw_sql(query)).sort_values(['gvkey', 'datafqtr'])
# remove duplicates by taking last datafqtr within each gvkey-quarter
# note: this is rare and only happens when fiscal year changes, taking
# first has no effect
df_fundq2 = df_fundq.groupby(['gvkey', 'datadate']).last().reset_index()
# merge in the gvkey-permno crosswalk
x = pd.merge(df_fundq2,
crosswalk[['gvkey',
'lpermno',
'linkdt',
'linkenddt']].drop_duplicates(),
on='gvkey').rename(columns={'lpermno': 'permno'})
y = x[(x.datadate >= x.linkdt) & (x.datadate <= x.linkenddt)].copy()
return clean_wrds(y.sort_values('linkenddt').groupby(
['permno', 'datadate']).last().reset_index()[fields + ['permno']])
# Download the MSF file from CRSP
# - convert to quarterly data by taking last observation
def get_msf(db, permno_list, trim=False):
fields = [
'cusip',
'permno',
'hsiccd',
'date',
'prc',
'altprc',
'shrout',
'altprcdt',
'cfacshr']
query = "select " + \
', '.join(
fields) + " from crsp.msf where date > '1979-12-31' and permno in %s" % repr(tuple(permno_list))
df_msf = clean_wrds(db.raw_sql(query))
df_msf2 = convert_to_quarter(df_msf, 'date', ['cusip', 'permno'])
if trim:
# Trim the MSF data for only dates and permnos in the S&P at the time
df_msf3 = pd.merge(df_msf2, df_sp500, on='permno')
return df_msf3[(df_msf3['date'] >= df_msf3['start']) &
(df_msf3['date'] <= df_msf3['ending'])]
else:
return df_msf2
# Download the short interest file from COMPUSTAT
# - Merge in the crosswalk to get permnos
# - Filter on time after merge to get correct crosswalk info
def get_short_interest(db, crosswalk):
short_int = clean_wrds(db.get_table('comp', 'sec_shortint'))
    short_int2 = pd.merge(short_int, crosswalk, on=['gvkey'], how='left')
import argparse
import pandas as pd
from collections import defaultdict
def get_args():
desc = 'Fixes a GTF with no genes'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-gtf', '-g', dest='gtf',
help='gtf to fix')
args = parser.parse_args()
return args
# get value associated with keyword in the 9th column of gtf
def get_field_value(key, fields):
if key not in fields:
return None
else:
return fields.split(key+' "')[1].split()[0].replace('";','')
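# Example (illustrative): for a 9th-column string such as
#     'gene_id "ENSG0001"; transcript_id "ENST0001";'
# get_field_value('gene_id', fields) returns 'ENSG0001', and a missing key returns None.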
def make_ofile_name(matfile, prefix=None):
fname = matfile.split('.gtf')[0]
if prefix:
fname += '_'
fname += prefix
fname += '_reformatted.gtf'
return fname
def format_to_write(line):
return ''.join('\t'.join([str(i) for i in line])+'\n')
def construct_new_entry(entry, entry_type, min_coord, max_coord):
entry.type = entry_type
fields = ''
gid = entry.gid.tolist()[0]
gname = entry.gname.tolist()[0]
entry.start = min_coord
entry.stop = max_coord
if entry_type == 'gene' or entry_type == 'transcript':
fields += 'gene_id "{}";'.format(gid)
fields += ' gene_name "{}";'.format(gname)
if entry_type == 'transcript':
tid = entry.tid.tolist()[0]
fields += ' transcript_id "{}";'.format(tid)
entry.fields = fields
return entry
def main():
args = get_args()
gtffile = args.gtf
outfile = make_ofile_name(gtffile)
df = pd.read_csv(gtffile, sep='\t',
names=['chr', 'source', 'type', 'start', 'stop',
'score', 'strand', 'phase', 'fields'],
comment='#')
print(df)
df['tid'] = df.apply(lambda x: get_field_value('transcript_id', x.fields), axis=1)
df['gid'] = df.apply(lambda x: get_field_value('gene_id', x.fields), axis=1)
df['gname'] = df.apply(lambda x: get_field_value('gene', x.fields), axis=1)
gene_mins = df[['gid', 'start']].groupby(by='gid').min().copy(deep=True)
gene_maxes = df[['gid', 'stop']].groupby(by='gid').max().copy(deep=True)
transcript_mins = df[['tid', 'start']].groupby(by='tid').min().copy(deep=True)
transcript_maxes = df[['tid', 'stop']].groupby(by='tid').max().copy(deep=True)
for gid in df.gid.unique():
if gid == None:
continue
# if there's already an entry
if len(df.loc[(df.gid == gid) & (df.type == 'gene')].index) != 0:
pass
# construct gene entry from its exons
else:
g_min = gene_mins.loc[gid, 'start'].tolist()[0]
g_max = gene_maxes.loc[gid, 'stop'].tolist()[0]
# pull info out of constituent transcripts
g_entry = df.loc[df.gid == gid].head(1).copy(deep=True)
g_entry = construct_new_entry(g_entry, 'gene', g_min, g_max)
# and add new entry into the df
            new_loc = g_entry.index.tolist()[0]
            df_new = pd.concat([df.iloc[:new_loc], g_entry, df.iloc[new_loc:]]).reset_index(drop=True)
df = df_new
for tid in df.tid.unique():
        if tid == None:
continue
# if there's already an entry
if len(df.loc[(df.tid == tid) & (df.type == 'transcript')].index) != 0:
pass
# construct transcript entry from its exons
else:
t_min = transcript_mins.loc[tid].tolist()[0]
t_max = transcript_maxes.loc[tid].tolist()[0]
# pull info out of constituent exons
t_entry = df.loc[df.tid == tid].head(1).copy(deep=True)
t_entry = construct_new_entry(t_entry, 'transcript', t_min, t_max)
# and add new entry into the df
new_loc = t_entry.index.tolist()[0]
            df_new = pd.concat([df.iloc[:new_loc], t_entry, df.iloc[new_loc:]])
#!/usr/bin/env python3
# python3.6
# ref link: https://www.jianshu.com/p/91c98585b79b
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import os,re
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from scipy import stats
import seaborn as sns
import argparse
def gene_dis(fi, prefix):
data = pd.read_table(fi, header=0)
###
if data.columns[0] != 'gene':
data.rename(columns={data.columns[0]:'gene'}, inplace=True)
print(data.columns)
data_melt = data.melt('gene', var_name='sample')
data_melt = data_melt.query('value>0')
data_melt.index = data_melt['gene']
#print(data)
#data_melt = data.melt(var_name='sample')
#print(data_melt)
#data_melt = data_melt.query('value>0')
#data_melt.index = data_melt['gene']
### Gene species by gene type
gene_type = {}
with open("/sibcb1/wuliganglab1/liuwei/genome/hisat2_index/Ensembl96_GRCh38_GRcm38.gene.type") as f:
for line in f:
line=line.strip().split("\t")
gene_type[line[1]] = line[2]
gene_type = pd.Series(gene_type, name='gene_type', dtype="string")
type_list = ["protein_coding", "pseudogene", "lincRNA", "antisense"]
for i in range(0, len(gene_type.index)):
if gene_type[i] in type_list:
pass
elif re.search("pseudogene", gene_type[i]):
gene_type[i] = 'pseudogene'
else:
gene_type[i] = 'others'
# Categories
#cat_type = CategoricalDtype(categories=["protein_coding", "pseudogene", "lincRNA", "antisense", "others"], ordered =True)
#cat_type = CategoricalDtype(categories=["others", "pseudogene", "antisense", "lincRNA", "protein_coding"], ordered =True)
#gene_type = gene_type.astype(cat_type)
data_melt2 = pd.merge(data_melt, gene_type, how='left', sort=False, right_index=True,left_index=True)
cat_type = CategoricalDtype(categories=data.columns[1:], ordered =True)
data_melt2['sample'] = data_melt2['sample'].astype(cat_type)
aspect = np.log(int(data.shape[1])) - 1
colors = list(reversed(sns.color_palette()[0:5]))
hue_order = ["others", "pseudogene", "antisense", "lincRNA", "protein_coding"]
sns.displot(data_melt2, x="sample", hue="gene_type", hue_order=hue_order, palette=colors, multiple="stack", shrink=.8, height=4, aspect=aspect)
plt.xticks(rotation=90)
plt.xlabel('Samples', fontsize=15)
plt.ylabel('Gene species', fontsize=15)
out_box = prefix + "_gene_species_type.pdf"
plt.savefig(out_box, bbox_inches='tight')
plt.close()
### Gene species
sns.displot(data_melt, x="sample", shrink=.8, height=4, aspect=aspect)
plt.xticks(rotation=90)
plt.xlabel('Samples', fontsize=15)
plt.ylabel('Gene numbers', fontsize=15)
out_box = prefix + "_gene_species.pdf"
plt.savefig(out_box, bbox_inches='tight')
plt.close()
### Gene species by expression interval
values = | pd.cut(data_melt['value'], [0, 1, 5, 10, 50, 100, 1000, 1000000], labels=['0~1', '1~5', '5~10', '10~50', '50~100', '100~1000', '>1000']) | pandas.cut |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from django_plotly_dash import DjangoDash
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import dash_table
import numpy as np
import pandas as pd
import datetime
import random
import names
from fall.models import PredictionData
'''
visdcc does not work with django-plotly-dash.
First idea: use brython to write the viewport width/height into a hidden div and have dash load it,
then convert those pixel values to vh/vw to size every plotly graph, table and font.
Brython does not work either - it cannot communicate with outside scripts.
Finally, dash has a clientside callback that can run JS, so that is the route taken here.
I'll use md (768px) as the breakpoint for showing my content and use viewport-based units wherever possible.
'''
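# A minimal sketch (kept commented out) of the clientside-callback idea described above.
# It assumes the layout contains a dcc.Store with id 'viewport' and a dcc.Interval with id
# 'resize-check'; both ids are illustrative and are not part of this app.
# app.clientside_callback(
#     "function(n) { return JSON.stringify({w: window.innerWidth, h: window.innerHeight}); }",
#     Output('viewport', 'data'),
#     Input('resize-check', 'n_intervals'),
# )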
prediction_df = pd.DataFrame()
def init_df():
global prediction_df
if datetime.date.today() >= datetime.date(2020,10,1):
df = pd.DataFrame.from_records(PredictionData.objects.all().values(
'participant_name','participant_phone','guess_n_species','guess_total_individual','prediction_datetime',)
).sort_values(by=['prediction_datetime'])
ns_Guess = df.guess_n_species.tolist()
nc_Guess = df.guess_total_individual.tolist()
prediction_df = | pd.DataFrame({}) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
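# regrouped by idx (all idx==0 records, then idx==1, then idx==2) so the array is no longer ordered by column/id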
group_by = | pd.Index(['g1', 'g1', 'g2', 'g2']) | pandas.Index |
import io
import pandas as pd
import pickle
from flagging_site.data import hobolink
from flagging_site.data.hobolink import get_live_hobolink_data
from flagging_site.data.usgs import get_live_usgs_data
def test_hobolink_data_is_recent(app):
with app.app_context():
df = get_live_hobolink_data()
last_timestamp = df['time'].iloc[-1]
time_difference = (pd.to_datetime('today') - last_timestamp)
assert time_difference < pd.Timedelta(hours=2)
def test_usgs_data_is_recent(app):
with app.app_context():
df = get_live_usgs_data()
last_timestamp = df['time'].iloc[-1]
time_difference = ( | pd.to_datetime('today') | pandas.to_datetime |
# EDA - Text Analysis
### Import Libraries
import os
import pickle
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
plt.style.use("bmh")
warnings.filterwarnings("ignore")
### set directory
Im_dir_path = "/home/zhendi/pm/scripts/image/EDA/"
### inport data
result = pd.read_pickle("/home/zhendi/pm/scripts/result_non_split_rmnum.pkl")
### utils for saving and loading
def save_img(fig, path, name):
file_path = os.path.join(path, name)
fig.savefig(file_path)
def save_obj(obj, file_address):
with open(file_address, "wb") as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(file_address):
with open(file_address, "rb") as f:
return pickle.load(f)
# Distribution of words and characters
n_words = result["TEXT"].apply(lambda x: len(str(x).split()))
fig = plt.figure(figsize=(12, 8))
sns.distplot(n_words.values, bins=100, kde=False, color="brown")
plt.xlabel("Number of Words", fontsize=15)
plt.ylabel("Frequency", fontsize=15)
plt.title("Distribution of the Number of Words", fontsize=20)
save_img(fig, Im_dir_path, "DistributionOfWords.png")
plt.show()
n_chars = result["TEXT"].apply(lambda x: len(str(x)))
fig = plt.figure(figsize=(12, 8))
sns.distplot(n_chars.values, bins=100, kde=False, color="brown")
plt.xlabel("Number of Characters", fontsize=15)
plt.ylabel("Frequency", fontsize=15)
plt.title("Distribution of Number of Characters", fontsize=20)
save_img(fig, Im_dir_path, "DistributionOfChars.png")
plt.show()
### Text length distribution
result["n_words"] = result["TEXT"].apply(lambda x: len(str(x).split()))
fig = plt.figure(figsize=(12, 8))
sns.violinplot(x="Class", y="n_words", data=result, inner=None)
sns.swarmplot(x="Class", y="n_words", data=result, color="w", alpha=0.5)
plt.ylabel("Text Count", fontsize=15)
plt.xlabel("Class", fontsize=15)
plt.title("Text length distribution", fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
save_img(fig, Im_dir_path, "TextLength9Classes.png")
plt.show()
### Wordcloud for all the text
def plotWordCloud_forall():
combText = result["TEXT"].agg(lambda x: " ".join(x.dropna()))
wordcloud = WordCloud(
background_color="white", colormap="Dark2", max_font_size=150, random_state=42
).generate(combText)
# Display the generated image:
print("word cloud for text ")
fig = plt.figure(figsize=(12, 8))
plt.imshow(wordcloud, interpolation="bilinear")
plt.title("All Text Data")
plt.axis("off")
save_img(fig, Im_dir_path, "wordCloud_forall.png")
plt.show()
plotWordCloud_forall()
### Wordcloud for each class
wc = WordCloud(
background_color="white", colormap="Dark2", max_font_size=150, random_state=42
)
def plotWordCloud_foreach(class_n, model, TEXTdata):
combText = TEXTdata.agg(lambda x: " ".join(x.dropna()))
wordcloud = model.generate(combText)
# Display the generated image:
print("word cloud for Class: ", class_n)
fig = plt.figure(figsize=(12, 8))
plt.imshow(wordcloud, interpolation="bilinear")
plt.title("Class: " + str(class_n), fontsize=15)
plt.axis("off")
filename = "wordCloud_class_" + str(class_n) + ".png"
save_img(fig, Im_dir_path, filename)
plt.show()
for i in range(9):
class_n = i + 1
textdata = result[result["Class"] == class_n]["TEXT"]
plotWordCloud_foreach(class_n, wc, textdata)
### Distribution of Unigram, Bigram, and Trigram
# build the model
vec = CountVectorizer().fit(result["TEXT"])
# Non-strict for unigrams
result = pd.read_pickle("/home/zhendi/pm/scripts/result_non_split_rmnum.pkl")
vec = CountVectorizer(ngram_range=(1, 1)).fit(result["TEXT"])
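# ngram_range=(1, 1) limits the vocabulary to single tokens (unigrams)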
def get_top_n_words(x, model, n):
vec = model
bow = vec.transform(x)
sum_words = bow.sum(axis=0)
    # vec.vocabulary_ maps each term to its column index in the bag-of-words matrix
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
] # a list
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) # a sorted list
return words_freq[:n] # select numbers
dict_unigram = {}
for i in range(9):
class_n = i + 1
textdata = result[result["Class"] == class_n]["TEXT"]
dict_unigram[class_n] = get_top_n_words(textdata, vec, 20)
df_list = [
pd.DataFrame(dict_unigram[i + 1], columns=["Unigram", "Frequency"])
for i in range(9)
]
def plot_classfeats_h(dfs):
x = np.arange(len(dfs[0])) # y axis ticks
for i, df in enumerate(dfs):
fig = plt.figure(figsize=(12, 10))
plt.xlabel("Frequency", labelpad=16, fontsize=15)
plt.ylabel("Unigram", labelpad=16, fontsize=15)
plt.title("Top 20 Unigram in Class: " + str(i + 1), fontsize=20)
plt.barh(df.Unigram, df.Frequency, align="center", color="#32acbf")
plt.yticks(x, fontsize=12)
plt.xticks(fontsize=12)
plt.ylim([-1, x[-1] + 1])
save_img(fig, Im_dir_path, "TextFeature_Count_uni_Class" + str(i + 1) + ".png")
plt.show()
plot_classfeats_h(df_list)
# Non-strict for bigrams
result = | pd.read_pickle("/home/zhendi/pm/scripts/result_non_split_rmnum.pkl") | pandas.read_pickle |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
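    # any GET to an observation-values endpoint of the mocked API returns the canned JSON payload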
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert 'index=True not supported' in str(excinfo.value)
def test_run_persistence_interval_too_long(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('48h'), # too long
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_interval_not_midnight_to_midnight(session,
site_metadata,
obs_5min_begin):
# not midnight to midnight
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=22),
lead_time_to_start=pd.Timedelta('1h'),
interval_length= | pd.Timedelta('1h') | pandas.Timedelta |
# %%
import datetime
import pandas
import altair
from plot_shared import plot_points_average_and_trend
# %%
df = pandas.read_csv('https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&metric=cumPeopleVaccinatedFirstDoseByPublishDate&metric=cumPeopleVaccinatedSecondDoseByPublishDate&format=csv')
df.rename(columns={
'cumPeopleVaccinatedFirstDoseByPublishDate': 'First',
'cumPeopleVaccinatedSecondDoseByPublishDate': 'Second',
'areaName': 'Nation',
'date': 'Publication Date'
}, inplace=True)
df = df.drop(columns=['areaCode','areaType']).melt(id_vars=['Publication Date','Nation'], var_name='Dose', value_name='People')
# %%
ni = pandas.read_csv('../sam/doses.csv')
ni['Dose'] = ni['Dose'].str.replace('Dose 1', 'First')
ni['Dose'] = ni['Dose'].str.replace('Dose 2', 'Second')
ni['Dose'] = ni['Dose'].str.replace('Dose 3', 'Third')
# %%
history = df[df['Nation']=='Northern Ireland'][['Publication Date','Dose','People']]
ni.rename(columns={'Date':'Publication Date','Total':'People'}, inplace=True)
all = history.merge(ni, on=['Publication Date','Dose'], how='outer', suffixes=('','_bot'))
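# prefer the officially published figures; fall back to the bot-collected totals where they are missing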
all['People'] = all['People'].fillna(all['People_bot'])
all = all[['Publication Date','Dose','People']]
# %%
boosters = all[all['Dose']=='Booster'][['Publication Date','People']]
boosters['Publication Date'] = pandas.to_datetime(boosters['Publication Date'])
dose2s = all[all['Dose']=='Second'][['Publication Date','People']]
dose2s['Publication Date'] = pandas.to_datetime(dose2s['Publication Date'])
dose2s['Booster Target Date 6M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183, unit='d')
dose2s['Booster Target Date 7M'] = | pandas.to_datetime(dose2s['Publication Date']) | pandas.to_datetime |
import os
import pandas as pd
import argparse
mainAppRepo = os.path.dirname(os.path.abspath(__file__)) + '/'
# SITE NAME
def get_site_name_from_site_number(site_number):
sites = pd.read_csv(mainAppRepo + 'data/study_sites.txt',
sep=',', header=0, index_col=0) #\\s+
    site_name = sites.index[site_number]
return site_name
# H ind CSV FILE
def get_csv_file_with_indicator_for_a_context(site_number, chronicle, approx, folder):
indicator = "H"
site_name = get_site_name_from_site_number(site_number)
file_name = "Exps_" + indicator + "_Indicator_" + site_name + "_Chronicle"+ str(chronicle) + "_Approx" + str(approx) + ".csv"
indicator_file = folder + "/" + site_name + "/" + file_name
try:
dfp = pd.read_csv(indicator_file, sep=",")
except:
print("File does not exist")
dfp = pd.DataFrame()
return dfp
def get_csv_file_with_steady_features_for_a_context(site_number, chronicle, folder):
site_name = get_site_name_from_site_number(site_number)
model_name = "model_time_0_geo_0_thick_1_K_86.4_Sy_0.1_Step1_site" + str(site_number) + "_Chronicle" + str(chronicle) + "_SteadyState"
file_name = model_name + "_extracted_features.csv"
steady_file = folder + "/" + site_name + "/" + model_name + "/" + file_name
try:
df = pd.read_csv(steady_file, sep=";")
except:
print("File for site " + site_name + " (number : " + str(site_number) + " & chronicle " + str(chronicle) + ") does not exist")
df = pd.DataFrame()
return df
def get_csv_file_with_indicator_for_a_site(site_number, folder):
indicator = "H"
site_name = get_site_name_from_site_number(site_number)
file_name = "Exps_" + indicator + "_Indicator_" + site_name + ".csv"
indicator_file = folder + "/" + site_name + "/" + file_name
try:
dfp = pd.read_csv(indicator_file, sep=",")
except:
print("File for site " + site_name + " (number : " + str(site_number) + "does not exist")
dfp = pd.DataFrame()
return dfp
def createCSVFileForASite(site_number, folder):
indicator = "H"
site_name = get_site_name_from_site_number(site_number)
df_all_per_site = pd.DataFrame()
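    # gather the indicator files for every (approximation, chronicle) combination of this site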
for approx in range(2):
for chronicle in range(2):
df = get_csv_file_with_indicator_for_a_context(site_number, chronicle, approx, folder)
if not df.empty:
taille = len(df.index)
df = df.drop(df.columns[[0]], axis=1)
df.rename(columns={'Approximation':'Rate'}, inplace=True)
df_site = pd.DataFrame(data=[site_number]*taille, index=df.index, columns=['Site_number'])
df_approx = pd.DataFrame(data=[approx]*taille, index=df.index, columns=['Approx'])
df_chr = | pd.DataFrame(data=[chronicle]*taille, index=df.index, columns=['Chronicle']) | pandas.DataFrame |
import pandas as pd
# fpanda_del_orders = pd.read_csv("fpanda_del_orders.csv")
foodpanda_orders = pd.read_csv("foodpanda_orders.csv")
deliveroo_orders = pd.read_csv("deliveroo_orders.csv")
ubereats_orders = pd.read_csv("uber_orders.csv", usecols=["Cost", "Date", "Items", "Restaurant", "Service"])
dominos_orders = | pd.read_csv("dominos_orders.csv", usecols=["Cost", "Date", "Items", "Restaurant", "Service"]) | pandas.read_csv |
#!/usr/bin/python
# coding=utf-8
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
__author__ = "<NAME>"
__copyright__ = "Copyright 2016-2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "09/08/17"
from atomic_data.collections import AtomicCollection
import logging
from mendeleev import element
from ai4materials.descriptors.base_descriptor import Descriptor
from ai4materials.utils.utils_crystals import convert_energy_substance
from ai4materials.utils.utils_config import read_nomad_metainfo
from ai4materials.utils.utils_config import get_data_filename
import os
import pandas as pd
import string
logger = logging.getLogger('ai4materials')
class AtomicFeatures(Descriptor):
"""Return the atomic features corresponding to a given crystal structure.
For each atom species present in the crystal structure, a list of atomic features are retrieved
either from user-defined atomic collection, or the python package Mendeleev (https://pypi.org/project/mendeleev/).
Example of atomic features are atomic number, electron affinity, ionization potential, orbital radii etc.
Parameters:
configs: dict
Contains configuration information such as folders for input and output
(e.g. `desc_folder`, `tmp_folder`), logging level, and metadata location.
See also :py:mod:`ai4materials.utils.utils_config.set_configs`.
.. codeauthor:: <NAME> <<EMAIL>>
"""
def __init__(self, path_to_collection=None, feature_order_by=None, energy_unit='eV', length_unit='angstrom',
materials_class='binaries', configs=None):
super(AtomicFeatures, self).__init__(configs=configs)
if feature_order_by is None:
feature_order_by = 'atomic_mulliken_electronegativity'
self.feature_order_by = feature_order_by
self.energy_unit = energy_unit
self.length_unit = length_unit
self.materials_class = materials_class
self.metadata_info = read_nomad_metainfo()
if path_to_collection is None:
path_to_collection = get_data_filename(resource='../tests/ExtendedBinaries_Dimers_Atoms_new.json',
package='atomic_data')
self.path_to_collection = path_to_collection
self.collection = AtomicCollection("binaries", collections=self.path_to_collection)
logger.info("Reading atomic collection from '{0}'".format(self.path_to_collection))
if self.feature_order_by is not None:
logger.info("Ordering atomic features by '{0}' of the elements".format(self.feature_order_by))
def calculate(self, structure, selected_feature_list=None, **kwargs):
if selected_feature_list is not None:
if len(selected_feature_list) < 2:
raise ValueError("Please select at least two primary features.")
else:
self.selected_feature_list = selected_feature_list
value_list = []
columns = ['ordered_chemical_symbols']
features_mendeleev = ['atomic_number']
chemical_symbols = structure.get_chemical_symbols()
if self.materials_class == 'binaries':
# reduce chemical symbols and formula to binary
chemical_symbols = list(set(chemical_symbols))
if len(chemical_symbols) == 1:
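                # an elemental structure (a single species) is treated as an A-A 'binary'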
chemical_symbols *= 2
if len(chemical_symbols) != 2:
raise ValueError("More than 2 different atoms in structure {}. At the moment only structures with one or "
"two different chemical species are possible. "
"The chemical symbols are {}.".format(structure, chemical_symbols))
# in a given structure, order by the user-specified atomic_metadata
p = self.collection.get(self.feature_order_by)
value_order_by = p.value(chemical_symbols)
# add lambda because the key that is being used to sort
# is (val, sym), and not just value.
# in case of sorting of multiple arrays this is needed
chemical_symbols = [sym for (val, sym) in
sorted(zip(value_order_by, chemical_symbols), key=lambda pair: pair[0])]
values = [''.join(chemical_symbols)]
for idx, el_symb in enumerate(chemical_symbols):
for feature in selected_feature_list:
# divide features for mendeleev and collection
if feature in features_mendeleev:
elem_mendeleev = element(el_symb)
try:
value = getattr(elem_mendeleev, feature)
except Exception as e:
logger.warning("{} not found for element {}.".format(feature, el_symb))
logger.warning("{}".format(e))
value = float('NaN')
else:
# add features from collection
try:
p = self.collection.get(feature)
value = p.value(el_symb) # convert to desired units
unit = p.value(el_symb, 'units')
value = convert_energy_substance(unit, value, energy_unit=self.energy_unit,
length_unit=self.length_unit)
except Exception as e:
logger.warning("{} not found for element {}.".format(feature, el_symb))
logger.warning("{}".format(e))
value = float('NaN')
values.append(value)
columns.append(feature + '(' + str(list(string.ascii_uppercase)[idx]) + ')')
values = tuple(values)
value_list.append(values)
atomic_features_table = | pd.DataFrame.from_records(value_list, columns=columns) | pandas.DataFrame.from_records |
import pandas as pd
def generate_demand_csv():
# Demand
demand = pd.read_excel("Data_management/DATA.xlsx", sheet_name='2.3 EUD', index_col=0, header=1, usecols=range(5))
demand.columns = [x.strip() for x in demand.columns]
demand.index = [x.strip() for x in demand.index]
# Add additional information
demand_aux = pd.read_csv("Data/User_data/aux_demand.csv", index_col=0)
demand = pd.merge(demand, demand_aux, left_index=True, right_index=True)
# Rename and reorder columns
demand.index.name = 'parameter name'
demand = demand.reset_index()
demand = demand[['Category', 'Subcategory', 'parameter name', 'HOUSEHOLDS',
'SERVICES', 'INDUSTRY', 'TRANSPORTATION', 'Units']]
demand.to_csv("Data/User_data/Demand.csv", sep=',', index=False)
def generate_resources_csv():
# Resources
resources = pd.read_excel("Data_management/DATA.xlsx", sheet_name='2.1 RESOURCES', index_col=0, header=1,
usecols=range(5))
resources.index = [x.strip() for x in resources.index]
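    # keep only the first whitespace-delimited token of each column header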
resources.columns = [x.split(" ")[0] for x in resources.columns]
# Add additional information
resources_aux = pd.read_csv("Data/User_data/aux_resources.csv", index_col=0)
resources = pd.merge(resources, resources_aux, left_index=True, right_index=True)
# Rename and reorder columns
resources.index.name = 'parameter name'
resources = resources.reset_index()
resources = resources[['Category', 'Subcategory', 'parameter name', 'avail', 'gwp_op', 'c_op', 'einv_op']]
# resources.columns = ['Category', 'Subcategory', 'parameter name', 'Availability', 'Direct and indirect emissions',
# 'Price', 'Direct emissions']
# Add a line with units
units = pd.Series(['', '', 'units', '[GWh/y]', '[ktCO2-eq./GWh]', '[Meuro/GWh]', '[GWh/y]'],
index=resources.columns)
resources = pd.concat((units.to_frame().T, resources), axis=0)
resources.to_csv("Data/User_data/Resources.csv", sep=',', index=False)
def generate_technologies_csv():
# Technologies
technologies = pd.read_excel("Data_management/DATA.xlsx", sheet_name='3.2 TECH', index_col=1)
technologies = technologies.drop(technologies.columns[[0]], axis=1)
technologies.index = [x.strip() for x in technologies.index]
# Add additional information
technologies_aux = pd.read_csv("Data/User_data/aux_technologies.csv", index_col=0)
technologies = pd.merge(technologies, technologies_aux, left_index=True, right_index=True)
# Rename and reorder columns
technologies.index.name = 'parameter name'
technologies = technologies.reset_index()
technologies = technologies[['Category', 'Subcategory', 'Technologies name', 'parameter name', 'c_inv', 'c_maint',
'gwp_constr', 'einv_constr', 'lifetime', 'c_p', 'fmin_perc', 'fmax_perc',
'f_min', 'f_max']]
# Add a line with units
units = pd.Series(['', '', 'Name (simplified)', 'Name (in model and documents)',
'[Meuro/GW],[Meuro/GWh],[Meuro/(Mkmpass/h)],[Meuro/(Mtonkm/h)]',
'[Meuro/GW],[Meuro/GWh],[Meuro/(Mkmpass/h)],[Meuro/(Mtonkm/h)]',
'[ktonCO2_eq/GW],[ktonCO2_eq/GWh],[ktonCO2_eq/(Mkmpass/h)],[ktonCO2_eq/(Mtonkm/h)]',
'[GWh/y]', '[years]', '[]', '[]', '[]', '[GW]', '[GW]'],
index=technologies.columns)
technologies = pd.concat((units.to_frame().T, technologies), axis=0)
technologies.to_csv("Data/User_data/Technologies.csv", sep=',', index=False)
def generate_layers_csv():
# Layers in-out
layers = pd.read_excel("Data_management/DATA.xlsx", sheet_name='3.1 layers_in_out', index_col=1)
layers = layers.drop(layers.columns[0], axis=1)
layers.columns = [x.strip() for x in layers.columns]
layers.to_csv("Data/Developer_data/Layers_in_out.csv", sep=',')
def generate_storage_csv():
# Storage eff in
storage_eff_in = pd.read_excel("Data_management/DATA.xlsx", sheet_name='3.3 STO', header=2, nrows=25, index_col=0)
storage_eff_in.index = [x.strip() for x in storage_eff_in.index]
storage_eff_in.to_csv("Data/Developer_data/Storage_eff_in.csv", sep=',')
# Storage eff out
storage_eff_out = pd.read_excel("Data_management/DATA.xlsx", sheet_name='3.3 STO', header=30, nrows=25, index_col=0)
storage_eff_out.index = [x.strip() for x in storage_eff_out.index]
storage_eff_out.to_csv("Data/Developer_data/Storage_eff_out.csv", sep=',')
# Storage characteristics
storage_c = | pd.read_excel("Data_management/DATA.xlsx", sheet_name='3.3 STO', header=58, nrows=25, index_col=0) | pandas.read_excel |
## ~~~~~ Imports ~~~~~
## Data Manipulation
import pandas as pd
import numpy as np
## Plotting
import seaborn as sns
import matplotlib.pyplot as plt
## Scraping
import requests
import xmltodict
## OS Related
import os
from os import listdir
from os.path import isfile, join
## Datetime Handling
from datetime import timedelta, datetime, date
import time
## Miscellaneous
import time
import warnings
import collections
from ipypb import track
## Stream Data
from ElexonDataPortal import stream_info
## ~~~~~ Helper Functions/Classes ~~~~~
class RequestError(Exception):
def __init__(self, http_code, error_type, description):
self.message = f'{http_code} - {error_type}\n{description}'
def __str__(self):
return self.message
## ~~~~~ Core Wrapper Class ~~~~~
class Wrapper:
def dt_rng_2_SPs(self, start_date:datetime, end_date:datetime, freq='30T', tz='Europe/London'):
dt_rng = | pd.date_range(start_date, end_date, freq=freq, tz=tz) | pandas.date_range |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/23 9:07
contact: <EMAIL>
desc: new event interfaces
new event interface: novel coronavirus data - NetEase (163)
new event interface: novel coronavirus data - DXY (Dingxiangyuan)
new event interface: novel coronavirus data - Baidu
"""
import json
import time
from io import BytesIO
import demjson
import pandas as pd
import requests
from PIL import Image
from bs4 import BeautifulSoup
from akshare.event.cons import province_dict, city_dict
# pd.set_option('display.max_columns', None) # just for debug
def epidemic_163(indicator="实时"):
"""
    NetEase web - novel coronavirus - real-time case statistics
    domestic (China) and overseas
https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&#map_block
https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&
    :return: situation for each domestic region and for overseas regions
:rtype: pandas.DataFrame
"""
url = "https://c.m.163.com/ug/api/wuhan/app/data/list-total"
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
}
params = {
"t": int(time.time() * 1000),
}
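    # the 't' parameter above is the current time in milliseconds and acts as a cache buster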
res = requests.get(url, params=params, headers=headers)
data_json = res.json()
hist_today_df = pd.DataFrame([item["today"] for item in data_json["data"]["chinaDayList"]], index=[item["date"] for item in data_json["data"]["chinaDayList"]])
hist_total_df = pd.DataFrame([item["total"] for item in data_json["data"]["chinaDayList"]], index=[item["date"] for item in data_json["data"]["chinaDayList"]])
current_df = pd.DataFrame.from_dict(data_json["data"]["chinaTotal"])
outside_today_df = pd.DataFrame([item["today"] for item in data_json["data"]["areaTree"]], index=[item["name"] for item in data_json["data"]["areaTree"]])
outside_hist_df = pd.DataFrame([item["total"] for item in data_json["data"]["areaTree"]], index=[item["name"] for item in data_json["data"]["areaTree"]])
province_hist_df = pd.DataFrame([item["total"] for item in data_json["data"]["areaTree"][0]["children"]], index=[item["name"] for item in data_json["data"]["areaTree"][0]["children"]])
if indicator == "实时":
return current_df
if indicator == "省份":
return province_hist_df
elif indicator == "历史":
return hist_total_df
elif indicator == "国家":
return outside_hist_df
def epidemic_dxy(indicator="西藏自治区"):
"""
    DXY (Dingxiangyuan) - nationwide statistics - info
    DXY - statistics by region - data
    DXY - national fever clinic list - hospital
    DXY - national news - news
:param indicator: ["info", "data", "hospital", "news"]
:type indicator: str
    :return: data for the specified indicator
:rtype: pandas.DataFrame
"""
url = "https://3g.dxy.cn/newh5/view/pneumonia"
res = requests.get(url)
res.encoding = "utf-8"
soup = BeautifulSoup(res.text, "lxml")
# news
text_data_news = str(soup.find_all("script", attrs={"id": "getTimelineService"}))
temp_json = text_data_news[
text_data_news.find("= [{") + 2: text_data_news.rfind("}catch")
]
json_data = pd.DataFrame(json.loads(temp_json))
desc_data = json_data[
["title", "summary", "infoSource", "provinceName", "sourceUrl"]
]
# data-domestic
data_text = str(soup.find("script", attrs={"id": "getAreaStat"}))
data_text_json = json.loads(
data_text[data_text.find("= [{") + 2: data_text.rfind("catch") - 1]
)
data_df = pd.DataFrame(data_text_json)
data_df.columns = ["地区", "地区简称", "确诊", "疑似", "治愈", "死亡", "备注", "区域ID", "区域"]
country_df = data_df[["地区", "地区简称", "确诊", "疑似", "治愈", "死亡", "备注"]]
# data-global
data_text = str(soup.find("script", attrs={"id": "getListByCountryTypeService2"}))
data_text_json = json.loads(
data_text[data_text.find("= [{") + 2: data_text.rfind("catch") - 1]
)
global_df = pd.DataFrame(data_text_json)
# info
dxy_static = soup.find(attrs={"id": "getStatisticsService"}).get_text()
# hospital
url = (
"https://assets.dxycdn.com/gitrepo/tod-assets/output/default/pneumonia/index.js"
)
params = {"t": str(int(time.time()))}
res = requests.get(url, params=params)
hospital_df = pd.read_html(res.text)[0].iloc[:, :-1]
if indicator == "全国":
return country_df
elif indicator == "global":
return global_df
elif indicator == "info":
return pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
)
elif indicator == "hospital":
return hospital_df
elif indicator == "全国疫情新增趋势图":
# img
img_url = pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
).T
img_file = Image.open(
BytesIO(requests.get(img_url["dailyPics"][0][0]).content)
)
img_file.show()
elif indicator == "全国疫情新增确诊病例趋势图":
# img
img_url = pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
).T
img_file = Image.open(
BytesIO(requests.get(img_url["dailyPics"][0][1]).content)
)
img_file.show()
elif indicator == "全国疫情风险病例趋势图":
# img
img_url = pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
).T
img_file = Image.open(
BytesIO(requests.get(img_url["dailyPics"][0][2]).content)
)
img_file.show()
elif indicator == "全国疫情累计死亡/治愈病例趋势图":
# img
img_url = pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
).T
img_file = Image.open(
BytesIO(requests.get(img_url["dailyPics"][0][3]).content)
)
img_file.show()
elif indicator == "全国疫情累计死亡/治愈病例趋势图-湖北":
# img
img_url = pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
).T
img_file = Image.open(
BytesIO(requests.get(img_url["dailyPics"][0][4]).content)
)
img_file.show()
elif indicator == "全国疫情病死率趋势图":
# img
img_url = pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
).T
img_file = Image.open(
BytesIO(requests.get(img_url["dailyPics"][0][5]).content)
)
img_file.show()
elif indicator == "疫情地图":
# img
img_url = pd.read_json(
dxy_static[dxy_static.find("= {") + 2: dxy_static.rfind("}catch")],
orient="index",
).T
img_file = Image.open(
BytesIO(requests.get(img_url["imgUrl"].values[0]).content)
)
img_file.show()
elif indicator == "news":
return desc_data
else:
try:
sub_area = pd.DataFrame(data_df[data_df["地区"] == indicator]["区域"].values[0])
if sub_area.empty:
return print("暂无分区域数据")
sub_area.columns = ["区域", "确诊人数", "疑似人数", "治愈人数", "死亡人数", "区域ID"]
sub_area = sub_area[["区域", "确诊人数", "疑似人数", "治愈人数", "死亡人数"]]
return sub_area
except IndexError as e:
print("请输入省/市的全称, 如: 浙江省/上海市 等")
def epidemic_baidu(indicator="浙江"):
"""
    Baidu - novel coronavirus pneumonia - real-time epidemic big-data report
    https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_1
    :param indicator: see the documentation for the accepted values
    :type indicator: str
    :return: data for the specified indicator
:rtype: pandas.DataFrame
"""
url = "https://huiyan.baidu.com/openapi/v1/migration/rank"
params = {
"type": "move",
"ak": "kgD2HiDnLdUhwzd3CLuG5AWNfX3fhLYe",
"adminType": "country",
"name": "全国",
}
res = requests.get(url, params=params)
move_in_df = pd.DataFrame(res.json()["result"]["moveInList"])
move_out_df = pd.DataFrame(res.json()["result"]["moveOutList"])
url = "https://opendata.baidu.com/api.php"
params = {
"query": "全国",
"resource_id": "39258",
"tn": "wisetpl",
"format": "json",
"cb": "jsonp_1580470773343_11183",
}
res = requests.get(url, params=params)
json_data = json.loads(res.text[res.text.find("({") + 1:res.text.rfind(");")])
today_df = pd.DataFrame(json_data["data"][0]["list"][0]["item"])
protect_df = pd.DataFrame(json_data["data"][0]["list"][1]["item"])
rumor_df = pd.DataFrame(json_data["data"][0]["list"][2]["item"])
url = "https://opendata.baidu.com/data/inner"
params = {
"tn": "reserved_all_res_tn",
"dspName": "iphone",
"from_sf": "1",
"dsp": "iphone",
"resource_id": "28565",
"alr": "1",
"query": "肺炎",
"cb": "jsonp_1580470773344_83572",
}
res = requests.get(url, params=params)
json_data = json.loads(res.text[res.text.find("({") + 1:res.text.rfind(");")])
spot_report = pd.DataFrame(json_data["Result"][0]["DisplayData"]["result"]["items"])
url = "https://voice.baidu.com/act/newpneumonia/newpneumonia/"
params = {
"from": "osari_pc_1",
}
res = requests.get(url, params=params)
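    # the statistics are embedded in the page as a JS assignment; slice out the JSON between 'V.conf = ' and 'V.bsData'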
json_data = json.loads(res.text[res.text.find("V.conf = ") + 9: res.text.find("V.bsData") - 1])
temp_df = pd.DataFrame()
temp_df[json_data["component"][0]["trend"]["list"][0]["name"]] = json_data["component"][0]["trend"]["list"][0][
"data"]
temp_df[json_data["component"][0]["trend"]["list"][1]["name"]] = json_data["component"][0]["trend"]["list"][1][
"data"]
temp_df[json_data["component"][0]["trend"]["list"][2]["name"]] = json_data["component"][0]["trend"]["list"][2][
"data"]
temp_df[json_data["component"][0]["trend"]["list"][3]["name"]] = json_data["component"][0]["trend"]["list"][3][
"data"]
temp_df.index = json_data["component"][0]["trend"]["updateDate"]
temp_dict = {}
for item in json_data["component"][0]["caseList"]:
temp_dict[item["area"]] = item["subList"]
domestic_df = pd.DataFrame.from_dict(json_data["component"][0]["summaryDataIn"], orient="index")
domestic_df.columns = [json_data["component"][0]["mapLastUpdatedTime"]]
out_df = pd.DataFrame.from_dict(json_data["component"][0]["summaryDataOut"], orient="index")
out_df.columns = [json_data["component"][0]["foreignLastUpdatedTime"]]
if indicator == "热门迁入地":
return move_in_df
elif indicator == "热门迁出地":
return move_out_df
elif indicator == "今日疫情热搜":
return today_df
elif indicator == "防疫知识热搜":
return protect_df
elif indicator == "热搜谣言粉碎":
return rumor_df
elif indicator == "实时播报":
return spot_report
elif indicator == "历史":
return temp_df
elif indicator == "国内":
return domestic_df
elif indicator == "国外":
return out_df
else:
return pd.DataFrame(temp_dict[indicator])
def migration_area_baidu(area="乌鲁木齐市", indicator="move_in", date="20200201"):
"""
    Baidu Maps Huiyan - Baidu Qianxi (migration) - inbound migration details for an area
    Baidu Maps Huiyan - Baidu Qianxi (migration) - outbound migration details for an area
    The top-100 results are shown; if fewer than 100 exist, all of them are shown
    Inbound source share: people moving from xx into the current area divided by the area's total inbound population
    Outbound destination share: people moving from the current area to xx divided by the area's total outbound population
    https://qianxi.baidu.com/?from=shoubai#city=0
    :param area: a province or a specific city; the full official name must be used
    :type area: str
    :param indicator: move_in for inbound, move_out for outbound
    :type indicator: str
    :param date: query date, any date from 20200101 onwards
    :type date: str
    :return: the first 50 inbound/outbound migration detail records
:rtype: pandas.DataFrame
"""
city_dict.update(province_dict)
inner_dict = dict(zip(city_dict.values(), city_dict.keys()))
if inner_dict[area] in province_dict.keys():
dt_flag = "province"
else:
dt_flag = "city"
url = "https://huiyan.baidu.com/migration/cityrank.jsonp"
params = {
"dt": dt_flag,
"id": inner_dict[area],
"type": indicator,
"date": date,
}
res = requests.get(url, params=params)
json_data = json.loads(res.text[res.text.find("({") + 1:res.text.rfind(");")])
return pd.DataFrame(json_data["data"]["list"])
def migration_scale_baidu(area="乌鲁木齐市", indicator="move_out", start_date="20190112", end_date="20200201"):
"""
    Baidu Maps Huiyan - Baidu Qianxi (migration) - migration scale
    * Migration scale index: reflects the size of the inbound or outbound population and is comparable across cities
    * A city's migration boundary follows its administrative division, including its districts, counties, townships and villages
    https://qianxi.baidu.com/?from=shoubai#city=0
    :param area: a province or a specific city; the full official name must be used
    :type area: str
    :param indicator: move_in for inbound, move_out for outbound
    :type indicator: str
    :param start_date: start date of the query; the default is fine
    :type start_date: str
    :param end_date: end date of the query, any date from 20200101 onwards
    :type end_date: str
    :return: time series of the migration scale index
:rtype: pandas.DataFrame
"""
city_dict.update(province_dict)
inner_dict = dict(zip(city_dict.values(), city_dict.keys()))
if inner_dict[area] in province_dict.keys():
dt_flag = "province"
else:
dt_flag = "city"
url = "https://huiyan.baidu.com/migration/historycurve.jsonp"
params = {
"dt": dt_flag,
"id": inner_dict[area],
"type": indicator,
"startDate": start_date,
"endDate": end_date,
}
res = requests.get(url, params=params)
json_data = json.loads(res.text[res.text.find("({") + 1:res.text.rfind(");")])
temp_df = pd.DataFrame.from_dict(json_data["data"]["list"], orient="index")
temp_df.index = pd.to_datetime(temp_df.index)
temp_df.columns = ["迁徙规模指数"]
return temp_df
def epidemic_area_search(province="四川省", city="成都市", district="高新区"):
"""
    Query data at the province-city-district level
    https://ncov.html5.qq.com/community?channelid=1&from=singlemessage&isappinstalled=0
    :param province: use a value returned by epidemic_area_all
    :type province: str
    :param city: use a value returned by epidemic_area_all
    :type city: str
    :param district: use a value returned by epidemic_area_all
    :type district: str
    :return: community-level data for the given province, city and district
:rtype: pandas.DataFrame
"""
url = "https://ncov.html5.qq.com/api/getCommunity"
params = {
"province": province,
"city": city,
"district": district,
"lat": "30.26555",
"lng": "120.1536",
}
res = requests.get(url, params=params)
temp_df = pd.DataFrame(res.json()["community"][province][city][district])
return temp_df[["province", "city", "district", "show_address", "full_address", "cnt_sum_certain"]]
def epidemic_area_all():
"""
    All province-city-district combinations nationwide for which data can be retrieved
    https://ncov.html5.qq.com/community?channelid=1&from=singlemessage&isappinstalled=0
    :return: all province-city-district combinations with available data
:rtype: pandas.DataFrame
"""
url = "https://ncov.html5.qq.com/api/getPosition"
res = requests.get(url)
area = res.json()["position"]
province_list = list(area.keys())
temp = []
for p in province_list:
for c in area[p].keys():
temp.extend(list(zip([p] * len(list(area[p][c].keys())[1:]), [c] * len(list(area[p][c].keys())[1:]), list(area[p][c].keys())[1:])))
return pd.DataFrame(temp, columns=["province", "city", "district"])
def epidemic_area_detail():
"""
    Confirmed case counts down to individual residential communities
    This iterates over every page; do not run it unless necessary
    https://ncov.html5.qq.com/community?channelid=1&from=singlemessage&isappinstalled=0
    :return: confirmed case counts for every residential community nationwide
:rtype: pandas.DataFrame
"""
temp_df = pd.DataFrame()
area_df = epidemic_area_all()
for item in area_df.iterrows():
print(f"一共{area_df.shape[0]}, 正在下载第{item[0]+1}页")
small_df = epidemic_area_search(province=item[1][0], city=item[1][1], district=item[1][2])
temp_df = temp_df.append(small_df, ignore_index=True)
return temp_df
def epidemic_trip():
"""
    Confirmed novel pneumonia patients - same-trip lookup tool
    https://rl.inews.qq.com/h5/trip?from=newsapp&ADTAG=tgi.wx.share.message
    :return: all historical data from the same-trip lookup tool for confirmed patients
:rtype: pandas.DataFrame
"""
url = "https://rl.inews.qq.com/taf/travelFront"
res = requests.get(url)
return pd.DataFrame(res.json()["data"]["list"])
def epidemic_hist_all():
"""
    NCP data at fine, prefecture-city granularity
    https://github.com/norratek/Ncov2020HistoryData
    https://docs.google.com/spreadsheets/d/1JNQnFYJpR7PxQo5K5lwXTuE0F6jprhMXuy7DPnV9H90/edit#gid=0
    :return: daily historical data
:rtype: pandas.DataFrame
"""
url = "http://ncov.nosensor.com:8080/api/"
data_json = requests.get(url).json()
big_df = pd.DataFrame()
for item in range(0, len(data_json["city"])):
print(data_json["city"][item]["Time"])
temp_df = pd.DataFrame(data_json["city"][item]["CityDetail"])
temp_df["date"] = data_json["city"][item]["Time"]
big_df = big_df.append(temp_df)
return big_df
def epidemic_hist_city(city="武汉"):
"""
https://github.com/norratek/Ncov2020HistoryData
https://docs.google.com/spreadsheets/d/1JNQnFYJpR7PxQo5K5lwXTuE0F6jprhMXuy7DPnV9H90/edit#gid=0
    :return: daily historical data
:rtype: pandas.DataFrame
"""
url = f"http://ncov.nosensor.com:8080/api/city={city}"
data_json = requests.get(url).json().get("city", None)
return pd.DataFrame(data_json)
def epidemic_hist_province(province="湖北"):
"""
https://github.com/norratek/Ncov2020HistoryData
https://docs.google.com/spreadsheets/d/1JNQnFYJpR7PxQo5K5lwXTuE0F6jprhMXuy7DPnV9H90/edit#gid=0
    :return: daily historical data
:rtype: pandas.DataFrame
"""
url = f"http://ncov.nosensor.com:8080/api/province={province}"
data_json = requests.get(url).json().get("province", None)
return | pd.DataFrame(data_json) | pandas.DataFrame |
# Copyright 2019 <NAME> GmbH
# Copyright 2020-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS 2 data model."""
import numpy as np
import pandas as pd
from . import DataModel
from . import DataModelIntermediateStorage
class Ros2DataModel(DataModel):
"""
Container to model pre-processed ROS 2 data for analysis.
This aims to represent the data in a ROS 2-aware way.
"""
def __init__(self) -> None:
"""Create a Ros2DataModel."""
super().__init__()
# Objects (one-time events, usually when something is created)
self._contexts: DataModelIntermediateStorage = []
self._nodes: DataModelIntermediateStorage = []
self._rmw_publishers: DataModelIntermediateStorage = []
self._rcl_publishers: DataModelIntermediateStorage = []
self._rmw_subscriptions: DataModelIntermediateStorage = []
self._rcl_subscriptions: DataModelIntermediateStorage = []
self._subscription_objects: DataModelIntermediateStorage = []
self._services: DataModelIntermediateStorage = []
self._clients: DataModelIntermediateStorage = []
self._timers: DataModelIntermediateStorage = []
self._timer_node_links: DataModelIntermediateStorage = []
self._callback_objects: DataModelIntermediateStorage = []
self._callback_symbols: DataModelIntermediateStorage = []
self._lifecycle_state_machines: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
self._rclcpp_publish_instances: DataModelIntermediateStorage = []
self._rcl_publish_instances: DataModelIntermediateStorage = []
self._rmw_publish_instances: DataModelIntermediateStorage = []
self._rmw_take_instances: DataModelIntermediateStorage = []
self._rcl_take_instances: DataModelIntermediateStorage = []
self._rclcpp_take_instances: DataModelIntermediateStorage = []
self._callback_instances: DataModelIntermediateStorage = []
self._lifecycle_transitions: DataModelIntermediateStorage = []
def add_context(
self, context_handle, timestamp, pid, version
) -> None:
self._contexts.append({
'context_handle': context_handle,
'timestamp': timestamp,
'pid': pid,
'version': version,
})
def add_node(
self, node_handle, timestamp, tid, rmw_handle, name, namespace
) -> None:
self._nodes.append({
'node_handle': node_handle,
'timestamp': timestamp,
'tid': tid,
'rmw_handle': rmw_handle,
'name': name,
'namespace': namespace,
})
def add_rmw_publisher(
self, handle, timestamp, gid,
) -> None:
self._rmw_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_publisher(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_publish_instance(
self, timestamp, message,
) -> None:
self._rclcpp_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rcl_publish_instance(
self, publisher_handle, timestamp, message,
) -> None:
self._rcl_publish_instances.append({
'publisher_handle': publisher_handle,
'timestamp': timestamp,
'message': message,
})
def add_rmw_publish_instance(
self, timestamp, message,
) -> None:
self._rmw_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rmw_subscription(
self, handle, timestamp, gid
) -> None:
self._rmw_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_subscription(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_subscription(
self, subscription_pointer, timestamp, subscription_handle
) -> None:
self._subscription_objects.append({
'subscription': subscription_pointer,
'timestamp': timestamp,
'subscription_handle': subscription_handle,
})
def add_service(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._services.append({
            'service_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_client(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._clients.append({
'client_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_timer(
self, handle, timestamp, period, tid
) -> None:
self._timers.append({
'timer_handle': handle,
'timestamp': timestamp,
'period': period,
'tid': tid,
})
def add_timer_node_link(
self, handle, timestamp, node_handle
) -> None:
self._timer_node_links.append({
'timer_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
})
def add_callback_object(
self, reference, timestamp, callback_object
) -> None:
self._callback_objects.append({
'reference': reference,
'timestamp': timestamp,
'callback_object': callback_object,
})
def add_callback_symbol(
self, callback_object, timestamp, symbol
) -> None:
self._callback_symbols.append({
'callback_object': callback_object,
'timestamp': timestamp,
'symbol': symbol,
})
def add_callback_instance(
self, callback_object, timestamp, duration, intra_process
) -> None:
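        # stored with nanosecond resolution so pandas exposes these columns as datetime64[ns]/timedelta64[ns]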
self._callback_instances.append({
'callback_object': callback_object,
'timestamp': np.datetime64(timestamp, 'ns'),
'duration': np.timedelta64(duration, 'ns'),
'intra_process': intra_process,
})
def add_rmw_take_instance(
self, subscription_handle, timestamp, message, source_timestamp, taken
) -> None:
self._rmw_take_instances.append({
'subscription_handle': subscription_handle,
'timestamp': timestamp,
'message': message,
'source_timestamp': source_timestamp,
'taken': taken,
})
def add_rcl_take_instance(
self, timestamp, message
) -> None:
self._rcl_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rclcpp_take_instance(
self, timestamp, message
) -> None:
self._rclcpp_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_lifecycle_state_machine(
self, handle, node_handle
) -> None:
self._lifecycle_state_machines.append({
'state_machine_handle': handle,
'node_handle': node_handle,
})
def add_lifecycle_state_transition(
self, state_machine_handle, start_label, goal_label, timestamp
) -> None:
self._lifecycle_transitions.append({
'state_machine_handle': state_machine_handle,
'start_label': start_label,
'goal_label': goal_label,
'timestamp': timestamp,
})
def _finalize(self) -> None:
# Some of the lists of dicts might be empty, and setting
# the index for an empty dataframe leads to an error
self.contexts = pd.DataFrame.from_dict(self._contexts)
if self._contexts:
self.contexts.set_index('context_handle', inplace=True, drop=True)
self.nodes = pd.DataFrame.from_dict(self._nodes)
if self._nodes:
self.nodes.set_index('node_handle', inplace=True, drop=True)
self.rmw_publishers = pd.DataFrame.from_dict(self._rmw_publishers)
if self._rmw_publishers:
self.rmw_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rcl_publishers = pd.DataFrame.from_dict(self._rcl_publishers)
if self._rcl_publishers:
self.rcl_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rmw_subscriptions = pd.DataFrame.from_dict(self._rmw_subscriptions)
if self._rmw_subscriptions:
self.rmw_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.rcl_subscriptions = pd.DataFrame.from_dict(self._rcl_subscriptions)
if self._rcl_subscriptions:
self.rcl_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.subscription_objects = pd.DataFrame.from_dict(self._subscription_objects)
if self._subscription_objects:
self.subscription_objects.set_index('subscription', inplace=True, drop=True)
self.services = pd.DataFrame.from_dict(self._services)
if self._services:
self.services.set_index('service_handle', inplace=True, drop=True)
self.clients = pd.DataFrame.from_dict(self._clients)
if self._clients:
self.clients.set_index('client_handle', inplace=True, drop=True)
self.timers = pd.DataFrame.from_dict(self._timers)
if self._timers:
self.timers.set_index('timer_handle', inplace=True, drop=True)
self.timer_node_links = pd.DataFrame.from_dict(self._timer_node_links)
if self._timer_node_links:
self.timer_node_links.set_index('timer_handle', inplace=True, drop=True)
self.callback_objects = pd.DataFrame.from_dict(self._callback_objects)
if self._callback_objects:
self.callback_objects.set_index('reference', inplace=True, drop=True)
self.callback_symbols = | pd.DataFrame.from_dict(self._callback_symbols) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 5 15:07:15 2021
@author: ali_d
"""
#Pandas
import pandas as pd
import numpy as np
from numpy.random import randn
# Pandas Series
data = np.array(["a","b","c","d"])
series = pd.Series(data,[100,101,102,103])
print(type(series))
#%%
DataDict = {"gorgi":35,"jake":95}
#
a=pd.Series(DataDict)
print(a)
print()
#
DataDict2 = {"fenole":70,"antre":75}
b= | pd.Series(DataDict2) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 11:30:59 2021
@author: lenakilian
"""
import numpy as np
import pandas as pd
df = pd.DataFrame
def import_lcfs_income(year, dvhh_file, dvper_file):
idx = {}; idx['person'] = {}; idx['hhld'] = {}
idx['person']['to_keep'] = ['person', 'a012p', 'a013p']
idx['person']['new_name'] = ['person_no', 'ethnicity_hrp', 'ethnicity partner hrp', 'income tax']
idx['person']['dict'] = dict(zip(idx['person']['to_keep'], idx['person']['new_name']))
idx['hhld']['to_keep'] = ['weighta', 'p396p', 'sexhrp']
idx['hhld']['new_name'] = ['weight', 'age HRP', 'sex HRP']
idx['hhld']['dict'] = dict(zip(idx['hhld']['to_keep'], idx['hhld']['new_name']))
dvhh = | pd.read_csv(dvhh_file, sep='\t', index_col=0) | pandas.read_csv |
import os, sys
import pandas as pd
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
sys.path.insert(0, os.path.abspath(".."))
from src.utils.utils import load_df, save_df
def load_transformations(path):
return load_df(path)
def crea_timestamp(df):
df['hora_creacion_copia'] = df['hora_creacion']
df['fecha_creacion_copia'] = df['fecha_creacion']
df['hora_creacion_copia'] = df['hora_creacion_copia'].astype('str')
df['fecha_creacion_copia'] = df['fecha_creacion_copia'].astype('str')
df['timestamp_creacion'] = df['fecha_creacion_copia']+' '+df['hora_creacion_copia']
# convert the 'Date' column to datetime format
df['timestamp_creacion']= pd.to_datetime(df['timestamp_creacion'])
df.drop(columns = ['hora_creacion_copia','fecha_creacion_copia'], inplace = True)
return df
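# --- Editor's hedged sketch (not part of the original script) ---------------------------
# Illustrates what crea_timestamp does on a tiny made-up frame: the date and hour strings
# are concatenated and parsed once with pd.to_datetime. The column values are assumptions.
def _demo_crea_timestamp():
    demo = pd.DataFrame({'fecha_creacion': ['2020-01-31'], 'hora_creacion': ['13:45:00']})
    demo = crea_timestamp(demo)
    return demo['timestamp_creacion']  # datetime64[ns]: 2020-01-31 13:45:00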
def generate_day_type(df):
# 1 - indicates a day with a high number of calls to C5
# 0 - indicates a day with a low number of calls to C5
# thresholds obtained from the historical data
df['tipo_dia'] = df['dia_semana'].apply(lambda x: 1 if x == 'jueves' or x == 'viernes' or x == 'sabado' else 0)
return df
def generate_trimestres(df):
df['trimestre'] = np.ceil(df['mes'] / 3)
df['trimestre'] = df['trimestre'].apply(lambda x: int(x))
TRIMESTRES = 4
df['sin_trim'] = np.sin(2 * np.pi * df.trimestre / TRIMESTRES)
df['cos_trim'] = np.cos(2 * np.pi * df.trimestre / TRIMESTRES)
df = df.astype({"trimestre":'category'})
return df
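# --- Editor's hedged sketch (illustration only, not from the original pipeline) ---------
# The sin/cos pair above is a standard cyclical encoding: quarter 4 ends up adjacent to
# quarter 1 on the unit circle instead of far away on a linear scale. Sample months assumed.
def _demo_cyclical_quarters():
    demo = pd.DataFrame({'mes': [1, 4, 7, 10]})
    demo = generate_trimestres(demo)
    return demo[['trimestre', 'sin_trim', 'cos_trim']]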
def generate_llamada(df):
search_string = "llamada"
df['llamada'] = df['tipo_entrada'].apply(lambda x: 1 if search_string in x else 0)
return df
def rename_incidente_c4(df):
df.loc[df["incidente_c4"].str.contains('accidente-', case=False, na=None), "incidente_c4"] = 'accidente'
df.loc[df["incidente_c4"].str.contains('cadaver-', case=False, na=None), "incidente_c4"] = 'cadaver'
df.loc[df["incidente_c4"].str.contains('sismo-', case=False, na=None), "incidente_c4"] = 'sismo'
df.loc[df["incidente_c4"].str.contains('mi ciudad-', case=False, na=None), "incidente_c4"] = 'mi ciudad'
df.loc[df["incidente_c4"].str.contains('detencion ciudadana-', case=False, na=None), "incidente_c4"] = 'detencion ciudadana'
df.loc[df["incidente_c4"].str.contains('lesionado-', case=False, na=None), "incidente_c4"] = 'lesionado'
return df
def feature_generation(df):
df = crea_timestamp(df)
df = generate_day_type(df)
df = generate_trimestres(df)
df = generate_llamada(df)
df = rename_incidente_c4(df)
# one-hot encode delegacion_inicio and incidente_c4
transformers = [('one_hot', OneHotEncoder(handle_unknown='ignore'), ['delegacion_inicio','incidente_c4'])]
col_trans = ColumnTransformer(transformers, remainder="passthrough", n_jobs=-1, verbose=True)
col_trans.fit(df)
output_vars = col_trans.transform(df)
feature_names=col_trans.get_feature_names()
final_df = pd.DataFrame(output_vars)
final_df.columns = feature_names
final_df.columns = final_df.columns.str.replace('one_hot__x0_', '')
final_df.columns = final_df.columns.str.replace('one_hot__x1_', '')
final_df.columns = final_df.columns.str.replace(' ', '_')
final_df.columns = final_df.columns.str.replace('.', '', regex=False)  # literal '.', not a regex wildcard
df = pd.DataFrame(final_df)
return df, col_trans
def feature_selection(df):
print(df.columns)
df = df.astype({"label":'category'})
X = df.copy()
y = X.label.values
columnas_quitar = ['label','timestamp_creacion','fecha_creacion','hora_creacion','dia_semana','tipo_entrada']
X.drop(columns = columnas_quitar,inplace = True)
print('starting feature selection modelling:')
# we will use a Random Forest (RF)
classifier = RandomForestClassifier(oob_score=True, random_state=1234)
hyper_param_grid = {'n_estimators': [100,500],
'max_depth': [2,5,10],
'min_samples_split': [2], 'max_features':[10,20]
}
tscv = TimeSeriesSplit(max_train_size=None, n_splits=3)
gs = GridSearchCV(classifier,
hyper_param_grid,
scoring = 'precision',
cv = tscv,
n_jobs = -1,return_train_score= True, verbose = True )
gs.fit(X, y)
datos_df_importancias = gs.best_estimator_.feature_importances_
columnas_df_importancias = X.columns
dataframe_importancias= pd.DataFrame(data = datos_df_importancias )
dataframe_importancias['feature']=columnas_df_importancias
dataframe_importancias.columns = ['importancia','feature']
dataframe_importancias = dataframe_importancias.sort_values('importancia',ascending=False)
dataframe_importancias =dataframe_importancias.loc[dataframe_importancias['importancia']>.069]
lista_features_a_mantener = dataframe_importancias['feature']
print('The list of features to keep is:')
print(lista_features_a_mantener)
print('total number of features:')
print(len(lista_features_a_mantener))
print('the final importance of each feature was:')
print(dataframe_importancias)
print('The hyperparameters of the best model are:')
print(gs.best_estimator_.get_params)
df_importancias = dataframe_importancias.copy()
lista_features = lista_features_a_mantener
mejor_modelo = gs.best_estimator_
return (df_importancias, lista_features, mejor_modelo)
def test_fe_pickle_generator(input_path, output_path, col_transformer):
# loading the train data frame
df = load_df(input_path)
# generating the shared features
df = crea_timestamp(df)
df = generate_day_type(df)
df = generate_trimestres(df)
df = generate_llamada(df)
df = rename_incidente_c4(df)
# generating the features fitted on the train set
output_vars = col_transformer.transform(df)
feature_names = col_transformer.get_feature_names()
# building the transformed dataframe
final_df = | pd.DataFrame(output_vars) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import CategoricalIndex, Index
import pandas._testing as tm
class TestMap:
@pytest.mark.parametrize(
"data, categories",
[
(list("abcbca"), list("cab")),
(pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
],
ids=["string", "interval"],
)
def test_map_str(self, data, categories, ordered):
# GH 31202 - override base class since we want to maintain categorical/ordered
index = CategoricalIndex(data, categories=categories, ordered=ordered)
result = index.map(str)
expected = CategoricalIndex(
map(str, data), categories=map(str, categories), ordered=ordered
)
tm.assert_index_equal(result, expected)
def test_map(self):
ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_index_equal(result, exp)
ci = pd.CategoricalIndex(
list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(
list("ababc"), categories=list("bac"), ordered=False, name="XXX"
)
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(
ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
)
# change categories dtype
ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
def f(x):
return {"A": 10, "B": 20, "C": 30}.get(x)
result = ci.map(f)
exp = pd.CategoricalIndex(
[10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
)
tm.assert_index_equal(result, exp)
result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
tm.assert_index_equal(result, exp)
result = ci.map({"A": 10, "B": 20, "C": 30})
tm.assert_index_equal(result, exp)
def test_map_with_categorical_series(self):
# GH 12756
a = pd.Index([1, 2, 3, 4])
b = | pd.Series(["even", "odd", "even", "odd"], dtype="category") | pandas.Series |
from slytherin import remove_non_alphanumeric
from chronometry import to_date_part as lp_to_date_part
from chronometry import yearmonth_to_date as lp_yearmonth_to_date
from pandas import to_datetime
def to_day(date):
try:
return | to_datetime(date) | pandas.to_datetime |
import gc
from logging import warning
from typing import Optional, Union, List, Tuple, Dict, Callable
import numpy as np
import pandas as pd
from numpy import frombuffer, ndarray
from rdkit.Avalon.pyAvalonTools import GetAvalonFP
from rdkit.Chem import (AddHs, AssignStereochemistry, Descriptors, FindMolChiralCenters, FindPotentialStereoBonds,
Kekulize, LayeredFingerprint, MolFromSmiles, MolToSmiles, PathToSubmol, PatternFingerprint,
RDKFingerprint, RemoveHs, SanitizeMol)
from rdkit.Chem.AllChem import GetErGFingerprint, GetHashedAtomPairFingerprintAsBitVect as AtomPair, \
GetMACCSKeysFingerprint, GetHashedTopologicalTorsionFingerprintAsBitVect as TTorsion, \
GetMorganFingerprintAsBitVect as Morgan
from rdkit.Chem.rdMHFPFingerprint import MHFPEncoder
from rdkit.Chem.rdMolDescriptors import (CalcNumAromaticRings, CalcNumAtomStereoCenters, CalcNumHBA, CalcNumHBD,
CalcNumRings, CalcNumRotatableBonds, CalcNumUnspecifiedAtomStereoCenters)
from rdkit.Chem.rdchem import (Mol, RWMol)
from rdkit.Chem.rdmolops import FindAtomEnvironmentOfRadiusN as SearchAtomEnv
from rdkit.DataStructs import ExplicitBitVect, SparseBitVect
from .Dataset import FeatureData, allocateFeatureLocation
from .DatasetAPI import DatasetAPI
from .Preprocessing import inputFullCheck, inputFastCheck, inputCheckRange, inputCheckIterableInRange, \
measureExecutionTime, GetIndexOnNonSortedData, GetRemainingIndexFromLimit, RemoveExtension, ReadFile, FixPath, \
EvaluateInputPosition, SortWithIdx, ExportFile, OptimizeIntegerDatatypeByShape
from .config import DATA_FRAMEWORK as dFramework
from .coreConfig import INPUT_FRAMEWORK as iFramework
# Global variables helps for fast indexing rather than create object in memory (reduce memory footprint)
MAIN_ERROR = (ValueError, IndexError, TypeError, KeyError, RuntimeError)
bitVectorDtype = Optional[Union[ExplicitBitVect, SparseBitVect]]
_getEmptyList_: Callable = lambda *args, **kwargs: []
def checkEquivalent(FragMolX: Union[Mol, RWMol], FragMolY: Union[Mol, RWMol], FragArr1: List[Union[Mol, RWMol]],
FragArr2: List[Union[Mol, RWMol]], rematch: bool = True) -> bool:
"""
Implementation of substructure matching using RDKit. Note that the position of cross-referencing
must be the same companion
:param FragMolX: The first (1st) fragment used for identification. Order can be swapped with FragMolY.
:type FragMolX: Mol
:param FragMolY: The second (2nd) fragment used for identification. Order can be swapped with FragMolX.
:type FragMolY: Mol
:param FragArr1: A list of molecule used for comparison
:type FragArr1: Mol
:param FragArr2: A list of molecule used for comparison
:type FragArr2: Mol
:param rematch: Whether to make reversing validation
:return: bool
"""
# Hyper-parameter Verification
if True:
inputFullCheck(value=FragArr1, name='FragArr1', dtype='List')
inputFullCheck(value=FragArr2, name='FragArr2', dtype='List')
if not FragArr1 or not FragArr2:
raise ValueError("Empty List has been found")
elif len(FragArr1) != len(FragArr2):
raise ValueError(f"Two lists are not totally equivalent with {len(FragArr1)} vs {len(FragArr2)}")
size: int = len(FragArr1)
FragMolX = AddHs(MolFromSmiles(FragMolX)) if isinstance(FragMolX, str) else AddHs(FragMolX)
FragMolY = AddHs(MolFromSmiles(FragMolY)) if isinstance(FragMolY, str) else AddHs(FragMolY)
for i in range(size):
if isinstance(FragArr1[i], str):
FragArr1[i]: Mol = AddHs(MolFromSmiles(FragArr1[i]))
if isinstance(FragArr2[i], str):
FragArr2[i]: Mol = AddHs(MolFromSmiles(FragArr2[i]))
inputFullCheck(value=rematch, name='rematch', dtype='bool')
result: bool = False
order: Optional[bool] = None # True if FragMolX ~ FragArr2 ; False if FragMolX ~ FragArr1
for i in range(size):
# Opposite (Indirect) Comparison ---> Forward (Direct) Comparison
# Fast Mode
if FragMolX.HasSubstructMatch(FragArr2[i], False, False, False) and \
FragMolY.HasSubstructMatch(FragArr1[i], False, False, False):
result, order = True, True
break
if FragMolX.HasSubstructMatch(FragArr1[i], False, False, False) and \
FragMolY.HasSubstructMatch(FragArr2[i], False, False, False):
result, order = True, False
break
if FragMolX.HasSubstructMatch(FragArr2[i], True, True, True) and \
FragMolY.HasSubstructMatch(FragArr1[i], True, True, True):
result, order = True, True
break
if FragMolX.HasSubstructMatch(FragArr1[i], True, True, True) and \
FragMolY.HasSubstructMatch(FragArr2[i], True, True, True):
result, order = True, False
break
if not rematch:
return result
# To make a reverse check, we have to guarantee that the molecule combination must be True to be forward,
# else discard result
if not result:
return False
if order is None:
for i in range(size):
if FragArr2[i].HasSubstructMatch(FragMolX, False, False, False) and \
FragArr1[i].HasSubstructMatch(FragMolY, False, False, False):
return True
if FragArr2[i].HasSubstructMatch(FragMolX, True, True, True) and \
FragArr1[i].HasSubstructMatch(FragMolY, True, True, True):
return True
if FragArr2[i].HasSubstructMatch(FragMolY, False, False, False) and \
FragArr1[i].HasSubstructMatch(FragMolX, False, False, False):
return True
if FragArr2[i].HasSubstructMatch(FragMolY, True, True, True) and \
FragArr1[i].HasSubstructMatch(FragMolX, True, True, True):
return True
else:
for i in range(size):
if order is True:
if FragArr2[i].HasSubstructMatch(FragMolX, False, False, False) and \
FragArr1[i].HasSubstructMatch(FragMolY, False, False, False):
return True
if FragArr2[i].HasSubstructMatch(FragMolX, True, True, True) and \
FragArr1[i].HasSubstructMatch(FragMolY, True, True, True):
return True
else:
if FragArr2[i].HasSubstructMatch(FragMolY, False, False, False) and \
FragArr1[i].HasSubstructMatch(FragMolX, False, False, False):
return True
if FragArr2[i].HasSubstructMatch(FragMolY, True, True, True) and \
FragArr1[i].HasSubstructMatch(FragMolX, True, True, True):
return True
return False
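# --- Editor's hedged usage sketch (not from the original module) ------------------------
# Checks that a hydroxyl/methyl radical pair matches a reference pair regardless of which
# fragment is listed first. The SMILES strings are illustrative assumptions.
def _demo_check_equivalent() -> bool:
    return checkEquivalent(FragMolX="[OH]", FragMolY="[CH3]",
                           FragArr1=["[CH3]"], FragArr2=["[OH]"])  # expected: True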
def getBondIndex(ParentMol: Union[Mol, str], FragMolX: Union[Mol, str], FragMolY: Union[Mol, str], current: int = 0,
maxBonds: int = None, verbose: bool = False, rematch: bool = True) -> int:
"""
Implementation of retrieve the bond index of parent mol using 2 pre-defined radicals.
:param ParentMol: The molecule use for bond-breaking searching
:type ParentMol: Mol or string
:param FragMolX: The first (1st) fragment used for identification. Order can be swapped with FragMolY.
:type FragMolX: Mol or string
:param FragMolY: The second (2nd) fragment used for identification. Order can be swapped with FragMolX.
:type FragMolY: Mol or string
:param current: The starting position to start searching (Default to be 0)
:type current: int
:param maxBonds: The maximum bond inside the molecule (Default to be None)
:type maxBonds: int
:param verbose: Whether to show up progress meters (Default to be False)
:type verbose: bool
:param rematch: Whether to make reversing validation (Attached to checkEquivalent())
:type rematch: bool
:return: int
"""
# Hyper-parameter Verification
if True:
if isinstance(ParentMol, str):
ParentMol: Union[Mol, RWMol, str] = MolFromSmiles(ParentMol)
ParentMol: Union[Mol, RWMol, str] = AddHs(ParentMol)
if isinstance(FragMolX, str):
FragX, FragMolX = str(FragMolX), AddHs(MolFromSmiles(FragMolX))
else:
FragX, FragMolX = MolToSmiles(MolFromSmiles(MolToSmiles(FragMolX))), AddHs(FragMolX)
if isinstance(FragMolY, str):
FragY, FragMolY = str(FragMolY), AddHs(MolFromSmiles(FragMolY))
else:
FragY, FragMolY = MolToSmiles(MolFromSmiles(MolToSmiles(FragMolY))), AddHs(FragMolY)
inputCheckRange(value=current, name='current', maxValue=ParentMol.GetNumBonds() + 1, minValue=0)
if maxBonds is None:
maxBonds: int = ParentMol.GetNumBonds()
else:
inputCheckRange(value=maxBonds, name='maxBonds', maxValue=ParentMol.GetNumBonds() + 1, minValue=current)
inputFullCheck(value=verbose, name='verbose', dtype='bool')
inputFullCheck(value=rematch, name='rematch', dtype='bool')
cache: str = dFramework["Bond Type Cache"][0]
for idx in range(current, maxBonds):
bond = ParentMol.GetBondWithIdx(idx)
if bond.IsInRing() or str(bond.GetBondType()) != cache:
continue
TempMol: RWMol = RWMol(ParentMol)
BeginAtom, EndingAtom = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
TempMol.RemoveBond(BeginAtom, EndingAtom)
TempMol.GetAtomWithIdx(BeginAtom).SetNoImplicit(True)
TempMol.GetAtomWithIdx(EndingAtom).SetNoImplicit(True)
# Call SanitizeMol to update radicals (Used when kekulize before)
SanitizeMol(TempMol)
# Convert the 2 molecules into a SMILES string
Smi1A, Smi1B = sorted(MolToSmiles(TempMol).split("."))
Mol1A, Mol1B = AddHs(MolFromSmiles(Smi1A)), AddHs(MolFromSmiles(Smi1B))
Smi2A, Smi2B = MolToSmiles(MolFromSmiles(Smi1A), isomericSmiles=False), \
MolToSmiles(MolFromSmiles(Smi1B), isomericSmiles=False)
Mol2A, Mol2B = AddHs(MolFromSmiles(Smi2A)), AddHs(MolFromSmiles(Smi2B))
Smi3A, Smi3B = MolToSmiles(MolFromSmiles(Smi1A), isomericSmiles=True), \
MolToSmiles(MolFromSmiles(Smi1B), isomericSmiles=True)
Mol3A, Mol3B = AddHs(MolFromSmiles(Smi3A)), AddHs(MolFromSmiles(Smi3B))
Smi4A, Smi4B = MolToSmiles(RemoveHs(MolFromSmiles(Smi1A))), \
MolToSmiles(RemoveHs(MolFromSmiles(Smi1B)))
Mol4A, Mol4B = AddHs(MolFromSmiles(Smi4A)), AddHs(MolFromSmiles(Smi4B))
if verbose:
print("Current Bond Index:", idx)
print("SMILES 1A:", Smi1A, "- SMILES 1B:", Smi1B)
print("SMILES 2A:", Smi2A, " SMILES 2B:", Smi2B)
print("SMILES 3A:", Smi3A, "- SMILES 3B:", Smi3B)
print("SMILES 4A:", Smi4A, "- SMILES 4B:", Smi4B)
print("-" * 40)
if checkEquivalent(FragMolX=FragMolX, FragMolY=FragMolY, FragArr1=[Mol1A, Mol2A, Mol3A, Mol4A],
FragArr2=[Mol1B, Mol2B, Mol3B, Mol4B], rematch=rematch):
return idx
if FragX in (Smi1A, Smi2A, Smi3A, Smi4A) and FragY in (Smi1B, Smi2B, Smi3B, Smi4B):
return idx
if FragY in (Smi1A, Smi2A, Smi3A, Smi4A) and FragX in (Smi1B, Smi2B, Smi3B, Smi4B):
return idx
return -1
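# --- Editor's hedged usage sketch (not from the original module) ------------------------
# Finds which single bond of ethanol, when broken homolytically, yields a methyl radical
# and a hydroxymethyl radical. The SMILES inputs are illustrative assumptions.
def _demo_get_bond_index() -> int:
    return getBondIndex(ParentMol="CCO", FragMolX="[CH3]", FragMolY="[CH2]O")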
def getBondIndexByAtom(ParentMol: Union[Mol, str], atomicIndex: Union[Tuple, List, ndarray],
doubleTriple: bool = False) -> Tuple[List, List]:
"""
Implementation of retrieving bond index from atomic index
:param ParentMol: The molecule use for bond-breaking searching
:type ParentMol: Mol or string
:param atomicIndex: The atomic index
:type atomicIndex: Union[Tuple, List, ndarray]
:param doubleTriple: Whether to allow double bonds - triple bonds into breakable mode (default to be False)
:type doubleTriple: bool
:return: Tuple
"""
# Hyper-parameter Verification
if True:
if isinstance(ParentMol, str):
ParentMol: Union[Mol, RWMol] = MolFromSmiles(ParentMol)
ParentMol: Union[Mol, RWMol] = AddHs(ParentMol)
if isinstance(atomicIndex, (List, Tuple)):
atomicIndex = np.asarray(atomicIndex)
if atomicIndex.ndim == 1:
if len(atomicIndex) == 2:
atomicIndex = np.atleast_2d(atomicIndex)
elif len(atomicIndex) % 2 == 0:
atomicIndex = atomicIndex.reshape(len(atomicIndex) // 2, 2)
if atomicIndex.ndim == 2 and atomicIndex.size == atomicIndex.shape[0]:
atomicIndex = atomicIndex.reshape(len(atomicIndex) // 2, 2)
if atomicIndex.shape[1] != 2:
raise ValueError(f"atomicIndex must be a 2D-array with 2 features of data {atomicIndex.shape}")
inputFullCheck(value=doubleTriple, name='doubleTriple', dtype='bool')
cache: str = dFramework["Bond Type Cache"][0]
FalseLine: List[List] = [[], []]
array: List[Optional[int]] = [None] * len(atomicIndex)  # pre-allocate so per-index assignment below is valid
for index, atomIdx in enumerate(atomicIndex):
try:
bond = ParentMol.GetBondBetweenAtoms(*atomIdx)
except MAIN_ERROR:
FalseLine[0].append(index)
FalseLine[1].append(atomIdx)
array[index] = None
continue
if bond.IsInRing() or (not doubleTriple and str(bond.GetBondType()) != cache):
FalseLine[0].append(index)
FalseLine[1].append(atomIdx)
array[index] = None
continue
array[index] = bond.GetIdx()
return array, FalseLine
def getRadicalsByBondIdx(ParentMol: Union[Mol, str], bondIdx: Optional[Union[int, ndarray, List[int]]] = None,
simplifyHydro: bool = True, reverse: bool = False, doubleTriple: bool = False) -> Tuple:
"""
Implementation of retrieving radicals via bond index. If bond index is single value
(a.k.a python integer, numpy.integer)
:param ParentMol: The molecule use for bond-breaking searching
:type ParentMol: Mol or string
:param bondIdx: Bond Index Value according to ParentMol. If None,
it will try to identified all breakable single bonds
:type bondIdx: int or ndarray or tuple or list
:param simplifyHydro: Whether to not include normal Hydro in the SMILES (Default to be True)
:type simplifyHydro: bool
:param reverse: Whether to swap the position of radicals (Default to be False)
:type reverse: bool
:param doubleTriple: Whether to allow double bonds - triple bonds into breakable mode (default to be False)
:type doubleTriple: bool
:return: List[Tuple[object]] or List[List[object]] ndarray
"""
if True:
if isinstance(ParentMol, str):
ParentMol: Union[Mol, RWMol] = MolFromSmiles(ParentMol)
ParentMol: Union[Mol, RWMol] = AddHs(ParentMol)
max_bonds: int = ParentMol.GetNumBonds()
if bondIdx is not None:
if isinstance(bondIdx, (int, np.integer)):
bondIdx = [int(bondIdx)]
elif isinstance(bondIdx, Tuple):
bondIdx = list(bondIdx)
elif isinstance(bondIdx, ndarray):
bondIdx = bondIdx.ravel().tolist()
for index in range(len(bondIdx)):
if not inputFastCheck(bondIdx[index], 'int'):
bondIdx[index] = int(bondIdx[index])
inputCheckRange(value=min(bondIdx), name='min(bondIdx)', maxValue=max_bonds, minValue=0)
inputCheckRange(value=max(bondIdx), name='max(bondIdx)', maxValue=max_bonds, minValue=0)
else:
bondIdx = [i for i in range(0, max_bonds)]
inputFullCheck(value=reverse, name='reverse', dtype='bool')
inputFullCheck(value=doubleTriple, name='doubleTriple', dtype='bool')
array: ndarray = np.zeros(shape=(len(bondIdx), 3), dtype=np.object_)
NULL: List = [None] * array.shape[1]
FalseLine: List[List] = [[], []]
cache: str = dFramework["Bond Type Cache"][0]
for index, bond_index in enumerate(bondIdx):
if index != 0 and bondIdx[index] == bondIdx[index - 1]:
    # duplicated bond index: reuse the previous row with the two fragments swapped
    array[index] = [array[index - 1, 1], array[index - 1, 0], array[index - 1, 2]]
    continue
if bond_index is None:
array[index] = NULL
FalseLine[0].append(index)
FalseLine[1].append(bond_index)
continue
bond = ParentMol.GetBondWithIdx(bond_index)
if bond.IsInRing() or (not doubleTriple and str(bond.GetBondType()) != cache):
array[index] = NULL
FalseLine[0].append(index)
FalseLine[1].append(bond_index)
continue
TempMol: RWMol = RWMol(ParentMol)
BeginAtom, EndAtom = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
TempMol.RemoveBond(BeginAtom, EndAtom)
TempMol.GetAtomWithIdx(BeginAtom).SetNoImplicit(True)
TempMol.GetAtomWithIdx(EndAtom).SetNoImplicit(True)
SanitizeMol(TempMol) # Call SanitizeMol to update radicals
FragA, FragB = sorted(MolToSmiles(TempMol).split("."))
atomType = sorted([bond.GetBeginAtom().GetSymbol(), bond.GetEndAtom().GetSymbol()])  # BeginAtom/EndAtom above are indices, not Atom objects
if simplifyHydro:
FragA, FragB = MolToSmiles(MolFromSmiles(FragA)), MolToSmiles(MolFromSmiles(FragB))
if reverse:
FragA, FragB = FragB, FragA
array[index] = [FragA, FragB, f"{atomType[0]}-{atomType[1]}"]
return array, FalseLine
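# --- Editor's hedged usage sketch (not from the original module) ------------------------
# Enumerates the radical pairs produced by breaking every eligible single bond of ethanol,
# assuming dFramework["Bond Type Cache"] marks SINGLE bonds as breakable.
def _demo_get_radicals():
    fragments, skipped = getRadicalsByBondIdx(ParentMol="CCO")
    # each row of `fragments` is [radical_A_smiles, radical_B_smiles, "atom-atom"]
    return fragments, skipped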
def getStereoChemistryFast(mol: Mol, useLegacyImplementation: bool = True) -> int:
if not useLegacyImplementation:
AssignStereochemistry(mol, flagPossibleStereoCenters=True, force=True)
FindPotentialStereoBonds(mol)
INVALID: str = "?"
centers = FindMolChiralCenters(mol, includeUnassigned=True, useLegacyImplementation=useLegacyImplementation)
check: bool = False
for center in centers:
if center[1] == INVALID:
if check:
return 0
check = True
INVALID: str = "ANY"
bonds = mol.GetBonds()
for bond in bonds:
if str(bond.GetStereo())[6:] == INVALID:
return 0
return 1
def getStereoChemistry(mol: Mol, useLegacyImplementation: bool = True) -> Tuple[dict, int]:
"""
Implementation of molecule stereo-chemistry: Check the given SMILES string to determine whether accurate
enthalpies can be calculated with the given stereo-chemistry information. Inspired by <NAME> (2020)
:param mol: The referenced molecule used for validation
:type mol: Mol
:param useLegacyImplementation: Legacy implementation of RDKit in FindMolChiralCenters
:type useLegacyImplementation: bool
:return: Union[Tuple, bool]
"""
if not useLegacyImplementation:
AssignStereochemistry(mol, flagPossibleStereoCenters=True, force=True)
FindPotentialStereoBonds(mol)
centers = FindMolChiralCenters(mol, includeUnassigned=True, useLegacyImplementation=useLegacyImplementation)
unassigned_atom: int = 0
for center in centers:
if center[1] == "?":
unassigned_atom += 1
bondList: List[str] = []
unassigned_bond: int = 0
bonds = mol.GetBonds()
for bond in bonds:
value = str(bond.GetStereo())[6:]
if value != "NONE":
bondList.append(value)
if value == "ANY":
unassigned_bond += 1
series = {"atom": len(centers) - unassigned_atom, "non_atom": unassigned_atom,
"bond": len(bondList) - unassigned_bond, "non_bond": unassigned_bond}
if unassigned_atom <= 1 and unassigned_bond == 0:
return series, 1
return series, 0
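# --- Editor's hedged usage sketch (not from the original module) ------------------------
# Summarises the stereo-centre bookkeeping for a fully specified molecule; the SMILES and
# the expected flag are illustrative assumptions.
def _demo_stereochemistry():
    mol = MolFromSmiles("C[C@H](N)C(=O)O")  # L-alanine, chiral centre assigned
    counts, ok_flag = getStereoChemistry(mol)
    return counts, ok_flag                   # e.g. ({'atom': 1, 'non_atom': 0, ...}, 1)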
def checkInvalidStereoChemistry(mol: Mol, previous_message: str = None, useLegacyImplementation: bool = True) -> str:
series = getStereoChemistry(mol=RemoveHs(mol), useLegacyImplementation=useLegacyImplementation)[0]
if series["non_atom"] != 0 or series["non_bond"] != 0:
new_message: str = f" Molecule {MolToSmiles(RemoveHs(mol))} has undefined stereochemistry"
if previous_message != new_message:
previous_message = new_message
warning(new_message)
return previous_message
def getRadicalsStereoChemistry(FragMolX: Mol, FragMolY: Mol, useLegacyImplementation: bool = True) -> int:
"""
Implementation of retrieving stereo-chemistry with radical
:param FragMolX: The first (1st) fragment used for identification. Order can be swapped with FragMolY.
:type FragMolX: Mol
:param FragMolY: The second (2nd) fragment used for identification. Order can be swapped with FragMolX.
:type FragMolY: Mol
:param useLegacyImplementation: Legacy implementation of RDKit in FindMolChiralCenters
:type useLegacyImplementation: bool
:return: int
"""
if getStereoChemistryFast(mol=FragMolX, useLegacyImplementation=useLegacyImplementation) == 0:
return 0
return getStereoChemistryFast(mol=FragMolY, useLegacyImplementation=useLegacyImplementation)
def getFullCisTrans(mol: Mol) -> Tuple:
# Get Cis-Trans Full Structure
CisTransBond: List[str] = []
CisTransIdx: List[int] = []
INVALID: Tuple[str, str] = ("NONE", "ANY")
bonds = mol.GetBonds()
for bond in bonds:
stereo = str(bond.GetStereo())[6:]
if stereo not in INVALID:
CisTransBond.append(stereo)
CisTransIdx.append(bond.GetIdx())
return CisTransBond, CisTransIdx
def detectCisTrans(ParentMol: Mol, bondIdx: int, CisTransBond: List[str], CisTransIdx: List[int],
stack: List[int], ) -> Optional[List[int]]:
# [0]: Generate Input/Output Array
currentLength: int = len(stack)
for _ in range(2 * len(dFramework["Cis-Trans Atoms"]) + 2):
stack.append(0)
# [1]: Check possibility of atoms if potentially possess cis-trans bond
# Cis-Trans: Z == "Cis", "E" == "Trans"
checkBond = ParentMol.GetBondWithIdx(bondIdx)
BeginAtom, EndingAtom = checkBond.GetBeginAtom(), checkBond.GetEndAtom()
checkAtom = []
if BeginAtom.GetSymbol() not in dFramework["Non-Connected Atoms"]:
checkAtom.append(BeginAtom)
if EndingAtom.GetSymbol() not in dFramework["Non-Connected Atoms"]:
checkAtom.append(EndingAtom)
# [2]: Get all data needed (Cis - Trans Identifier)
if len(checkAtom) != 0:
# Bond Database store the bond index which is part of cis-trans
TargetAtom: Tuple[str, str] = dFramework["Cis-Trans Atoms"]
separation: int = len(TargetAtom) + 1
Z: str = "Z"
for _, atom in enumerate(checkAtom):
bonds = atom.GetBonds()
for neighbor_bond in bonds:
# [2.1]: Get the neighbor index that is contained possibility of being a cis-trans bond but not
# current prediction bond.
neighbor_index: int = neighbor_bond.GetIdx()
if neighbor_index == bondIdx:
continue
try:
location: int = CisTransIdx.index(neighbor_index)
except MAIN_ERROR:
continue
temp = currentLength if CisTransBond[location] == Z else separation + currentLength
stack[temp] += 1
try:
targetBond = ParentMol.GetBondWithIdx(neighbor_index)
stack[TargetAtom.index(targetBond.GetOtherAtom(atom).GetSymbol()) + 1 + temp] += 1
except MAIN_ERROR:
pass
return stack
def findNoImplicitAtom(mol) -> int:
# Find the first atom with no implicit hydrogen
atoms = mol.GetAtoms()
for atom in atoms:
if atom.GetNoImplicit():
return atom.GetIdx()
return -1
@measureExecutionTime
def getRingAttachedBondsDatabase(database: ndarray, MoleculeCol: int = 0, BondIdxCol: int = 3,
aromaticOnly: bool = False) -> Tuple[List[int], ndarray, List[int], ndarray]:
if True:
if not inputFastCheck(database, 'ndarray'):
database = np.asarray(database)
inputCheckRange(value=MoleculeCol, name='MoleculeCol', maxValue=database.shape[1], minValue=0, fastCheck=True)
inputCheckRange(value=BondIdxCol, name='BondIdxCol', maxValue=database.shape[1], minValue=0, fastCheck=True)
inputFullCheck(value=aromaticOnly, name='aromaticOnly', dtype="bool")
# [1]: Prepare data
indexData: List = GetIndexOnNonSortedData(database=database, column=MoleculeCol, excel_fit=False, get_last=True)
size: int = len(indexData) - 1
path: List[int] = []
moleculeRingTracking: List[int] = []
reversedPath: List[int] = []
moleculeNonRingTracking: List[int] = []
def add(indexRow: List[int], value: int, moleculeTracking: List[int], moleculeValue: int):
indexRow.append(value)
if len(moleculeTracking) == 0 or moleculeTracking[-1] != moleculeValue:
moleculeTracking.append(moleculeValue)
# [2]: Looping by request
for molSet in range(0, size):
begin, end = indexData[molSet][0], indexData[molSet + 1][0]
molecule: Mol = AddHs(MolFromSmiles(str(database[begin, MoleculeCol])))
for row in range(begin, end):
bond = molecule.GetBondWithIdx(int(database[row, BondIdxCol]))
startAtom, endAtom = bond.GetBeginAtom(), bond.GetEndAtom()
if startAtom.IsInRing() or endAtom.IsInRing():
condition: bool = True
if aromaticOnly:
if not (startAtom.GetIsAromatic() or endAtom.GetIsAromatic()):
condition = False
if condition:
add(indexRow=path, value=row, moleculeTracking=moleculeRingTracking, moleculeValue=begin)
else:
add(indexRow=reversedPath, value=row, moleculeTracking=moleculeNonRingTracking, moleculeValue=begin)
else:
add(indexRow=reversedPath, value=row, moleculeTracking=moleculeNonRingTracking, moleculeValue=begin)
if len(path) < 100:
print("List of Rows for Ring-Attached:", path)
if len(reversedPath) < 100:
print("List of Rows for Non-Ring-Attached:", reversedPath)
print(f"Result:"
f"\n1) Ring-Attached Bonds: "
f"{len(path)} BDEs ({round(100 * len(path) / database.shape[0], 6)} %; "
f"{len(moleculeRingTracking)} molecules ({round(100 * len(moleculeRingTracking) / len(indexData), 6)}%)"
f"\n2) Non-Ring-Attached Bonds: "
f"{len(reversedPath)} BDEs ({round(100 * len(reversedPath) / database.shape[0], 6)}%); "
f"{len(moleculeNonRingTracking)} molecules ({round(100 * len(moleculeNonRingTracking) / len(indexData), 6)}%)")
return path, database[path, :], reversedPath, database[reversedPath, :]
def getRingAttachedBondsByFile(InputFileName: str, RingFileName: Optional[str], NonRingFileName: Optional[str],
MoleculeCol: int = 0, BondIdxCol: int = 3, aromaticOnly: bool = False) -> \
Tuple[List[int], pd.DataFrame, List[int], pd.DataFrame]:
"""
Retrieve a dataset contained bonds whose atoms are part of the rings
:param InputFileName: A string directory of dataset used to retrieve bond
:type InputFileName: str
:param RingFileName: A string directory to generate a csv dataset of ring-attached bond
:type RingFileName: str
:param NonRingFileName: A string directory to generate a csv dataset of ring-attached bond
:type NonRingFileName: str
:param MoleculeCol: The molecule's column
:type MoleculeCol: int
:param BondIdxCol: The bond index's column
:type BondIdxCol: int
:param aromaticOnly: Whether to extract aromatic bonds only
:type aromaticOnly: bool
:return: List[int]
"""
if True:
inputFullCheck(value=InputFileName, name='InputFileName', dtype="str")
if RingFileName is not None:
inputFullCheck(value=RingFileName, name='RingFileName', dtype="str")
if RingFileName == InputFileName:
RingFileName = f"{RemoveExtension(FixPath(RingFileName, 'csv'), '.csv')} - Ring.csv"
if NonRingFileName is not None:
inputFullCheck(value=NonRingFileName, name='NonRingFileName', dtype="str")
if NonRingFileName == InputFileName:
NonRingFileName = f"{RemoveExtension(FixPath(NonRingFileName, 'csv'), '.csv')} - Non-Ring.csv"
if NonRingFileName is None and RingFileName is None:
warning(" No file would be exported")
pass
array, columns = ReadFile(FilePath=InputFileName, header=0, get_values=True, get_columns=True)
path, forwardArray, reversedPath, reversedArray = \
getRingAttachedBondsDatabase(database=array, MoleculeCol=MoleculeCol, BondIdxCol=BondIdxCol,
aromaticOnly=aromaticOnly)
forwardDataframe = | pd.DataFrame(data=forwardArray, columns=columns) | pandas.DataFrame |
import pandas
from croissance.estimation.util import with_overhangs
def remove_outliers(series, window=30, std=2):
"""
Removes any points where the distance of the median exceeds ``std`` standard deviations within a rolling window.
:param series:
:param window:
:param std:
:return:
"""
if len(series.values) < 10:
return series, pandas.Series(data=[], index=[])
values = with_overhangs(series.values, window)
outliers = abs(values - values.rolling(window=window, center=True).median()) < values.rolling(window=window, center=True).std() * std
outlier_mask = outliers[window:-window].values
outliers = pandas.Series(data=series.values[~outlier_mask], index=series.index[~outlier_mask])
series = | pandas.Series(data=series.values[outlier_mask], index=series.index[outlier_mask]) | pandas.Series |
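# --- Editor's hedged usage sketch (not part of croissance) ------------------------------
# Assumes remove_outliers finishes by returning (cleaned_series, outlier_series), mirroring
# the early-return branch above; the synthetic growth curve below is made up.
def _demo_remove_outliers():
    import numpy as np
    values = np.linspace(0.1, 1.0, 100)
    values[50] = 10.0                            # inject a single spike
    curve = pandas.Series(values, index=np.arange(100, dtype=float))
    cleaned, flagged = remove_outliers(curve, window=30, std=2)
    return len(cleaned), len(flagged)            # roughly 99 kept, 1 flagged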
from millify import millify
import altair as alt
import pandas as pd
import streamlit as st
from pandas.tseries import offsets
from urllib.parse import urlparse
from . import utils
import streamlit.components.v1 as components
### Summary stats from CoinGecko
def get_cg_summary_data(coin_choice, df):
score_cols = [
"coingecko_score",
"developer_score",
"community_score",
"liquidity_score",
"public_interest_score",
]
coin_choice_df = df.loc[df.name == coin_choice]
genesis_date = coin_choice_df["genesis_date"].values[0]
last_updated = coin_choice_df["last_updated"].values[0]
contract_address = coin_choice_df["contract_address"].values[0]
coingecko_rank = coin_choice_df["coingecko_rank"].values[0]
market_cap_rank = coin_choice_df["market_cap_rank"].values[0]
sentiment_votes_up_percentage = coin_choice_df[
"sentiment_votes_up_percentage"
].values[0]
sentiment_votes_down_percentage = coin_choice_df[
"sentiment_votes_down_percentage"
].values[0]
# st.markdown("## Market Cap Rank")
st.metric(
label="Market Cap Rank",
value=f"#{market_cap_rank}",
)
st.metric(
label="CoinGecko Rank",
value=f"#{coingecko_rank}",
)
# st.markdown(
# f"<h1>Market Cap Rank #{market_cap_rank}</h1><h1>CoinGecko Rank #{coingecko_rank}</h1>",
# unsafe_allow_html=True,
# )
get_market_data(coin_choice, df)
st.markdown(
f'<h1>CoinGecko Sentiment<br><span style="color: green;">{sentiment_votes_up_percentage}%</span> <span style="color: red;"> {sentiment_votes_down_percentage}%</span></h1>',
unsafe_allow_html=True,
)
for col in score_cols:
st.markdown(
f"<p class='small-font'><strong>{col.replace('_', ' ').capitalize()}</strong>: {coin_choice_df[col].values[0]:.2f}%</p>", # noqa: E501
unsafe_allow_html=True,
)
if not | pd.isna(coin_choice_df["contract_address"].values[0]) | pandas.isna |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_data.ipynb (unless otherwise specified).
__all__ = ['DATA_PATH', 'acquire_data', 'rmtree', 'load_custom_data', 'load_data', 'pad_trajectories',
'normalize_trajectory', 'get_custom_dls', 'get_discriminative_dls', 'get_turning_point_dls', 'get_1vall_dls',
'get_validation_dl', 'get_dls_from_ds', 'create_custom_dataset', 'cut_trajectory', 'validate_model',
'validate_task']
# Cell
from pathlib import Path
import urllib.request as u_request
from zipfile import ZipFile
import csv
import pandas as pd
from andi import andi_datasets, normalize
import numpy as np
from fastai.text.all import *
# Cell
DATA_PATH = Path("../data")
# Cell
def acquire_data(train=True, val=True):
"""Obtains the train and validation datasets of the competition.
The train url might fail. Get it from https://drive.google.com/drive/folders/1RXziMCO4Y0Fmpm5bmjcpy-Genhzv4QJ4"""
DATA_PATH.mkdir(exist_ok=True)
train_url = ("https://doc-4k-88-drive-data-export.googleusercontent.com/download/qh9kfuk2n3khcj0qvrn9t3a4j19nve1a/" +
"rqpd3tajosn0gta5f9mmbbb1e4u8csnn/1599642000000/17390da5-4567-4189-8a62-1749e1b19b06/108540842544374891611/" +
"ADt3v-N9HwRAxXINIFMKGcsrjzMlrvhOOYitRyphFom1Ma-CUUekLTkDp75fOegXlyeVVrTPjlnqDaK0g6iI7eDL9YJw91-" +
"jiityR3iTfrysZP6hpGA62c4lkZbjGp_NJL-XSDUlPcwiVi5Hd5rFtH1YYP0tiiFCoJZsTT4akE8fjdrkZU7vaqFznxuyQDA8YGaiuYlKu" +
"-F1HiAc9kG_k9EMgkMncNflNJtlugxH5pFcNDdrYiOzIINRIRivt5ScquQ_s4KyuV-zYOQ_g2_VYri8YAg0IqbBrcO-exlp5j-" +
"t02GDh5JZKU3Hky5b70Z8brCL5lvK0SFAFIKOer45ZrFaACA3HGRNJg==?authuser=0&nonce=k5g7m53pp3cqq&user=" +
"108540842544374891611&hash=m7kmrh87gmekjhrdcpbhuf1kj13ui0l2")
val_url = ("https://competitions.codalab.org/my/datasets/download/7ea12913-dfcf-4a50-9f5d-8bf9666e9bb4")
if train:
data = _download_bytes(train_url)
_write_bytes(data, DATA_PATH)
train_path = DATA_PATH/"Development dataset for Training"
train_path.rename(train_path.parent/"train")
if val:
data = _download_bytes(val_url)
_write_bytes(data, DATA_PATH)
val_path = DATA_PATH/"validation_for_scoring"
val_path.rename(val_path.parent/"val")
rmtree(DATA_PATH/"__MACOSX")
def _download_bytes(url):
"Downloads data from `url` as bytes"
u = u_request.urlopen(url)
data = u.read()
u.close()
return data
def _write_bytes(data, path):
"Saves `data` (bytes) into path."
zip_path = _zip_bytes(data)
_unzip_file(zip_path, new_path=path)
def _zip_bytes(data, path=None):
"Saves bytes data as .zip in `path`."
if path is None: path = Path("../temp")
zip_path = path.with_suffix(".zip")
with open(zip_path, "wb") as f:
f.write(data)
return zip_path
def _unzip_file(file_path, new_path=None, purge=True):
"Unzips file in `file_path` to `new_path`."
if new_path is None: new_path = file_path.with_suffix("")
zip_path = file_path.with_suffix(".zip")
with ZipFile(zip_path, 'r') as f:
f.extractall(new_path)
if purge: zip_path.unlink()
def rmtree(root):
for p in root.iterdir():
if p.is_dir(): rmtree(p)
else: p.unlink()
root.rmdir()
# Cell
def load_custom_data(dim=1, models=None, exps=None, path=None):
"Loads data from custom dataset."
path = DATA_PATH/f"custom{dim}.pkl" if path is None else path
df = pd.read_pickle(path)
mod_mask = sum([df['model'] == m for m in models]) if models is not None else np.ones(df.shape[0], dtype=bool)
exp_mask = sum([df['exp'] == e for e in exps]) if exps is not None else np.ones(df.shape[0], dtype=bool)
mask = mod_mask & exp_mask
return df[mask].reset_index(drop=True)
def load_data(task, dim=1, ds='train'):
"Loads 'train' or 'val' data of corresponding dimension."
path = DATA_PATH/ds
try:
df = pd.read_pickle(path/f"task{task}.pkl")
except:
_txt2df(task, ds=[ds])
df = pd.read_pickle(path/f"task{task}.pkl")
return df[df['dim']==dim].reset_index(drop=True)
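# --- Editor's hedged usage sketch (not part of the original module) ---------------------
# Pulls the 1-D training trajectories for task 1; assumes the competition files were
# already extracted under DATA_PATH/train (acquire_data / _txt2df handle that otherwise).
def _demo_load_data():
    df = load_data(task=1, dim=1, ds='train')
    return df[['y', 'len']].head()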
def _txt2df(task, ds=['train', 'val']):
"Extracts dataset and saves it in df form"
if 'train' in ds:
df = pd.DataFrame(columns=['dim', 'y', 'x', 'len'], dtype=object)
train_path = DATA_PATH/"train"
if not (train_path/f"task{task}.txt").exists(): acquire_data(train=True, val=False)
with open(train_path/f"task{task}.txt", "r") as D, open(train_path/f"ref{task}.txt") as Y:
trajs = csv.reader(D, delimiter=";", lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
labels = csv.reader(Y, delimiter=";", lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
for t, y in zip(trajs, labels):
dim, x = int(t[0]), t[1:]
x = tensor(x).view(dim, -1).T
label = tensor(y[1:]) if task == 3 else y[1]
df = df.append({'dim': dim, 'y': label, 'x': x, 'len': len(x)}, ignore_index=True)
df.to_pickle(train_path/f"task{task}.pkl")
if 'val' in ds:
df = | pd.DataFrame(columns=['dim', 'x', 'len'], dtype=object) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 15:27:23 2020
@author: saksh
Main execution file for market_networks paper; recommended to use market_networks(phase_3).ipynb for a more thorough analysis
Adjust the file path in import_csv according to position of file
"""
#init
import pandas as pd
import numpy as np
np.random.seed(1337) #random state used throughout the notebook for reproducibility
from math import log
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import seaborn as sns
from datetime import datetime
import networkx as nx
import community as louvain
from collections import Counter
import random
from preprocess_funcs import louvain_community, variation_of_information, pd_fill_diagonal
plt.style.use('classic')
#dataset import
sp500 = pd.read_csv('/content/drive/My Drive/collab_files/^GSPC.csv', header = 0, index_col = 'Date')
sp500.index = pd.to_datetime(sp500.index, format = '%d-%m-%y')
sp500 = sp500[1:]
#sp500 = sp500.resample('W').mean()
#sp500.head()
print(len(sp500))
#import nifty50 data
nifty = pd.read_csv('/content/drive/My Drive/collab_files/^NSEI.csv', header = 0, index_col = 'Date')
nifty.index = pd.to_datetime(nifty.index, format = '%d-%m-%y')
nifty = nifty.reindex(index = sp500.index, method = 'bfill')
nifty.fillna(method = 'bfill', inplace=True)
#nifty = nifty.resample('W').mean()
#nifty.head()
print(len(nifty))
sing_sti = pd.read_csv('/content/drive/My Drive/collab_files/^sti_d.csv', header = 0, index_col = 'Date')
sing_sti.index = pd.to_datetime(sing_sti.index, format = '%Y-%m-%d')
sing_sti = sing_sti.reindex(index = sp500.index, method = 'bfill')
sing_sti.fillna(method = 'bfill', inplace=True)
print(len(sing_sti))
uk_100 = pd.read_csv('/content/drive/My Drive/collab_files/^ukx_d.csv', header = 0, index_col = 'Date')
uk_100.index = pd.to_datetime(uk_100.index, format = '%Y-%m-%d')
uk_100 = uk_100.reindex(index = sp500.index, method = 'bfill')
uk_100.fillna(method = 'bfill', inplace=True)
print(len(uk_100))
hangseng = pd.read_csv('/content/drive/My Drive/collab_files/^hsi_d.csv', header = 0, index_col = 'Date')
hangseng.index = pd.to_datetime(hangseng.index, format = '%Y-%m-%d')
hangseng = hangseng.reindex(index = sp500.index, method = 'bfill')
hangseng.fillna(method = 'bfill', inplace=True)
print(len(hangseng))
nikkei = pd.read_csv('/content/drive/My Drive/collab_files/^nkx_d.csv', header = 0, index_col = 'Date')
nikkei.index = pd.to_datetime(nikkei.index, format = '%Y-%m-%d')
nikkei = nikkei.reindex(index = sp500.index, method = 'bfill')
nikkei.fillna(method = 'bfill', inplace=True)
print(len(nikkei))
shanghai_comp = | pd.read_csv('/content/drive/My Drive/collab_files/^shc_d.csv', header = 0, index_col = 'Date') | pandas.read_csv |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Series(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatted string passed to constructor. "
r"Valid formats include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype('float64')
dtype2 = IntervalDtype('datetime64[ns, US/Eastern]')
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize('subtype', [
None, 'interval', 'Interval', 'int64', 'uint64', 'float64',
'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')])
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, 'interval')
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize('subtype', [
'int64', 'uint64', 'float64', 'complex128', 'datetime64',
'timedelta64', PeriodDtype('Q')])
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
expected = 'interval[{subtype}]'.format(subtype=subtype)
assert str(dtype) == expected
assert dtype.name == 'interval'
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_name_repr_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert str(dtype) == 'interval'
assert dtype.name == 'interval'
def test_basic(self):
assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Series(ii, name='A')
assert is_interval_dtype(s.dtype)
assert is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype('interval[int64]')
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(IntervalIndex.from_breaks(
date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert len(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert len(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
tm.round_trip_pickle(dtype)
assert len(IntervalDtype._cache) == 0
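# Illustration only (not part of the pandas test suite; the helper name below is
# made up): a minimal sketch of the IntervalDtype behavior asserted above.
def _intervaldtype_sketch():
    ii = IntervalIndex.from_breaks(np.arange(4))
    assert is_interval_dtype(ii.dtype)             # interval dtype is detected
    assert ii.dtype.subtype == np.dtype('int64')   # subtype inferred from the breaks
    return ii.dtype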
class TestCategoricalDtypeParametrized(object):
@pytest.mark.parametrize('categories', [
list('abcd'),
np.arange(1000),
['a', 'b', 10, 2, 1.3, True],
[True, False],
pd.date_range('2017', periods=4)])
def test_basic(self, categories, ordered):
c1 = CategoricalDtype(categories, ordered=ordered)
tm.assert_index_equal(c1.categories, pd.Index(categories))
assert c1.ordered is ordered
def test_order_matters(self):
categories = ['a', 'b']
c1 = CategoricalDtype(categories, ordered=True)
c2 = CategoricalDtype(categories, ordered=False)
c3 = CategoricalDtype(categories, ordered=None)
assert c1 is not c2
assert c1 is not c3
@pytest.mark.parametrize('ordered', [False, None])
def test_unordered_same(self, ordered):
c1 = CategoricalDtype(['a', 'b'], ordered=ordered)
c2 = CategoricalDtype(['b', 'a'], ordered=ordered)
assert hash(c1) == hash(c2)
def test_categories(self):
result = CategoricalDtype(['a', 'b', 'c'])
tm.assert_index_equal(result.categories, pd.Index(['a', 'b', 'c']))
assert result.ordered is None
def test_equal_but_different(self, ordered):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1., 2., 3.])
assert c1 is not c2
assert c1 != c2
@pytest.mark.parametrize('v1, v2', [
([1, 2, 3], [1, 2, 3]),
([1, 2, 3], [3, 2, 1]),
])
def test_order_hashes_different(self, v1, v2):
c1 = CategoricalDtype(v1, ordered=False)
c2 = CategoricalDtype(v2, ordered=True)
c3 = CategoricalDtype(v1, ordered=None)
assert c1 is not c2
assert c1 is not c3
def test_nan_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, np.nan])
def test_non_unique_invalid(self):
with pytest.raises(ValueError):
            CategoricalDtype([1, 2, 1])
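# Illustration only (not part of the pandas test suite; the helper name below is
# made up): a minimal sketch of the CategoricalDtype equality semantics asserted above.
def _categoricaldtype_sketch():
    c1 = CategoricalDtype(['a', 'b'], ordered=False)
    c2 = CategoricalDtype(['b', 'a'], ordered=False)
    assert c1 == c2 and hash(c1) == hash(c2)   # unordered: category order is ignored
    assert c1 != CategoricalDtype(['a', 'b'], ordered=True)
    return c1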
import pandas as pd
TEST_PATH = '/ssd-1/clinical/clinical-abbreviations/data/oof_test.csv'
df = pd.read_csv("/ssd-1/clinical/clinical-abbreviations/data/full_groups.csv", na_filter=False)
preds = pd.read_csv(TEST_PATH, na_filter=False)
full_df = pd.concat([df, preds], axis=1, ignore_index=True)
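# Note: with axis=1, ignore_index=True renumbers the result's columns 0..n-1,
# discarding the original column names from both frames.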
import pandas as pd
import os
import re
from 提取函数库 import *
TEST = True
CLEAN = False
PROD = 0
if PROD:
    TEST = False
# verbosity level: 0, 1, 2, or 3
VERBOSE = 0
MAX_FILE_COUNT = -1
target_files = []
FOCUS_FEATURE = ["文化程度", "地区", "认罪态度良好", "前科"]
FOCUS_FEATURE = ["罚金"]
FOCUS_FEATURE = ["年龄"]
FOCUS_FEATURE = ["有期徒刑","拘役","管制","罚金", "盗窃数额"]
FOCUS_FEATURE = ["没收个人财产", "没收个人财产数额"]
version_id = "v1"
if TEST:
input_file_path = './testset_v1'
# target_files = ["165.txt"]
output_file_path = './processed'
if CLEAN:
clean_file_path = './ilegalfiles'
if PROD:
# target_files = ["重庆认罪认罚5622.txt"]
input_file_path = './rawdata/rawdata_v1'
input_file_path = "/Users/vectorshan/Desktop/清洗后数据/汇总"
output_file_path = './processed'
if version_id:
output_file_path += "/" + version_id
if not os.path.exists(output_file_path):
os.makedirs(output_file_path)
def unify_feature_name(name):
if name == "地点":
name = "地区"
if name == "罚金刑":
name = "罚金"
if name in ["退赃退赔"]:
name = "退赔"
if name == "自然灾害、事故灾害、社会安全事件等突发事件期间,在事件发生地盗窃":
name = "自然灾害"
if name == "在医院盗窃病人或者其亲友财物的":
name = "在医院盗窃"
if name == "盗窃残疾人、孤寡老人、丧失劳动能力人的财物的":
name = "盗窃残疾人"
if name == "盗窃救灾、抢险、防汛、优抚、扶贫、移民、救济款物的":
name = "盗窃救灾款物"
if name == "一年内曾因盗窃受过行政处罚":
name = "一年内盗窃"
if name == "因盗窃造成严重后果的":
name = "造成严重后果"
if name == "携带凶器盗窃":
name = "携带凶器"
return name
def unify_feature_value(name,value):
if value in ["是", "有"]:
value = "是"
if value in ["无", "否"]:
value = "否"
if value in [""]:
value = "未知"
if IsNum(value):
value = ToNum(value)
if name in ["盗窃数额","缓刑","没收个人财产数额"] and value in ["未知", "否", "无"]:
value = 0
if name in ["有期徒刑","拘役", "管制"]:
value = ToMonth(str(value))
return value
def readTestFile(file_path):
    # Read a test-set file; used during iterative development to verify the extractor's correctness
if VERBOSE>=0: print("Reading " + file_path)
raw_text = open(file_path,'r').read()
result = re.split(r"\#\s*预期特征\s*\n",raw_text,1)
try:
article_section, feature_section = result
except Exception as e:
print("无法读取预期特征 {}".format(file_path))
return
features = {}
reasons = {}
for line in feature_section.split('\n'):
line = line.strip()
if line and not re.match(r'\s*#',line) and line.count('|') >= 2:
feature_name, feature_value, reason = re.split(r"\s*\|\s*", line, 2)
feature_name = unify_feature_name(feature_name)
feature_value = unify_feature_value(feature_name, feature_value)
features[feature_name] = feature_value
reasons[feature_name] = reason
else:
            # line could not be parsed; skip it
pass
return article_section, features, reasons
def readRawFile(file_path):
    # Read a cleaned raw-data file; used for feature extraction and output
if VERBOSE >= 0: print("Reading " + file_path)
article = open(file_path, 'r').read()
if "预期特征" in article:
article = re.split(r"\#\s*预期特征\s*\n",article)[0]
if "提取结果" in article:
article = re.split(r"\#\s*提取结果\s*\n",article)[0]
return article
def ToMonth(value):
year,month = 0,0
m = re.search("([\d]+)Y",value)
if m:
year = int(m.groups()[0])
m = re.search("([\d]+)M",value)
if m:
month = int(m.groups()[0])
return year*12 + month
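# Quick self-checks for the helpers above (illustration only; the function name is
# made up and nothing calls it). ToMonth parses "1Y6M"-style sentence terms into months.
def _demo_normalizers():
    assert unify_feature_name("罚金刑") == "罚金"
    assert unify_feature_name("退赃退赔") == "退赔"
    assert ToMonth("1Y6M") == 18   # 1 year 6 months
    assert ToMonth("8M") == 8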
# location
# Register a dedicated extraction function for every feature
feature_fun_map ={
"性别" : 提取性别,
"年龄" : 提取年龄,
"文化程度" : 提取文化程度,
"聋哑" : 提取聋哑,
"盲人" : 提取盲人,
"残疾人" : 提取残疾人,
"精神病人" : 提取精神病人,
"地区" : 提取地区,
"认罪认罚" : 提取认罪认罚,
"被害人谅解" : 提取被害人谅解,
"自首" : 提取自首,
"坦白" : 提取坦白,
"认罪态度良好" : 提取认罪态度良好,
"立功" : 提取立功,
"退赔" : 提取退赔,
"自然灾害" : 提取自然灾害,
"在医院盗窃" : 提取在医院盗窃,
"盗窃残疾人" : 提取盗窃残疾人,
"盗窃救灾款物" : 提取盗窃救灾款物,
"一年内盗窃" : 提取一年内盗窃,
"造成严重后果" : 提取造成严重后果,
"前科" : 提取前科,
"累犯" : 提取累犯,
"盗窃数额" : 提取盗窃数额,
"入户" : 提取入户,
"携带凶器" : 提取携带凶器,
"扒窃" : 提取扒窃,
"破坏性手段" : 提取破坏性手段,
"未遂" : 提取未遂,
"中止" : 提取中止,
"多次盗窃" : 提取多次盗窃,
"免于刑事处罚" : 提取免于刑事处罚,
"拘役" : 提取拘役,
"管制" : 提取管制,
"有期徒刑" : 提取有期徒刑,
"罚金" : 提取罚金,
"没收个人财产" : 提取没收个人财产,
"没收个人财产数额" : 提取没收个人财产数额,
"缓刑" : 提取缓刑,
}
def format_function_names(keys):
print("{")
for key in keys:
print("\t\"{}\" : 提取{},".format(key,key))
print("}")
def exclude_file(article):
for word in ["共同犯罪","主犯","从犯","共犯","数罪并罚"]:
if word in article:
return True
return False
accuracy_stats = { name: [0,0] for name in feature_fun_map.keys()}
word_stats_list = ["判决如下","裁判日期", "法院认为","数罪并罚","本院认为" ]
word_stats = {word: [0,0] for word in word_stats_list}
def update_word_stats(filename,article):
for word in word_stats.keys():
word_stats[word][1]+=1
if word in article:
word_stats[word][0] +=1
else:
if VERBOSE >= 1: print("{} not in {}".format(word, filename))
def report_word_stats():
for word, stats in word_stats.items():
print("{:.1f}% 文章含有 {}".format(stats[0]/stats[1]*100,word))
if __name__ == "__main__":
print("数据读取路径: ",input_file_path)
col_names = ["文件名"] + list(feature_fun_map.keys())
result_dict = { col:[] for col in col_names}
if TEST:
for filename in os.listdir(input_file_path)[:MAX_FILE_COUNT]:
if target_files and filename not in target_files:
continue
extracted_features = {}
try:
article_section, expected_features, reasons = readTestFile(os.path.join(input_file_path,filename))
except Exception as e:
print("测试集无法识别: {} {}".format(filename,e))
continue
if exclude_file(article_section):
print("样本被剔除")
continue
registered_features_count = 39
if len(expected_features) != registered_features_count:
print("{} 特征数不符, 读取 {}, 预期 {}".format(filename,len(expected_features), registered_features_count))
if CLEAN:
os.rename(os.path.join(input_file_path,filename),os.path.join(clean_file_path,filename))
            # Valid sample; start processing
for key, fun in feature_fun_map.items():
extracted_features[key] = fun(article_section)
result_dict[key].append(extracted_features[key])
result_dict["文件名"].append(filename)
            # regex debugging point
temp = re.search(r"(\S*)[省|市].*法院.*", article_section, re.M + re.X)
update_word_stats(filename, article_section)
if extracted_features["盗窃数额"]<1000:
print(extracted_features["盗窃数额"])
continue
for name in feature_fun_map.keys():
if name not in extracted_features:
print("Feature not extracted: {}".format(name))
continue
accuracy_stats[name][1] += 1
if name not in expected_features:
print("Unexpected feature {} in file {}".format(name,filename))
continue
value = expected_features[name]
if str(extracted_features[name]) != str(value):
if VERBOSE>=1 or name in FOCUS_FEATURE :
print("{} 提取错误 {},\t 预期值 {},\t 提取值 {},\t {}".format(filename, name, value, extracted_features[name],reasons[name]))
else:
accuracy_stats[name][0] += 1
if VERBOSE>=1: print("提取正确 {},\t 预期值 {},\t 提取值 {}".format(name, value, extracted_features[name]))
print("\n统计分析\n")
print("提取准确度")
for name, stats in accuracy_stats.items():
print("{}:\t {}/{} = {:.1f}%".format(name, stats[0], stats[1], stats[0]/stats[1]*100))
if PROD:
extracted_features = {}
for filename in os.listdir(input_file_path)[:MAX_FILE_COUNT]:
if target_files and filename not in target_files:
continue
try:
article = readRawFile(os.path.join(input_file_path, filename))
except Exception as e:
print("文件无法读取: {} {}".format(filename, e))
continue
update_word_stats(filename, article)
if exclude_file(article):
print("样本被剔除")
continue
for key, fun in feature_fun_map.items():
value = fun(article)
if key == "认罪认罚":
value = 校准认罪认罚(filename, value)
extracted_features[key] = value
result_dict[key].append(value)
result_dict["文件名"].append(filename)
OUTPUT_PROD_LOG= False
if OUTPUT_PROD_LOG:
with open(os.path.join(output_file_path,filename),'w') as file:
file.write(article)
file.write("\n# 提取结果\n\n")
for key, value in extracted_features.items():
file.write("{} | {}\n".format(key,value))
    result_df = pd.DataFrame(data=result_dict)
from brightics.common.report import ReportBuilder, strip_margin, plt2MD, dict2MD, \
pandasDF2MD, keyValues2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
import pandas as pd
import numpy as np
def add_row_number(table, group_by=None, **params):
check_required_parameters(_add_row_number, params, ['table'])
if group_by is not None:
return _function_by_group(_add_row_number, table, group_by=group_by, **params)
else:
return _add_row_number(table, **params)
def _add_row_number(table, new_col='add_row_number'):
df = pd.DataFrame()
n = len(table)
for i in range(1, n + 1):
df2 = pd.DataFrame([{new_col:i}])
df = df.append(df2, ignore_index=True)
out_table = pd.concat([df, table], axis=1)
return {'out_table': out_table}
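# A vectorized alternative (sketch, not part of Brightics): equivalent to
# _add_row_number when the input table has a default RangeIndex, without the
# row-by-row DataFrame.append loop.
def _add_row_number_vectorized(table, new_col='add_row_number'):
    out_table = table.copy()
    out_table.insert(0, new_col, np.arange(1, len(table) + 1))  # 1-based row ids, first column
    return {'out_table': out_table}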
def discretize_quantile(table, group_by=None, **params):
check_required_parameters(_discretize_quantile, params, ['table'])
if group_by is not None:
return _function_by_group(_discretize_quantile, table, group_by=group_by, **params)
else:
return _discretize_quantile(table, **params)
def _discretize_quantile(table, input_col, num_of_buckets=2, out_col_name='bucket_number'):
out_table = table.copy()
out_table[out_col_name], buckets = pd.qcut(table[input_col], num_of_buckets, labels=False, retbins=True, precision=10, duplicates='drop')
# Build model
rb = ReportBuilder()
rb.addMD(strip_margin("""
## Quantile-based Discretization Result
"""))
# index_list, bucket_list
index_list = []
bucket_list = []
for i, bucket in enumerate(buckets):
if i == 1:
index_list.append(i - 1)
bucket_list.append("[{left}, {bucket}]".format(left=left, bucket=bucket))
elif i > 1:
index_list.append(i - 1)
bucket_list.append("({left}, {bucket}]".format(left=left, bucket=bucket))
left = bucket
# cnt_array
cnt = np.zeros(len(index_list), int)
for i in range(len(table)):
cnt[out_table[out_col_name][i]] += 1
# Build model
result = dict()
result_table = pd.DataFrame.from_items([
['bucket number', index_list],
['buckets', bucket_list],
['count', cnt]
])
result['result_table'] = result_table
rb.addMD(strip_margin("""
### Data = {input_col}
|
| {result_table}
""".format(input_col=input_col, n=num_of_buckets, result_table=pandasDF2MD(result_table))))
result['report'] = rb.get()
return {'out_table': out_table, 'model': result}
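# Minimal sketch (illustration only; nothing calls it) of the pd.qcut call that drives
# _discretize_quantile: labels=False yields bucket numbers, retbins=True the bin edges.
def _qcut_demo():
    codes, edges = pd.qcut(pd.Series(range(1, 9)), 4, labels=False,
                           retbins=True, precision=10, duplicates='drop')
    return codes.tolist(), list(edges)  # [0, 0, 1, 1, 2, 2, 3, 3] and five bin edges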
def binarizer(table, column, threshold=0, threshold_type='greater', out_col_name=None):
if out_col_name is None:
out_col_name = 'binarized_' + str(column)
table[out_col_name] = 0
for t in range(0, len(table[column])):
if threshold_type == 'greater':
if table[column][t] > threshold:
table[out_col_name][t] = 1
else:
if table[column][t] >= threshold:
table[out_col_name][t] = 1
return{'table':table}
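# A vectorized alternative (sketch, not part of Brightics): the element-wise loop in
# binarizer reduces to a single boolean comparison cast to int.
def _binarizer_vectorized(table, column, threshold=0, threshold_type='greater', out_col_name=None):
    if out_col_name is None:
        out_col_name = 'binarized_' + str(column)
    mask = table[column] > threshold if threshold_type == 'greater' else table[column] >= threshold
    table[out_col_name] = mask.astype(int)
    return {'table': table}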
def capitalize_variable(table, input_cols, replace, out_col_suffix=None):
if out_col_suffix is None:
out_col_suffix = '_' + replace
out_table = table
for input_col in input_cols:
out_col_name = input_col + out_col_suffix
out_col = pd.DataFrame(columns=[out_col_name])
if replace == 'upper':
out_col[out_col_name] = table[input_col].str.upper()
else:
out_col[out_col_name] = table[input_col].str.lower()
        out_table = pd.concat([out_table, out_col], axis=1)
    # assumed return, mirroring the other helpers in this module
    return {'out_table': out_table}
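# Usage sketch (illustration only; 'name' is a made-up column):
#   capitalize_variable(df, input_cols=['name'], replace='upper')
# appends a 'name_upper' column containing df['name'].str.upper().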