prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
from rest_framework.views import APIView
from rest_framework.response import Response
from django.core import serializers
from django.contrib.postgres.search import SearchQuery, SearchVector
from django.db.models import Q
from rest_framework import status
from . import models
import pandas as pd
import numpy as np
import json, math, pickle, collections
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split, cross_val_score, cross_validate
from sklearn import preprocessing
from sklearn.inspection import partial_dependence, plot_partial_dependence
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import normalize
from collections import Counter
from io import StringIO
import time, ast
from static.models import dl
# A tweet as json
'''
{'grp': '0', 'content': 'truck control...', 'screen_name': 'feedthebuzz',
'valence': 0.333333333, 'valence_seq': 'terror attack kills scores in nice',
'valence_seq_rank': 15, 'valence_pred': 0.161331654, 'valence_grp_pred': 0,
'dominance': 0.270833333, 'dominance_seq': 'terror attack kills scores in', 'dominance_seq_rank': 15, 'dominance_pred': 0.299620539, 'dominance_grp_pred': 0,
'care': 2, 'care_seq': 'terror attack kills scores in nice', 'care_seq_rank': 13, 'care_pred': 2, 'care_grp_pred': 0, 'care_prob': 4.848002434,
'fairness': 1, 'fairness_seq': 'terror attack kills', 'fairness_seq_rank': 3, 'fairness_pred': 2, 'fairness_grp_pred': 0, 'fairness_prob': 1.320369363,
'tweet_id': 0
}
'''
def save_model(model, model_id):
file_name = './app/static/models/' + model_id
file_name = file_name + '.pkl'
with open(file_name, 'wb') as f:
pickle.dump(model, f)
def load_model(model_id):
file_name = './app/static/models/' + model_id + '.pkl'
model = ''
with open(file_name, 'rb') as f:
unpickler = pickle.Unpickler(f)
model = unpickler.load()
return model
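# Hedged usage sketch (not part of the original app): round-trip an estimator through the
# save_model/load_model helpers above. Assumes the './app/static/models/' directory exists.
def _example_model_roundtrip():
    clf = DecisionTreeClassifier(max_depth=1).fit([[0.0], [1.0]], [0, 1])
    save_model(clf, 'example_model')    # writes ./app/static/models/example_model.pkl
    return load_model('example_model')  # unpickles the same estimator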
def get_rules(dtc, df):
rules_list = []
values_path = []
values = dtc.tree_.value
def RevTraverseTree(tree, node, rules, pathValues):
'''
Traverse an sklearn decision tree from a node (presumably a leaf node)
up to the top, building the decision rules. The rules should be
input as an empty list, which will be modified in place. The result
is a nested list of tuples: (feature, direction (left=-1), threshold).
The "tree" is a nested list of simplified tree attributes:
[split feature, split threshold, left node, right node]
'''
# now find the node as either a left or right child of something
# first try to find it as a left node
try:
prevnode = tree[2].index(node)
leftright = '<='
pathValues.append(values[prevnode])
except ValueError:
# failed, so find it as a right node - if this also causes an exception, something's really f'd up
prevnode = tree[3].index(node)
leftright = '>'
pathValues.append(values[prevnode])
# now let's get the rule that caused prevnode to -> node
p1 = df.columns[tree[0][prevnode]]
p2 = tree[1][prevnode]
rules.append(str(p1) + ' ' + leftright + ' ' + str(p2))
# if we've not yet reached the top, go up the tree one more step
if prevnode != 0:
RevTraverseTree(tree, prevnode, rules, pathValues)
# get the nodes which are leaves
leaves = dtc.tree_.children_left == -1
leaves = np.arange(0,dtc.tree_.node_count)[leaves]
# build a simpler tree as a nested list: [split feature, split threshold, left node, right node]
thistree = [dtc.tree_.feature.tolist()]
thistree.append(dtc.tree_.threshold.tolist())
thistree.append(dtc.tree_.children_left.tolist())
thistree.append(dtc.tree_.children_right.tolist())
# get the decision rules for each leaf node & apply them
for (ind,nod) in enumerate(leaves):
# get the decision rules
rules = []
pathValues = []
RevTraverseTree(thistree, nod, rules, pathValues)
pathValues.insert(0, values[nod])
pathValues = list(reversed(pathValues))
rules = list(reversed(rules))
rules_list.append(rules)
values_path.append(pathValues)
return (rules_list, values_path, leaves)
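# Hedged usage sketch (not part of the original module): extract rule paths from a tiny
# fitted tree with get_rules() above; the column names and data here are made up.
def _example_get_rules():
    df = pd.DataFrame({'valence': [0.1, 0.2, 0.8, 0.9], 'dominance': [0.3, 0.4, 0.6, 0.7]})
    y = [0, 0, 1, 1]
    dtc = DecisionTreeClassifier(max_depth=2, random_state=0).fit(df, y)
    rules_list, values_path, leaves = get_rules(dtc, df)
    for leaf, rules in zip(leaves, rules_list):
        print(leaf, ' AND '.join(rules))  # one conjunction of threshold rules per leaf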
# For the initial run
class LoadData(APIView):
def get(self, request, format=None):
tweet_objects = models.Tweet.objects.all()
# the serializer returns a JSON string, so convert it to a list of dicts with eval()
tweet_objects_json = eval(serializers.serialize('json', tweet_objects))
tweets_json = []
for tweet in tweet_objects_json:
tweet_json = tweet['fields']
tweet_json.update({ 'tweet_id': str(tweet['pk']) })
tweets_json.append(tweet_json)
return Response(tweets_json)
class LoadUsers(APIView):
def get(self, request, format=None):
users_objects = models.User.objects.all()
# the serializer returns a JSON string, so convert it to a list of dicts with eval()
users_objects_json = eval(serializers.serialize('json', users_objects))
users_json = []
for user in users_objects_json:
user_json = user['fields']
user_json.update({ 'screen_name': user['pk'] })
users_json.append(user_json)
return Response(users_json)
class LoadWords(APIView):
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
group_objs = request_json['groups']
tweet_objects = models.Tweet.objects.all()
# the serializer returns a JSON string, so convert it to a list of dicts with eval()
tweet_objects_json = eval(serializers.serialize('json', tweet_objects))
groups = [ group_obj['idx'] for group_obj in group_objs ]
tweets_json = []
word_tokens = [] # All important word appearances from all second-level features
for tweet_idx, tweet in enumerate(tweet_objects_json):
tweet_json = tweet['fields']
tweet_json.update({ 'tweet_id': tweet['pk'] })
tweets_json.append(tweet_json)
word_tokens.append({ 'word': tweet_json['valence_seq'], 'group': tweet_json['grp'] })
word_tokens.append({ 'word': tweet_json['dominance_seq'], 'group': tweet_json['grp'] })
word_tokens.append({ 'word': tweet_json['fairness_seq'], 'group': tweet_json['grp'] })
word_tokens.append({ 'word': tweet_json['care_seq'], 'group': tweet_json['grp'] })
# word_tokens.append({ 'word': tweet_json['loyalty_seq'], 'group': tweet_json['grp'] })
# word_tokens.append({ 'word': tweet_json['authority_seq'], 'group': tweet_json['grp'] })
# word_tokens.append({ 'word': tweet_json['purity_seq'], 'group': tweet_json['grp'] })
# Organize word tokens as unique words and their frequencies
word_count_dict = {}
for word_dict in word_tokens:
if word_dict['word'] in word_count_dict.keys():
word_count_dict[word_dict['word']][word_dict['group']] += 1
word_count_dict[word_dict['word']]['count_total'] += 1
else:
word_count_dict[word_dict['word']] = {}
word_count_dict[word_dict['word']]['count_total'] = 1 # count this first occurrence
for group in groups: # Create keys for all groups
word_count_dict[word_dict['word']][str(group)] = 0
word_count_dict[word_dict['word']][word_dict['group']] = 1
#word_count_dict = dict(Counter(word_tokens)) # { 'dog': 2, 'cat': 1, ... }
df_word_count = pd.DataFrame()
df_word_list = pd.DataFrame(list(word_count_dict.keys()), columns=['word'])
df_word_count_per_group = pd.DataFrame.from_dict(list(word_count_dict.values()))
df_word_count = pd.concat([ df_word_list, df_word_count_per_group ], axis=1)
df_word_count['word'] = df_word_count['word'].map(lambda x: x.encode('unicode-escape').decode('utf-8'))
# Filter out words with threshold
df_filtered_word_count = df_word_count.loc[df_word_count['count_total'] > 10]
return Response(df_filtered_word_count.to_dict(orient='records')) # [{ 'word': 'dog', 'count': 2 }, { ... }, ...]
# For the global interpretability,
class SearchTweets(APIView):
def get(self, request, format=None):
pass
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
keywords = request_json['searchKeyword'].split(' ')
content_q = Q()
for keyword in keywords:
content_q &= Q(content__contains=keyword)
retrieved_tweet_objects = models.Tweet.objects.filter(content_q)
tweet_objects_json = eval(serializers.serialize('json', retrieved_tweet_objects))
tweets_json = [ tweet['fields'] for tweet in tweet_objects_json ]
return Response(tweets_json)
class RunDecisionTree(APIView):
def get(self, request, format=None):
pass
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
feature_objs = request_json['selectedFeatures']
features = [feature['key'] for feature in feature_objs]
tweets = request_json['tweets']
# tweet_objects = models.Tweet.objects.all()
# tweet_objects_json = eval(serializers.serialize('json', tweet_objects)) # serializer return string, so convert it to list with eval()
# tweets_json = [ tweet['fields'] for tweet in tweet_objects_json ]
df_tweets = pd.DataFrame(tweets)
lb = preprocessing.LabelBinarizer()
X = df_tweets[features]
y = lb.fit_transform(df_tweets['group'].astype(str)) # con: 0, lib: 1
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
if len(feature_objs) == 8: # if all features are selected, just load the saved model
clf = load_model('dt_all')
else:
clf = DecisionTreeClassifier(max_depth=9, random_state=42)
tree = clf.fit(X_train, y_train)
feature_imps = clf.feature_importances_
y_pred_binary = clf.predict(X)
y_pred_prob = clf.predict_proba(X)
y_pred_string = lb.inverse_transform(y_pred_binary)
df_tweets['pred'] = y_pred_string
df_tweets['prob'] = [probs[1] for probs in y_pred_prob] # Extract the prob of tweet being liberal
y_pred_for_test = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred_for_test)
scores = cross_validate(clf, X, y, cv=10)['test_score']
save_model(clf, 'dt_all')
return Response({
'modelId': 'dt_all',
'tweets': df_tweets.to_json(orient='records'),
'features': features,
'accuracy': accuracy,
'featureImps': feature_imps
})
# class RunClustering(APIView):
# def get(self, request, format=None):
# selected_features = ['valence', 'dominance', 'care', 'fairness']
# tweet_objects = models.Tweet.objects.all()
# # serializer return string, so convert it to list with eval()
# tweet_objects_json = eval(serializers.serialize('json', tweet_objects))
# tweets_json = [tweet['fields'] for tweet in tweet_objects_json]
# df_tweets = pd.DataFrame(tweets_json)
# # Clustering all together
# df_tweets_selected = df_tweets[selected_features]
# fit_cls = AgglomerativeClustering(n_clusters=10).fit(df_tweets_selected)
# cls_labels = fit_cls.labels_
# df_tweets['clusterId'] = cls_labels
# df_tweets_by_cluster = df_tweets.groupby(['clusterId'])
# num_tweets_per_group = df_tweets_by_cluster.size()
# df_group_ratio = df_tweets_by_cluster.agg({
# 'grp': lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100
# }).rename(columns={'grp': 'group_lib_ratio'})
# # Clustering per each goal's features
# goals_features = [
# { 'goal': 'emotion', 'features': ['valence', 'dominance'] },
# { 'goal': 'moral', 'features': ['care', 'fairness'] }
# ]
# clusters_per_goals = []
# for goal_features in goals_features:
# goal = goal_features['goal']
# df_tweets_per_goal = df_tweets_selected[goal_feature['features']]
# fit_cls = AgglomerativeClustering(n_clusters=4).fit(df_tweets_selected)
# cls_labels = fit_cls.labels_
# df_tweets_per_goal['clusterIdFor' + capitalize(goal)] = cls_labels
# df_clusters_per_goal = df_tweets_per_goal.agg({
# 'grp': lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100
# }).rename(columns={'grp': 'group_lib_ratio'})
# clusters_per_goal = {
# 'goal': 'emotion',
# 'clusters': df_clusters_per_goal.to_json(orient='records')
# }
# clusters_per_goal.append(clusters_per_goal)
# # Save all results for clustering-all
# df_clusters = pd.DataFrame({
# 'clusterId': list(df_tweets_by_cluster.groups),
# 'numTweets': num_tweets_per_group,
# 'groupRatio': df_group_ratio['group_lib_ratio'],
# 'pdpValue': 0.2
# # 'tweetIds': tweet_ids_per_cluster_list
# })
# cluster_ids = cls_labels
# return Response({
# 'clusterIdsForTweets': cluster_ids,
# 'clusters': df_clusters.to_json(orient='records'),
# 'clustersPerGoal': clusters_per_goal
# })
class CalculatePartialDependence(APIView):
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
model_id = request_json['currentModelInfo']['id']
tweets = request_json['tweets']
feature_objs = request_json['features']
features = [feature['key'] for feature in feature_objs]
df_tweets = pd.DataFrame(tweets)
lb = preprocessing.LabelBinarizer()
X = df_tweets[features]
y = lb.fit_transform(df_tweets['group'].astype(str))
y = np.ravel(y)
model = load_model(model_id)
pdp_values_list = []
for feature_idx, feature in enumerate(features):
pdp_values, feature_values = partial_dependence(model, X, [feature_idx], percentiles=(0, 1)) # PD grid for the current feature
pdp_values_list.append({
'feature': feature,
'values': pd.DataFrame({ 'pdpValue': pdp_values[0], 'featureValue': feature_values[0] }).to_json(orient='index')
})
# performance
return Response({
'modelId': model_id,
'pdpValues': pdp_values_list
})
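# Hedged sketch (assumes the older, tuple-returning signature of
# sklearn.inspection.partial_dependence used above): the first return value has shape
# (n_outputs, n_grid_points) and the second is a list with one grid array per requested
# feature, so pdp_values[0] lines up element-wise with feature_values[0].
def _example_partial_dependence_frame(model, X, feature_idx=0):
    pdp_values, feature_values = partial_dependence(model, X, [feature_idx], percentiles=(0, 1))
    return pd.DataFrame({'pdpValue': pdp_values[0], 'featureValue': feature_values[0]})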
class RunClusteringAndPartialDependenceForClusters(APIView):
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
model_id = request_json['modelId']
feature_objs = request_json['features']
features = [feature['key'] for feature in feature_objs]
tweets = request_json['tweets']
groups = request_json['groups']
# tweet_objects = models.Tweet.objects.all()
# tweet_objects_json = eval(serializers.serialize('json', tweet_objects)) # serializer return string, so convert it to list with eval()
# tweets_json = [ tweet['fields'] for tweet in tweet_objects_json ]
df_tweets = pd.DataFrame(tweets)
df_tweets_selected = df_tweets[features]
# Run clustering
fit_cls = AgglomerativeClustering(n_clusters=10).fit(df_tweets_selected)
cls_labels = fit_cls.labels_
df_tweets['clusterId'] = cls_labels
df_tweets_by_cluster = df_tweets.groupby(['clusterId'])
num_tweets_per_group = df_tweets_by_cluster.size()
df_group_ratio = df_tweets_by_cluster.agg({
'group': lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100
}).rename(columns={'group': 'group_lib_ratio'}) # '1': lib
# Clustering per each goal's features
goals_features = [
{ 'goal': 'emotion', 'features': ['valence', 'dominance'] },
{ 'goal': 'moral', 'features': ['care', 'fairness', 'loyalty', 'authority', 'purity'] }
]
clusters_per_goals = []
for goal_features in goals_features:
goal = goal_features['goal']
features_in_goal = goal_features['features']
df_tweets_per_goal = df_tweets[goal_features['features'] + ['group']]
fit_cls = AgglomerativeClustering(n_clusters=4).fit(df_tweets_per_goal)
cls_labels_for_goal = fit_cls.labels_
df_tweets_per_goal['clusterId'] = cls_labels_for_goal
df_tweets_per_goal_by_cluster = df_tweets_per_goal.groupby(['clusterId'])
# Define aggregated functions
agg_dict = {}
agg_dict['group'] = lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100 # group ratio
agg_dict['clusterId'] = lambda x: x.count() / df_tweets.shape[0] # size of cluster (# of tweets)
for feature in features_in_goal: # mean feature values
agg_dict[feature] = lambda x: x.mean()
df_clusters_per_goal = df_tweets_per_goal_by_cluster.agg(agg_dict).rename(columns={
'group': 'group_lib_ratio',
'clusterId': 'countRatio'
})
clusters_per_goal = {
'goal': goal,
'clusters': df_clusters_per_goal.to_dict(orient='records')
}
clusters_per_goals.append(clusters_per_goal)
# Prepare data for partial dependence (PD)
lb = preprocessing.LabelBinarizer()
X = df_tweets[features]
X_for_groups = []
for group_idx, group in enumerate(groups):
X_group = X.loc[df_tweets['group'] == str(group_idx)]
X_for_groups.append(X_group)
y = lb.fit_transform(df_tweets['group'].astype(str))
y = np.ravel(y)
#model = load_model(model_id)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = DecisionTreeClassifier(random_state=20)
tree = model.fit(X_train, y_train)
# Calculate PD-all
pdp_values_for_all = []
for feature_idx, feature in enumerate(features):
pdp_values, feature_values = partial_dependence(model, X, [feature_idx], percentiles=(0, 1)) # 0 is the selected feature index
pdp_values_json = pd.DataFrame({ 'pdpValue': pdp_values[0], 'featureValue': feature_values[0] }).to_dict(orient='records')
pdp_values_for_all.append({ 'feature': feature, 'values': pdp_values_json })
# Calculate PD-per-group
pdp_values_for_groups = []
for group_idx, group in enumerate(groups):
pdp_values_for_features = []
for feature_idx, feature in enumerate(features):
pdp_values, feature_values = partial_dependence(model, X_for_groups[group_idx], [feature_idx], percentiles=(0, 1))
pdp_values_for_group = pdp_values[0]
# Use 1 - probability when this group is not the positive class (the predicted probability is for group 1, the blue team)
if group_idx == 0:
pdp_values_for_group = [ 1- pdp_value for pdp_value in pdp_values_for_group ]
pdp_values_json = pd.DataFrame({ 'pdpValue': pdp_values_for_group, 'featureValue': feature_values[0] }) # api: pandas.DataFrame
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Date: 2020/3/23 19:12
Desc: Eastmoney (东方财富网) - Data Center - Shanghai/Shenzhen-Hong Kong Stock Connect shareholdings
http://data.eastmoney.com/hsgtcg/
http://finance.eastmoney.com/news/1622,20161118685370149.html
"""
import requests
import json
import demjson
import pandas as pd
from bs4 import BeautifulSoup
def stock_em_hsgt_north_net_flow_in(indicator="沪股通"): # indicator: "沪股通" (Shanghai Connect), "深股通" (Shenzhen Connect), or "北上" (total northbound)
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f52",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
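# Hedged sketch (illustrative payload, not real API output): the endpoint returns JSONP, i.e.
# the JSON wrapped in the jQuery callback named by the "cb" parameter, so data_text.find("{")
# skips the "jQuery...(" prefix and the [:-2] slice drops the trailing ");".
def _example_strip_jsonp():
    data_text = 'jQuery18305732402561585701_1584961751919({"data": {"s2n": ["2020-03-20,1.0"]}});'
    data_json = json.loads(data_text[data_text.find("{"):-2])
    return pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)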
def stock_em_hsgt_north_cash(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f53",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_north_acc_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f54",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]) # api: pandas.DataFrame
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import torch.nn.functional as F
import pandas as pd
import time
from tqdm import tqdm
import apex.amp as amp
import albumentations as A
import os
import sys # used below to copy the running script into save_dir
import cv2
from dataset.dataset_attention_v2 import PANDA_Dataset_Attention
from tools.utils import *
torch.backends.cudnn.benchmark = True
from sklearn.metrics import confusion_matrix,roc_auc_score
from tools.mixup import *
import matplotlib.pyplot as plt
import shutil
from radam import RAdam
import json
def set_gpu_environ():
"""Sets CUDA_VISIBLE_DEVICES to those under minimal memory load.
Meant to be used in notebooks only.
"""
import os
import subprocess
query = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.used', '--format=csv']).decode().split('\n')[1:-1]
utilization = [int(x.replace(" MiB", "")) for x in query]
free = [i for i in range(len(utilization)) if utilization[i] == min(utilization)]
set_visible = ",".join([str(i) for i in free])
os.environ["CUDA_VISIBLE_DEVICES"] = set_visible
def get_transforms_train_patch():
transforms=A.Compose(
[
A.OneOf([
A.Flip(p=0.5),
A.RandomRotate90(p=0.5),
],p=0.5
),
A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=10, border_mode=cv2.BORDER_REFLECT,p=0.3),
A.RandomBrightnessContrast(p=0.3),
A.OneOf([
A.GaussNoise(p=0.5),
A.GaussianBlur(p=0.5),
],p=0.3
),
A.CoarseDropout(max_holes=8,max_height=64,max_width=64,p=0.3,fill_value=(255,255,255))
]
)
return transforms
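# Hedged usage sketch (not part of the original script): an albumentations Compose is called
# with keyword arguments and returns a dict keyed by target name.
def _example_apply_train_transforms():
    img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # dummy RGB patch
    augmented = get_transforms_train_patch()(image=img)
    return augmented["image"]  # augmented HxWxC uint8 array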
def log_cosh(pred,target,weight=None):
if weight is not None:
#print((torch.log(torch.cosh(pred - target))*weight))
return 2 * (torch.log(torch.cosh(pred - target))*weight).mean()
else:
return 2*torch.log(torch.cosh(pred-target)).mean()
def mse(pred,target,weight=None):
if weight is not None:
#print((((target-pred)**2)*weight))
return (((target-pred)**2)*weight).mean()
else:
return F.mse_loss(pred,target)
def huber(pred,target,weight=None):
if weight is not None:
return 2*(weight*F.smooth_l1_loss(pred,target,reduction='none')).mean()
else:
return 2*F.smooth_l1_loss(pred,target)
def smooth_cls(logits,target,pseudo_target,wt=0.7):
b=logits.size(0)
smooth_target=F.one_hot(target.long(),num_classes=6).float()
#print(smooth_target)
#print(pseudo_target)
smooth_target=wt*smooth_target+pseudo_target*(1-wt)
#print(smooth_target)
#print("*"*50)
log_prob=-F.log_softmax(logits,dim=1)
return (log_prob*smooth_target).sum()/b
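# Hedged usage sketch: the regression losses above take an optional per-sample weight tensor;
# without weights they reduce to their standard mean-reduced forms.
def _example_losses():
    pred = torch.tensor([2.0, 3.0, 4.5])
    target = torch.tensor([2.5, 3.0, 4.0])
    weight = torch.tensor([1.0, 0.5, 2.0])
    return {
        'log_cosh': log_cosh(pred, target).item(),
        'log_cosh_weighted': log_cosh(pred, target, weight).item(),
        'huber_weighted': huber(pred, target, weight).item(),
        'mse': mse(pred, target).item(),
    }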
def get_weight(module):
for name,p in module.named_parameters():
if "bias" not in name:
yield p
def get_bias(module):
for name,p in module.named_parameters():
if "bias" in name:
yield p
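# Hedged sketch (not part of the original trainer): get_weight/get_bias above split a module's
# parameters so weight decay is applied to weights only, not biases.
def _example_param_groups(module, lr=1e-3, weight_decay=1e-4):
    return torch.optim.Adam([
        {'params': get_weight(module), 'weight_decay': weight_decay},
        {'params': get_bias(module), 'weight_decay': 0.0},
    ], lr=lr)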
class Trainer():
def __init__(self, model, options):
self.model_name = options['model_name']
self.save_dir = options['save_dir']
os.makedirs(self.save_dir, exist_ok=True)
shutil.copy(sys.argv[0],self.save_dir+sys.argv[0].split("/")[-1])
df=pd.read_csv(options['train_csv_path']) # api: pandas.read_csv
import os
from math import ceil
from tqdm import tqdm
from utils_ import to
import numpy as np
import torch
import pandas as pd
from pprint import pprint
from collections import defaultdict
from scipy.special import logsumexp
class FewShotTrainer:
def __init__(self, exp, train_writer=None, test_writer=None, generate_every=10, test_every=10):
self.model = exp.model
self.optimizer = exp.optimizer
self.scheduler = exp.scheduler
self.exp = exp
self.full = exp.full_loss
self.step = 0
self.epoch = 0
self.test_every = test_every
self.train_writer = train_writer
self.val_writer = test_writer
self.generate_every = generate_every
self.device = exp.gpu[0]
print(self.model)
def set_lr(self, lr):
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
@property
def module(self):
if isinstance(self.model, torch.nn.DataParallel):
return self.model.module
else:
return self.model
def l2_penalty(self):
loss = 0.
for p in self.model.parameters():
loss += torch.norm(p)
return loss
def save_model(self, path):
self.module.cpu()
path_dir, path_file = os.path.split(path)
if len(path_dir) != 0 and not os.path.exists(path_dir):
os.makedirs(path_dir)
torch.save(self.module.state_dict(), path)
self.module.to(self.device)
def run(self, verbose=True):
if self.exp.mode == 'train':
print(f'total epochs: {self.exp.epochs}')
for i in range(self.exp.epochs):
self.train_epoch(self.exp.train_loader, verbose=verbose,
checkpoint_path=self.exp.checkpoint_path,
test_loader=self.exp.test_loader)
if i % self.test_every == 0:
train_loss = self.test_epoch(self.exp.train_loader, verbose=verbose, iw=None)
test_loss = self.test_epoch(self.exp.val_loader, verbose=verbose,)
self.val_writer.add_scalar('ELBO', test_loss, self.step)
print(f'train ELBO: {train_loss:.4f}, val ELBO: {test_loss:.4f}')
elif self.exp.mode == 'test':
params = []
for test_loader, p in zip(self.exp.loaders, self.exp.params):
metrics = self.test_epoch(test_loader, iw=self.exp.importance_num,
verbose=verbose)
if not isinstance(metrics, dict):
metrics = {'ELBO': metrics}
params.append(p)
for n, metric in metrics.items():
params[-1][n] = metric
pprint(p)
pd.DataFrame.from_records(params) # api: pandas.DataFrame.from_records
"""
This module contains the definitions of both the ``IncrTriangle`` and
``CumTriangle`` classes. Users should avoid instantiating ``IncrTriangle``
or ``CumTriangle`` instances directly; rather the dataset and triangle
arguments should be passed to ``totri``, which will return either an
instance of ``CumTriangle`` or ``IncrTriangle``, depending on the argument
specified for ``type_``.
"""
import itertools
import numpy as np
import pandas as pd
from scipy import stats
from .chainladder import BaseChainLadder
from .chainladder.bootstrap import BootstrapChainLadder
class _BaseTriangle(pd.DataFrame):
def __init__(self, data, origin=None, dev=None, value=None):
"""
Transforms ``data`` into a triangle instance.
Parameters
----------
data: pd.DataFrame
The dataset to be transformed into a ``_BaseTriangle`` instance.
``data`` must be tabular loss data with at minimum columns
representing the origin/accident year, the development
period and the actual loss amount, given by ``origin``, ``dev``
and ``value`` arguments.
origin: str
The fieldname in ``data`` representing origin year.
dev: str
The fieldname in ``data`` representing development period.
value: str
The fieldname in ``data`` representing loss amounts.
"""
if not isinstance(data, pd.DataFrame):
raise TypeError("`data` must be an instance of pd.DataFrame.")
origin_ = "origin" if origin is None else origin
if origin_ not in data.columns:
raise KeyError("`{}` not present in data.".format(origin_))
dev_ = "dev" if dev is None else dev
if dev_ not in data.columns:
raise KeyError("`{}` not present in data.".format(dev_))
value_ = "value" if value is None else value
if value_ not in data.columns:
raise KeyError("`{}` not present in data.".format(value_))
data2 = data.copy(deep=True)
data2 = data2[[origin_, dev_, value_]]
data2 = data2.groupby([origin_, dev_], as_index=False).sum()
data2 = data2.sort_values(by=[origin_, dev_])
tri = data2.pivot(index=origin_, columns=dev_).rename_axis(None)
tri.columns = tri.columns.droplevel(0)
# Force all triangle cells to be of type float.
tri = tri.astype({kk: float for kk in tri.columns})
tri.columns.name = None
super().__init__(tri)
self.origin = origin_
self.value = value_
self.dev = dev_
# Properties.
self._latest_by_origin = None
self._latest_by_devp = None
self._nbr_cells = None
self._maturity = None
self._triind = None
self._devp = None
self._latest = None
self._origins = None
self._rlvi = None
self._clvi = None
self._dof = None
@property
def nbr_cells(self):
"""
Return the number of non-NaN cells.
Returns
-------
int
"""
if self._nbr_cells is None:
self._nbr_cells = self.count().sum()
return(self._nbr_cells)
@property
def triind(self):
"""
Table indicating forecast cells with 1, actual data with 0.
Returns
-------
pd.DataFrame
"""
if self._triind is None:
self._triind = self.applymap(lambda x: 1 if np.isnan(x) else 0)
return(self._triind)
@property
def rlvi(self):
"""
Determine the last valid index by origin.
Returns
-------
pd.DataFrame
"""
if self._rlvi is None:
self._rlvi = pd.DataFrame({
"dev":self.apply(
lambda x: x.last_valid_index(), axis=1).values
},index=self.index)
self._rlvi["col_offset"] = \
self._rlvi["dev"].map(lambda x: self.columns.get_loc(x))
return(self._rlvi)
@property
def clvi(self):
"""
Determine the last valid index by development period.
Returns
-------
pd.DataFrame
"""
if self._clvi is None:
self._clvi = pd.DataFrame({
"origin":self.apply(lambda x: x.last_valid_index(), axis=0).values
},index=self.columns)
self._clvi["row_offset"] = \
self._clvi["origin"].map(lambda x: self.index.get_loc(x))
return(self._clvi)
@property
def latest(self):
"""
Return the values on the triangle's latest diagonal. Loss amounts
are given, along with the associated origin year and development
period. The latest loss amount by origin year alone can be obtained
by calling ``self.latest_by_origin``, or by development period by
calling by ``self.latest_by_devp``.
Returns
-------
pd.DataFrame
"""
if self._latest is None:
lindx = self.apply(lambda dev_: dev_.last_valid_index(), axis=1)
self._latest = pd.DataFrame(
{"latest":self.lookup(lindx.index, lindx.values),
"origin":lindx.index, "dev":lindx.values})
return(self._latest[["origin", "dev", "latest"]].sort_index())
@property
def latest_by_origin(self):
"""
Return the latest loss amounts by origin year.
Returns
-------
pd.Series
"""
if self._latest_by_origin is None:
self._latest_by_origin = pd.Series(
data=self.latest["latest"].values, index=self.latest["origin"].values,
name="latest_by_origin")
return(self._latest_by_origin.sort_index())
@property
def latest_by_devp(self):
"""
Return the latest loss amounts by development period.
Returns
-------
pd.Series
"""
if self._latest_by_devp is None:
self._latest_by_devp = pd.Series(
data=self.latest["latest"].values, index=self.latest["dev"].values,
name="latest_by_devp")
return(self._latest_by_devp.sort_index())
@property
def devp(self):
"""
Return triangle's development periods.
Returns
-------
pd.Series
"""
if self._devp is None:
self._devp = pd.Series(self.columns,name="devp")
return(self._devp.sort_index())
@property
def origins(self):
"""
Return triangle's origin periods.
Returns
-------
pd.Series
"""
if self._origins is None:
self._origins = pd.Series(self.index, name="origin")
return(self._origins.sort_index())
@property
def maturity(self):
"""
Return the maturity for each origin period.
Returns
-------
ps.Series
"""
if self._maturity is None:
dfind, matlist = (1 - self.triind), list()
for i in range(dfind.index.size):
lossyear = dfind.index[i]
maxindex = dfind.loc[lossyear].to_numpy().nonzero()[0].max()
itermatur = dfind.columns[maxindex]
matlist.append(itermatur)
self._maturity = pd.Series(data=matlist, index=self.index, name="maturity")
return(self._maturity.sort_index())
def to_tbl(self, drop_nas=True):
"""
Transform triangle instance into a tabular representation.
Parameters
----------
drop_nas: bool
Should records with NA values be dropped? Default value is True.
Returns
-------
pd.DataFrame
"""
tri = self.reset_index(drop=False).rename({"index":"origin"}, axis=1)
df = pd.melt(tri, id_vars=[self.origin], var_name=self.dev, value_name=self.value) # api: pandas.melt
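# Hedged standalone sketch (hypothetical mini-frame): pd.melt as used in to_tbl above turns a
# wide origin-by-development triangle back into tabular (origin, dev, value) records.
def _example_melt_triangle():
    import pandas as pd
    tri = pd.DataFrame({'origin': [2018, 2019], 1: [100.0, 110.0], 2: [150.0, None]})
    return pd.melt(tri, id_vars=['origin'], var_name='dev', value_name='value')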
import pandas as pd
def process_training(data):
c=0
for i in range(len(data)):
if int(data['ID'][i])==1:
c+=1
data['ID'][i]=c
else:
data['ID'][i]=c
return data
def Unique_sentences(tagged_sentence):
mat = [tuple(t) for t in tagged_sentence]
print("Number of phrases in Original Data ",len(mat))
matset = set(mat)
print("Number of Unique phrase ",len(matset))
unique_tagged_sentence=[tuple(t) for t in matset]
return unique_tagged_sentence
def Preparing_tagged_data(df):
tagged_sentence_string=[]
tagged_sentence=[]
c=1
temp=[]
for i in range(len(df)):
if df['ID'][i]==c:
temp.append((df['FORM'][i],df['XPOSTAG'][i]))
else:
tagged_sentence.append(temp)
temp=[]
temp.append((df['FORM'][i],df['XPOSTAG'][i]))
c+=1
tagged_sentence.append(temp)
return tagged_sentence
def creating_uniqe_df(tagged_sentence_uniq):
c=1
tagged_data=[]
for sent in tagged_sentence_uniq:
tagged_data_temp=[]
for l in sent:
l=list(l)
l.insert(0,c)
l=tuple(l)
tagged_data_temp.append(l)
tagged_data.append(tagged_data_temp)
c+=1
return tagged_data
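# Hedged sketch (hypothetical mini-DataFrame with running phrase IDs, as produced by
# process_training): Preparing_tagged_data above returns one list of (FORM, XPOSTAG)
# tuples per phrase.
def _example_preparing_tagged_data():
    mini_df = pd.DataFrame({
        'ID': [1, 1, 2],
        'FORM': ['lugal', 'e', 'kur'],
        'XPOSTAG': ['N', 'V', 'N'],
    })
    return Preparing_tagged_data(mini_df)  # -> [[('lugal', 'N'), ('e', 'V')], [('kur', 'N')]]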
df=pd.read_csv('Dataset/ETCSL_RAW_NER_POS.csv')
df=process_training(df)
tagged_sentence=Preparing_tagged_data(df)
print("Training data processed \n")
tagged_sentence_uniq=Unique_sentences(tagged_sentence)
tagged_data=creating_uniqe_df(tagged_sentence_uniq)
li=[tup for sent in tagged_data for tup in sent]
df=pd.DataFrame(li)
df=df.rename(columns={0:'ID',1:'FORM',2:'XPOSTAG'})
df_old=pd.read_csv('Dataset/Augmented_RAW_NER_POS.csv')
# total phrases in df_old are 25478
n=25479
c=1
for i in range(len(df_old)):
if(df_old['ID'][i]==c):
df_old['ID'][i]=n
else:
c+=1
n+=1
df_old['ID'][i]=n
df_new=pd.concat([df, df_old], ignore_index=True, sort=False)
l1=pd.read_csv('Named Entities Sumerian ORACC.csv',header=None)
l2=pd.read_csv('Part of Speech (POS) tags Sumerian ORACC.csv',header=None) # api: pandas.read_csv
#!/usr/bin/env python
"""
Copyright 2021, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from app import create_app
import pandas as pd
import json
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
self.client = self.app.test_client()
self.headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
self.df_in = pd.DataFrame(
data=[[0.3521, 55.1824, 0.8121, 2.3256]],
columns=['CNC', 'GR', 'HRD', 'ZDEN']
)
self.df_in_misordered = pd.DataFrame(
data=[[2.3256, 0.8121, 55.1824, 0.3521]],
columns=['ZDEN', 'HRD', 'GR', 'CNC']
)
self.df_in_extra_cols = pd.DataFrame(
data=[[8.5781, 0.3521, 55.1824, 0.8121, 0.78099, 6.8291, 2.3256]],
columns=['CAL', 'CNC', 'GR', 'HRD', 'HRM', 'PE', 'ZDEN']
)
self.df_in_bad_col_names = pd.DataFrame(
data=[[0.3521, 55.1824, 0.8121, 2.3256]],
columns=['Phi', 'Gamma', 'RD', 'RHOB']
)
self.df_out = pd.DataFrame(
data=[[102.225407, 196.408402]],
columns=['pred_DTC', 'pred_DTS']
)
def tearDown(self):
self.app_context.pop()
def test_get_predictions(self):
j_df = json.dumps(self.df_in.to_json(orient='split'))
response = self.client.post('api/get_predictions', data=j_df, headers=self.headers)
self.assertEqual(response.status_code, 200)
def test_get_predictions_swapped_input_cols(self):
j_df = json.dumps(self.df_in_misordered.to_json(orient='split'))
response = self.client.post('api/get_predictions', data=j_df, headers=self.headers)
df_pred = pd.read_json(response.data, orient='split')
self.assertAlmostEqual(self.df_out.iloc[0, 0], round(df_pred.iloc[0, 0], 6))
self.assertAlmostEqual(self.df_out.iloc[0, 1], round(df_pred.iloc[0, 1], 6))
def test_get_predictions_extra_cols(self):
j_df = json.dumps(self.df_in_extra_cols.to_json(orient='split'))
response = self.client.post('api/get_predictions', data=j_df, headers=self.headers)
df_pred = pd.read_json(response.data, orient='split') # api: pandas.read_json
import multiprocessing
from Bio import SeqIO
import numpy as np
from functools import partial
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing
import seaborn as sns
from scipy.stats import mannwhitneyu as mw
sorts=["$P_{PK37}$","$P_{Urea}$","$P_{Guan}$","$P_{PK55}$","$P_{TL55}$","$P_{TL75}$","$G_{I^q}$","$G_{SH}$",r"$\beta_{I^q}$",r"$\beta_{SH}$",'$Y_{I^q}$','$Y_{SH}$']
best_data=pd.read_pickle('./seq_and_assay_best_sequences.pkl')
original_data=pd.read_pickle('seq_to_assay_train_1,8,10_seq_and_assay_yield_forest_1_0.pkl') # api: pandas.read_pickle
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
# Load data from song_data files into the song and artist tables
def process_song_file(cur, conn, filepath):
all_files=[]
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root, '*.json'))
for f in files:
all_files.append(os.path.abspath(f))
print('{} files are contained in "{}"'.format(len(all_files), filepath))
for i in range(len(all_files)):
df=pd.read_json(all_files[i], lines=True) # api: pandas.read_json
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
class Data:
init_inputs: np.array
init_markers: pd.DataFrame()
init_X_train = pd.DataFrame() # api: pandas.DataFrame
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp']) # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#RIL Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
tf = 201
#Parameters for residue decomposition (Source: De Rosa et al., 2017)
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
#df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S1')
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
t = range(0,tf,1)
#c_loss_S1 = df1['C_loss'].values
c_firewood_energy_S2 = df2['Firewood_other_energy_use'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S2
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
tf = 201
t = np.arange(tf)
def decomp_S2(t,remainAGB_S2):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2
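# Note on the decomposition model above (De Rosa et al., 2017): the remaining-carbon fraction at
# time t is 1 - (1 - exp(-a*t))**b, so decomp_S2(0, x) returns x (nothing decomposed yet) and the
# remaining stock decays towards 0 as t grows, with a and b set in Step (1).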
#set zero matrix
output_decomp_S2 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2[i:,i] = decomp_S2(t[:len(t)-i],remain_part_S2)
print(output_decomp_S2[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2[:,i] = np.diff(output_decomp_S2[:,i])
i = i + 1
print(subs_matrix_S2[:,:4])
print(len(subs_matrix_S2))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2 = subs_matrix_S2.clip(max=0)
print(subs_matrix_S2[:,:4])
#make the results as absolute values
subs_matrix_S2 = abs(subs_matrix_S2)
print(subs_matrix_S2[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2)
subs_matrix_S2 = np.vstack((zero_matrix_S2, subs_matrix_S2))
print(subs_matrix_S2[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2 = (tf,1)
decomp_tot_S2 = np.zeros(matrix_tot_S2)
i = 0
while i < tf:
decomp_tot_S2[:,0] = decomp_tot_S2[:,0] + subs_matrix_S2[:,i]
i = i + 1
print(decomp_tot_S2[:,0])
#S2_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_S2')
tf = 201
t = np.arange(tf)
def decomp_S2_C(t,remainAGB_S2_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_C
#set zero matrix
output_decomp_S2_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_C[i:,i] = decomp_S2_C(t[:len(t)-i],remain_part_S2_C)
print(output_decomp_S2_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2_C[:,i] = np.diff(output_decomp_S2_C[:,i])
i = i + 1
print(subs_matrix_S2_C[:,:4])
print(len(subs_matrix_S2_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_C = subs_matrix_S2_C.clip(max=0)
print(subs_matrix_S2_C[:,:4])
#make the results as absolute values
subs_matrix_S2_C = abs(subs_matrix_S2_C)
print(subs_matrix_S2_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_C)
subs_matrix_S2_C = np.vstack((zero_matrix_S2_C, subs_matrix_S2_C))
print(subs_matrix_S2_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_C = (tf,1)
decomp_tot_S2_C = np.zeros(matrix_tot_S2_C)
i = 0
while i < tf:
decomp_tot_S2_C[:,0] = decomp_tot_S2_C[:,0] + subs_matrix_S2_C[:,i]
i = i + 1
print(decomp_tot_S2_C[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_E(t,remainAGB_E):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E
#set zero matrix
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E[i:,i] = decomp_E(t[:len(t)-i],remain_part_E)
print(output_decomp_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
i = i + 1
print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)
print(subs_matrix_E[:,:4])
#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E)
subs_matrix_E = np.vstack((zero_matrix_E, subs_matrix_E))
print(subs_matrix_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)
i = 0
while i < tf:
decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
i = i + 1
print(decomp_tot_E[:,0])
#E_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_E')
tf = 201
t = np.arange(tf)
def decomp_E_C(t,remainAGB_E_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E_C
#set zero matrix
output_decomp_E_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E_C[i:,i] = decomp_E_C(t[:len(t)-i],remain_part_E_C)
print(output_decomp_E_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E_C[:,i] = np.diff(output_decomp_E_C[:,i])
i = i + 1
print(subs_matrix_E_C[:,:4])
print(len(subs_matrix_E_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_C = subs_matrix_E_C.clip(max=0)
print(subs_matrix_E_C[:,:4])
#make the results as absolute values
subs_matrix_E_C = abs(subs_matrix_E_C)
print(subs_matrix_E_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_C)
subs_matrix_E_C = np.vstack((zero_matrix_E_C, subs_matrix_E_C))
print(subs_matrix_E_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_C = (tf,1)
decomp_tot_E_C = np.zeros(matrix_tot_E_C)
i = 0
while i < tf:
decomp_tot_E_C[:,0] = decomp_tot_E_C[:,0] + subs_matrix_E_C[:,i]
i = i + 1
print(decomp_tot_E_C[:,0])
#plotting
t = np.arange(0,tf)
#plt.plot(t,decomp_tot_S1,label='S1')
plt.plot(t,decomp_tot_S2,label='S2')
plt.plot(t,decomp_tot_E,label='E')
plt.plot(t,decomp_tot_S2_C,label='S2_C')
plt.plot(t,decomp_tot_E_C,label='E_C')
plt.xlim(0,200)
plt.legend(loc='best', frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
#product lifetime
#building materials
B = 35
TestDSM2 = DynamicStockModel(t = df2['Year'].values, i = df2['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME = DynamicStockModel(t = dfE['Year'].values, i = dfE['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr2, ExitFlag2 = TestDSM2.dimension_check()
CheckStrE, ExitFlagE = TestDSME.dimension_check()
Stock_by_cohort2, ExitFlag2 = TestDSM2.compute_s_c_inflow_driven()
Stock_by_cohortE, ExitFlagE = TestDSME.compute_s_c_inflow_driven()
S2, ExitFlag2 = TestDSM2.compute_stock_total()
SE, ExitFlagE = TestDSME.compute_stock_total()
O_C2, ExitFlag2 = TestDSM2.compute_o_c_from_s_c()
O_CE, ExitFlagE = TestDSME.compute_o_c_from_s_c()
O2, ExitFlag2 = TestDSM2.compute_outflow_total()
OE, ExitFlagE = TestDSME.compute_outflow_total()
DS2, ExitFlag2 = TestDSM2.compute_stock_change()
DSE, ExitFlagE = TestDSME.compute_stock_change()
Bal2, ExitFlag2 = TestDSM2.check_stock_balance()
BalE, ExitFlagE = TestDSME.check_stock_balance()
#print output flow
print(TestDSM2.o)
print(TestDSME.o)
plt.xlim(0,100)
plt.show()
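#editor's sketch (not in the original script): the plt.show() above displays whatever figure is
#currently open; plotting the computed outflows explicitly could look like this.
plt.plot(TestDSM2.t, TestDSM2.o, label='S2 outflow')
plt.plot(TestDSME.t, TestDSME.o, label='E outflow')
plt.xlim(0,100)
plt.legend(loc='best', frameon=False)
plt.show()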
#%%
#Step (5): Biomass growth
# RIL_Scenario biomass growth, following RIL disturbance
#recovery time, following the one by Alice-guier
#H = [M, E, C_M, C_E]
#LD0 = [M, E, C_M, C_E]
H = [5.78, 7.71, 5.78, 7.71]
LD0 = [53.46-5.78, 53.46-7.71, 29.29-5.78, 29.29-7.71]
s = 1.106
#RIL
RT = ((H[0] + LD0[0])*100/initAGB)**s
print(RT)
#growth per year (Mg C/ha.yr)
gpy = (H[0] + LD0[0])/RT
print(gpy)
tf_RIL_S1 = 36
A1 = range(0,tf_RIL_S1,1)
#calculate the carbon regrowth of the disturbed natural forest over time following RIL
def Y_RIL_S1(A1):
return 44/12*1000*gpy*A1
seq_RIL = np.array([Y_RIL_S1(A1i) for A1i in A1])
print(len(seq_RIL))
print(seq_RIL)
##repeat the 36-year regrowth sequence for successive 35-year logging cycles of new AGB following logging (RIL); 6 repetitions cover the 200-year horizon
counter_35y = range(0,6,1)
y_RIL = []
for i in counter_35y:
y_RIL.append(seq_RIL)
flat_list_RIL = []
for sublist in y_RIL:
for item in sublist:
flat_list_RIL.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL = flat_list_RIL[:len(flat_list_RIL)-15]
print(flat_list_RIL)
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between consecutive elements in list 'flat_list_RIL' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL = [p - q for q, p in zip(flat_list_RIL, flat_list_RIL[1:])]
#since there is no sequestration across the replanting year (e.g., from year 35 to year 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL = [0 if i < 0 else i for i in flat_list_RIL]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL = [ -x for x in flat_list_RIL]
print(flat_list_RIL)
#RIL_C
RT_C = ((H[2] + LD0[2])*100/initAGB)**s
print(RT_C)
#growth per year (Mg C/ha.yr)
gpy_C = (H[2] + LD0[2])/RT_C
print(gpy_C)
tf_RIL_C = 36
A1 = range(0,tf_RIL_C,1)
#calculate the carbon regrowth of the disturbed natural forest over time following RIL
def Y_RIL_C(A1):
return 44/12*1000*gpy_C*A1
seq_RIL_C = np.array([Y_RIL_C(A1i) for A1i in A1])
print(len(seq_RIL_C))
print(seq_RIL_C)
##repeat the 36-year regrowth sequence for successive 35-year logging cycles of new AGB following logging (RIL_C); 6 repetitions cover the 200-year horizon
counter_35y = range(0,6,1)
y_RIL_C = []
for i in counter_35y:
y_RIL_C.append(seq_RIL_C)
flat_list_RIL_C = []
for sublist_C in y_RIL_C:
for item in sublist_C:
flat_list_RIL_C.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL_C = flat_list_RIL_C[:len(flat_list_RIL_C)-15]
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL_C, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between consecutive elements in list 'flat_list_RIL_C' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL_C = [p - q for q, p in zip(flat_list_RIL_C, flat_list_RIL_C[1:])]
#since there is no sequestration across the replanting year (e.g., from year 35 to year 36), we have to replace negative numbers in 'flat_list_RIL_C' with 0 values
flat_list_RIL_C = [0 if i < 0 else i for i in flat_list_RIL_C]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL_C.insert(0,var)
#make 'flat_list_RIL_C' elements negative numbers to denote sequestration
flat_list_RIL_C = [ -x for x in flat_list_RIL_C]
print(flat_list_RIL_C)
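#editor's sketch (not in the original script): the RIL and RIL_C blocks above repeat the same recipe;
#under that assumption, a single helper could generate both yearly flux lists.
def build_regrowth_flux(H_i, LD0_i, initAGB_i, s_i, cycle_len=36, horizon=201):
    RT_i = ((H_i + LD0_i)*100/initAGB_i)**s_i #recovery time
    gpy_i = (H_i + LD0_i)/RT_i #growth per year (Mg C/ha.yr)
    one_cycle = [44/12*1000*gpy_i*a for a in range(cycle_len)] #regrowth within one cycle
    n_cycles = -(-horizon // cycle_len) #ceiling division: enough cycles to cover the horizon
    series = (one_cycle*n_cycles)[:horizon] #repeat the cycle and trim to the horizon
    flux = [p - q for q, p in zip(series, series[1:])] #year-on-year differences
    flux = [0 if f < 0 else f for f in flux] #no uptake across replanting years
    flux.insert(0, 0) #no sequestration in year 0
    return [-f for f in flux] #negative values denote sequestration
#e.g. build_regrowth_flux(H[0], LD0[0], initAGB, s) should reproduce flat_list_RIL above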
#%%
#Step (5_1): Biomass C sequestration of the remaining unharvested block
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 22 08:54:14 2022
@author: nurbuketeker
"""
from datasets import load_dataset
import pandas as pd
def getCLINCData():
dataset_small_train = load_dataset("clinc_oos", "small",split="train")
dataset_small_test = load_dataset("clinc_oos", "small",split="test")
df_train = pd.DataFrame(dataset_small_train)
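    #editor's completion sketch: the dump truncates getCLINCData after df_train; the lines below
    #are an assumption that simply mirrors the train split for the test split.
    df_test = pd.DataFrame(dataset_small_test)
    return df_train, df_test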
import warnings
warnings.filterwarnings("ignore")
import os
import json
import argparse
import time
import datetime
import json
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from scipy.stats import spearmanr, mannwhitneyu
import scipy.cluster.hierarchy as shc
from skbio.stats.composition import clr
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from scipy.cluster.hierarchy import cut_tree
from src.models.MiMeNet import MiMeNet, tune_MiMeNet
###################################################
# Read in command line arguments
###################################################
parser = argparse.ArgumentParser(description='Perform MiMeNet')
parser.add_argument('-micro', '--micro', help='Comma delimited file representing matrix of samples by microbial features', required=True)
parser.add_argument('-metab', '--metab', help= 'Comma delimited file representing matrix of samples by metabolomic features', required=True)
parser.add_argument('-external_micro', '--external_micro', help='Comma delimited file representing matrix of samples by microbial features')
parser.add_argument('-external_metab', '--external_metab', help= 'Comma delimited file representing matrix of samples by metabolomic features')
parser.add_argument('-annotation', '--annotation', help='Comma delimited file annotating subset of metabolite features')
parser.add_argument('-labels', '--labels', help="Comma delimited file for sample labels to associate clusters with")
parser.add_argument('-output', '--output', help='Output directory', required=True)
parser.add_argument('-net_params', '--net_params', help='JSON file of network hyperparameters', default=None)
parser.add_argument('-background', '--background', help='Directory with previously generated background', default=None)
parser.add_argument('-num_background', '--num_background', help='Number of background CV Iterations', default=100, type=int)
parser.add_argument('-micro_norm', '--micro_norm', help='Microbiome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-metab_norm', '--metab_norm', help='Metabolome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-threshold', '--threshold', help='Define significant correlation threshold', default=None)
parser.add_argument('-num_run_cv', '--num_run_cv', help='Number of iterations for cross-validation', default=1, type=int)
parser.add_argument('-num_cv', '--num_cv', help='Number of cross-validated folds', default=10, type=int)
parser.add_argument('-num_run', '--num_run', help='Number of iterations for training full model', type=int, default=10)
args = parser.parse_args()
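#editor's note: an illustrative invocation; the script name and file paths are placeholders,
#not taken from the original repository.
#  python run_mimenet.py -micro data/microbiome.csv -metab data/metabolome.csv \
#      -output demo_run -num_run_cv 5 -num_cv 10 -micro_norm CLR -metab_norm CLR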
micro = args.micro
metab = args.metab
external_micro = args.external_micro
external_metab = args.external_metab
annotation = args.annotation
out = args.output
net_params = args.net_params
threshold = args.threshold
micro_norm = args.micro_norm
metab_norm = args.metab_norm
num_run_cv = args.num_run_cv
num_cv = args.num_cv
num_run = args.num_run
background_dir = args.background
labels = args.labels
num_bg = args.num_background
tuned = False
gen_background = True
if background_dir != None:
gen_background = False
start_time = time.time()
if external_metab != None and external_micro == None:
print("Warning: External metabolites found with no external microbiome...ignoring external set!")
external_metab = None
if net_params != None:
print("Loading network parameters...")
try:
with open(net_params, "r") as infile:
params = json.load(infile)
num_layer = params["num_layer"]
layer_nodes = params["layer_nodes"]
l1 = params["l1"]
l2 = params["l2"]
dropout = params["dropout"]
learning_rate = params["lr"]
tuned = True
print("Loaded network parameters...")
except:
print("Warning: Could not load network parameter file!")
###################################################
# Load Data
###################################################
metab_df = pd.read_csv(metab, index_col=0)
micro_df = pd.read_csv(micro, index_col=0)
if external_metab != None:
external_metab_df = pd.read_csv(external_metab, index_col=0)
if external_micro != None:
external_micro_df = pd.read_csv(external_micro, index_col=0)
###################################################
# Filter only paired samples
###################################################
samples = np.intersect1d(metab_df.columns.values, micro_df.columns.values)
num_samples = len(samples)
metab_df = metab_df[samples]
micro_df = micro_df[samples]
for c in micro_df.columns:
micro_df[c] = pd.to_numeric(micro_df[c])
for c in metab_df.columns:
metab_df[c] = pd.to_numeric(metab_df[c])
if external_metab != None and external_micro != None:
external_samples = np.intersect1d(external_metab_df.columns.values, external_micro_df.columns.values)
external_metab_df = external_metab_df[external_samples]
external_micro_df = external_micro_df[external_samples]
for c in external_micro_df.columns:
external_micro_df[c] = pd.to_numeric(external_micro_df[c])
for c in external_metab_df.columns:
external_metab_df[c] = pd.to_numeric(external_metab_df[c])
num_external_samples = len(external_samples)
elif external_micro != None:
external_samples = external_micro_df.columns.values
external_micro_df = external_micro_df[external_samples]
for c in external_micro_df.columns:
external_micro_df[c] = pd.to_numeric(external_micro_df[c])
num_external_samples = len(external_samples)
###################################################
# Create output directory
###################################################
dirName = 'results'
try:
os.mkdir(dirName)
print("Directory " , dirName , " Created ")
except FileExistsError:
print("Directory " , dirName , " already exists")
dirName = 'results/' + out
try:
os.mkdir(dirName)
print("Directory " , dirName , " Created ")
except FileExistsError:
print("Directory " , dirName , " already exists")
dirName = 'results/' + out + "/Images"
try:
os.mkdir(dirName)
print("Directory " , dirName , " Created ")
except FileExistsError:
print("Directory " , dirName , " already exists")
###################################################
# Filter lowly abundant microbial and metabolite features
###################################################
to_drop = []
for microbe in micro_df.index.values:
present_in = sum(micro_df.loc[microbe] > 0.0000)
if present_in <= 0.1 * num_samples:
to_drop.append(microbe)
micro_df = micro_df.drop(to_drop, axis=0)
to_drop = []
for metabolite in metab_df.index.values:
present_in = sum(metab_df.loc[metabolite] > 0.0000)
if present_in <= 0.1 * num_samples:
to_drop.append(metabolite)
metab_df = metab_df.drop(to_drop, axis=0)
if external_micro != None:
common_features = np.intersect1d(micro_df.index.values, external_micro_df.index.values)
micro_df = micro_df.loc[common_features]
external_micro_df = external_micro_df.loc[common_features]
if external_metab != None:
common_features = np.intersect1d(metab_df.index.values, external_metab_df.index.values)
metab_df = metab_df.loc[common_features]
external_metab_df = external_metab_df.loc[common_features]
###################################################
# Transform data to Compositional Data
###################################################
# Transform Microbiome Data
if micro_norm == "CLR":
micro_comp_df = pd.DataFrame(data=np.transpose(clr(micro_df.transpose() + 1)),
index=micro_df.index, columns=micro_df.columns)
if external_micro:
external_micro_comp_df = pd.DataFrame(data=np.transpose(clr(external_micro_df.transpose() + 1)),
index=external_micro_df.index, columns=external_micro_df.columns)
elif micro_norm == "RA":
col_sums = micro_df.sum(axis=0)
micro_comp_df = micro_df/col_sums
if external_micro:
col_sums = external_micro_df.sum(axis=0)
external_micro_comp_df = external_micro_df/col_sums
else:
micro_comp_df = micro_df
if external_micro:
external_micro_comp_df = external_micro_df
# Normalize Metabolome Data
if metab_norm == "CLR":
metab_comp_df = pd.DataFrame(data=np.transpose(clr(metab_df.transpose() + 1)),
index=metab_df.index, columns=metab_df.columns)
if external_metab:
external_metab_comp_df = pd.DataFrame(data=np.transpose(clr(external_metab_df.transpose() + 1)),
index=external_metab_df.index, columns=external_metab_df.columns)
elif metab_norm == "RA":
col_sums = metab_df.sum(axis=0)
metab_comp_df = metab_df/col_sums
if external_metab:
col_sums = external_metab_df.sum(axis=0)
external_metab_comp_df = external_metab_df/col_sums
else:
metab_comp_df = metab_df
if external_metab:
external_metab_comp_df = external_metab_df
micro_comp_df = micro_comp_df.transpose()
metab_comp_df = metab_comp_df.transpose()
if external_micro:
external_micro_comp_df = external_micro_comp_df.transpose()
if external_metab:
external_metab_comp_df = external_metab_comp_df.transpose()
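#editor's sanity-check sketch (not in the original script): after the transposes above, rows are samples;
#CLR with a pseudocount of 1 makes each row sum to ~0, as this toy example illustrates.
_toy = np.array([[10.0, 5.0, 1.0], [2.0, 2.0, 2.0]]) #2 samples x 3 features
print(np.allclose(clr(_toy + 1).sum(axis=1), 0)) #expected: True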
###################################################
# Run Cross-Validation on Dataset
###################################################
score_matrices = []
print("Performing %d runs of %d-fold cross-validation" % (num_run_cv, num_cv))
cv_start_time = time.time()
tune_run_time = 0
micro = micro_comp_df.values
metab = metab_comp_df.values
dirName = 'results/' + out + '/CV'
try:
os.mkdir(dirName)
except FileExistsError:
pass
for run in range(0,num_run_cv):
# Set up output directory for CV runs
dirName = 'results/' + out + '/CV/' + str(run)
try:
os.mkdir(dirName)
except FileExistsError:
pass
# Set up CV partitions
kfold = KFold(n_splits=num_cv, shuffle=True)
cv = 0
for train_index, test_index in kfold.split(samples):
# Set up output directory for CV partition run
dirName = 'results/' + out + '/CV/' + str(run) + '/' + str(cv)
try:
os.mkdir(dirName)
except FileExistsError:
pass
# Partition data into training and test sets
train_micro, test_micro = micro[train_index], micro[test_index]
train_metab, test_metab = metab[train_index], metab[test_index]
train_samples, test_samples = samples[train_index], samples[test_index]
# Store training and test set partitioning
train_microbe_df = pd.DataFrame(data=train_micro, index=train_samples, columns=micro_comp_df.columns)
test_microbe_df = pd.DataFrame(data=test_micro, index=test_samples, columns=micro_comp_df.columns)
train_metab_df = pd.DataFrame(data=train_metab, index=train_samples, columns=metab_comp_df.columns)
test_metab_df = pd.DataFrame(data=test_metab, index=test_samples, columns=metab_comp_df.columns)
import numpy as np
import pandas as pd
def create_convergence_histories(
problems, results, stopping_criterion, x_precision, y_precision
):
"""Create tidy DataFrame with all information needed for the benchmarking plots.
Args:
problems (dict): estimagic benchmarking problems dictionary. Keys are the
problem names. Values contain information on the problem, including the
solution value.
results (dict): estimagic benchmarking results dictionary. Keys are
tuples of the form (problem, algorithm), values are dictionaries of the
collected information on the benchmark run, including 'criterion_history'
and 'time_history'.
stopping_criterion (str): one of "x_and_y", "x_or_y", "x", "y". Determines
how convergence is determined from the two precisions.
x_precision (float or None): how close an algorithm must have gotten to the
true parameter values (as percent of the Euclidean distance between start
and solution parameters) before the criterion for clipping and convergence
is fulfilled.
y_precision (float or None): how close an algorithm must have gotten to the
true criterion values (as percent of the distance between start
and solution criterion value) before the criterion for clipping and
convergence is fulfilled.
Returns:
pandas.DataFrame: tidy DataFrame with the following columns:
- problem
- algorithm
- n_evaluations
- walltime
- criterion
- criterion_normalized
- monotone_criterion
- monotone_criterion_normalized
- parameter_distance
- parameter_distance_normalized
- monotone_parameter_distance
- monotone_parameter_distance_normalized
"""
# get solution values for each problem
f_opt = pd.Series(
{name: prob["solution"]["value"] for name, prob in problems.items()}
)
x_opt = {
name: prob["solution"]["params"]["value"] for name, prob in problems.items()
}
# build df from results
time_sr = _get_history_as_stacked_sr_from_results(results, "time_history")
time_sr.name = "walltime"
criterion_sr = _get_history_as_stacked_sr_from_results(results, "criterion_history")
x_dist_sr = _get_history_of_the_parameter_distance(results, x_opt)
df = pd.concat([time_sr, criterion_sr, x_dist_sr], axis=1)
df.index = df.index.rename({"evaluation": "n_evaluations"})
df = df.sort_index().reset_index()
first_evaluations = df.query("n_evaluations == 0").groupby("problem")
f_0 = first_evaluations["criterion"].mean()
x_0_dist = first_evaluations["parameter_distance"].mean()
x_opt_dist = {name: 0 for name in problems}
# normalizations
df["criterion_normalized"] = _normalize(
df=df, col="criterion", start_values=f_0, target_values=f_opt
)
df["parameter_distance_normalized"] = _normalize(
df=df,
col="parameter_distance",
start_values=x_0_dist,
target_values=x_opt_dist,
)
# create monotone versions of columns
df["monotone_criterion"] = _make_history_monotone(df, "criterion")
df["monotone_parameter_distance"] = _make_history_monotone(df, "parameter_distance")
df["monotone_criterion_normalized"] = _make_history_monotone(
df, "criterion_normalized"
)
df["monotone_parameter_distance_normalized"] = _make_history_monotone(
df, "parameter_distance_normalized"
)
if stopping_criterion is not None:
df, converged_info = _clip_histories(
df=df,
stopping_criterion=stopping_criterion,
x_precision=x_precision,
y_precision=y_precision,
)
else:
converged_info = None
return df, converged_info
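#editor's usage sketch (assumption, not from the original module): the inputs typically come from
#estimagic's benchmarking helpers; exact helper names may differ between versions.
#   problems = get_benchmark_problems("example")
#   results = run_benchmark(problems, optimize_options=["scipy_neldermead"])
#   df, converged = create_convergence_histories(
#       problems, results, stopping_criterion="y", x_precision=1e-4, y_precision=1e-4)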
def _get_history_as_stacked_sr_from_results(results, key):
"""Get history as stacked Series from results.
Args:
results (dict): estimagic benchmarking results dictionary.
key (str): name of the history for which to build the Series, e.g.
criterion_history.
Returns:
pandas.Series: index levels are 'problem', 'algorithm' and 'evaluation'.
the name is the key with '_history' stripped off.
"""
histories = {tup: res[key] for tup, res in results.items()}
sr = pd.concat(histories)
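    #editor's reconstruction (assumption): the rest of this function is missing from the dump;
    #the docstring above implies naming the index levels and stripping '_history' from the key.
    sr.index.names = ["problem", "algorithm", "evaluation"]
    sr.name = key.replace("_history", "")
    return sr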
from os.path import join, exists, dirname, basename
from os import makedirs
import sys
import pandas as pd
from glob import glob
import seaborn as sns
import numpy as np
from scipy import stats
import xlsxwriter
import matplotlib.pyplot as plt
from scripts.parse_samplesheet import get_min_coverage, get_role, add_aliassamples, get_species
from scripts.snupy import check_snupy_status
import json
import datetime
import getpass
import socket
import requests
from requests.auth import HTTPBasicAuth
import urllib3
import yaml
import pickle
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
plt.switch_backend('Agg')
RESULT_NOT_PRESENT = -5
def report_undertermined_filesizes(fp_filesizes, fp_output, fp_error,
zscorethreshold=1):
# read all data
fps_sizes = glob(join(dirname(fp_filesizes), '*.txt'))
pds_sizes = []
for fp_size in fps_sizes:
data = pd.read_csv(
fp_size, sep="\t", names=["filesize", "filename", "status"],
index_col=1)
# mark the file of the given run as isme=True while all other data in the dir
# are isme=False
data['isme'] = fp_filesizes in fp_size
data['filesize'] /= 1024**3
pds_sizes.append(data)
pd_sizes = pd.concat(pds_sizes)
# compute z-score against non-bad known runs
pd_sizes['z-score'] = np.nan
idx_nonbad = pd_sizes[pd_sizes['status'] != 'bad'].index
pd_sizes.loc[idx_nonbad, 'z-score'] = stats.zscore(
pd_sizes.loc[idx_nonbad, 'filesize'])
# plot figure
fig = plt.figure()
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] != 'bad')]['filesize'],
kde=False, rug=False, color="black", label='known runs')
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] == 'bad')]['filesize'],
kde=False, rug=False, color="red", label='bad runs')
ax = sns.distplot(
pd_sizes[pd_sizes['isme'] == np.True_]['filesize'],
kde=False, rug=True, color="green", label='this run')
_ = ax.set_ylabel('number of files')
_ = ax.set_xlabel('file-size in GB')
ax.set_title('run %s' % basename(fp_filesizes)[:-4])
ax.legend()
# raise error if current run contains surprisingly large undetermined
# filesize
if pd_sizes[(pd_sizes['isme'] == np.True_) &
(pd_sizes['status'] == 'unknown')]['z-score'].max() > zscorethreshold:
ax.set_title('ERROR: %s' % ax.get_title())
fig.savefig(fp_error, bbox_inches='tight')
raise ValueError(
("Compared to known historic runs, your run contains surprisingly "
"(z-score > %f) large file(s) of undetermined reads. You will find"
" an supporting image at '%s'. Please do the following things:\n"
"1. discuss with lab personal about the quality of the run.\n"
"2. should you decide to keep going with this run, mark file "
"status (3rd column) in file '%s' as 'good'.\n"
"3. for future automatic considerations, mark file status (3rd "
"column) as 'bad' if you have decided to abort processing due to"
" too low quality (z-score kind of averages about known values)."
) % (zscorethreshold, fp_error, fp_filesizes))
else:
fig.savefig(fp_output, bbox_inches='tight')
def report_exome_coverage(
fps_sample, fp_plot,
min_coverage=30, min_targets=80, coverage_cutoff=200):
"""Creates an exome coverage plot for multiple samples.
Parameters
----------
fps_sample : [str]
A list of file-paths with coverage data in csv format.
fp_plot : str
Filepath of output graph.
min_coverage : int
Default: 30.
An arbitrary threshold of minimal coverage that we expect.
A vertical dashed line is drawn at this value.
min_targets : float
Default: 80.
An arbitrary threshold of minimal targets that we expect to be covered.
A horizontal dashed line is drawn at this value.
coverage_cutoff : float
Default: 200.
Rightmost coverage cut-off value where X-axis is limited.
Raises
------
ValueError : If one of the sample's coverage falls below expected
thresholds.
"""
# Usually we aim for a 30X coverage on 80% of the sites.
fig, ax = plt.subplots()
ax.axhline(y=min_targets, xmin=0, xmax=coverage_cutoff, color='gray',
linestyle='--')
ax.axvline(x=min_coverage, ymin=0, ymax=100, color='gray', linestyle='--')
samples_below_coverage_threshold = []
for fp_sample in fps_sample:
coverage = pd.read_csv(fp_sample, sep="\t")
samplename = fp_sample.split('/')[-1].split('.')[0]
linewidth = 1
if coverage[coverage['#coverage'] == min_coverage]['percent_cumulative'].min() < min_targets:
linewidth = 4
samples_below_coverage_threshold.append(samplename)
ax.plot(coverage['#coverage'],
coverage['percent_cumulative'],
label=samplename,
linewidth=linewidth)
ax.set_xlim((0, coverage_cutoff))
ax.set_xlabel('Read Coverage')
ax.set_ylabel('Targeted Exome Bases')
ax.legend()
if len(samples_below_coverage_threshold) > 0:
fp_plot = fp_plot.replace('.pdf', '.error.pdf')
fig.savefig(fp_plot, bbox_inches='tight')
if len(samples_below_coverage_threshold) > 0:
raise ValueError(
"The following %i sample(s) have coverage below expected "
"thresholds. Please discuss with project PIs on how to proceed. "
"Maybe, samples need to be re-sequenced.\n\t%s\nYou will find more"
" information in the generated coverage plot '%s'." % (
len(samples_below_coverage_threshold),
'\n\t'.join(samples_below_coverage_threshold),
fp_plot))
ACTION_PROGRAMS = [
{'action': 'background',
'program': 'GATK',
'fileending_snupy_extract': '.snp_indel.gatk',
'fileending_spike_calls': '.gatk.snp_indel.vcf',
'stepname_spike_calls': 'gatk_CombineVariants',
},
{'action': 'background',
'program': 'Platypus',
'fileending_snupy_extract': '.indel.ptp',
'fileending_spike_calls': '.ptp.annotated.filtered.indels.vcf',
'stepname_spike_calls': 'platypus_filtered',
},
{'action': 'tumornormal',
'program': 'Varscan',
'fileending_snupy_extract': '.somatic.varscan',
'fileending_spike_calls':
{'homo sapiens': '.snp.somatic_germline.vcf',
'mus musculus': '.indel_snp.vcf'},
'stepname_spike_calls': 'merge_somatic',
},
{'action': 'tumornormal',
'program': 'Mutect',
'fileending_snupy_extract': '.somatic.mutect',
'fileending_spike_calls': '.all_calls.vcf',
'stepname_spike_calls': 'mutect',
},
{'action': 'tumornormal',
'program': 'Excavator2',
'fileending_snupy_extract': '.somatic.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_somatic',
},
{'action': 'trio',
'program': 'Varscan\ndenovo',
'fileending_snupy_extract': '.denovo.varscan',
'fileending_spike_calls': '.var2denovo.vcf',
'stepname_spike_calls': 'writing_headers',
},
{'action': 'trio',
'program': 'Excavator2',
'fileending_snupy_extract': '.trio.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_trio',
},
]
def _get_statusdata_demultiplex(samplesheets, prefix, config):
demux_yields = []
for flowcell in samplesheets['run'].unique():
fp_yielddata = '%s%s%s/Data/%s.yield_data.csv' % (prefix, config['dirs']['intermediate'], config['stepnames']['yield_report'], flowcell)
if exists(fp_yielddata):
demux_yields.append(
pd.read_csv(fp_yielddata, sep="\t").rename(columns={'Project': 'Sample_Project', 'Sample': 'Sample_ID', 'Yield': 'yield'})) #.set_index(['Project', 'Lane', 'Sample', 'Barcode sequence'])
if len(demux_yields) <= 0:
return pd.DataFrame()
demux_yields = add_aliassamples(pd.concat(demux_yields, axis=0), config)
# map yields of original samples to aliases
for idx, row in demux_yields[demux_yields['is_alias'] == True].iterrows():
orig = demux_yields[(demux_yields['Sample_Project'] == row['fastq-prefix'].split('/')[0]) & (demux_yields['Sample_ID'] == row['fastq-prefix'].split('/')[1])]['yield']
if orig.shape[0] > 0:
demux_yields.loc[idx, 'yield'] = orig.sum()
demux_yields = demux_yields.dropna(subset=['yield'])
return pd.DataFrame(demux_yields).groupby(['Sample_Project', 'Sample_ID'])['yield'].sum()
def _get_statusdata_coverage(samplesheets, prefix, config, min_targets=80):
coverages = []
for (sample_project, sample_id), meta in samplesheets.groupby(['Sample_Project', 'Sample_ID']):
role_sample_project, role_sample_id = sample_project, sample_id
if (meta['is_alias'] == True).any():
role_sample_project, role_sample_id = get_role(sample_project, meta['spike_entity_id'].unique()[0], meta['spike_entity_role'].unique()[0], samplesheets).split('/')
fp_coverage = join(prefix, config['dirs']['intermediate'], config['stepnames']['exome_coverage'], role_sample_project, '%s.exome_coverage.csv' % role_sample_id)
if exists(fp_coverage):
coverage = pd.read_csv(fp_coverage, sep="\t")
if coverage.shape[0] > 0:
coverages.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'coverage': coverage.loc[coverage['percent_cumulative'].apply(lambda x: abs(x-min_targets)).idxmin(), '#coverage']})
if len(coverages) <= 0:
return pd.DataFrame()
return pd.DataFrame(coverages).set_index(['Sample_Project', 'Sample_ID'])['coverage']
def _isKnownDuo(sample_project, spike_entity_id, config):
"""Checks if trio is a known duo, i.e. missing samples won't be available in the future.
Parameters
----------
sample_project : str
spike_entity_id : str
config : dict()
Snakemake configuration.
Returns
-------
Boolean: True, if spike_entity_id is in config list of known duos for given project.
False, otherwise.
"""
if 'projects' in config:
if sample_project in config['projects']:
if 'known_duos' in config['projects'][sample_project]:
if spike_entity_id in config['projects'][sample_project]['known_duos']:
return True
return False
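#editor's note: illustrative config nesting expected by _isKnownDuo (project and entity names are
#placeholders): config = {'projects': {'ProjectA': {'known_duos': ['entity_007']}}}
#then _isKnownDuo('ProjectA', 'entity_007', config) returns True.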
def _get_statusdata_snupyextracted(samplesheets, prefix, snupy_instance, config):
results = []
for sample_project, meta in samplesheets.groupby('Sample_Project'):
# project in config file is not properly configured for snupy!
if config['projects'].get(sample_project, None) is None:
continue
if config['projects'][sample_project].get('snupy', None) is None:
continue
if config['projects'][sample_project]['snupy'][snupy_instance].get('project_id', None) is None:
continue
r = requests.get('%s/experiments/%s.json' % (config['credentials']['snupy'][snupy_instance]['host'], config['projects'][sample_project]['snupy'][snupy_instance]['project_id']),
auth=HTTPBasicAuth(config['credentials']['snupy'][snupy_instance]['username'], config['credentials']['snupy'][snupy_instance]['password']),
verify=False)
check_snupy_status(r)
samples = [sample['name'] for sample in r.json()['samples']]
for sample_id, meta_sample in meta.groupby('Sample_ID'):
for file_ending, action, program in [(ap['fileending_snupy_extract'], ap['action'], ap['program']) for ap in ACTION_PROGRAMS]:
# in some cases "sample name" holds the spike_entity_id, in others the Sample_ID
entity = sample_id
runs = '+'.join(sorted(meta_sample['run'].unique()))
if (action == 'trio'):
if meta_sample['spike_entity_role'].unique()[0] == 'patient':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
if (action == 'tumornormal'):
if meta_sample['spike_entity_role'].unique()[0] == 'tumor':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
name = '%s_%s/%s%s' % (runs, sample_project, entity, file_ending)
if (sample_project in config['projects']) and (pd.notnull(meta_sample['spike_entity_role'].iloc[0])):
if ((action == 'trio') and (meta_sample['spike_entity_role'].iloc[0] in ['patient', 'sibling']) and (not _isKnownDuo(sample_project, meta_sample['spike_entity_id'].iloc[0], config))) or\
((action == 'background')) or\
((action == 'tumornormal') and (meta_sample['spike_entity_role'].iloc[0].startswith('tumor'))):
results.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'action': action,
'program': program,
'status': name in samples,
'snupy_sample_name': name
})
if len(results) <= 0:
return pd.DataFrame()
return pd.DataFrame(results).set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])
def _get_statusdata_numberpassingcalls(samplesheets, prefix, config, RESULT_NOT_PRESENT, verbose=sys.stderr):
results = []
# leave out samples aliases
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] != True].fillna('not defined').groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
def _get_fileending(file_ending, fastq_prefix, samplesheets, config):
if isinstance(file_ending, dict):
return file_ending[get_species(fastq_prefix, samplesheets, config)]
else:
return file_ending
for ap in ACTION_PROGRAMS:
fp_vcf = None
if (ap['action'] == 'background') and pd.notnull(spike_entity_role):
if (ap['program'] == 'GATK'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Platypus'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'tumornormal'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].apply(lambda x: x.split('_')[0] if pd.notnull(x) else x).isin(['tumor']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID']):
# for Keimbahn, the tumor sample needs to include the name of the original sample ID
instance_id = '%s/%s' % (alias_sample_project, alias_sample_id)
if alias_spike_entity_role == 'tumor':
# for Maus_Hauer, the filename holds the entity name, but not the Sample ID
instance_id = '%s/%s' % (sample_project, spike_entity_id)
if (alias_spike_entity_role.split('_')[0] in set(['tumor'])):
if (ap['program'] == 'Varscan'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Mutect'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'trio'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id, alias_spike_entity_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].isin(['patient', 'sibling']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID', 'spike_entity_id']):
# Trios are a more complicated case, since by default the result name is given by the
# spike_entity_id, but if computed for siblings, the name is given by the fastq-prefix
if (ap['program'] == 'Varscan\ndenovo'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
# remove entry, if it is known (config.yaml) that this trio is incomplete
if (spike_entity_role == 'patient') and (spike_entity_id in config.get('projects', []).get(sample_project, []).get('known_duos', [])):
fp_vcf = None
results.append({
'Sample_Project': sample_project,
'Sample_ID': fastq_prefix.split('/')[-1],
'action': ap['action'],
'program': ap['program'],
'fp_calls': fp_vcf,
})
status = 0
num_status = 20
if verbose is not None:
print('of %i: ' % num_status, file=verbose, end="")
for i, res in enumerate(results):
if (verbose is not None) and int(i % (len(results) / num_status)) == 0:
status+=1
print('%i ' % status, file=verbose, end="")
nr_calls = RESULT_NOT_PRESENT
if (res['fp_calls'] is not None) and exists(res['fp_calls']):
try:
if res['program'] == 'Varscan':
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[7], squeeze=True).apply(lambda x: ';SS=2;' in x).sum()
else:
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[6], squeeze=True).value_counts()['PASS']
except pd.io.common.EmptyDataError:
nr_calls = 0
res['number_calls'] = nr_calls
if verbose is not None:
print('done.', file=verbose)
if len(results) <= 0:
return pd.DataFrame()
results = pd.DataFrame(results)
results = results[pd.notnull(results['fp_calls'])].set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])['number_calls']
# add alias sample results
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] == True].groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
for (_, _, action, program), row in results.loc[fastq_prefix.split('/')[0], fastq_prefix.split('/')[-1], :].iteritems():
results.loc[sample_project, meta['Sample_ID'].unique()[0], action, program] = row
# remove samples, that don't have their own role, but were used for aliases
for (sample_project, sample_id), _ in samplesheets[pd.isnull(samplesheets['spike_entity_role'])].groupby(['Sample_Project', 'Sample_ID']):
idx_to_drop = results.loc[sample_project, sample_id, ['tumornormal', 'trio'], :].index
if len(idx_to_drop) > 0:
results.drop(index=idx_to_drop, inplace=True)
return results
def _get_genepanel_data(samplesheets, prefix, config):
results = []
columns = ['Sample_Project', 'Sample_ID', 'genepanel', 'gene']
# leave out samples aliases
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] != True].fillna('not defined').groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
#print(sample_project, spike_entity_id, spike_entity_role, fastq_prefix)
for file in glob('%s%s%s/*/%s.tsv' % (prefix, config['dirs']['intermediate'], config['stepnames']['genepanel_coverage'], fastq_prefix)):
#print("\t", file)
coverage = pd.read_csv(file, sep="\t")
parts = file.split('/')
# determine genepanel name, project and sample_id from filename
coverage['Sample_Project'] = sample_project
coverage['Sample_ID'] = meta['Sample_ID'].unique()[0]
coverage['genepanel'] = parts[-3][:-5]
coverage = coverage.set_index(columns)
results.append(coverage)
if len(results) > 0:
results = pd.concat(results).sort_values(by=columns)
else:
results = pd.DataFrame(columns=columns)
# add alias sample results
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] == True].groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
for (_, _, action, program), row in results.loc[fastq_prefix.split('/')[0], fastq_prefix.split('/')[-1], :].iterrows():
results.loc[sample_project, meta['Sample_ID'].unique()[0], action, program] = row
return results
def get_status_data(samplesheets, config, snupy_instance, prefix=None, verbose=sys.stderr):
"""
Parameters
----------
samplesheets : pd.DataFrame
The global samplesheets.
config : dict()
Snakemake configuration object.
prefix : str
Default: None, i.e. config['dirs']['prefix'] is used.
Filepath to spike main directory.
verbose : StringIO
Default: sys.stderr
If not None: print verbose information.
Returns
-------
4-tuple: (data_yields, data_coverage, data_snupy, data_calls)
"""
global RESULT_NOT_PRESENT
NUMSTEPS = 6
if prefix is None:
prefix = config['dirs']['prefix']
if verbose is not None:
print("Creating report", file=verbose)
# obtain data
if verbose is not None:
print("1/%i) gathering demuliplexing yields: ..." % NUMSTEPS, file=verbose, end="")
data_yields = _get_statusdata_demultiplex(samplesheets, prefix, config)
if verbose is not None:
print(" done.\n2/%i) gathering coverage: ..." % NUMSTEPS, file=verbose, end="")
data_coverage = _get_statusdata_coverage(samplesheets, prefix, config)
if verbose is not None:
print(" done.\n3/%i) gathering snupy extraction status: ..." % NUMSTEPS, file=verbose, end="")
data_snupy = _get_statusdata_snupyextracted(samplesheets, prefix, snupy_instance, config)
if verbose is not None:
print(" done.\n4/%i) gathering number of PASSing calls: ..." % NUMSTEPS, file=verbose, end="")
data_calls = _get_statusdata_numberpassingcalls(samplesheets, prefix, config, RESULT_NOT_PRESENT, verbose=verbose)
if verbose is not None:
print(" done.\n5/%i) gathering gene coverage: ..." % NUMSTEPS, file=verbose, end="")
data_genepanels = _get_genepanel_data(samplesheets, prefix, config)
if verbose is not None:
print("done.\n6/%i) generating Excel output: ..." % NUMSTEPS, file=verbose, end="")
return (data_yields, data_coverage, data_snupy, data_calls, data_genepanels)
def write_status_update(data, filename, samplesheets, config, offset_rows=0, offset_cols=0, min_yield=5.0, verbose=sys.stderr):
"""
Parameters
----------
data : (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame)
yields, coverage, snupy, calls. Result of function get_status_data.
filename : str
Filepath to output Excel file.
samplesheets : pd.DataFrame
The global samplesheets.
config : dict()
Snakemake configuration object.
offset_rows : int
Default: 0
Number of rows to leave blank on top.
offset_cols : int
Default: 0
Number of columns to leave blank on the left.
min_yield : float
Default: 5.0
Threshold when to color yield falling below this value in red.
Note: I don't know what a good default looks like :-/
verbose : StringIO
Default: sys.stderr
If not None: print verbose information.
"""
global RESULT_NOT_PRESENT
# for debugging purposes
pickle.dump(data, open('%s.datadump' % filename, 'wb'))
data_yields, data_coverage, data_snupy, data_calls, data_genepanels = data
# start creating the Excel result
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
format_good = workbook.add_format({'bg_color': '#ccffcc'})
format_bad = workbook.add_format({'bg_color': '#ffcccc'})
# date information
format_info = workbook.add_format({
'valign': 'vcenter',
'align': 'center',
'font_size': 9})
info_username = getpass.getuser()
info_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
info_machine = socket.gethostname()
worksheet.merge_range(offset_rows, offset_cols, offset_rows+1, offset_cols+3, ('status report created\nat %s\nby %s\non %s' % (info_now, info_username, info_machine)),format_info)
gene_order = []
if data_genepanels.shape[0] > 0:
for panel in sorted(data_genepanels.index.get_level_values('genepanel').unique()):
for gene in sorted(data_genepanels.loc(axis=0)[:, :, panel, :].index.get_level_values('gene').unique()):
gene_order.append((panel, gene))
# header action
format_action = workbook.add_format({
'valign': 'vcenter',
'align': 'center',
'bold': True})
aps = pd.Series([ap['action'] for ap in ACTION_PROGRAMS]).to_frame()
for caption, g in aps.groupby(0):
left = offset_cols+6+g.index[0]
right = offset_cols+6+g.index[-1]
if left == right:
worksheet.write(offset_rows, left, caption, format_action)
else:
worksheet.merge_range(offset_rows, left, offset_rows, right, caption, format_action)
# header
format_header = workbook.add_format({
'rotation': 90,
'bold': True,
'valign': 'vcenter',
'align': 'center'})
worksheet.set_row(offset_rows+1, 80)
for i, caption in enumerate(['yield (MB)', 'coverage'] + [ap['program'] for ap in ACTION_PROGRAMS]):
worksheet.write(offset_rows+1, offset_cols+4+i, caption, format_header)
format_spike_seqdate = workbook.add_format({
'align': 'center',
'valign': 'vcenter',
'font_size': 8})
worksheet.write(offset_rows+1, offset_cols+6+len(ACTION_PROGRAMS), 'sequenced at', format_spike_seqdate)
# header for gene panels
format_header_genes = workbook.add_format({
'rotation': 90,
'bold': False,
'valign': 'vcenter',
'align': 'center',
'font_size': 8})
if len(gene_order) > 0:
for caption, g in pd.DataFrame(gene_order).groupby(0):
left = offset_cols+6+len(ACTION_PROGRAMS)+1+g.index[0]
right = offset_cols+6+len(ACTION_PROGRAMS)+1+g.index[-1]
if left == right:
worksheet.write(offset_rows, left, caption, format_action)
else:
worksheet.merge_range(offset_rows, left, offset_rows, right, caption, format_action)
for i, (panel, gene) in enumerate(gene_order):
worksheet.write(offset_rows+1, offset_cols+6+len(ACTION_PROGRAMS)+1+i, gene, format_header_genes)
worksheet.set_column(offset_cols+6+len(ACTION_PROGRAMS)+1, offset_cols+6+len(ACTION_PROGRAMS)+1+len(gene_order), 3)
worksheet.freeze_panes(offset_rows+2, offset_cols+4)
# body
format_project = workbook.add_format({
'rotation': 90,
'bold': True,
'valign': 'vcenter',
'align': 'center'})
format_spike_entity_id = workbook.add_format({
'valign': 'vcenter',
'align': 'center'})
format_spike_sampleID = workbook.add_format({
'valign': 'vcenter',
'align': 'center'})
format_spike_entity_role_missing = workbook.add_format({
'valign': 'vcenter',
'align': 'center',
'font_color': '#ff0000'})
format_gene_coverage_good = workbook.add_format({
'valign': 'vcenter',
'align': 'right',
'font_size': 6,
'bg_color': '#ccffcc'})
format_gene_coverage_bad = workbook.add_format({
'valign': 'vcenter',
'align': 'right',
'font_size': 6,
'bg_color': '#ffcccc'})
row = offset_rows+2
for sample_project, grp_project in samplesheets.groupby('Sample_Project'):
# add in lines to indicate missing samples, e.g. for trios that are incomplete
missing_samples = []
for spike_entity_id, grp_spike_entity_group in grp_project.groupby('spike_entity_id'):
if len(set(grp_spike_entity_group['spike_entity_role'].unique()) & set(['patient', 'father', 'mother', 'sibling'])) > 0:
for role in ['patient', 'mother', 'father']:
if grp_spike_entity_group[grp_spike_entity_group['spike_entity_role'] == role].shape[0] <= 0:
missing_samples.append({
'spike_entity_id': spike_entity_id,
'Sample_ID': role,
'spike_entity_role': role,
'missing': True,
})
# combine samples from samplesheets AND those that are expected but missing
samples_and_missing = pd.concat([grp_project, pd.DataFrame(missing_samples)], sort=False).fillna(value={'spike_entity_id': ''})
worksheet.merge_range(row, offset_cols, row+len(samples_and_missing.groupby(['spike_entity_id', 'Sample_ID']))-1, offset_cols, sample_project.replace('_', '\n'), format_project)
worksheet.set_column(offset_cols, offset_cols, 4)
# groupby excludes NaNs, thus I have to hack: replace NaN by "" here and
# reset to np.nan within the loop
for spike_entity_group, grp_spike_entity_group in samples_and_missing.groupby('spike_entity_id'):
if spike_entity_group != "":
label = spike_entity_group
if _isKnownDuo(sample_project, spike_entity_group, config):
label += "\n(known duo)"
if len(grp_spike_entity_group.groupby('Sample_ID')) > 1:
worksheet.merge_range(row, offset_cols+1, row+len(grp_spike_entity_group.groupby('Sample_ID'))-1, offset_cols+1, label, format_spike_entity_id)
else:
worksheet.write(row, offset_cols+1, label, format_spike_entity_id)
else:
spike_entity_group = np.nan
worksheet.set_column(offset_cols+1, offset_cols+1, 10)
for nr_sample_id, (sample_id, grp_sample_id) in enumerate(grp_spike_entity_group.sort_values(by='spike_entity_role').groupby('Sample_ID')):
worksheet.set_column(offset_cols+2, offset_cols+2, 4)
role = grp_sample_id['spike_entity_role'].iloc[0]
is_missing = ('missing' in grp_sample_id.columns) and (grp_sample_id[grp_sample_id['missing'] == np.True_].shape[0] > 0)
# sample_ID, extend field if no spike_entity_group or spike_entity_role is given
col_start = offset_cols+2
col_end = offset_cols+2
if pd.isnull(spike_entity_group):
col_start -= 1
if pd.isnull(role):
col_end += 1
sample_id_value = sample_id
# if sample_id starts with name of the entity group, we are using "..." to make it visually more pleasing
if pd.notnull(spike_entity_group) and sample_id_value.startswith(spike_entity_group):
sample_id_value = '%s' % sample_id[len(spike_entity_group):]
frmt = format_spike_sampleID
if is_missing:
sample_id_value = '?'
if not _isKnownDuo(sample_project, spike_entity_group, config):
frmt = format_spike_entity_role_missing
if col_start != col_end:
worksheet.merge_range(row, col_start, row, col_end, sample_id_value, frmt)
else:
worksheet.write(row, col_start, sample_id_value, frmt)
# spike_entity_role
if pd.notnull(role):
worksheet.write(row, offset_cols+3, str(role), frmt)
if is_missing:
fmt = format_spike_entity_role_missing
if _isKnownDuo(sample_project, spike_entity_group, config):
fmt = format_spike_sampleID
worksheet.merge_range(row, offset_cols+4, row, offset_cols+3+2+len(ACTION_PROGRAMS)+1, "missing sample", fmt)
else:
# demultiplexing yield
frmt = format_bad
value_yield = "missing"
if (sample_project, sample_id) in data_yields.index:
value_yield = float('%.1f' % (int(data_yields.loc[sample_project, sample_id]) / (1000**3)))
if value_yield >= 5.0:
frmt = format_good
if grp_sample_id['Lane'].dropna().shape[0] <= 0:
value_yield = 'per sample fastq'
frmt = format_good
worksheet.write(row, offset_cols+4, value_yield, frmt)
worksheet.set_column(offset_cols+4, offset_cols+4, 4)
# coverage
if ((sample_project, sample_id) in data_coverage.index) and (pd.notnull(data_coverage.loc[sample_project, sample_id])):
frmt = format_bad
value_coverage = "missing"
if (sample_project, sample_id) in data_coverage.index:
value_coverage = data_coverage.loc[sample_project, sample_id]
if value_coverage >= get_min_coverage(sample_project, config):
frmt = format_good
worksheet.write(row, offset_cols+5, value_coverage, frmt)
worksheet.set_column(offset_cols+5, offset_cols+5, 4)
for i, (action, program) in enumerate([(ap['action'], ap['program']) for ap in ACTION_PROGRAMS]):
value_numcalls = ""
frmt = None
if (sample_project, sample_id, action, program) in data_calls:
value_numcalls = data_calls.loc[sample_project, sample_id, action, program]
if not isinstance(value_numcalls, np.int64):
value_numcalls = value_numcalls.iloc[0]
if (sample_project, sample_id, action, program) in data_snupy.index:
if data_snupy.loc[sample_project, sample_id, action, program]['status']:
frmt = format_good
else:
frmt = format_bad
if value_numcalls == RESULT_NOT_PRESENT:
value_numcalls = 'vcf missing'
frmt = format_bad
elif value_numcalls != "" and frmt is None:
frmt = format_bad
if frmt is not None:
worksheet.write(row, offset_cols+6+i, value_numcalls, frmt)
# sequencing date
worksheet.write(row, offset_cols+6+len(ACTION_PROGRAMS), ' / '.join(sorted(map(
lambda x: datetime.datetime.strptime('20%s' % x.split('_')[0], '%Y%m%d').strftime("%Y-%m-%d"), grp_sample_id['run'].unique()))), format_spike_seqdate)
worksheet.set_column(offset_cols+6+len(ACTION_PROGRAMS), offset_cols+6+len(ACTION_PROGRAMS), 16)
# gene panel coverage
if pd.notnull(role):
for gene_index, (panel, gene) in enumerate(gene_order):
if (sample_project, sample_id, panel, gene) in data_genepanels.index:
cov = data_genepanels.loc[sample_project, sample_id, panel, gene]
#cov_text = '%i | %.1f | %i' % (cov['mincov'], cov['avgcov_0'], cov['maxcov'])
cov_text = '%.1f' % cov['avgcov_0']
frmt = format_gene_coverage_bad
if cov['avgcov_0'] >= get_min_coverage(sample_project, config):
frmt = format_gene_coverage_good
worksheet.write(row, offset_cols+6+len(ACTION_PROGRAMS)+1+gene_index, cov_text, frmt)
row += 1
print("done.\n", file=verbose, end="")
workbook.close()
def _divide_non_zero(numerator, denumerator):
if denumerator <= 0:
return 0
return numerator / denumerator
def collect_yield_data(dir_flowcell, verbose=None):
"""Composes yield report for (potentially) split demulitplexing.
Notes
-----
People used different length barcodes in the same lane / flowcell.
This should in general be avoided, due to potential clashes of barcodes.
Therefore Illumina's bcl2fastq fails to process sample sheets formatted
like this. I overcome this issue by splitting the samplesheet and running
bcl2fastq multiple times independently. However, this requires complicated
logic especially for stats for the undetermined reads.
Parameters
----------
dir_flowcell : str
Filepath to demultiplexing results in split fashion.
Returns
-------
3-tuple of pd.DataFrame : (Flowcell Summary, Lane Summary, Top Unknown Barcodes)
Formatting is required before data will resemble Illumina's original yield report.
"""
lane_summary = []
lane_meta = []
cluster_meta = []
unknown_barcodes = []
if verbose:
verbose.write('collect_yield_data(%s):\n' % dir_flowcell)
parts = glob(join(dir_flowcell, 'part_*'))
for num_part, dir_part in enumerate(parts):
if verbose:
verbose.write(' part %i of %i\n' % (num_part+1, len(parts)))
clusters = []
for fp_fastqsummary in glob(join(dir_part, 'Stats/FastqSummaryF*L*.txt')):
cluster = pd.read_csv(fp_fastqsummary, sep="\t")
perc_pf_clusters = pd.concat([cluster.groupby(['SampleNumber'])['NumberOfReadsPF'].sum(),
cluster.groupby(['SampleNumber'])['NumberOfReadsRaw'].sum()], axis=1)
perc_pf_clusters['Lane'] = fp_fastqsummary.split('/')[-1].split('.')[0].split('L')[-1]
clusters.append(perc_pf_clusters)
clusters = pd.concat(clusters).reset_index().set_index(['Lane', 'SampleNumber'])
cluster_meta.append(clusters)
meta_samples = pd.read_csv(join(dir_part, 'Stats/AdapterTrimming.txt'), sep="\t", usecols=[0,2,3,4])
meta_samples = meta_samples.iloc[:meta_samples[meta_samples['Lane'].apply(lambda x: x.startswith('Lane:'))].index.max()-2,:].drop_duplicates()
meta_samples.set_index(['Lane', 'Sample Id'], inplace=True)
fp_json = join(dir_part, 'Stats/Stats.json')
part_stats = json.load(open(fp_json, 'r'))
# sample numbers in FastqSummary files match S-idx numbers, which are only increased if a sample has not been seen before, independently of the lane
sample_numbers = dict()
for res_conv in part_stats['ConversionResults']:
numq30bases = 0
sumQualityScore = 0
for res_demux in res_conv['DemuxResults']:
q30bases = sum([res_metrics['YieldQ30'] for res_metrics in res_demux['ReadMetrics']])
qualityScore = sum([res_metrics['QualityScoreSum'] for res_metrics in res_demux['ReadMetrics']])
if res_demux['SampleId'] not in sample_numbers:
sample_numbers[res_demux['SampleId']] = len(sample_numbers)+1
sample_number = sample_numbers[res_demux['SampleId']]
sample_result = {
'Lane': res_conv['LaneNumber'],
'Project': meta_samples.loc[str(res_conv['LaneNumber']), res_demux['SampleId']]['Project'],
'Sample': res_demux['SampleId'],
'PF Clusters': res_demux['NumberReads'],
'% of the lane': _divide_non_zero(res_demux['NumberReads'], res_conv['TotalClustersPF']),
'Yield': res_demux['Yield'],
'% PF Clusters': _divide_non_zero(clusters.loc[str(res_conv['LaneNumber']), sample_number]['NumberOfReadsPF'], clusters.loc[str(res_conv['LaneNumber']), sample_number]['NumberOfReadsRaw']),
'% >= Q30 bases': _divide_non_zero(q30bases, res_demux['Yield']),
'Q30 bases': q30bases,
'QualityScoreSum': qualityScore,
'Mean Quality Score': _divide_non_zero(sum([res_metrics['QualityScoreSum'] for res_metrics in res_demux['ReadMetrics']]), res_demux['Yield']),
'Sample_Number': sample_number,
# default values
'Barcode sequence': 'unknown',
'% Perfect barcode': 1,
'% One mismatch barcode': np.nan,
}
if 'IndexMetrics' in res_demux:
sample_result['Barcode sequence'] = res_demux['IndexMetrics'][0]['IndexSequence']
sample_result['Barcode length'] = len(res_demux['IndexMetrics'][0]['IndexSequence'])
sample_result['% Perfect barcode'] = _divide_non_zero(sum([res_idx['MismatchCounts']['0'] for res_idx in res_demux['IndexMetrics']]), res_demux['NumberReads'])
sample_result['% One mismatch barcode'] = _divide_non_zero(sum([res_idx['MismatchCounts']['1'] for res_idx in res_demux['IndexMetrics']]), res_demux['NumberReads'])
lane_summary.append(sample_result)
numq30bases += q30bases
sumQualityScore += qualityScore
if 'Undetermined' in res_conv:
numq30bases += sum([res_metrics['YieldQ30'] for res_metrics in res_conv['Undetermined']['ReadMetrics']])
sumQualityScore += sum([res_metrics['QualityScoreSum'] for res_metrics in res_conv['Undetermined']['ReadMetrics']])
lane_meta.append({
'Lane': res_conv['LaneNumber'],
"TotalClustersRaw" : res_conv['TotalClustersRaw'],
"TotalClustersPF" : res_conv['TotalClustersPF'],
"Yield": res_conv['Yield'],
"YieldQ30": numq30bases,
"QualityScoreSum": sumQualityScore,
"Flowcell": part_stats['Flowcell'],
})
for res_unknown in part_stats['UnknownBarcodes']:
for barcode in res_unknown['Barcodes'].keys():
res_barcode = {
'Lane': res_unknown['Lane'],
'Count': res_unknown['Barcodes'][barcode],
'Barcode length': 0,
'Barcode sequence': 'unknown'}
if 'Barcodes' in res_unknown:
res_barcode['Barcode sequence'] = barcode
res_barcode['Barcode length'] = len(barcode)
unknown_barcodes.append(res_barcode)
lane_meta = pd.DataFrame(lane_meta).drop_duplicates().set_index('Lane')
lane_meta['run'] = dir_flowcell.split('/')[-1]
lane_summary = pd.DataFrame(lane_summary)
cluster_meta = pd.concat(cluster_meta)
if len(set(lane_summary['Barcode sequence'].unique()) - set(['unknown'])) > 0:
undetermined = []
for lane, clst_lane in lane_summary.groupby('Lane'):
undetermined.append({
'Lane': lane,
'Project': 'default',
'Sample': 'Undetermined',
'Barcode sequence': 'unknown',
'PF Clusters': lane_meta.loc[lane, 'TotalClustersPF'] - clst_lane['PF Clusters'].sum(),
'% of the lane': _divide_non_zero(lane_meta.loc[lane, 'TotalClustersPF'] - clst_lane['PF Clusters'].sum(), lane_meta.loc[lane, 'TotalClustersPF']),
'% Perfect barcode': 1,
'% One mismatch barcode': np.nan,
'Yield': lane_meta.loc[lane, 'Yield'] - clst_lane['Yield'].sum(),
'% PF Clusters': _divide_non_zero(lane_meta.loc[lane, 'TotalClustersPF'] - cluster_meta.loc[str(lane), range(1, cluster_meta.index.get_level_values('SampleNumber').max()), :]['NumberOfReadsPF'].sum(), (lane_meta.loc[lane, 'TotalClustersRaw'] - cluster_meta.loc[str(lane), range(1, cluster_meta.index.get_level_values('SampleNumber').max()), :]['NumberOfReadsRaw'].sum())),
'% >= Q30 bases': _divide_non_zero(lane_meta.loc[lane, 'YieldQ30'] - lane_summary[lane_summary['Lane'] == lane]['Q30 bases'].sum(), lane_meta.loc[lane, 'Yield'] - lane_summary[lane_summary['Lane'] == lane]['Yield'].sum()),
'Mean Quality Score': _divide_non_zero(lane_meta.loc[lane, 'QualityScoreSum'] - lane_summary[lane_summary['Lane'] == lane]['QualityScoreSum'].sum(), lane_meta.loc[lane, 'Yield'] - lane_summary[lane_summary['Lane'] == lane]['Yield'].sum()),
})
lane_summary = pd.concat([lane_summary, pd.DataFrame(undetermined)], sort=False)
# traverse unknown barcodes and filter those that are actually used by samples
# this is non-trivial, because due to splitting, used barcodes might have different sizes!
unknown_barcodes = pd.DataFrame(unknown_barcodes)
if len(set(unknown_barcodes['Barcode sequence'].unique()) - set(['unknown'])) > 0:
idx_remove = []
for lane in unknown_barcodes['Lane'].astype(int).unique():
lane_known_bcs = lane_summary[(lane_summary['Lane'] == lane) & (lane_summary['Barcode sequence'] != 'unknown')][['Barcode sequence', 'Barcode length']]
lane_unknown_bcs = unknown_barcodes[unknown_barcodes['Lane'] == lane]
remove = set()
for len_known, known in lane_known_bcs.groupby('Barcode length'):
for len_unknown, unknown in lane_unknown_bcs.groupby('Barcode length'):
if len_known == len_unknown:
remove |= set(unknown['Barcode sequence']) & set(known['Barcode sequence'])
elif len_known < len_unknown:
for _, bc_unknown in unknown['Barcode sequence'].iteritems():
if bc_unknown[:int(len_known)] in set(known['Barcode sequence']):
remove |= set([bc_unknown])
elif len_known > len_unknown:
remove |= set(unknown['Barcode sequence']) & set(known['Barcode sequence'].apply(lambda x: x[:int(len_unknown)]))
idx_remove.extend(lane_unknown_bcs[lane_unknown_bcs['Barcode sequence'].isin(remove)].index)
top_unknown_barcodes = unknown_barcodes.loc[set(unknown_barcodes.index) - set(idx_remove),:]
else:
top_unknown_barcodes = unknown_barcodes
return lane_meta, lane_summary, top_unknown_barcodes
def create_html_yield_report(fp_yield_report, lane_meta, lane_summary, top_unknown_barcodes, config):
"""Creates a HTML yield report.
Parameters
----------
fp_yield_report : str
Filepath to resulting HTML file.
lane_meta : pd.DataFrame
Result of collect_yield_data, first component:
Flowcell summary statistics, basically # clusters.
lane_summary : pd.DataFrame
Result of collect_yield_data, second component:
Sample demultiplexing information.
top_unknown_barcodes : pd.DataFrame
Result of collect_yield_data, third component:
        Information about highly abundant unused barcodes.
config : dict
        Snakemake's configuration dictionary.
"""
#dir_flowcell : str
# Filepath to demultiplexing results in split fashion.
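    # Usage sketch (illustrative, not part of the pipeline): with the three DataFrames
    # returned by collect_yield_data() and a config carrying 'name_program', a call
    # could look like
    #   lane_meta, lane_summary, top_unknown = collect_yield_data(dir_flowcell, ...)
    #   create_html_yield_report('yield_report.html', lane_meta, lane_summary,
    #                            top_unknown, config={'name_program': 'demux-pipeline'})
    # where the file name and program name are placeholders.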
out = '<html>\n<head>\n'
#out += '<link rel="stylesheet" href="Report.css" type="text/css">\n'
out += '<style>\n'
out += 'body {font-size: 100%; font-family:monospace;}\n'
out += 'table#ReportTable {border-width: 1px 1px 1px 1px; border-collapse: collapse;}\n'
out += 'table#ReportTable td {text-align:right; padding: 0.3em;}\n'
out += 'thead { display: table-header-group }\n'
out += 'tfoot { display: table-row-group }\n'
out += 'tr { page-break-inside: avoid }\n'
out += '</style>\n'
out += '<title>%s</title>\n' % lane_meta['run'].unique()[0]
out += '</head>\n<body>'
out += "%s / [all projects] / [all samples] / [all barcodes]" % lane_meta['Flowcell'].unique()[0]
out += "<h2>Flowcell Summary</h2>"
fc_summary = lane_meta.sum()[['TotalClustersRaw', 'TotalClustersPF', 'Yield']].to_frame().T
fc_summary.rename(columns={'TotalClustersRaw': 'Clusters (Raw)',
'TotalClustersPF': 'Clusters(PF)',
'Yield': 'Yield (MBases)'}, inplace=True)
for col in ['Clusters (Raw)', 'Clusters(PF)']:
fc_summary[col] = fc_summary[col].apply(lambda x: '{:,}'.format(x))
fc_summary['Yield (MBases)'] = fc_summary['Yield (MBases)'].apply(lambda x: '{:,}'.format(int(x/1000000)))
out += fc_summary.to_html(index=False, table_id='ReportTable', justify='center')
out += "<h2>Lane Summary</h2>"
x = lane_summary.sort_values(['Lane', 'Sample_Number'])[['Lane', 'Project', 'Sample', 'Barcode sequence', 'PF Clusters', '% of the lane', '% Perfect barcode', '% One mismatch barcode', 'Yield', '% PF Clusters', '% >= Q30 bases', 'Mean Quality Score']].rename(columns={'Yield': 'Yield (Mbases)'})
x['PF Clusters'] = x['PF Clusters'].apply(lambda x: '{:,}'.format(x))
for col in ['% of the lane', '% Perfect barcode', '% PF Clusters', '% >= Q30 bases', '% One mismatch barcode']:
x[col] = x[col].apply(lambda x: '' if x == 0 else '%.2f' % (x*100))
x['Yield (Mbases)'] = x['Yield (Mbases)'].apply(lambda x: '{:,}'.format(int(x/1000000)))
x['Mean Quality Score'] = x['Mean Quality Score'].apply(lambda x: '' if x == 0 else '%.2f' % x)
x['% One mismatch barcode'] = x['% One mismatch barcode'].replace('nan', 'NaN')
out += x.to_html(index=False, table_id='ReportTable', justify='center')
out += "<h2>Top Unknown Barcodes</h2>"
topX = 10
lanes = []
for lane, lane_barcodes in top_unknown_barcodes.groupby('Lane'):
x = lane_barcodes.sort_values('Count', ascending=False).iloc[:topX][['Lane', 'Count', 'Barcode sequence']].rename(columns={'Barcode sequence': 'Sequence'})#.reset_index().set_index(['Lane', 'Count'])
x.index = range(1,topX+1)[:x.shape[0]]
x['Count'] = x['Count'].apply(lambda x: '{:,}'.format(x))
lanes.append(x)
topunknown = pd.concat(lanes, axis=1)
out += '<table border="1" class="dataframe" id="ReportTable">\n<thead>\n<tr style="text-align: center;">\n'
for col in topunknown.columns:
out += '<th>%s</th>\n' % col
out += '</tr>\n</thead>\n<tbody>\n'
for i, row in topunknown.iterrows():
out += '<tr>\n'
for col, value in row.iteritems():
if col == 'Lane':
if i == 1:
out += '<th rowspan=%s>%s</th>\n' % (min(topX, topunknown.shape[0]), value)
else:
out += '<td>%s</td>\n' % value
out += '</tr>\n'
out += '</tbody>\n</table>\n'
out += "Report generated by %s.</body>\n</html>" % config['name_program']
with open(fp_yield_report, 'w') as f:
f.write(out)
def _agilent_annotation_to_genenames(annotation, field):
"""Splits Agilent capture kit annotations into key-value pairs for different database sources.
Parameters
----------
annotation : str
Annotation line of Agilent coverage.bed file, column 4.
field : str
Name of database entry to be returned.
Returns
-------
str : Name of entry.
Notes
-----
Should a reference database provide multiple names, only the first is used!"""
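    # Illustrative example (identifiers are placeholders): for
    #   annotation = "ref|NM_000000,ens|ENSG00000000001,ens|ENSG00000000002"
    # and field = "ens", the first 'ens' entry wins and "ENSG00000000001" is returned;
    # an annotation without any ',' yields np.nan.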
gene_names = dict()
if ',' not in annotation:
return np.nan
for entry in annotation.split(','):
db_name, _id = entry.split('|')
if db_name not in gene_names:
gene_names[db_name] = _id
return gene_names.get(field, np.nan)
def get_gene_panel_coverage(fp_genepanel, fp_bamstat, fp_agilent_coverage, fp_output):
"""Looks up gene coverage for given panel in given sample, based on bamstat.
Parameters
----------
fp_genepanel : str
Filepath to yaml gene panel configuration file.
fp_bamstat : str
Filepath to bamstat output.
fp_agilent_coverage: str
Filepath to original Agilent coverage bed file, i.e. with gene names.
fp_output : str
Filepath for output filename.
"""
# load gene panel definition
if not exists(fp_genepanel):
raise ValueError("Gene panel file '%s' does not exist." % fp_genepanel)
    with open(fp_genepanel, 'r') as f:
        panel = yaml.safe_load(f)
# read capture kit probe positions, including gene names
probes = pd.read_csv(fp_agilent_coverage, sep="\t", header=None, skiprows=2)
probes[3] = probes[3].apply(lambda x: _agilent_annotation_to_genenames(x, panel['reference_name']))
probes.columns = ['chromosome', 'start', 'end', 'gene']
# subset probes to those covering genes of the panel
probes_of_interest = probes[probes['gene'].isin(panel['genes'])]
# load coverage information
coverage =
|
pd.read_csv(fp_bamstat, sep="\s+", dtype=str)
|
pandas.read_csv
|
import os
import shutil
import filecmp
from unittest import TestCase
import pandas as pd
from pylearn.varselect import count_xvars, rank_xvars, extract_xvar_combos, remove_high_corvar
class TestVariableSelect(TestCase):
def setUp(self):
self.output = './tests/output'
if not os.path.exists(self.output):
os.makedirs(self.output)
def assert_file_same(self, filename):
expected = os.path.join('./tests/data/expected', filename)
actual = os.path.join('./tests/output', filename)
return filecmp.cmp(expected, actual)
def test_count_xvars(self):
vsel_xy_config = pd.read_csv('./tests/data/vsel_xy_config.csv')
count = count_xvars(vsel_xy_config)
self.assertEqual(count, 708)
def test_rank_xvars(self):
varselect =
|
pd.read_csv('./tests/data/rlearn/VARSELECT.csv')
|
pandas.read_csv
|
from js import document, Plotly,console
import logging
import pandas as pd
import numpy as np
import csv
import sys
logging.basicConfig(level=logging.INFO)
def load():
document.getElementById('load-python').classList.add('alert-success')
document.getElementById('load-python').innerHTML = 'Python Loaded'
def process_input():
"""
    Get data and set the header - return a DataFrame
"""
rows = []
file_data = document.getElementById("output").textContent
logging.info(file_data)
csv_reader = csv.reader(file_data.splitlines())
for row in csv_reader:
logging.info(row)
rows.append(row)
df = pd.DataFrame(data=rows)
set_header = df.iloc[0]
df = df[1:]
df.columns = set_header
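    # Note: this first-row-to-header promotion is equivalent, assuming the text is plain
    # CSV, to something like pd.read_csv(io.StringIO(file_data)), which treats the first
    # row as the header by default; the manual route is kept to stay close to the
    # csv.reader loop above.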
logging.info(df)
return df
def process_scatter():
df = process_input()
col1 = df[df.columns[0]].values
col2 = df[df.columns[1]].values
Plotly.plot(document.getElementById('plot2'),
[{'x':col1, 'y': col2,
'type': 'scatter',
'mode': 'markers+lines',
'hoverinfo': 'label',
'label': 'Zoom Background Interest'
}])
def process_pie():
df = process_input()
df[df.columns[1]] =
|
pd.to_numeric(df[df.columns[1]])
|
pandas.to_numeric
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
            result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
|
tm.assert_almost_equal(result, expected)
|
pandas.util.testing.assert_almost_equal
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides functions to prepare the GradCam dataset.
"""
# Imports
import os
import json
import urllib
import shutil
import requests
import logging
import numpy as np
from torchvision import transforms
from torchvision import datasets
from collections import namedtuple
import pandas as pd
from pynet.datasets import Fetchers
# Global parameters
Item = namedtuple("Item", ["input_path", "output_path", "metadata_path",
"labels"])
URLS = [
"https://miro.medium.com/max/419/1*kc-k_j53HOJH_sifhg4lHg.jpeg",
"https://miro.medium.com/max/500/1*506ySAThs6pItevFqZF-4g.jpeg",
"https://miro.medium.com/max/500/1*XbnzdczNru6HsX6qPZaXLg.jpeg",
"https://miro.medium.com/max/384/1*oRpjlGC3sUy5yQJtpwclwg.jpeg",
"https://miro.medium.com/max/500/1*EQ3JBr2vGPuovYFyh6mQeQ.jpeg"
]
logger = logging.getLogger("pynet")
@Fetchers.register
def fetch_gradcam(datasetdir, inception=False):
""" Fetch/prepare the GradCam dataset for pynet.
Parameters
----------
datasetdir: str
the dataset destination folder.
    inception: bool, default False
if set apply the inception transforms on the inputs.
Returns
-------
item: namedtuple
a named tuple containing 'input_path', 'output_path', and
'metadata_path'.
"""
logger.info("Loading gradcam dataset.")
if not os.path.isdir(datasetdir):
os.mkdir(datasetdir)
labels_url = (
"https://s3.amazonaws.com/deep-learning-models/image-models/"
"imagenet_class_index.json")
with urllib.request.urlopen(labels_url) as response:
labels = dict(
(key, val)
for key, val in json.loads(response.read().decode()).items())
desc_path = os.path.join(datasetdir, "pynet_gradcam.tsv")
input_path = os.path.join(datasetdir, "pynet_gradcam_inputs.npy")
incep_input_path = os.path.join(
datasetdir, "pynet_gradcam_incep_inputs.npy")
if not os.path.isfile(desc_path):
imagedir = os.path.join(datasetdir, "images")
if not os.path.isdir(imagedir):
os.mkdir(imagedir)
metadata = dict((key, []) for key in ("name", ))
for cnt, url in enumerate(URLS):
logger.debug("Processing {0}...".format(url))
ext = url.split(".")[-1]
name = "image{0}".format(cnt)
imagefile = os.path.join(imagedir, name + "." + ext)
metadata["name"].append(name)
if not os.path.isfile(imagefile):
response = requests.get(url, stream=True)
with open(imagefile, "wb") as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
else:
logger.debug(
"Image '{0}' already downloaded.".format(imagefile))
transform = transforms.Compose([
transforms.Resize((244, 244)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
dataset = datasets.ImageFolder(root=datasetdir, transform=transform)
data = []
for item in dataset:
data.append(item[0].numpy())
data = np.asarray(data)
np.save(input_path, data)
transform = transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
dataset = datasets.ImageFolder(root=datasetdir, transform=transform)
data = []
for item in dataset:
data.append(item[0].numpy())
data = np.asarray(data)
np.save(incep_input_path, data)
df =
|
pd.DataFrame.from_dict(metadata)
|
pandas.DataFrame.from_dict
|
import os
import json
import luigi
import pandas as pd
class MakeTapConfig(luigi.Task):
ticker = luigi.Parameter()
def requires(self):
return []
def output(self):
return luigi.LocalTarget('config/%s.json' % self.ticker)
def run(self):
with self.output().open('w') as f:
json.dump({'start_date': '2017-01-01', 'end_date': '2017-07-03',
'ticker': self.ticker}, f)
class SyncPrice(luigi.Task):
ticker = luigi.Parameter()
def requires(self):
return MakeTapConfig(ticker=self.ticker)
def output(self):
return luigi.LocalTarget('output/%s.csv' % self.ticker)
def run(self):
tap_cmd = 'tap-quandl-stock-price -c %s' % self.input().fn
target_cmd = 'target-csv -c csv_config.json -o %s' % self.output().fn
os.system('%s | %s' % (tap_cmd, target_cmd))
class QuandlSync(luigi.Task):
input_filename = luigi.Parameter()
output_filename = luigi.Parameter()
def requires(self):
task_list = []
with open(self.input_filename, 'r') as f:
task_list = [SyncPrice(ticker.strip()) for ticker in f.readlines()]
return task_list
def output(self):
return luigi.LocalTarget(self.output_filename)
def run(self):
input_filenames = [x.fn for x in self.input()]
df_list = [pd.read_csv(fn) for fn in input_filenames]
df =
|
pd.concat(df_list)
|
pandas.concat
|
#!/usr/bin/env python
# Copyright 2020 ARC Centre of Excellence for Climate Extremes
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import xarray as xr
import numpy as np
import pandas as pd
import datetime
TESTS_HOME = os.path.abspath(os.path.dirname(__file__))
TESTS_DATA = os.path.join(TESTS_HOME, "testdata")
# oisst data from 2003 to 2004 included for small region
oisst = os.path.join(TESTS_DATA, "oisst_2003_2004.nc")
# oisst data from 2003 to 2004 included for all land region
land = os.path.join(TESTS_DATA, "land.nc")
# threshold and seasonal avg calculated using Eric Olivier MHW code on two points of OISST region subset for same period 2003-2004
# point1 lat=-42.625, lon=148.125
# point2 lat=-41.625, lon=148.375
oisst_clim = os.path.join(TESTS_DATA,"test_clim_oisst.nc")
oisst_clim_nosmooth = os.path.join(TESTS_DATA,"test_clim_oisst_nosmooth.nc")
relthreshnorm = os.path.join(TESTS_DATA, "relthreshnorm.nc")
@pytest.fixture(scope="module")
def oisst_ts():
ds = xr.open_dataset(oisst)
return ds.sst
@pytest.fixture(scope="module")
def landgrid():
ds = xr.open_dataset(land)
return ds.sst
@pytest.fixture(scope="module")
def clim_oisst():
ds = xr.open_dataset(oisst_clim)
return ds
@pytest.fixture(scope="module")
def clim_oisst_nosmooth():
ds = xr.open_dataset(oisst_clim_nosmooth)
return ds
@pytest.fixture(scope="module")
def dsnorm():
ds = xr.open_dataset(relthreshnorm)
return ds.stack(cell=['lat','lon'])
@pytest.fixture
def oisst_doy():
a = np.arange(1,367)
b = np.delete(a,[59])
return np.concatenate((b,a))
@pytest.fixture
def tstack():
return np.array([ 16.99, 17.39, 16.99, 17.39, 17.3 , 17.39, 17.3 ])
@pytest.fixture
def filter_data():
a = [0,1,1,1,1,1,0,0,1,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0]
time = pd.date_range('2001-01-01', periods=len(a))
array = pd.Series(a, index=time)
idxarr = pd.Series(data=np.arange(len(a)), index=time)
bthresh = array==1
st = pd.Series(index=time, dtype='float64').rename('start')
end = pd.Series(index=time, dtype='float64').rename('end')
events =
|
pd.Series(index=time, dtype='float64')
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
##########
Processing
##########
*Created on Thu Jun 1 14:15 2017 by <NAME>*
Processing results from the CellPainting Assay in the Jupyter notebook.
This module provides the DataSet class and its methods.
Additional functions in this module act on pandas DataFrames."""
import time
import glob
import os.path as op
from collections import Counter
import xml.etree.ElementTree as ET
import pickle
import pandas as pd
import numpy as np
from rdkit.Chem import AllChem as Chem
from rdkit import DataStructs
from IPython.core.display import HTML
from . import tools as cpt
from .config import ACT_PROF_PARAMETERS
from .config import LIMIT_SIMILARITY_L, LIMIT_CELL_COUNT_L, LIMIT_ACTIVITY_L
try:
from misc_tools import apl_tools
AP_TOOLS = True
#: Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} (commit: {})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
from . import resource_paths as cprp
except ImportError:
from . import resource_paths_templ as cprp
print("* Resource paths not found, stub loaded.")
print(" Automatic loading of resources will not work,")
print(" please have a look at resource_paths_templ.py")
FINAL_PARAMETERS = ['Metadata_Plate', 'Metadata_Well', 'plateColumn', 'plateRow',
"Compound_Id", 'Container_Id', "Well_Id", "Producer", "Pure_Flag", "Toxic",
"Rel_Cell_Count", "Known_Act", "Trivial_Name", 'WellType', 'Conc_uM',
"Activity", "Act_Profile", "Plate", "Smiles"]
DROP_FROM_NUMBERS = ['plateColumn', 'plateRow', 'Conc_uM', "Compound_Id"]
DROP_GLOBAL = ["PathName_CellOutlines", "URL_CellOutlines", 'FileName_CellOutlines',
'ImageNumber', 'Metadata_Site', 'Metadata_Site_1', 'Metadata_Site_2']
QUANT = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
DEBUG = False
def debug_print(txt, val):
if DEBUG:
txt = txt + ":"
print("DEBUG {:20s}".format(txt), val)
class DataSet():
def __init__(self, log=True):
self.data = pd.DataFrame()
self.fields = {"plateColumn": "Metadata_Plate",
"WellType": "WellType", "ControlWell": "Control", "CompoundWell": "Compound"}
self.log = log
def __getitem__(self, item):
res = self.data[item]
if isinstance(res, pd.DataFrame):
result = DataSet()
result.data = res
result.print_log("subset")
else:
result = res
return result
def __getattr__(self, name):
"""Try to call undefined methods on the underlying pandas DataFrame."""
def method(*args, **kwargs):
res = getattr(self.data, name)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
result = DataSet()
result.data = res
result.print_log(name)
else:
result = res
return result
return method
def show(self):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
print("Shape: ", self.shape)
print("Parameters:", parameters)
return HTML(self.data[parameters]._repr_html_())
def head(self, n=5):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
res = self.data[parameters].head(n)
result = DataSet()
result.data = res
result.print_log("head")
return result
def drop_cols(self, cols, inplace=False):
"""Drops the list of columns from the DataFrame.
Listed columns that are not present in the DataFrame are simply ignored
(no error is thrown)."""
if inplace:
drop_cols(self.data, cols, inplace=True)
self.print_log("drop cols (inplace)")
else:
result = DataSet()
result.data = drop_cols(self.data, cols, inplace=False)
result.print_log("drop cols")
return result
def keep_cols(self, cols, inplace=False):
if inplace:
self.data = self.data[cols]
self.print_log("keep cols (inplace)")
else:
result = DataSet()
result.data = self.data[cols]
result.print_log("keep cols")
return result
def print_log(self, component, add_info=""):
if self.log:
print_log(self.data, component, add_info)
def load(self, fn, sep="\t"):
"""Read one or multiple result files and concatenate them into one dataset.
`fn` is a single filename (string) or a list of filenames."""
self.data = load(fn, sep=sep).data
self.print_log("load data")
def write_csv(self, fn, parameters=None, sep="\t"):
result = self.data.copy()
if isinstance(parameters, list):
result = result[parameters]
result.to_csv(fn, sep=sep, index=False)
def write_pkl(self, fn):
self.data.to_pickle(fn)
def write_parameters(self, fn="parameters.txt"):
parameters = sorted(self.measurements)
with open("parameters.txt", "w") as f:
f.write('"')
f.write('",\n"'.join(parameters))
f.write('"')
print(len(parameters), "parameters written.")
def describe(self, times_mad=3.0):
df = numeric_parameters(self.data)
stats = pd.DataFrame()
stats["Min"] = df.min()
stats["Max"] = df.max()
stats["Median"] = df.median()
stats["MAD"] = df.mad()
stats["Outliers"] = df[(((df - df.median()).abs() - times_mad * df.mad()) > 0)].count()
print(self.shape)
return stats
def well_type_from_position(self):
"""Assign the WellType from the position on the plate.
        Controls are in columns 11 and 12."""
result = DataSet(log=self.log)
result.data = well_type_from_position(self.data)
result.print_log("well type from pos")
return result
def well_from_position(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Assign Metadata_Well from plateRow, plateColumn"""
result = DataSet(log=self.log)
result.data = well_from_position(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("well from pos")
return result
def position_from_well(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Generate plateRow and plateColumn from Metatadata_Well"""
result = DataSet(log=self.log)
result.data = position_from_well(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("pos from well")
return result
def join_layout_384(self, layout_fn, on="Address_384"):
result = DataSet(log=self.log)
result.data = join_layout_384(self.data, layout_fn, on=on)
result.print_log("join layout 384")
return result
def join_layout_1536(self, plate, quadrant, on="Address_384", how="inner"):
"""Cell Painting is always run in 384er plates.
COMAS standard screening plates are format 1536.
With this function, the 1536-to-384 reformatting file
with the smiles added by join_smiles_to_layout_1536()
can be used directly to join the layout to the individual 384er plates."""
result = DataSet(log=self.log)
result.data = join_layout_1536(self.data, plate, quadrant, on=on, how=how)
result.print_log("join layout 1536")
return result
def numeric_parameters(self):
result = DataSet()
result.data = numeric_parameters(self.data)
return result
def flag_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Flag data rows of toxic compounds"""
result = DataSet()
result.data = flag_toxic(self.data, cutoff=cutoff)
flagged = result.data["Toxic"].sum()
result.print_log("flag toxic", "{:3d} flagged".format(flagged))
return result
def remove_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Remove data rows of toxic compounds"""
result = DataSet()
toxic = DataSet()
result.data, toxic.data = remove_toxic(self.data, cutoff=cutoff)
result.print_log("remove toxic", "{:3d} removed".format(toxic.shape[0]))
return result, toxic
def remove_impure(self, strict=False, reset_index=True):
"""Remove entries with `Pure_Flag == "Fail"`"""
result = DataSet()
flagged = DataSet()
result.data, flagged.data = remove_impure(self.data)
result.print_log("remove impure", "{:3d} removed".format(flagged.shape[0]))
return result, flagged
def remove_outliers(self, times_dev=3.0, group_by=None, method="median"):
"""Returns the filtered dataframe as well as the outliers.
        method can be `median` or `mean`."""
result = DataSet()
outliers = DataSet()
result.data, outliers.data = remove_outliers(self.data, times_dev=times_dev,
group_by=group_by, method=method)
result.print_log("remove outliers", "{:3d} removed".format(outliers.shape[0]))
return result, outliers
def remove_skipped_echo_direct_transfer(self, fn):
"""Remove wells that were reported as skipped in the Echo protocol (xml).
        This function works with Echo direct transfer protocols.
        The function supports wildcards in the filename; the first matching file will be used.
Returns a new dataframe without the skipped wells."""
result = DataSet()
result.data, skipped = remove_skipped_echo_direct_transfer(self.data, fn=fn)
skipped_str = "(" + ", ".join(skipped) + ")"
result.print_log("remove skipped", "{:3d} skipped {}".format(self.shape[0] - result.shape[0],
skipped_str))
return result
def drop_dups(self, cpd_id="Compound_Id"):
"""Drop duplicate Compound_Ids"""
result = DataSet()
result.data = self.data.drop_duplicates(cpd_id)
result.print_log("drop dups")
return result
def group_on_well(self, group_by=FINAL_PARAMETERS):
"""Group results on well level."""
result = DataSet()
result.data = group_on_well(self.data, group_by=group_by)
result.print_log("group on well")
return result
def join_batch_data(self, df_data=None, how="left", fillna="n.d."):
"""Join data by Batch_Id."""
result = DataSet()
result.data = join_batch_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join batch data")
return result
def join_container_data(self, df_data=None, how="left", fillna=""):
"""Join data by Container_Id."""
result = DataSet()
result.data = join_container_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join cntnr data")
return result
def join_container(self, cont_data=None, how="inner"):
result = DataSet(log=self.log)
result.data = join_container(self.data, cont_data=cont_data, how=how)
result.print_log("join container")
return result
def join_smiles(self, df_smiles=None, how="left"):
"""Join Smiles from Compound_Id."""
result = DataSet()
result.data = join_smiles(self.data, df_smiles=df_smiles, how=how)
result.print_log("join smiles")
return result
def join_annotations(self):
"""Join Annotations from Compound_Id."""
result = DataSet()
result.data = join_annotations(self.data)
result.print_log("join annotations")
return result
def add_dmso(self):
"""Add DMSO to references."""
result = DataSet()
result.data = add_dmso(self.data)
result.print_log("add DMSO")
return result
def poc(self, group_by=None, well_type="WellType", control_name="Control"):
"""Normalize the data set to Percent-Of-Control per group (e.g. per plate)
based on the median of the controls.
Parameters:
group_by (string or None): optional column by which the calculation should be grouped,
e.g. the column with plate name."""
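        # Sketch of the normalization (assuming the usual POC definition; the actual
        # computation lives in the module-level poc() helper):
        #   poc_value = 100 * value / median(values of the Control wells in the same group)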
result = DataSet()
result.data = poc(self.data, group_by=group_by)
self.print_log("POC")
return result
def activity_profile(self, mad_mult=3.5, parameters=ACT_PROF_PARAMETERS, only_final=True):
"""Generates the `Act_Profile` column.
The byte is set when the parameter's value is greater (or smaller)
than parameter_ctrl.median() + (or -) `mad_mult`* parameter.mad()
If a list of parameters is given, then the activity profile will be calculated
for these parameters.
If `only_final` == `True`, then only the parameters listed in `FINAL_PARAMETERS`
are kept in the output_table.
Returns a new Pandas DataFrame."""
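        # Threshold rule restated from the docstring (illustrative):
        #   up:   value > median(ctrl) + mad_mult * mad(ctrl)
        #   down: value < median(ctrl) - mad_mult * mad(ctrl)
        # values in between are considered unchanged relative to the controls.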
result = DataSet()
result.data = activity_profile(self.data, mad_mult=mad_mult, parameters=parameters,
only_final=only_final)
result.print_log("activity profile")
return result
def relevant_parameters(self, ctrls_std_rel_min=0.001,
ctrls_std_rel_max=0.10):
result = DataSet()
result.data = relevant_parameters(self.data, ctrls_std_rel_min=ctrls_std_rel_min,
ctrls_std_rel_max=ctrls_std_rel_max)
num_parm = len(result.measurements)
result.print_log("relevant parameters", "{:.3f}/{:.3f}/{:4d}"
.format(ctrls_std_rel_min, ctrls_std_rel_max, num_parm))
return result
def correlation_filter(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (mad)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
def correlation_filter_std(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter_std(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (std)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
def add_act_profile_for_control(self, parameters=ACT_PROF_PARAMETERS):
# Compound_Id DMSO: 245754
control = {"Compound_Id": 245754, "Trivial_Name": "Control", "Activity": 0,
"Act_Profile": "".join(["1"] * len(parameters))}
ck = control.keys()
for k in ck:
if k not in self.data.keys():
control.pop(k)
        tmp = pd.DataFrame([control])
        result = DataSet()
        result.data = pd.concat([self.data, tmp])
return result
def update_similar_refs(self, mode="cpd", write=True):
"""Find similar compounds in references and update the export file.
The export file of the dict object is in pkl format. In addition,
a tsv file (or maybe JSON?) is written for use in PPilot.
        This method does not return anything, it just writes the result to file."""
rem = "" if write else "write is off"
update_similar_refs(self.data, mode=mode, write=write)
self.print_log("update similar", rem)
def update_datastore(self, mode="cpd", write=True):
"""Update the DataStore with the current DataFrame."""
update_datastore(self.data, mode=mode, write=write)
def find_similar(self, act_profile, cutoff=0.5, max_num=5):
"""Filter the dataframe for activity profiles similar to the given one.
`cutoff` gives the similarity threshold, default is 0.5."""
result = DataSet()
result.data = find_similar(self.data, act_profile=act_profile, cutoff=cutoff, max_num=max_num)
result.print_log("find similar")
return result
def well_id_similarity(self, well_id1, well_id2):
"""Calculate the similarity of the activity profiles from two compounds
(identified by `Compound_Id`). Returns value between 0 .. 1"""
return well_id_similarity(self.data, well_id1, self.data, well_id2)
def count_active_parameters_occurrences(self, act_prof="Act_Profile",
parameters=ACT_PROF_PARAMETERS):
"""Counts the number of times each parameter has been active in the dataset."""
        return count_active_parameters_occurrences(self.data, act_prof=act_prof, parameters=parameters)
@property
def shape(self):
return self.data.shape
@property
def metadata(self):
"""Returns a list of the those parameters in the DataFrame that are NOT CellProfiler measurements."""
return metadata(self.data)
@property
def measurements(self):
"""Returns a list of the CellProfiler parameters that are in the DataFrame."""
return measurements(self.data)
def load(fn, sep="\t"):
"""Read one or multiple result files and concatenate them into one dataset.
`fn` is a single filename (string) or a list of filenames."""
result = DataSet()
if isinstance(fn, list):
result.data = pd.concat((pd.read_csv(f, sep=sep) for f in fn))
else:
result.data = pd.read_csv(fn, sep=sep)
drop = [d for d in DROP_GLOBAL if d in result.data.keys()]
result.data.drop(drop, axis=1, inplace=True)
result.print_log("load dataset")
return result
def load_pkl(fn):
result = DataSet()
result.data = pd.read_pickle(fn)
result.print_log("load pickle")
return result
def print_log(df, component, add_info=""):
component = component + ":"
if len(add_info) > 0:
add_info = " ({})".format(add_info)
print("* {:22s} ({:5d} | {:4d}){}".format(component, df.shape[0], df.shape[1], add_info))
def read_smiles_file(fn, props=['Compound_Id', "Smiles"]):
"""Read in the file with the Compound_Ids and the Smiles.
Return a DataFrame for fast access."""
result = pd.read_csv(fn, sep="\t")
result = result[props]
result = result.apply(pd.to_numeric, errors='ignore')
return result
def clear_resources():
try:
del SMILES
print("* deleted resource: SMILES")
except NameError:
pass
try:
del ANNOTATIONS
print("* deleted resource: ANNOTATIONS")
except NameError:
pass
try:
del REFERENCES
print("* deleted resource: REFERENCES")
except NameError:
pass
try:
del SIM_REFS
print("* deleted resource: SIM_REFS")
except NameError:
pass
try:
del DATASTORE
print("* deleted resource: DATASTORE")
except NameError:
pass
try:
del LAYOUTS
print("* deleted resource: LAYOUTS")
except NameError:
pass
def load_resource(resource, mode="cpd"):
"""Available resources: SMILES, ANNOTATIONS, SIM_REFS, REFERENCES,
CONTAINER, CONTAINER_DATA, BATCH_DATA, DATASTORE, LAYOUTS"""
res = resource.lower()
glbls = globals()
if "smi" in res:
if "SMILES" not in glbls:
# except NameError:
global SMILES
print("- loading resource: (SMILES)")
SMILES = read_smiles_file(cprp.smiles_path,
props=cprp.smiles_cols)
SMILES = SMILES.apply(pd.to_numeric, errors='ignore')
elif "annot" in res:
if "ANNOTATIONS" not in glbls:
global ANNOTATIONS
print("- loading resource: (ANNOTATIONS)")
ANNOTATIONS = pd.read_csv(cprp.annotations_path, sep="\t")
ANNOTATIONS = ANNOTATIONS.apply(pd.to_numeric, errors='ignore')
elif "sim" in res:
if "SIM_REFS" not in glbls:
global SIM_REFS
print("- loading resource: (SIM_REFS)")
if "ext" in mode.lower():
srp = cprp.sim_refs_ext_path
else:
srp = cprp.sim_refs_path
try:
SIM_REFS =
|
pd.read_csv(srp, sep="\t")
|
pandas.read_csv
|
import datetime
import math
import os
import random
import sys
import warnings
import dill
import pathos
import numpy as np
import pandas as pd
# Ultimately, we (the authors of quantile_ml) are responsible for building a project that's robust against warnings.
# The classes of warnings below are ones we've deemed acceptable. The user should be able to sit at a high level of abstraction, and not be bothered with the internals of how we're handling these things.
# Ignore all warnings that are UserWarnings or DeprecationWarnings. We'll fix these ourselves as necessary.
# warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
pd.options.mode.chained_assignment = None # default='warn'
import scipy
from sklearn.calibration import CalibratedClassifierCV
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, brier_score_loss, make_scorer, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from quantile_ml import DataFrameVectorizer
from quantile_ml import utils
from quantile_ml import utils_categorical_ensembling
from quantile_ml import utils_data_cleaning
from quantile_ml import utils_feature_selection
from quantile_ml import utils_model_training
from quantile_ml import utils_models
from quantile_ml import utils_scaling
from quantile_ml import utils_scoring
xgb_installed = False
try:
import xgboost as xgb
xgb_installed = True
except ImportError:
pass
keras_installed = False
try:
from keras.models import Model
keras_installed = True
except ImportError as e:
keras_import_error = e
pass
class Predictor(object):
def __init__(self, type_of_estimator, column_descriptions, verbose=True, name=None):
if type_of_estimator.lower() in ['regressor','regression', 'regressions', 'regressors', 'number', 'numeric', 'continuous']:
self.type_of_estimator = 'regressor'
elif type_of_estimator.lower() in ['classifier', 'classification', 'categorizer', 'categorization', 'categories', 'labels', 'labeled', 'label']:
self.type_of_estimator = 'classifier'
else:
print('Invalid value for "type_of_estimator". Please pass in either "regressor" or "classifier". You passed in: ' + type_of_estimator)
raise ValueError('Invalid value for "type_of_estimator". Please pass in either "regressor" or "classifier". You passed in: ' + type_of_estimator)
self.column_descriptions = column_descriptions
self.verbose = verbose
self.trained_pipeline = None
self._scorer = None
self.date_cols = []
# Later on, if this is a regression problem, we will possibly take the natural log of our y values for training, but we will still want to return the predictions in their normal scale (not the natural log values)
self.took_log_of_y = False
self.take_log_of_y = False
self._validate_input_col_descriptions()
self.grid_search_pipelines = []
self.name = name
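        # Illustrative column_descriptions (accepted values follow
        # _validate_input_col_descriptions below; column names are placeholders):
        #   {'price': 'output', 'city': 'categorical', 'listing_text': 'nlp',
        #    'created_at': 'date', 'internal_id': 'ignore'}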
def _validate_input_col_descriptions(self):
found_output_column = False
self.cols_to_ignore = []
expected_vals = set(['categorical', 'text', 'nlp'])
for key, value in self.column_descriptions.items():
value = value.lower()
self.column_descriptions[key] = value
if value == 'output':
self.output_column = key
found_output_column = True
elif value == 'date':
self.date_cols.append(key)
elif value == 'ignore':
self.cols_to_ignore.append(key)
elif value in expected_vals:
pass
else:
            raise ValueError('We are not sure how to process this column of data: ' + str(value) + '. Please pass in "output", "categorical", "text", "ignore", "nlp", or "date".')
if found_output_column is False:
print('Here is the column_descriptions that was passed in:')
print(self.column_descriptions)
raise ValueError('In your column_descriptions, please make sure exactly one column has the value "output", which is the value we will be training models to predict.')
# We will be adding one new categorical variable for each date col
# Be sure to add it here so the rest of the pipeline knows to handle it as a categorical column
for date_col in self.date_cols:
self.column_descriptions[date_col + '_day_part'] = 'categorical'
# We use _construct_pipeline at both the start and end of our training.
# At the start, it constructs the pipeline from scratch
# At the end, it takes FeatureSelection out after we've used it to restrict DictVectorizer, and adds final_model back in if we did grid search on it
def _construct_pipeline(self, model_name='LogisticRegression', trained_pipeline=None, final_model=None, feature_learning=False, final_model_step_name='final_model'):
pipeline_list = []
if self.user_input_func is not None:
if trained_pipeline is not None:
pipeline_list.append(('user_func', trained_pipeline.named_steps['user_func']))
elif self.transformation_pipeline is None:
print('Including the user_input_func in the pipeline! Please remember to return X, and not modify the length or order of X at all.')
print('Your function will be called as the first step of the pipeline at both training and prediction times.')
pipeline_list.append(('user_func', FunctionTransformer(func=self.user_input_func, pass_y=False, validate=False)))
# These parts will be included no matter what.
if trained_pipeline is not None:
pipeline_list.append(('basic_transform', trained_pipeline.named_steps['basic_transform']))
else:
pipeline_list.append(('basic_transform', utils_data_cleaning.BasicDataCleaning(column_descriptions=self.column_descriptions)))
if self.perform_feature_scaling is True:
if trained_pipeline is not None:
pipeline_list.append(('scaler', trained_pipeline.named_steps['scaler']))
else:
pipeline_list.append(('scaler', utils_scaling.CustomSparseScaler(self.column_descriptions)))
if trained_pipeline is not None:
pipeline_list.append(('dv', trained_pipeline.named_steps['dv']))
else:
pipeline_list.append(('dv', DataFrameVectorizer.DataFrameVectorizer(sparse=True, sort=True, column_descriptions=self.column_descriptions)))
if self.perform_feature_selection == True:
if trained_pipeline is not None:
# This is the step we are trying to remove from the trained_pipeline, since it has already been combined with dv using dv.restrict
pass
else:
pipeline_list.append(('feature_selection', utils_feature_selection.FeatureSelectionTransformer(type_of_estimator=self.type_of_estimator, column_descriptions=self.column_descriptions, feature_selection_model='SelectFromModel') ))
if trained_pipeline is not None:
# First, check and see if we have any steps with some version of keyword matching on something like 'intermediate_model_predictions' or 'feature_learning_model' or 'ensemble_model' or something like that in them.
# add all of those steps
# then try to add in the final_model that was passed in as a param
# if it's none, then we've already added in the final model with our keyword matching above!
for step in trained_pipeline.steps:
step_name = step[0]
if step_name[-6:] == '_model':
pipeline_list.append((step_name, trained_pipeline.named_steps[step_name]))
# Handling the case where we have run gscv on just the final model itself, and we now need to integrate it back into the rest of the pipeline
if final_model is not None:
pipeline_list.append((final_model_step_name, final_model))
# else:
# pipeline_list.append(('final_model', trained_pipeline.named_steps['final_model']))
else:
final_model = utils_models.get_model_from_name(model_name, training_params=self.training_params)
pipeline_list.append(('final_model', utils_model_training.FinalModelATC(model=final_model, type_of_estimator=self.type_of_estimator, ml_for_analytics=self.ml_for_analytics, name=self.name, scoring_method=self._scorer, feature_learning=feature_learning)))
constructed_pipeline = Pipeline(pipeline_list)
return constructed_pipeline
def _get_estimator_names(self):
if self.type_of_estimator == 'regressor':
base_estimators = ['GradientBoostingRegressor']
if self.compare_all_models != True:
return base_estimators
else:
base_estimators.append('RANSACRegressor')
base_estimators.append('RandomForestRegressor')
base_estimators.append('LinearRegression')
base_estimators.append('AdaBoostRegressor')
base_estimators.append('ExtraTreesRegressor')
return base_estimators
elif self.type_of_estimator == 'classifier':
base_estimators = ['GradientBoostingClassifier']
if self.compare_all_models != True:
return base_estimators
else:
base_estimators.append('LogisticRegression')
base_estimators.append('RandomForestClassifier')
return base_estimators
else:
            raise TypeError('type_of_estimator must be either "classifier" or "regressor".')
def _prepare_for_training(self, X):
# We accept input as either a DataFrame, or as a list of dictionaries. Internally, we use DataFrames. So if the user gave us a list, convert it to a DataFrame here.
if isinstance(X, list):
X_df =
|
pd.DataFrame(X)
|
pandas.DataFrame
|
import time
import sys
import os
import logging
print(sys.path)
logging.basicConfig(level=logging.DEBUG)
def test_lightgbm_gpu():
import numpy as np
import pandas as pd
from h2o4gpu.util.lightgbm_dynamic import got_cpu_lgb, got_gpu_lgb
import lightgbm as lgb
X1= np.repeat(np.arange(10), 1000)
X2= np.repeat(np.arange(10), 1000)
np.random.shuffle(X2)
y = (X1 + np.random.randn(10000)) * (X2 + np.random.randn(10000))
data =
|
pd.DataFrame({'y': y, 'X1': X1, 'X2': X2})
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pickle
import sys
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import gridspec
from pylfi.utils import setup_logger
from ._checks import *
from ._journal_base import JournalInternal
class Journal:
def __init__(self):
# list of parameter names (the 'name' kw from Prior object)
self.parameter_names = []
# list of parameter LaTeX names (the 'tex' kw from Prior object)
self.parameter_names_tex = []
# list of labels (param names) for plots; uses 'name' if 'tex' is None
self.labels = []
# list for storing distances of accepted samples
#self.distances = []
#self.rel_distances = []
# list for storing summary statistic values of accepted samples
#self.sumstats = []
# for tallying the number of inferred parameters
self._n_parameters = 0
# dict for storing inference configuration
self.configuration = {}
# dict for summarizing inference run
self._sampler_summary = {}
# dict for storing sampler results
self._sampler_results = {}
self._posterior_samples = {}
self._sampler_stats = {}
# bool used to limit access if journal has not been written to
self._journal_started = False
def _write_to_journal(
self,
observation,
simulator,
stat_calc,
priors,
distance_metric,
inference_scheme,
n_samples,
n_simulations,
posterior_samples,
summary_stats,
distances,
epsilons,
log
):
# journal is started
self._journal_started = True
self._log = log
if self._log:
self.logger = setup_logger(self.__class__.__name__)
self.logger.info("Write to journal.")
# initialize data structures
self._write_initialize(priors)
# write sampler results
self._write_results(posterior_samples,
summary_stats,
distances,
epsilons)
self._sampler_results_df = pd.DataFrame(self._sampler_results)
self._posterior_samples_df =
|
pd.DataFrame(self._posterior_samples)
|
pandas.DataFrame
|
"""Summarise length of edges/number of nodes within each boundary (commune, district, province)
Purpose
-------
Collect network attributes
- Combine with boundary Polygons to collect network-boundary intersection attributes
- Write final results to an Excel sheet
Input data requirements
-----------------------
1. Correct paths to all files and correct input parameters
2. Shapefiles of networks with attributes:
- edge_id or node_id - String/Integer/Float Edge ID or Node ID of network
- length - Float length of edge intersecting with hazards
- geometry - Shapely geometry of edges as LineString or nodes as Points
3. Shapefile of administrative boundaries of Argentina with attributes:
- province_i - String/Integer ID of Province
- pro_name_e - String name of Province in English
- district_i - String/Integer ID of District
- dis_name_e - String name of District in English
- commune_id - String/Integer ID of Commune
- name_eng - String name of Commune in English
- geometry - Shapely geometry of boundary Polygon
Results
-------
1. Excel sheet of network-hazard-boundary intersection with attributes:
- edge_id/node_id - String name of intersecting edge ID or node ID
- length - Float length of intersection of edge LineString and hazard Polygon: Only for edges
- province_id - String/Integer ID of Province
- province_name - String name of Province in English
- district_id - String/Integer ID of District
- district_name - String name of District in English
- commune_id - String/Integer ID of Commune
- commune_name - String name of Commune in English
"""
import itertools
import os
import sys
import geopandas as gpd
import pandas as pd
from shapely.geometry import Polygon
from atra.utils import *
from atra.transport_flow_and_failure_functions import *
from tqdm import tqdm
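# Illustrative sketch (not part of this script): a network-boundary intersection of the
# kind described in the module docstring could be obtained with a spatial join, e.g.
#   edges = gpd.read_file('network_edges.shp')        # placeholder path
#   boundaries = gpd.read_file('boundaries.shp')      # placeholder path
#   edges_with_admin = gpd.sjoin(edges, boundaries, how='inner', op='intersects')
# The helpers imported from atra.* are assumed to implement the project's actual logic.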
def risk_results_reorganise(risk_dataframe,id_column):
risk_columns = []
flood_types = ['fluvial flooding', 'pluvial flooding']
climate_scenarios = ['Future_Med','Future_High']
all_ids = pd.DataFrame(list(set(risk_dataframe[id_column].values.tolist())),columns=[id_column])
for ft in flood_types:
ht = risk_dataframe[risk_dataframe['hazard_type'] == ft]
current = list(set(list(zip(ht[id_column].values.tolist(),ht['current'].values.tolist()))))
current = pd.DataFrame(current,columns=[id_column,'{} current'.format(ft)])
risk_columns.append('{} current'.format(ft))
all_ids = pd.merge(all_ids,current,how='left',on=[id_column]).fillna(0)
for cs in climate_scenarios:
ht = risk_dataframe[(risk_dataframe['hazard_type'] == ft) & (risk_dataframe['climate_scenario'] == cs)]
future = list(set(list(zip(ht[id_column].values.tolist(),ht['future'].values.tolist(),ht['change'].values.tolist()))))
future = pd.DataFrame(future,columns=[id_column,'{} {} future'.format(ft,cs),'{} {} change'.format(ft,cs)])
risk_columns.append('{} {} future'.format(ft,cs))
risk_columns.append('{} {} change'.format(ft,cs))
all_ids = pd.merge(all_ids,future,how='left',on=[id_column]).fillna(0)
return all_ids, risk_columns
def risk_results_reorganise_climate_outlooks(risk_dataframe,id_column):
risk_columns = []
climate_scenarios = ['Future_Med','Future_High']
all_ids = risk_dataframe[[id_column,'current']]
# all_ids = pd.DataFrame(list(set(risk_dataframe[id_column].values.tolist())),columns=[id_column])
# current = list(set(list(zip(risk_dataframe[id_column].values.tolist(),
# risk_dataframe['current'].values.tolist()))))
# current = pd.DataFrame(current,columns=[id_column,'current'])
risk_columns.append('current')
# all_ids = pd.merge(all_ids,current,how='left',on=[id_column]).fillna(0)
for cs in climate_scenarios:
# ht = risk_dataframe[risk_dataframe['climate_scenario'] == cs]
# future = list(set(list(zip(risk_dataframe[id_column].values.tolist(),
# risk_dataframe['future'].values.tolist(),
# risk_dataframe['change'].values.tolist()))))
# future = pd.DataFrame(future,columns=[id_column,'{} value'.format(cs),'{} change'.format(cs)])
# risk_columns.append('{} value'.format(cs))
# risk_columns.append('{} change'.format(cs))
all_ids = pd.merge(all_ids,
risk_dataframe[risk_dataframe['climate_scenario'] == cs][[id_column,'future','change']],
how='left',on=[id_column]).fillna(0)
all_ids.rename(columns={'future':'{} value'.format(cs),'change':'{} change'.format(cs)},inplace=True)
risk_columns.append('{} value'.format(cs))
risk_columns.append('{} change'.format(cs))
return all_ids, risk_columns
def change_matrix(risk_dataframe,value_threshold,change_threshold):
total_counts_df = risk_dataframe.groupby(['hazard_type','climate_scenario']).size().reset_index(name='total_counts')
# print (total_counts_df)
scenario_df = risk_dataframe[risk_dataframe['change'] >= change_threshold].groupby(['hazard_type','climate_scenario']).size().reset_index(name='change_counts')
# print (change_df)
total_counts_df = pd.merge(total_counts_df,scenario_df,how='left',on=['hazard_type','climate_scenario']).fillna(0)
total_counts_df['percent'] = 100.0*total_counts_df['change_counts']/total_counts_df['total_counts']
scenario_df = risk_dataframe[risk_dataframe['future'] >= value_threshold].groupby(['hazard_type','climate_scenario']).size().reset_index(name='future_counts')
total_counts_df = pd.merge(total_counts_df,scenario_df,how='left',on=['hazard_type','climate_scenario']).fillna(0)
total_counts_df['percent_future'] = 100.0*total_counts_df['future_counts']/total_counts_df['total_counts']
scenario_df = risk_dataframe[risk_dataframe['current'] >= value_threshold].groupby(['hazard_type','climate_scenario']).size().reset_index(name='current_counts')
total_counts_df = pd.merge(total_counts_df,scenario_df,how='left',on=['hazard_type','climate_scenario']).fillna(0)
total_counts_df['percent_current'] = 100.0*total_counts_df['current_counts']/total_counts_df['total_counts']
scenario_df = risk_dataframe[(risk_dataframe['future'] >= value_threshold) & (risk_dataframe['change'] >= change_threshold)].groupby(['hazard_type','climate_scenario']).size().reset_index(name='future_percent_counts')
total_counts_df = pd.merge(total_counts_df,scenario_df,how='left',on=['hazard_type','climate_scenario']).fillna(0)
print (total_counts_df)
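# Note on the function above: change_matrix() computes, per hazard type and climate
# scenario, the counts and percentages of records whose 'change', 'future' and
# 'current' values meet the given thresholds; it prints the summary table and
# returns nothing.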
def main():
"""Summarise
1. Specify the paths from where you want to read and write:
- Input data
- Intermediate calculations data
- Output results
2. Supply input data and parameters
- Names of the three Provinces - List of string types
- Names of modes - List of strings
- Names of output modes - List of strings
- Names of hazard bands - List of integers
- Names of hazard thresholds - List of integers
- Condition 'Yes' or 'No' indicating whether the user wants to process results
3. Give the paths to the input data files:
- Commune boundary and stats data shapefile
- String name of sheet in hazard datasets description Excel file
4. Specify the output files and paths to be created
"""
tqdm.pandas()
incoming_data_path,data_path, calc_path, output_path = load_config()['paths']['incoming_data'],load_config()['paths']['data'], load_config()[
'paths']['calc'], load_config()['paths']['output']
# Supply input data and parameters
modes = ['road','rail','bridge']
risk_types = ['risks','eael','risks']
val_cols = ['min_total_tons','max_total_tons',
'min_tr_loss','max_tr_loss',
'min_econ_loss','max_econ_loss',
'min_econ_impact','max_econ_impact']
od_output_excel = os.path.join(output_path,'network_stats','network_failures_ranked.xlsx')
failure_excel_writer = pd.ExcelWriter(od_output_excel)
od_output_excel = os.path.join(output_path,'network_stats','network_combined_risks_ranked.xlsx')
risk_excel_writer =
|
pd.ExcelWriter(od_output_excel)
|
pandas.ExcelWriter
|
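# Minimal usage sketch for pandas.ExcelWriter, the API named in the row above.
# The output path and sheet name are placeholders, and writing .xlsx assumes an
# Excel engine such as openpyxl is installed.
import pandas as pd
sketch_df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
with pd.ExcelWriter("example.xlsx") as sketch_writer:
    sketch_df.to_excel(sketch_writer, sheet_name="summary", index=False)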
import pandas as pd
from pandas import Timestamp
import numpy as np
import pytest
import niimpy
from niimpy.util import TZ
df11 = pd.DataFrame(
{"user": ['wAzQNrdKZZax']*3 + ['Afxzi7oI0yyp']*3 + ['lb983ODxEFUD']*3,
"device": ['iMTB2alwYk1B']*3 + ['3Zkk0bhWmyny']*3 + ['n8rndM6J5_4B']*3,
"time": [1547709614.05, 1547709686.036, 1547709722.06, 1547710540.99, 1547710688.469, 1547711339.439, 1547711831.275, 1547711952.182, 1547712028.281 ],
"battery_level": [96, 96, 95, 95, 94, 93, 94, 94, 94],
"battery_status": ['3']*5 + ['2', '2', '3', '3'],
"battery_health": ['2']*9,
"battery_adaptor": ['0']*5+['1', '1', '0', '0'],
"datetime": ['2019-01-17 09:20:14.049999872+02:00', '2019-01-17 09:21:26.036000+02:00', '2019-01-17 09:22:02.060000+02:00',
'2019-01-17 09:35:40.990000128+02:00', '2019-01-17 09:38:08.469000192+02:00', '2019-01-17 09:48:59.438999808+02:00',
'2019-01-17 09:57:11.275000064+02:00', '2019-01-17 09:59:12.181999872+02:00', '2019-01-17 10:00:28.280999936+02:00']
})
df11['datetime'] = pd.to_datetime(df11['datetime'])
df11 = df11.set_index('datetime', drop=False)
def test_get_battery_data():
df=df11.copy()
battery = niimpy.battery.get_battery_data(df)
assert battery.loc[Timestamp('2019-01-17 09:20:14.049999872+02:00'), 'battery_level'] == 96
assert battery.loc[Timestamp('2019-01-17 09:21:26.036000+02:00'), 'battery_health'] == '2'
assert battery.loc[Timestamp('2019-01-17 09:48:59.438999808+02:00'), 'battery_status'] == '2'
assert battery.loc[Timestamp('2019-01-17 09:57:11.275000064+02:00'), 'battery_adaptor'] == '1'
def test_battery_occurrences():
df=df11.copy()
occurrences = niimpy.battery.battery_occurrences(df, hours=0, minutes=10)
assert occurrences.loc[Timestamp('2019-01-17 09:20:14.049999872+02:00'), 'occurrences'] == 2
assert occurrences.loc[Timestamp('2019-01-17 09:40:14.049999872+02:00'), 'occurrences'] == 1
def test_battery_gaps():
df=df11.copy()
gaps = niimpy.battery.battery_gaps(df)
assert gaps.delta.dtype == 'timedelta64[ns]'
assert gaps.tvalue.dtype == 'datetime64[ns, pytz.FixedOffset(120)]'
assert gaps.loc[
|
Timestamp('2019-01-17 09:22:02.060000+02:00')
|
pandas.Timestamp
|
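# Minimal usage sketch for pandas.Timestamp, the API named in the row above:
# parsing a timezone-aware timestamp like the index labels used in the battery
# tests.
import pandas as pd
sketch_ts = pd.Timestamp('2019-01-17 09:22:02.060000+02:00')
assert sketch_ts.tz is not None and sketch_ts.year == 2019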
import sys
import numpy as np
import pandas as pd
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
from pandas.util.testing import assert_series_equal
import pytest
from test_pvsystem import sam_data, pvsyst_module_params
from conftest import fail_on_pvlib_version, requires_scipy, requires_tables
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = modules[module].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module = 'Canadian_Solar_CS5P_220M'
module_parameters = modules[module].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_native_snl_ac_system(sam_data):
module = 'Canadian_Solar_CS5P_220M'
module_parameters = sam_data['cecmod'][module].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvsyst_dc_snl_ac_system(sam_data, pvsyst_module_params):
module = 'PVsyst test module'
module_parameters = pvsyst_module_params
module_parameters['b'] = 0.05
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data):
modules = sam_data['cecmod']
module = 'Canadian_Solar_CS5P_220M'
module_parameters = modules[module].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture
def location():
return Location(32.2, -111, altitude=700)
@pytest.fixture
def weather():
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'ghi': [500, 0], 'dni': [800, 0], 'dhi': [100, 0]},
index=times)
return weather
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the || accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
with pytest.warns(pvlibDeprecationWarning):
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_weather(system, location, weather, mocker):
mc = ModelChain(system, location)
m = mocker.spy(system, 'sapm_celltemp')
weather['wind_speed'] = 5
weather['temp_air'] = 10
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
# assert_called_once_with cannot be used with series, so need to use
# assert_series_equal on call_args
assert_series_equal(m.call_args[0][1], weather['wind_speed']) # wind
assert_series_equal(m.call_args[0][2], weather['temp_air']) # temp
assert not mc.ac.empty
def test_run_model_tracker(system, location, weather, mocker):
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mocker.spy(system, 'singleaxis')
mc = ModelChain(system, location)
mc.run_model(weather.index, weather=weather)
assert system.singleaxis.call_count == 1
assert (mc.tracking.columns == ['tracker_theta', 'aoi', 'surface_azimuth',
'surface_tilt']).all()
assert mc.ac[0] > 0
assert np.isnan(mc.ac[1])
def poadc(mc):
mc.dc = mc.total_irrad['poa_global'] * 0.2
mc.dc.name = None # assert_series_equal will fail without this
@pytest.mark.parametrize('dc_model', [
'sapm',
pytest.param('cec', marks=requires_scipy),
pytest.param('desoto', marks=requires_scipy),
pytest.param('pvsyst', marks=requires_scipy),
pytest.param('singlediode', marks=requires_scipy),
'pvwatts_dc'])
def test_infer_dc_model(system, cec_dc_snl_ac_system, pvsyst_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location, dc_model,
weather, mocker):
dc_systems = {'sapm': system,
'cec': cec_dc_snl_ac_system,
'desoto': cec_dc_snl_ac_system,
'pvsyst': pvsyst_dc_snl_ac_system,
'singlediode': cec_dc_snl_ac_system,
'pvwatts_dc': pvwatts_dc_pvwatts_ac_system}
dc_model_function = {'sapm': 'sapm',
'cec': 'calcparams_cec',
'desoto': 'calcparams_desoto',
'pvsyst': 'calcparams_pvsyst',
'singlediode': 'calcparams_desoto',
'pvwatts_dc': 'pvwatts_dc'}
system = dc_systems[dc_model]
# remove Adjust from model parameters for desoto, singlediode
if dc_model in ['desoto', 'singlediode']:
system.module_parameters.pop('Adjust')
m = mocker.spy(system, dc_model_function[dc_model])
mc = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.dc, (pd.Series, pd.DataFrame))
@pytest.mark.parametrize('dc_model', [
'sapm',
pytest.param('cec', marks=requires_scipy),
pytest.param('cec_native', marks=requires_scipy)])
def test_infer_spectral_model(location, system, cec_dc_snl_ac_system,
cec_dc_native_snl_ac_system, dc_model):
dc_systems = {'sapm': system,
'cec': cec_dc_snl_ac_system,
'cec_native': cec_dc_native_snl_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location,
orientation_strategy='None', aoi_model='physical')
assert isinstance(mc, ModelChain)
def test_dc_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'poadc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model=poadc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.ac, (pd.Series, pd.DataFrame))
assert not mc.ac.empty
def acdc(mc):
mc.ac = mc.dc
@pytest.mark.parametrize('ac_model', [
'snlinverter', pytest.param('adrinverter', marks=requires_scipy),
'pvwatts'])
def test_ac_models(system, cec_dc_adr_ac_system, pvwatts_dc_pvwatts_ac_system,
location, ac_model, weather, mocker):
ac_systems = {'snlinverter': system, 'adrinverter': cec_dc_adr_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system}
system = ac_systems[ac_model]
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
if ac_model == 'pvwatts':
ac_model += '_ac'
m = mocker.spy(system, ac_model)
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.ac, pd.Series)
assert not mc.ac.empty
assert mc.ac[1] < 1
def test_ac_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'acdc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, ac_model=acdc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
|
assert_series_equal(mc.ac, mc.dc)
|
pandas.util.testing.assert_series_equal
|
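# Minimal usage sketch for assert_series_equal, the API named in the row above.
# pandas.util.testing is deprecated in recent pandas; pandas.testing exposes the
# same helper.
import pandas as pd
from pandas.testing import assert_series_equal
assert_series_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0]))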
import os.path
import time
from collections import defaultdict
# import ray
import cv2
import numpy as np
import pandas as pd
import utils.metrics as metrics
from joblib import Parallel, delayed
from scipy import spatial
from tqdm import tqdm
from utils.hpatch import get_patch
from utils.misc import green
PARALLEL_EVALUATION = True
id2t = {0: {'e': 'ref', 'h': 'ref', 't': 'ref'},
1: {'e': 'e1', 'h': 'h1', 't': 't1'},
2: {'e': 'e2', 'h': 'h2', 't': 't2'},
3: {'e': 'e3', 'h': 'h3', 't': 't3'},
4: {'e': 'e4', 'h': 'h4', 't': 't4'},
5: {'e': 'e5', 'h': 'h5', 't': 't5'}}
tp = ['e', 'h', 't']
moddir = os.path.dirname(os.path.abspath(__file__))
tskdir = os.path.normpath(os.path.join(moddir, "..", "..", "tasks"))
def seqs_lengths(seqs):
""" Helper method to return length for all seqs"""
N = {}
for seq in seqs:
N[seq] = seqs[seq].N
return N
def dist_matrix(D1, D2, distance):
""" Distance matrix between two sets of descriptors"""
if distance == 'L2':
D = spatial.distance.cdist(D1, D2, 'euclidean')
elif distance == 'HAMMING':
from utilities import libupmboost_algs
# D = spatial.distance.cdist(D1, D2, 'cityblock')
# D = spatial.distance.cdist(np.unpackbits(D1, axis=1), np.unpackbits(D2, axis=1), 'hamming')
D = libupmboost_algs.cpp_numpy_popcount(np.bitwise_xor(D1[:, np.newaxis], D2[np.newaxis]), 2)
D = D.astype(np.float32) / 256.0
elif distance == 'L1':
D = spatial.distance.cdist(D1, D2, 'cityblock')
else:
raise ValueError('Unknown distance - valid options are |L2|L1|HAMMING|')
return D
#####################
# Verification task #
#####################
def get_verif_dists(descr, pairs, op):
d = {}
for t in ['e', 'h', 't']:
d[t] = np.empty((pairs.shape[0], 1))
idx = 0
pbar = tqdm(pairs)
pbar.set_description("Processing verification task %i/3 " % op)
# TODO Use BFMatcher to calculate this distance
for p in pbar:
[t1, t2] = [id2t[p[1]], id2t[p[4]]]
for t in tp:
d1 = getattr(descr[p[0]], t1[t])[p[2]]
d2 = getattr(descr[p[3]], t2[t])[p[5]]
distance = descr['distance']
if distance == 'L2':
dist = spatial.distance.euclidean(d1, d2)
elif distance == 'HAMMING':
# dist = spatial.distance.cityblock(d1, d2)
# dist = np.unpackbits(np.bitwise_xor(d1, d2)).sum()
from utilities import libupmboost_algs
dist = libupmboost_algs.cpp_numpy_popcount(np.bitwise_xor(d1, d2))
elif distance == 'L1':
dist = spatial.distance.cityblock(d1, d2)
else:
raise ValueError('Unknown distance - valid options are |L2|L1|HAMMING|')
d[t][idx] = dist
idx += 1
return d
def eval_verification(descr, split):
print('>> Evaluating %s task' % green('verification'))
start = time.time()
pos = pd.read_csv(os.path.join(tskdir, 'verif_pos_split-' + split['name'] + '.csv')).values
neg_intra = pd.read_csv(os.path.join(tskdir, 'verif_neg_intra_split-' + split['name'] + '.csv')).values
neg_inter = pd.read_csv(os.path.join(tskdir, 'verif_neg_inter_split-' + split['name'] + '.csv')).values
d_pos = get_verif_dists(descr, pos, 1)
d_neg_intra = get_verif_dists(descr, neg_intra, 2)
d_neg_inter = get_verif_dists(descr, neg_inter, 3)
results = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for t in tp:
l = np.vstack((np.zeros_like(d_pos[t]), np.ones_like(d_pos[t])))
d_intra = np.vstack((d_neg_intra[t], d_pos[t]))
d_inter = np.vstack((d_neg_inter[t], d_pos[t]))
# get results for the balanced protocol: 1M Positives - 1M Negatives
_, _, auc = metrics.roc(-d_intra, l)
results[t]['intra']['balanced']['auc'] = auc
results[t]['inter']['balanced']['auc'] = auc
# get results for the imbalanced protocol: 0.2M Pos - 1M Negs
N_imb = d_pos[t].shape[0] + int(d_pos[t].shape[0] * 0.2) # 1M + 0.2*1M
_, _, ap = metrics.pr(-d_intra[0:N_imb], l[0:N_imb])
results[t]['intra']['imbalanced']['ap'] = ap
_, _, ap = metrics.pr(-d_inter[0:N_imb], l[0:N_imb])
results[t]['inter']['imbalanced']['ap'] = ap
end = time.time()
print(">> %s task finished in %.0f secs " % (green('Verification'),
end - start))
return results
def gen_verif(seqs, split, N_pos=1e6, N_neg=1e6):
np.random.seed(42)
# positives
s = np.random.choice(split['test'], int(N_pos))
seq2len = seqs_lengths(seqs)
s_N = [seq2len[k] for k in s]
s_idx = np.array(
[np.random.choice(np.arange(k), 2, replace=False) for k in s_N])
s_type = np.array(
[np.random.choice(np.arange(5), 2, replace=False) for k in s_idx])
df = pd.DataFrame({'s1': pd.Series(s, dtype=object),
's2': pd.Series(s, dtype=object),
'idx1': pd.Series(s_idx[:, 0], dtype=int),
'idx2': pd.Series(s_idx[:, 0], dtype=int),
't1': pd.Series(s_type[:, 0], dtype=int),
't2': pd.Series(s_type[:, 1], dtype=int)})
df = df[['s1', 't1', 'idx1', 's2', 't2',
'idx2']] # updated order for matlab comp.
df.to_csv(
os.path.join(tskdir, 'verif_pos_split-' + split['name'] + '.csv'),
index=False)
# intra-sequence negatives
df = pd.DataFrame({'s1': pd.Series(s, dtype=object),
's2': pd.Series(s, dtype=object),
'idx1': pd.Series(s_idx[:, 0], dtype=int),
'idx2': pd.Series(s_idx[:, 1], dtype=int),
't1': pd.Series(s_type[:, 0], dtype=int),
't2': pd.Series(s_type[:, 1], dtype=int)})
df = df[['s1', 't1', 'idx1', 's2', 't2',
'idx2']] # updated order for matlab comp.
df.to_csv(
os.path.join(tskdir,
'verif_neg_intra_split-' + split['name'] + '.csv'),
index=False)
# inter-sequence negatives
s_inter = np.random.choice(split['test'], int(N_neg))
s_N_inter = [seq2len[k] for k in s_inter]
s_idx_inter = np.array([np.random.randint(k) for k in s_N_inter])
df = pd.DataFrame({'s1': pd.Series(s, dtype=object),
's2':
|
pd.Series(s_inter, dtype=object)
|
pandas.Series
|
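# Minimal usage sketch for pandas.Series with an explicit dtype, as in the
# DataFrame construction in gen_verif above; the values here are placeholders.
import pandas as pd
sketch_s1 = pd.Series(['seq_a', 'seq_b'], dtype=object)
sketch_idx = pd.Series([0, 1], dtype=int)
sketch_df = pd.DataFrame({'s1': sketch_s1, 'idx1': sketch_idx})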
import copy
import csv
import gzip
import logging
import os
import re
import subprocess
import tempfile
from collections import defaultdict
from multiprocessing import Pool
from pathlib import Path
import numpy as np
import pandas as pd
import tqdm
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from centreseq.bin.core.accessories import run_subprocess
main_log = logging.getLogger('main_log')
def read_seqs(infile, filter_list=None):
"""
Reads up sequences from a path to a fasta file
:param infile: path to fasta file
:param filter_list: Strings that should be in the description of the sequences
:return: a list of strings
"""
r = []
f = open_possible_gzip(infile)
for seq in SeqIO.parse(f, "fasta"):
if filter_list is not None:
assert isinstance(filter_list, list)
if any([x in seq.description for x in filter_list]):
r.append(seq)
else:
r.append(seq)
f.close()
return r
def faster_fasta_searching(infile, filter_list=[]):
"""
Loads up a fasta file into a list. Much faster than using SeqIO
:param infile: fasta infile
:param filter_list: a list of sequence ids you want to keep. If you want to keep everything pass []
:return:
"""
skip = True
gene_name = ""
gene_description = ""
seq = ""
seqs_all = []
f = open_possible_gzip(infile)
for line in f:
if line[0] == ">":
# Resolve last gene
if (filter_list == []) | (gene_name in filter_list):
seqs_all.append(SeqRecord(Seq(seq), id=gene_name, name=gene_name, description=gene_description))
# Initialize new gene
seq = ""
gene_name = line.split(" ")[0].lstrip(">")
gene_description = line.rstrip("\n")
# If we want everything
if filter_list == []:
skip = False
else:
# Keep this gene
if gene_name in filter_list:
skip = False
else:
skip = True
elif skip:
continue
else:
# Add sequence to the string
seq += line.rstrip("\n")
f.close()
# Resolve the final gene
if (filter_list == []) | (gene_name in filter_list):
seqs_all.append(SeqRecord(Seq(seq), id=gene_name, name=gene_name, description=gene_description))
return seqs_all
def open_possible_gzip(infile, flags="rt"):
"""
Opens a file handle for a gzipped or non-zipped file
:param infile: Path to file
:param flags:
:return: file handle
"""
infile = str(infile)
if re.search("\.gz$", infile):
f = gzip.open(infile, flags)
else:
f = open(infile, flags)
return f
def write_seqs_to_file(seq_list, outfile_seq=None):
"""
Write sequences to file. If no file is given then this is written to a tempfile
:param seq_list: a list of sequence objects
:param outfile_seq: outfile path
:return: the name of the output file
"""
if outfile_seq is None:
outfile_seq = tempfile.NamedTemporaryFile(suffix=".fasta", delete=False).name
with open(outfile_seq, "w") as f:
SeqIO.write(seq_list, f, "fasta")
return outfile_seq
def run_mmseqs(seqs1, seqs2):
"""
Equivalent to blast_seqs() but uses mmseqs and thus is much faster
:param seqs1: list of sequences to compare
:param seqs2: list of sequence to be compared against
:return:
"""
query_fasta = write_seqs_to_file(seqs1)
target_fasta = write_seqs_to_file(seqs2)
outfile = Path(tempfile.gettempdir()) / (next(tempfile._get_candidate_names()) + ".dat")
tmpdir = tempfile.TemporaryDirectory()
# This needs at least mmseqs v8
result = subprocess.run(["mmseqs"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# m = re.search("MMseqs2 Version: ([0-9])\..+", result.stdout.decode('utf-8'))
# assert m, "Can't read your mmseqs version, requires at least version 8"
# assert int(m.group(1)) >= 8, "Require mmseqs at least version 8"
cmd = f"mmseqs easy-search {query_fasta} {target_fasta} {outfile} {tmpdir.name} --threads 1 --split-memory-limit {max_mem_use} --search-type 3"
run_subprocess(cmd, get_stdout=True)
with open(outfile) as f:
mmseqs_output = f.read().rstrip("\n")
# I've renamed these for consistency with blast output
columns = "qseqid,sseqid,pident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bitscore".split(",")
return mmseqs_output, columns
def load_pangenome_list(pangenome_list: list):
"""
Takes a putative core genome list and loads it up
Checks whether there are any paralogs, and therefore, if we need to run the algorithm
:param pangenome_list: a list of gene names
"""
# Default is no change
update = False
# Check if we have any paralogs, and thus, need to update
for item in pangenome_list:
if pd.isnull(item):
continue
sp = "_".join(item.split("_")[:-1])
assert sp != "", "Cannot read species name from sequence name"
# If we have any paralogs, then we're going to need to update this
if len(clusters_global[sp][item]) > 2:
update = True
return update
def prehash_clusters(indir: Path):
"""
Loads up the mmseq self-clustering files (aka list of paralogs)
Fills self.clusters to be species-> dict from gene to list of genes in its cluster
"""
clusters = {}
for sp in os.listdir(str(indir / 'mmseqs2')):
reg_sub = re.sub(r'\.[^.]*$', '', sp)
infile_clusters = f"{str(indir)}/mmseqs2/{sp}/{reg_sub}_DB.cluster.tsv"
if not Path(infile_clusters).is_file():
infile_clusters = f"{str(indir)}/mmseqs2/{sp}/{reg_sub}.cluster.tsv"
clusters[sp] = load_clusters(infile_clusters)
return clusters
def find_medoid(indir, pangenome_list):
"""
Finds the medoid of the groups of nucleotide sequences
Picks a best representative sequence for each species and puts the results into self.cluster
"""
# If we do need to update this
seqs_all = [] # ffn sequence objects for everything in the distance matrix
groups = {} # sp -> groups of genes
grouped_seqs = [] # a list of the species annotation for every row/col of the distance matrix
# Load up the nucleotide sequences such that we can cluster
for item in pangenome_list:
if pd.isnull(item):
continue
sp = "_".join(item.split("_")[:-1])
# Load up the nucleotide sequences of those
infile_ffn = Path(indir / 'prokka' / sp / f"{sp}.ffn")
seqs = faster_fasta_searching(infile_ffn, filter_list=clusters_global[sp][item])
groups[sp] = seqs
seqs_all.extend(seqs)
grouped_seqs.extend([sp for i in range(len(seqs))])
# Calculate a distance matrix for the combined group of paralogs
dist_matrix, all_genes = calc_distance_matrix(seqs_all)
# The index of the medoid. This index is not in pangenome_list, rather in the full list of sequences (seqs_all)
medoid_id = define_medoid(dist_matrix, grouped_seqs)
medoid = seqs_all[medoid_id].id
main_log.debug(f"Medoid is {str(medoid)}")
best_seqs = choose_best_seqs(dist_matrix, all_genes, groups, medoid_id)
# Put best_seqs into the spaces in cluster_post
cluster_post = copy.copy(pangenome_list)
j = 0
for i, e in enumerate(cluster_post):
if not
|
pd.isnull(e)
|
pandas.isnull
|
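# Minimal usage sketch for pandas.isnull, the API named in the row above: it
# works on scalars as well as element-wise on array-likes.
import numpy as np
import pandas as pd
assert pd.isnull(np.nan) and pd.isnull(None) and not pd.isnull("gene_1")
assert list(pd.isnull(["gene_1", None])) == [False, True]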
import json
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from perceptron import Neuron
def read_data():
with open('data.json') as f:
return json.load(f)
def main():
sns.set(rc={'figure.figsize': (8, 4)})
sns.set_style('darkgrid', {'figure.facecolor': '#dddddd', 'axes.facecolor': '#dddddd'})
n = Neuron(number_of_inputs=2, training_rate=.1)
data = read_data()
X = data['X']
y = data['y']
error = n.run(X, y)
df = pd.DataFrame(data=enumerate(error), columns=['iteration', 'error'])
plt.subplot(1, 2, 1)
sns.lineplot(x='iteration', y='error', data=df)
plot_data = []
for i, point in enumerate(X):
prediction = n.predict(point)
label = y[i]
if prediction == label:
plot_data.append([*point, label])
else:
plot_data.append([*point, 'error'])
df2 =
|
pd.DataFrame(data=plot_data, columns=['x', 'y', 'label'])
|
pandas.DataFrame
|
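# Minimal usage sketch for pandas.DataFrame built from a list of rows plus
# column names, mirroring the plot_data construction above.
import pandas as pd
sketch_rows = [[0.1, 0.2, 1], [0.3, 0.4, 'error']]
sketch_df = pd.DataFrame(data=sketch_rows, columns=['x', 'y', 'label'])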
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from numpy.random import uniform, seed
# from scipy.interpolate import griddata
tf.random.set_seed(123)
data_path = "../../../data"
train_file_path = "%s/titanic/train.csv" % data_path
test_file_path = "%s/titanic/eval.csv" % data_path
# Load dataset.
dftrain =
|
pd.read_csv(train_file_path)
|
pandas.read_csv
|
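# Minimal usage sketch for pandas.read_csv, the API named in the row above;
# reading from an in-memory buffer with example values so the sketch is
# self-contained rather than depending on the titanic files referenced above.
import io
import pandas as pd
sketch_csv = io.StringIO("survived,age\n0,22.0\n1,38.0\n")
sketch_df = pd.read_csv(sketch_csv)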
# -*- coding: utf-8 -*-
# author:zhengk
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
from matplotlib.font_manager import FontProperties
def timeline_plot():
df_ori = pd.read_csv('articles.csv', sep=';', header=None)
# Take the first column and split it into date and title
df = df_ori.iloc[:, 0]
df = df.str.split(';', expand=True)
# Format the date, set the column names, and use the date as the index
df.columns = ['date', 'title']
df.date =
|
pd.to_datetime(df.date)
|
pandas.to_datetime
|
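# Minimal usage sketch for pandas.to_datetime: converting a column of date
# strings into datetime64 values, as done for df.date above.
import pandas as pd
sketch_dates = pd.to_datetime(pd.Series(['2018-11-13', '2018-11-14']),
                              format='%Y-%m-%d')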
from contextlib import closing
import socket
import json
import os
import tempfile
from pathlib import Path
from tempfile import NamedTemporaryFile
from textwrap import dedent
from unittest.mock import patch
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import pyarrow
import pytest
import responses
from datarobot_drum.drum.drum import (
possibly_intuit_order,
output_in_code_dir,
create_custom_inference_model_folder,
)
from datarobot_drum.drum.exceptions import DrumCommonException
from datarobot_drum.drum.model_adapter import PythonModelAdapter
from datarobot_drum.drum.language_predictors.python_predictor.python_predictor import (
PythonPredictor,
)
from datarobot_drum.drum.language_predictors.r_predictor.r_predictor import RPredictor
from datarobot_drum.drum.language_predictors.java_predictor.java_predictor import JavaPredictor
from datarobot_drum.drum.push import _push_inference, _push_training, drum_push
from datarobot_drum.drum.common import (
read_model_metadata_yaml,
MODEL_CONFIG_FILENAME,
TargetType,
validate_config_fields,
ModelMetadataKeys,
)
from datarobot_drum.drum.utils import StructuredInputReadUtils
class TestOrderIntuition:
tests_data_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "testdata"))
binary_filename = os.path.join(tests_data_path, "iris_binary_training.csv")
regression_filename = os.path.join(tests_data_path, "boston_housing.csv")
one_target_filename = os.path.join(tests_data_path, "one_target.csv")
def test_colname(self):
classes = possibly_intuit_order(self.binary_filename, target_col_name="Species")
assert set(classes) == {"Iris-versicolor", "Iris-setosa"}
def test_colfile(self):
with NamedTemporaryFile() as target_file:
df = pd.read_csv(self.binary_filename)
with open(target_file.name, "w") as f:
target_series = df["Species"]
target_series.to_csv(f, index=False, header="Target")
classes = possibly_intuit_order(self.binary_filename, target_data_file=target_file.name)
assert set(classes) == {"Iris-versicolor", "Iris-setosa"}
def test_badfile(self):
with pytest.raises(DrumCommonException):
possibly_intuit_order(self.one_target_filename, target_col_name="Species")
def test_unsupervised(self):
classes = possibly_intuit_order(
self.regression_filename, target_col_name="MEDV", is_anomaly=True
)
assert classes is None
class TestValidatePredictions:
def test_class_labels(self):
positive_label = "poslabel"
negative_label = "neglabel"
adapter = PythonModelAdapter(model_dir=None, target_type=TargetType.BINARY)
df = pd.DataFrame({positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]})
adapter._validate_predictions(
to_validate=df, class_labels=[positive_label, negative_label],
)
with pytest.raises(ValueError):
df = pd.DataFrame({positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]})
adapter._validate_predictions(
to_validate=df, class_labels=["yes", "no"],
)
def test_regression_predictions_header(self):
adapter = PythonModelAdapter(model_dir=None, target_type=TargetType.REGRESSION)
df = pd.DataFrame({"Predictions": [0.1, 0.2, 0.3]})
adapter._validate_predictions(
to_validate=df, class_labels=None,
)
with pytest.raises(ValueError):
df = pd.DataFrame({"other_name": [0.1, 0.2, 0.3]})
adapter._validate_predictions(
to_validate=df, class_labels=None,
)
def test_add_to_one(self):
positive_label = "poslabel"
negative_label = "neglabel"
for predictor in [PythonPredictor(), RPredictor(), JavaPredictor()]:
predictor._target_type = TargetType.BINARY
df_good = pd.DataFrame(
{positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]}
)
predictor.validate_output(df_good)
df_bad = pd.DataFrame({positive_label: [1, 1, 1], negative_label: [-1, 0, 0]})
with pytest.raises(ValueError):
predictor.validate_output(df_bad)
modelID = "5f1f15a4d6111f01cb7f91f"
environmentID = "5e8c889607389fe0f466c72d"
projectID = "abc123"
@pytest.fixture
def inference_metadata_yaml():
return dedent(
"""
name: drumpush-regression
type: inference
targetType: regression
environmentID: {environmentID}
inferenceModel:
targetName: MEDV
validation:
input: hello
"""
).format(environmentID=environmentID)
@pytest.fixture
def inference_binary_metadata_yaml_no_target_name():
return dedent(
"""
name: drumpush-binary
type: inference
targetType: binary
environmentID: {environmentID}
inferenceModel:
positiveClassLabel: yes
negativeClassLabel: no
validation:
input: hello
"""
).format(environmentID=environmentID)
@pytest.fixture
def inference_binary_metadata_no_label():
return dedent(
"""
name: drumpush-binary
type: inference
targetType: binary
inferenceModel:
positiveClassLabel: yes
"""
)
@pytest.fixture
def multiclass_labels():
return ["GALAXY", "QSO", "STAR"]
@pytest.fixture
def inference_multiclass_metadata_yaml_no_labels():
return dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
validation:
input: hello
"""
).format(environmentID)
@pytest.fixture
def inference_multiclass_metadata_yaml(multiclass_labels):
return dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
classLabels:
- {}
- {}
- {}
validation:
input: hello
"""
).format(environmentID, *multiclass_labels)
@pytest.fixture
def inference_multiclass_metadata_yaml_label_file(multiclass_labels):
with NamedTemporaryFile(mode="w+") as f:
f.write("\n".join(multiclass_labels))
f.flush()
yield dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
classLabelsFile: {}
validation:
input: hello
"""
).format(environmentID, f.name)
@pytest.fixture
def inference_multiclass_metadata_yaml_labels_and_label_file(multiclass_labels):
with NamedTemporaryFile(mode="w+") as f:
f.write("\n".join(multiclass_labels))
f.flush()
yield dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
classLabelsFile: {}
classLabels:
- {}
- {}
- {}
validation:
input: hello
"""
).format(environmentID, f.name, *multiclass_labels)
@pytest.fixture
def training_metadata_yaml():
return dedent(
"""
name: drumpush-regression
type: training
targetType: regression
environmentID: {environmentID}
validation:
input: hello
"""
).format(environmentID=environmentID)
@pytest.fixture
def training_metadata_yaml_with_proj():
return dedent(
"""
name: drumpush-regression
type: training
targetType: regression
environmentID: {environmentID}
trainingModel:
trainOnProject: {projectID}
validation:
input: hello
"""
).format(environmentID=environmentID, projectID=projectID)
@pytest.fixture
def custom_predictor_metadata_yaml():
return dedent(
"""
name: model-with-custom-java-predictor
type: inference
targetType: regression
customPredictor:
arbitraryField: This info is read directly by a custom predictor
"""
)
version_response = {
"id": "1",
"custom_model_id": "1",
"version_minor": 1,
"version_major": 1,
"is_frozen": False,
"items": [{"id": "1", "file_name": "hi", "file_path": "hi", "file_source": "hi"}],
}
@pytest.mark.parametrize(
"config_yaml",
[
"custom_predictor_metadata_yaml",
"training_metadata_yaml",
"training_metadata_yaml_with_proj",
"inference_metadata_yaml",
"inference_multiclass_metadata_yaml",
"inference_multiclass_metadata_yaml_label_file",
],
)
@pytest.mark.parametrize("existing_model_id", [None])
def test_yaml_metadata(request, config_yaml, existing_model_id, tmp_path):
config_yaml = request.getfixturevalue(config_yaml)
if existing_model_id:
config_yaml = config_yaml + "\nmodelID: {}".format(existing_model_id)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
read_model_metadata_yaml(tmp_path)
@pytest.mark.parametrize(
"config_yaml, test_case_number",
[
("custom_predictor_metadata_yaml", 1),
("inference_binary_metadata_no_label", 2),
("inference_multiclass_metadata_yaml_no_labels", 3),
("inference_multiclass_metadata_yaml_labels_and_label_file", 4),
("inference_multiclass_metadata_yaml", 100),
("inference_multiclass_metadata_yaml_label_file", 100),
],
)
def test_yaml_metadata_missing_fields(tmp_path, config_yaml, request, test_case_number):
config_yaml = request.getfixturevalue(config_yaml)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
if test_case_number == 1:
conf = read_model_metadata_yaml(tmp_path)
with pytest.raises(
DrumCommonException, match="Missing keys: \['validation', 'environmentID'\]"
):
validate_config_fields(
conf,
ModelMetadataKeys.CUSTOM_PREDICTOR,
ModelMetadataKeys.VALIDATION,
ModelMetadataKeys.ENVIRONMENT_ID,
)
elif test_case_number == 2:
with pytest.raises(DrumCommonException, match="Missing keys: \['negativeClassLabel'\]"):
read_model_metadata_yaml(tmp_path)
elif test_case_number == 3:
with pytest.raises(
DrumCommonException,
match="Error - for multiclass classification, either the class labels or a class labels file must be provided in model-metadata.yaml file",
):
read_model_metadata_yaml(tmp_path)
elif test_case_number == 4:
with pytest.raises(
DrumCommonException,
match="Error - for multiclass classification, either the class labels or a class labels file should be provided in model-metadata.yaml file, but not both",
):
read_model_metadata_yaml(tmp_path)
elif test_case_number == 100:
read_model_metadata_yaml(tmp_path)
def test_read_model_metadata_properly_casts_typeschema(tmp_path, training_metadata_yaml):
config_yaml = training_metadata_yaml + dedent(
"""
typeSchema:
input_requirements:
- field: number_of_columns
condition: IN
value:
- 1
- 2
- field: data_types
condition: EQUALS
value:
- NUM
- TXT
output_requirements:
- field: number_of_columns
condition: IN
value: 2
- field: data_types
condition: EQUALS
value: NUM
"""
)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
yaml_conf = read_model_metadata_yaml(tmp_path)
output_reqs = yaml_conf["typeSchema"]["output_requirements"]
input_reqs = yaml_conf["typeSchema"]["input_requirements"]
value_key = "value"
expected_as_int_list = next(
(el for el in input_reqs if el["field"] == "number_of_columns")
).get(value_key)
expected_as_str_list = next((el for el in input_reqs if el["field"] == "data_types")).get(
value_key
)
expected_as_int = next((el for el in output_reqs if el["field"] == "number_of_columns")).get(
value_key
)
expected_as_str = next((el for el in output_reqs if el["field"] == "data_types")).get(value_key)
assert all(isinstance(el, int) for el in expected_as_int_list)
assert all(isinstance(el, str) for el in expected_as_str_list)
assert isinstance(expected_as_str_list, list)
assert isinstance(expected_as_int, int)
assert isinstance(expected_as_str, str)
def version_mocks():
responses.add(
responses.GET,
"http://yess/version/",
json={"major": 2, "versionString": "2.21", "minor": 21},
status=200,
)
responses.add(
responses.POST,
"http://yess/customModels/{}/versions/".format(modelID),
json=version_response,
status=200,
)
def mock_get_model(model_type="training", target_type="Regression"):
body = {
"customModelType": model_type,
"id": modelID,
"name": "1",
"description": "1",
"targetType": target_type,
"deployments_count": "1",
"created_by": "1",
"updated": "1",
"created": "1",
"latestVersion": version_response,
}
if model_type == "inference":
body["language"] = "Python"
body["trainingDataAssignmentInProgress"] = False
responses.add(
responses.GET, "http://yess/customModels/{}/".format(modelID), json=body,
)
responses.add(
responses.POST, "http://yess/customModels/".format(modelID), json=body,
)
def mock_post_blueprint():
responses.add(
responses.POST,
"http://yess/customTrainingBlueprints/",
json={
"userBlueprintId": "2",
"custom_model": {"id": "1", "name": "1"},
"custom_model_version": {"id": "1", "label": "1"},
"execution_environment": {"id": "1", "name": "1"},
"execution_environment_version": {"id": "1", "label": "1"},
"training_history": [],
},
)
def mock_post_add_to_repository():
responses.add(
responses.POST,
"http://yess/projects/{}/blueprints/fromUserBlueprint/".format(projectID),
json={"id": "1"},
)
def mock_get_env():
responses.add(
responses.GET,
"http://yess/executionEnvironments/{}/".format(environmentID),
json={
"id": "1",
"name": "hi",
"latestVersion": {"id": "hii", "environment_id": environmentID, "build_status": "yes"},
},
)
def mock_train_model():
responses.add(
responses.POST,
"http://yess/projects/{}/models/".format(projectID),
json={},
adding_headers={"Location": "the/moon"},
)
responses.add(
responses.GET,
"http://yess/projects/{}/modelJobs/the/".format(projectID),
json={
"is_blocked": False,
"id": "55",
"processes": [],
"model_type": "fake",
"project_id": projectID,
"blueprint_id": "1",
},
)
@responses.activate
@pytest.mark.parametrize(
"config_yaml",
[
"training_metadata_yaml",
"training_metadata_yaml_with_proj",
"inference_metadata_yaml",
"inference_multiclass_metadata_yaml",
"inference_multiclass_metadata_yaml_label_file",
],
)
@pytest.mark.parametrize("existing_model_id", [None, modelID])
def test_push(request, config_yaml, existing_model_id, multiclass_labels, tmp_path):
config_yaml = request.getfixturevalue(config_yaml)
if existing_model_id:
config_yaml = config_yaml + "\nmodelID: {}".format(existing_model_id)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
config = read_model_metadata_yaml(tmp_path)
version_mocks()
mock_post_blueprint()
mock_post_add_to_repository()
mock_get_model(model_type=config["type"], target_type=config["targetType"].capitalize())
mock_get_env()
mock_train_model()
push_fn = _push_training if config["type"] == "training" else _push_inference
push_fn(config, code_dir="", endpoint="http://Yess", token="<PASSWORD>")
calls = responses.calls
if existing_model_id is None:
assert calls[1].request.path_url == "/customModels/" and calls[1].request.method == "POST"
if config["targetType"] == TargetType.MULTICLASS.value:
sent_labels = json.loads(calls[1].request.body)["classLabels"]
assert sent_labels == multiclass_labels
call_shift = 1
else:
call_shift = 0
assert (
calls[call_shift + 1].request.path_url == "/customModels/{}/versions/".format(modelID)
and calls[call_shift + 1].request.method == "POST"
)
if push_fn == _push_training:
assert (
calls[call_shift + 2].request.path_url == "/customTrainingBlueprints/"
and calls[call_shift + 2].request.method == "POST"
)
if "trainingModel" in config:
assert (
calls[call_shift + 3].request.path_url
== "/projects/{}/blueprints/fromUserBlueprint/".format(projectID)
and calls[call_shift + 3].request.method == "POST"
)
assert (
calls[call_shift + 4].request.path_url == "/projects/abc123/models/"
and calls[call_shift + 4].request.method == "POST"
)
assert len(calls) == 6 + call_shift
else:
assert len(calls) == 3 + call_shift
else:
assert len(calls) == 2 + call_shift
@responses.activate
@pytest.mark.parametrize(
"config_yaml", ["inference_binary_metadata_yaml_no_target_name",],
)
def test_push_no_target_name_in_yaml(request, config_yaml, tmp_path):
config_yaml = request.getfixturevalue(config_yaml)
config_yaml = config_yaml + "\nmodelID: {}".format(modelID)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
config = read_model_metadata_yaml(tmp_path)
from argparse import Namespace
options = Namespace(code_dir=tmp_path, model_config=config)
with pytest.raises(DrumCommonException, match="Missing keys: \['targetName'\]"):
drum_push(options)
def test_output_in_code_dir():
code_dir = "/test/code/is/here"
output_other = "/test/not/code"
output_code_dir = "/test/code/is/here/output"
assert not output_in_code_dir(code_dir, output_other)
assert output_in_code_dir(code_dir, output_code_dir)
def test_output_dir_copy():
with tempfile.TemporaryDirectory() as tempdir:
# setup
file = Path(tempdir, "test.py")
file.touch()
Path(tempdir, "__pycache__").mkdir()
out_dir = Path(tempdir, "out")
out_dir.mkdir()
# test
create_custom_inference_model_folder(tempdir, str(out_dir))
assert Path(out_dir, "test.py").exists()
assert not Path(out_dir, "__pycache__").exists()
assert not Path(out_dir, "out").exists()
def test_read_structured_input_arrow_csv_na_consistency(tmp_path):
"""
Test that N/A values (None, numpy.nan) are handled consistently when using
CSV vs Arrow as a prediction payload format.
1. Make CSV and Arrow prediction payloads from the same dataframe
2. Read both payloads
3. Assert the resulting dataframes are equal
"""
# arrange
df =
|
pd.DataFrame({"col_int": [1, np.nan, None], "col_obj": ["a", np.nan, None]})
|
pandas.DataFrame
|
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
|
IntervalIndex(5)
|
pandas.IntervalIndex
|
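# Minimal usage sketch for pandas.IntervalIndex: three of the constructors
# exercised by the tests above build the same index.
import pandas as pd
sketch_a = pd.IntervalIndex.from_breaks([0, 1, 2, 3], closed='right')
sketch_b = pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3], closed='right')
sketch_c = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)], closed='right')
assert sketch_a.equals(sketch_b) and sketch_b.equals(sketch_c)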
import seaborn as sns
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
from pandas.io.json import json_normalize
from pysal.lib import weights
from sklearn import cluster
from shapely.geometry import Point
# # # # # PET DATA # # # # #
# filename = "pets.json"
# with open(filename, 'r') as f:
# objects = ijson.items
# austin dangerous dog api
urlD = 'https://data.austintexas.gov/resource/ykw4-j3aj.json'
# austin stray dog data
urlS = 'https://data.austintexas.gov/resource/hye6-gvq2.json'
# found_df / austin found pets pandas data frame constructor
pets_df = pd.read_json(urlS, orient='records')
location_df = json_normalize(pets_df['location'])
concat_df =
|
pd.concat([pets_df, location_df], axis=1)
|
pandas.concat
|
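# Minimal usage sketch for pandas.concat with axis=1: appending normalized
# location columns to the original frame, as for concat_df above; the column
# names and values here are placeholders.
import pandas as pd
sketch_left = pd.DataFrame({'animal_type': ['Dog', 'Cat']})
sketch_right = pd.DataFrame({'latitude': [30.26, 30.27], 'longitude': [-97.74, -97.73]})
sketch_combined = pd.concat([sketch_left, sketch_right], axis=1)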
from maxcutpy import CrossEntropyMaxCut
import numpy as np
import pandas as pd
def test_ce_initialization():
matrix = np.array([[0,1,1],[1,0,5],[1,5,0]])
ce_cut = CrossEntropyMaxCut(seed=12345, matrix=matrix)
assert ce_cut.batches_split == False
assert ce_cut.best_cut_vector is None
assert ce_cut.best_cut_score is None
def test_ce_functionality():
matrix = np.array([[0,1,1],[1,0,5],[1,5,0]])
ce_cut = CrossEntropyMaxCut(seed=12345, matrix=matrix)
assert ce_cut.batches_split == False
assert ce_cut.best_cut_vector is None
assert ce_cut.best_cut_score is None
best_cut_vector = ce_cut.batch_split()
assert (best_cut_vector == np.array([1, 0, 1])).all()
assert ce_cut.batches_split == True
assert (ce_cut.best_cut_vector == np.array([1, 0, 1])).all()
assert ce_cut.best_cut_score == 6
def test_ce_fromdataframe():
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 21:17:27 2018
@author: wzy
"""
import pandas as pd
from copy import deepcopy
NO_1 = pd.read_csv("lgb.csv")
NO_2 = pd.read_csv("xgb.csv")
NO_3 =
|
pd.read_csv("catboost.csv")
|
pandas.read_csv
|
'''
Tidy HL7 v2 message segments
'''
import itertools
import pandas as pd
from tidy_hl7_msgs.helpers import (
to_df, join_dfs, zip_msg_ids, are_segs_identical
)
from tidy_hl7_msgs.parsers import parse_msgs, parse_msg_id
def tidy_segs(msg_id_locs, report_locs, msgs):
''' Tidy HL7 message segments
Parameters
----------
msg_id_locs : list or dict
Locations (i.e. HL7 message fields or components) that taken together
uniquely identify messages after de-duplication. Locations can be
from different message segments, but each location must return one
value per message. Values must not be missing. Message IDs must
uniquely identify messages.
Location syntax must be either <segment>.<field> or
<segment>.<field>.<component>, delineated by a period (ex. 'MSH.4'
or 'MSH.4.1')
If passed a dictionary, its keys must be ID locations and its values
will be corresponding column names in the returned dataframe.
report_locs : list or dict
Locations (i.e. HL7 message fields or components) to report.
Locations must be from the same segment.
Location syntax must be either <segment>.<field> or
<segment>.<field>.<component>, delineated by a period (ex. 'DG1.4'
or 'DG1.4.1')
If passed a dictionary, its keys must be report locations and its
values will be corresponding column names in the returned dataframe.
msgs : list(string) of HL7 v2 messages
Returns
-------
Dataframe
Columns: one for each message ID/report location and for segment number
Rows: one per segment
Missing values are reported as NAs
If a message is missing the segment, a single row for this message is
returned with a segment number of NA and NAs for report locations.
Raises
------
ValueError if any parameter is empty
ValueError if report locations are not from the same segment
'''
# pylint: disable=invalid-name
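# Illustrative call of tidy_segs (the locations and column names below are
# example values, not taken from the module):
#   tidy_df = tidy_segs(
#       msg_id_locs={'MSH.7': 'msg_time', 'MSH.10': 'msg_control_id'},
#       report_locs={'DG1.3.1': 'diagnosis_code', 'DG1.6': 'diagnosis_type'},
#       msgs=hl7_messages,
#   )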
if not msg_id_locs:
raise ValueError("One or more message ID locations required")
if not report_locs:
raise ValueError("One or more report locations required")
if not msgs:
raise ValueError("One of more HL7 v2 messages required")
if not are_segs_identical(report_locs):
raise ValueError("Report locations must be from the same segment")
msgs_unique = set(msgs)
# parse message id locations
msg_ids = parse_msg_id(list(msg_id_locs), msgs_unique)
# parse report locations
report_vals = map(parse_msgs, list(report_locs), itertools.repeat(msgs_unique))
# zip values for each report location w/ message ids
zipped = map(zip_msg_ids, report_vals, itertools.repeat(msg_ids))
# convert each zipped message id + report value to a dataframe
dfs = list(map(to_df, zipped, report_locs))
# join dataframes
df = join_dfs(dfs)
# for natural sorting by segment, then for pretty printing
df['seg'] = df['seg'].astype('float32')
df.sort_values(by=['msg_id', 'seg'], inplace=True)
df['seg'] = df['seg'].astype('object')
# cleanup index
df.reset_index(drop=True, inplace=True)
# tidy message ids
id_cols = df['msg_id'].str.split(",", expand=True)
id_cols.columns = msg_id_locs
df =
|
pd.concat([id_cols, df], axis=1)
|
pandas.concat
|
#!/usr/bin/env python3
import abscplane
import numpy as np
import pandas as pd
""" The module includes a new class ArrayComplexPlane that subclasses the abstract base class AbsComplexPlane which is imported from the abscplane.py module.
"""
class ArrayComplexPlane(abscplane.AbsComplexPlane):
""" This class implements the complex plane with given attributes.
It uses numpy and pandas to represent the 2D grid needed to store
the complex plane. The complex plane is a 2D grid of complex numbers,
having the form (x + y*1j), where 1j is the unit imaginary number in
Python, and one can think of x and y as the coordinates for the horizontal
axis and the vertical axis of the plane respectively. All attributes will
be set during the __init__ constructor, and initialize the plane immediately
upon class instantiation.
Methods:
_create_plane : a private method that creates or refreshes the plane
refresh : regenerate plane
apply : apply a given function f
zoom : transform planes going through all functions lists
"""
def __init__(self, xmin=-4,xmax=4,xlen=8,ymin=-4,ymax=4,ylen=8):
"""all attributes will be automatically set when the class becomes
instantiated.
Attributes:
xmax (float) : maximum horizontal axis value
xmin (float) : minimum horizontal axis value
xlen (int) : number of horizontal points
ymax (float) : maximum vertical axis value
ymin (float) : minimum vertical axis value
xunit (float) : grid unit value of x axis
yunit (float) : grid unit value of y axis
ylen (int) : number of vertical points
plane : a list of stored complex plane
fs (list[function]) : function sequence to transform plane
"""
self.xmin = xmin
self.xmax = xmax
self.xlen = xlen
self.ymin = ymin
self.ymax = ymax
self.ylen = ylen
self.xunit = (self.xmax - self.xmin) / self.xlen
self.yunit = (self.ymax - self.ymin) / self.ylen
# See the implementation details of creating a complex plane below
# in _create_plane function.
self.plane = []
self._create_plane()
# store a list of functions that are being applied
# in order to each point of the complex plane, initially empty
self.fs = []
def _create_plane(self):
"""this method creates a list of complext number using the default attributes
(xmax, xmin, xlen, ymin, ymax, ymin) using numpy.
"""
x = np.linspace(self.xmin, self.xmax, self.xlen+1)
y = np.linspace(self.ymin, self.ymax, self.ylen+1)
xx, yy = np.meshgrid(x, y)
z = xx + yy*1j
self.plane =
|
pd.DataFrame(z, columns=x, index=y)
|
pandas.DataFrame
|
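# A small sketch of the completed step, following the same conventions as
# _create_plane above: np.meshgrid lays out the grid of complex points and
# pd.DataFrame labels the rows with y values and the columns with x values.
import numpy as np
import pandas as pd

x = np.linspace(-2, 2, 5)
y = np.linspace(-2, 2, 5)
xx, yy = np.meshgrid(x, y)
plane = pd.DataFrame(xx + yy * 1j, columns=x, index=y)
print(plane.loc[0.0, 1.0])   # (1+0j): the grid point at x=1, y=0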
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def assignHost(VCs_query,level):
# Host range at the species level
gVC_tax={}
for vc in VCs_query:
gVC_tax[vc]=[]
for i in VCs_query:
idx=int(i.split('_')[1])
for uv in VCs[idx]:
assem=all_uvs_assem.get(uv)
if assem!=None:
for a in assem:
if level=='species':
gVC_tax[i].append(assem_to_spp[a])
if level=='genus':
gVC_tax[i].append(assem_to_genus[a])
if level=='family':
gVC_tax[i].append(assem_to_fam[a])
if level=='order':
gVC_tax[i].append(assem_to_order[a])
if level=='class':
gVC_tax[i].append(assem_to_class[a])
if level=='phylum':
gVC_tax[i].append(assem_to_phyla[a])
for k in gVC_tax.keys():
gVC_tax[k]=list(set(gVC_tax[k]))
return gVC_tax
scaff_to_gca={}
with open('gca_to_scaf.txt') as inFile:
for line in inFile:
scaff_to_gca[line.split()[1].strip()]=line.split()[0]
gca_to_scaff={}
for k in list(scaff_to_gca.keys()):
gca_to_scaff[scaff_to_gca[k]]=k
assem_to_fam={}
assem_to_order={}
assem_to_class={}
assem_to_phyla={}
assem_to_genus={}
assem_to_spp={}
fam_to_phyla={}
order_to_phyla={}
class_to_phyla={}
genus_to_phyla={}
genus_to_fam={}
genus_to_order={}
with open('hgg_bgi_taxonomy.tab') as inFile:
for line in inFile:
assem=line.split('\t')[0]
if len(assem.split('_'))==3:
assem=assem.split('_')[0]+'_'+assem.split('_')[1]+'#'+assem.split('_')[2]
elif 'scaffold' in assem:
assem=scaff_to_gca[assem]
fam=line.split('\t')[5]
phyla=line.split('\t')[2]
order=line.split('\t')[4]
genus=line.split('\t')[-2]
classB=line.split('\t')[3]
spp=line.split('\t')[-1].strip()
if 'Firmicutes' in phyla:
phyla='Firmicutes'
assem_to_fam[assem]=fam
assem_to_order[assem]=order
assem_to_class[assem]=classB
assem_to_phyla[assem]=phyla
assem_to_genus[assem]=genus
assem_to_spp[assem]=spp
fam_to_phyla[fam]=phyla
order_to_phyla[order]=phyla
class_to_phyla[classB]=phyla
genus_to_phyla[genus]=phyla
genus_to_fam[genus]=fam
genus_to_order[genus]=order
all_uvs_assem={} # uv -> assemblies (non-redundant)
with open('WG_crispr_targets.txt') as inFile:
for line in inFile:
try:
all_uvs_assem[line.split()[0]].append(line.strip().split()[1])
except:
all_uvs_assem[line.split()[0]]=[line.strip().split()[1]]
VCs=[]
with open('GPD_VCs.txt') as inFile:
for line in inFile:
toks=line.strip().split('\t')
if len(toks)>1: # No singletons
VCs.append(toks)
X_hq_deep={}
with open('bwa_processed_75_sampleNames.txt') as inFile:
for line in inFile:
toks=line.strip().split(',')
X_hq_deep[toks[0]]=toks[1:]
VC_toGenus={}
for idx in range(len(VCs)):
VC_toGenus[idx]=[]
for idx in range(len(VCs)):
for uv in VCs[idx]:
assem=all_uvs_assem.get(uv)
if assem!=None:
for a in assem:
VC_toGenus[idx].append(assem_to_genus[a])
if len(VC_toGenus[idx])!=0:
VC_toGenus[idx]=list(set(VC_toGenus[idx]))[0]
# I'm mapping uvigs to their VCs
uvs_to_VC={}
for vc_idx in range(len(VCs)):
for uv in VCs[vc_idx]:
uvs_to_VC[uv]=vc_idx
# Fetching metadata
n=1
run_toCountry={}
run_toContinent={}
run_toStatus={}
run_toDepth={}
run_toDisease={}
run_toPub={}
run_toAge={}
run_toStudy={}
run_toLife={}
with open('Gut-metagenomes_29052019.csv') as inFile:
for line in inFile:
if n==1:
n+=1
else:
if line.split(',')[2]=='Yes':
my_run=line.split(',')[0]
run_toCountry[my_run]=line.split(',')[13]
run_toContinent[my_run]=line.split(',')[14]
run_toStatus[my_run]=line.split(',')[5]
run_toDepth[my_run]=float(line.split(',')[1])
run_toDisease[my_run]=line.split(',')[6]
run_toPub[my_run]=line.strip().split(',')[-1]
run_toLife[my_run]=line.strip().split(',')[12]
age=line.split(',')[9]
if age!='NA':
run_toAge[my_run]=float(age)
else:
run_toAge[my_run]='NA'
run_toStudy[my_run]=line.split(',')[4]
all_samples=run_toStatus.keys()
samples_ds=[]
for i in all_samples:
if run_toDepth[i]>=0.5e8:
samples_ds.append(i)
samples_ds_uvs={}
for s in samples_ds:
samples_ds_uvs[s]=[]
for k in list(X_hq_deep.keys()):
for s in X_hq_deep[k]:
samples_ds_uvs[s].append(k)
# S4A
n_conts=[1,2,3,4,5,6]
VCs_conts=[]
VCs_set_glob=[] # Set of VCs found in 1,2...
for my_n in n_conts:
VCs_glob=[]
n=0
with open('VC_continent_span.txt') as inFile:
for line in inFile:
if n==0:
n+=1
else:
z=0
vc=line.split('\t')[0]
toks=line.strip().split('\t')[1:]
for t in toks:
if int(t)>0:
z+=1
if z==my_n: # Change this to control at least or exact
VCs_glob.append(vc)
VCs_conts.append(len(VCs_glob))
VCs_set_glob.append(VCs_glob)
df_contDist=
|
pd.DataFrame()
|
pandas.DataFrame
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# %%
import os
import warnings
warnings.filterwarnings('ignore')
import time as t
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
from imblearn.over_sampling import SMOTE
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, recall_score, precision_score, classification_report, roc_curve, auc, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# %% [markdown]
# ## Data Preprocessing
# 1. Data Loading
# 2. Data Cleaning
# 3. X y split
# 4. Data Scaling
# %%
def data_load(
): #check for the availability of the dataset and change cwd if not found
df = pd.read_csv("../input/breast-cancer-prediction/data.csv")
return df
def data_clean(df):
return df
def X_y_split(df):
X = df.drop(['diagnosis'], axis=1)
y = df['diagnosis']
return X, y
def data_split_scale(X, y, sampling):
#Splitting dataset into Train and Test Set
X_tr, X_test, y_tr, y_test = train_test_split(X, y, test_size=0.3)
#Feature Scaling using Standardization
ss = StandardScaler()
X_tr = ss.fit_transform(X_tr)
X_test = ss.fit_transform(X_test)
print(
"'For 'Sampling strategies', I have 3 options. \n \t'1' stands for 'Upsampling'\n \t'2' stands for 'downsampling'. \n \t'3' stands for 'SMOTE''"
)
samp_sel = int(input("Now enter your selection for sampling strategy: \t"))
samp = [sampling.upsample, sampling.downsample, sampling.smote]
temp = samp[samp_sel - 1]
X_train, y_train = temp(X_train=pd.DataFrame(X_tr),
y_train=pd.DataFrame(y_tr))
return pd.DataFrame(X_train), pd.DataFrame(X_test), y_train, y_test
# %% [markdown]
# ## Class Balancing
# 1. Upsampling
# 2. Downsampling
# 3. SMOTE
# %%
class sampling:
def upsample(X_train, y_train):
#combine them back for resampling
train_data = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
negative = train_data[train_data.diagnosis == 0]
positive = train_data[train_data.diagnosis == 1]
# upsample minority
pos_upsampled = resample(positive,
replace=True,
n_samples=len(negative),
random_state=30)
# combine majority and upsampled minority
upsampled =
|
pd.concat([negative, pos_upsampled])
|
pandas.concat
|
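# A minimal sketch of the upsampling idea used in sampling.upsample above,
# on toy data with 0/1-encoded 'diagnosis' labels (assumed encoding).
import pandas as pd
from sklearn.utils import resample

train_data = pd.DataFrame({"feat": range(10),
                           "diagnosis": [0] * 7 + [1] * 3})
negative = train_data[train_data.diagnosis == 0]
positive = train_data[train_data.diagnosis == 1]
pos_upsampled = resample(positive, replace=True,
                         n_samples=len(negative), random_state=30)
upsampled = pd.concat([negative, pos_upsampled])
print(upsampled.diagnosis.value_counts())   # 7 rows of each class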
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import requests
import utils
import pandas as pd
import datetime as dt
import numpy as np
from itertools import groupby
import time
class vacunacion:
def __init__(self,output,indicador):
self.output = output
self.indicador = indicador
self.my_files = {
'vacunacion_fabricante':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination-type.csv',
'vacunacion_region':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination.csv',
'vacunacion_edad':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-ages.csv',
'vacunacion_grupo':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-groups.csv',
}
self.path = '../input/Vacunacion'
def get_last(self):
        ## download the corresponding file
if self.indicador == 'fabricante':
print('Retrieving files')
print('vacunacion_fabricante')
r = requests.get(self.my_files['vacunacion_fabricante'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_fabricante' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'campana':
print('Retrieving files')
print('vacunacion_region')
r = requests.get(self.my_files['vacunacion_region'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_region' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'edad':
print('Retrieving files')
print('vacunacion_edad')
r = requests.get(self.my_files['vacunacion_edad'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_edad' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'caracteristicas_del_vacunado':
print('Retrieving files')
print('vacunacion_grupo')
r = requests.get(self.my_files['vacunacion_grupo'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_grupo' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
        ## select the corresponding file
if self.indicador == 'fabricante':
print('reading files')
print('vacunacion_fabricante')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_fabricante.csv')
elif self.indicador == 'campana':
print('reading files')
print('vacunacion_region')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_region.csv')
elif self.indicador == 'edad':
print('reading files')
print('vacunacion_edad')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_edad.csv')
elif self.indicador == 'caracteristicas_del_vacunado':
print('reading files')
print('vacunacion_grupo')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_grupo.csv')
elif self.indicador == 'vacunas_region':
print('reading files')
print('vacunacion por region por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna':
print('reading files')
print('vacunacion por comuna por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_region':
print('reading files')
print('vacunacion por region por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_sexo':
print('reading files')
print('vacunacion por sexo por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
print('vacunacion por sexo por edad y FECHA')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6_2.csv', sep=';', encoding='ISO-8859-1')
self.last_edad_fecha = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_prioridad':
print('reading files')
print('vacunacion por grupos prioritarios')
self.last_added = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8.csv', sep=';', encoding='ISO-8859-1')
# aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8_2.csv', sep=';', encoding='ISO-8859-1')
# self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna_edad':
print('reading files')
print('vacunacion por comuna por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_establecimiento':
print('reading files')
print('vacunacion por establecimiento')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante':
print('reading files')
print('vacunacion por fabricante y fecha')
aux =
|
pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
|
pandas.read_csv
|
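# Sketch of the same read-then-stack pattern with in-memory data instead of
# the WORK_ARCHIVO_* files referenced above (those paths are placeholders on
# the scraper's disk): read two ';'-separated chunks, then concatenate them.
import io
import pandas as pd

part_1 = io.StringIO("Region;Dosis\nMetropolitana;10\n")
part_2 = io.StringIO("Region;Dosis\nValparaiso;5\n")
aux = pd.read_csv(part_1, sep=';')
aux_2 = pd.read_csv(part_2, sep=';')
print(pd.concat([aux, aux_2], ignore_index=True))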
import numpy
import pandas as pd
import math as m
#Moving Average
def MA(df, n):
MA = pd.Series( df['Close'].rolling(window = n,center=False).mean(), name = 'MA_' + str(n), index=df.index )
# df = df.join(MA)
return MA
#Exponential Moving Average
def EMA(df, n):
EMA = pd.Series(pd.ewma(df['Close'], span = n, min_periods = n - 1), name = 'EMA_' + str(n))
df = df.join(EMA)
return df
#Momentum
def MOM(df, n):
M = pd.Series(df['Close'].diff(n), name = 'Momentum_' + str(n))
df = df.join(M)
return df
#Rate of Change
def ROC(df, n):
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name = 'ROC_' + str(n))
df = df.join(ROC)
return df
#Average True Range
def ATR_2(df, n):
def TR(args):
print(args)
return 0
i = 0
TR_l = [0]
# while i < df.index[-1]:
df.rolling(2,).apply(TR)
for i in range(0, df.shape[0]-1):
# TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR = max( df.ix[i + 1, 'High'], df.ix[i, 'Close']) - min(df.ix[i + 1, 'Low'], df.ix[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l, index=df.index)
ATR = pd.Series(TR_s.rolling(window= n, center=False).mean(), name = 'ATR_' + str(n))
return ATR
#Average True Range
def ATR(df, n):
def TR(args):
print(args)
return 0
atr3 = pd.DataFrame( {'a' : abs( df['High'] - df['Low'] ), 'b' : abs( df['High'] - df['Close'].shift() ), 'c' : abs(df['Low']-df['Close'].shift() ) } )
return atr3.max(axis=1).rolling(window=n).mean()
#Bollinger Bands
def BBANDS(df, n):
MA = pd.Series(pd.rolling_mean(df['Close'], n))
MSD = pd.Series(
|
pd.rolling_std(df['Close'], n)
|
pandas.rolling_std
|
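# The indicator snippet above relies on long-removed pandas helpers
# (pd.ewma, pd.rolling_mean, pd.rolling_std, DataFrame.ix). A sketch of the
# modern equivalents on toy price data, assuming the same window semantics:
import numpy as np
import pandas as pd

df = pd.DataFrame({"Close": np.linspace(100, 110, 30)})
n = 5
ema = df["Close"].ewm(span=n, min_periods=n - 1).mean()   # was pd.ewma
ma = df["Close"].rolling(window=n).mean()                 # was pd.rolling_mean
msd = df["Close"].rolling(window=n).std()                 # was pd.rolling_std
bbands = pd.DataFrame({"MA": ma, "Upper": ma + 2 * msd, "Lower": ma - 2 * msd})
print(bbands.dropna().head())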
import time
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from scipy.sparse import csr_matrix
from statsmodels.stats.multitest import fdrcorrection as fdr
from joblib import Parallel, delayed, parallel_backend
from typing import List, Tuple, Dict, Union, Optional
import logging
logger = logging.getLogger(__name__)
from anndata import AnnData
from pegasusio import timer, MultimodalData, UnimodalData
from pegasus.tools import eff_n_jobs
def _calc_qvals(
nclust: int,
pvals: np.ndarray,
first_j: int,
second_j: int,
) -> np.ndarray:
""" Calculate FDR
"""
qvals = np.zeros(pvals.shape, dtype = np.float32)
if second_j > 0:
_, qval = fdr(pvals[:, first_j])
qvals[:, first_j] = qvals[:, second_j] = qval
else:
for j in range(nclust):
_, qvals[:, j] = fdr(pvals[:, j])
return qvals
def _de_test(
X: csr_matrix,
cluster_labels: pd.Categorical,
gene_names: List[str],
n_jobs: int,
t: Optional[bool] = False,
fisher: Optional[bool] = False,
temp_folder: Optional[str] = None,
verbose: Optional[bool] = True,
) -> pd.DataFrame:
""" Collect sufficient statistics, run Mann-Whitney U test, calculate auroc (triggering diff_expr_utils.calc_mwu in parallel), optionally run Welch's T test and Fisher's Exact test (in parallel).
"""
from pegasus.cylib.de_utils import csr_to_csc, calc_mwu, calc_stat
start = time.perf_counter()
ords = np.argsort(cluster_labels.codes)
data, indices, indptr = csr_to_csc(X.data, X.indices, X.indptr, X.shape[0], X.shape[1], ords)
cluster_cnts = cluster_labels.value_counts()
n1arr = cluster_cnts.values
n2arr = X.shape[0] - n1arr
cluster_cumsum = cluster_cnts.cumsum().values
nclust = n1arr.size
first_j = second_j = -1
posvec = np.where(n1arr > 0)[0]
if len(posvec) == 2:
first_j = posvec[0]
second_j = posvec[1]
if verbose:
end = time.perf_counter()
logger.info(f"CSR matrix is converted to CSC matrix. Time spent = {end - start:.4f}s.")
start = end
# logger.info(f"Preparation (including converting X to csc_matrix format) for MWU test is finished. Time spent = {time.perf_counter() - start:.2f}s.")
ngene = X.shape[1]
quotient = ngene // n_jobs
residue = ngene % n_jobs
intervals = []
start_pos = end_pos = 0
for i in range(n_jobs):
end_pos = start_pos + quotient + (i < residue)
if end_pos == start_pos:
break
intervals.append((start_pos, end_pos))
start_pos = end_pos
with parallel_backend("loky", inner_max_num_threads=1):
result_list = Parallel(n_jobs=len(intervals), temp_folder=temp_folder)(
delayed(calc_mwu)(
start_pos,
end_pos,
data,
indices,
indptr,
n1arr,
n2arr,
cluster_cumsum,
first_j,
second_j,
verbose,
)
for start_pos, end_pos in intervals
)
Ulist = []
plist = []
alist = []
for U_stats, pvals, aurocs in result_list:
Ulist.append(U_stats)
plist.append(pvals)
alist.append(aurocs)
U_stats = np.concatenate(Ulist, axis = 0)
pvals = np.concatenate(plist, axis = 0)
aurocs = np.concatenate(alist, axis = 0)
qvals = _calc_qvals(nclust, pvals, first_j, second_j)
dfU = pd.DataFrame(U_stats, index = gene_names, columns = [f"{x}:mwu_U" for x in cluster_labels.categories])
dfUp = pd.DataFrame(pvals, index = gene_names, columns = [f"{x}:mwu_pval" for x in cluster_labels.categories])
dfUq = pd.DataFrame(qvals, index = gene_names, columns = [f"{x}:mwu_qval" for x in cluster_labels.categories])
dfUa = pd.DataFrame(aurocs, index = gene_names, columns = [f"{x}:auroc" for x in cluster_labels.categories])
if verbose:
end = time.perf_counter()
logger.info(f"MWU test and AUROC calculation are finished. Time spent = {end - start:.4f}s.")
start = end
# basic statistics and optional t test and fisher test
results = calc_stat(data, indices, indptr, n1arr, n2arr, cluster_cumsum, first_j, second_j, t, fisher, verbose)
dfl2M = pd.DataFrame(results[0][0], index = gene_names, columns = [f"{x}:log2Mean" for x in cluster_labels.categories])
dfl2Mo = pd.DataFrame(results[0][1], index = gene_names, columns = [f"{x}:log2Mean_other" for x in cluster_labels.categories])
dfl2FC = pd.DataFrame(results[0][2], index = gene_names, columns = [f"{x}:log2FC" for x in cluster_labels.categories])
dfpct = pd.DataFrame(results[0][3], index = gene_names, columns = [f"{x}:percentage" for x in cluster_labels.categories])
dfpcto = pd.DataFrame(results[0][4], index = gene_names, columns = [f"{x}:percentage_other" for x in cluster_labels.categories])
dfpfc = pd.DataFrame(results[0][5], index = gene_names, columns = [f"{x}:percentage_fold_change" for x in cluster_labels.categories])
df_list = [dfl2M, dfl2Mo, dfl2FC, dfpct, dfpcto, dfpfc, dfUa, dfU, dfUp, dfUq]
if verbose:
end = time.perf_counter()
logger.info(f"Sufficient statistics are collected. Time spent = {end - start:.4f}s.")
start = end
if t:
qvals = _calc_qvals(nclust, results[1][1], first_j, second_j)
dft = pd.DataFrame(results[1][0], index = gene_names, columns = [f"{x}:t_tstat" for x in cluster_labels.categories])
dftp = pd.DataFrame(results[1][1], index = gene_names, columns = [f"{x}:t_pval" for x in cluster_labels.categories])
dftq = pd.DataFrame(qvals, index = gene_names, columns = [f"{x}:t_qval" for x in cluster_labels.categories])
df_list.extend([dft, dftp, dftq])
if verbose:
end = time.perf_counter()
logger.info(f"Welch's t-test is finished. Time spent = {end - start:.4f}s.")
start = end
if fisher:
from pegasus.cylib.cfisher import fisher_exact
a_true, a_false, b_true, b_false = results[1 if not t else 2]
oddsratios = np.zeros((ngene, n1arr.size), dtype = np.float32)
pvals = np.ones((ngene, n1arr.size), dtype = np.float32)
if second_j > 0:
oddsratio, pval = fisher_exact(a_true[first_j], a_false[first_j], b_true[first_j], b_false[first_j])
oddsratios[:, first_j] = oddsratio
idx1 = oddsratio > 0.0
idx2 = oddsratio < 1e30
oddsratios[idx1 & idx2, second_j] = 1.0 / oddsratio[idx1 & idx2]
oddsratios[~idx1] = 1e30
pvals[:, first_j] = pvals[:, second_j] = pval
else:
with parallel_backend("loky", inner_max_num_threads=1):
result_list = Parallel(n_jobs=n_jobs, temp_folder=temp_folder)(
delayed(fisher_exact)(
a_true[i],
a_false[i],
b_true[i],
b_false[i],
)
for i in posvec
)
for i in range(posvec.size):
oddsratios[:, posvec[i]] = result_list[i][0]
pvals[:, posvec[i]] = result_list[i][1]
qvals = _calc_qvals(nclust, pvals, first_j, second_j)
dff = pd.DataFrame(oddsratios, index = gene_names, columns = [f"{x}:fisher_oddsratio" for x in cluster_labels.categories])
dffp =
|
pd.DataFrame(pvals, index = gene_names, columns = [f"{x}:fisher_pval" for x in cluster_labels.categories])
|
pandas.DataFrame
|
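# A tiny sketch of the FDR step that _calc_qvals wraps above: statsmodels'
# fdrcorrection turns a vector of p-values into Benjamini-Hochberg q-values,
# applied there once per cluster column.
import numpy as np
from statsmodels.stats.multitest import fdrcorrection as fdr

pvals = np.array([0.001, 0.04, 0.2, 0.8], dtype=np.float32)
rejected, qvals = fdr(pvals)
print(qvals)   # BH-adjusted q-values, same shape as pvals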
import inspect
import sys
import pandas as pd
pdDatetimeIndex = pd.tseries.index.DatetimeIndex
pdInt64Index = pd.core.index.Int64Index
pdCoreIndex = pd.core.index.Index
import sqlalchemy as sqla
import datetime as dt
class IndexImplementer(object):
"""
    IndexImplementer is the base class required to implement
    an index of a specific type. The
same instance is created at two points in
the Trump dataflow:
1. the datatable getting cached and
2. the data being served.
The IndexImplementer should be
    idempotent, and dataframe/series agnostic.
"""
sqlatyp = sqla.Integer
pytyp = int
pindt = pd.Index
def __init__(self, case, **kwargs):
"""
:param case: str
This should match a case used to switch
the logic created in each subclass of IndexImplementer
:param kwargs: dict
"""
self.case = case
self.k = kwargs
def orfs_ind_from_str(self, userinput):
ui = {}
exec("ui = " + userinput)
obj = self.pytyp(**ui)
return obj
def create_empty(self):
return self.pindt([])
def build_ordf(self, orind, orval, colname):
ordf =
|
pd.DataFrame(index=orind, data=orval, columns=[colname])
|
pandas.DataFrame
|
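# Sketch of what build_ordf above produces: a one-column frame whose index is
# the override index and whose single column holds the override values
# ('override' is a hypothetical column name).
import pandas as pd

orind = pd.Index([1, 2, 3])
orval = [10.0, 20.0, 30.0]
ordf = pd.DataFrame(index=orind, data=orval, columns=["override"])
print(ordf)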
import os
import numpy as np
import pandas as pd
from . import HandlerBase
class ElectrometerBinFileHandler(HandlerBase):
"""Read electrometer *.bin files"""
def __init__(self, fpath):
# It's a text config file, which we don't store in the resources yet, parsing for now
fpath_txt = f'{os.path.splitext(fpath)[0]}.txt'
self.files = [fpath, fpath_txt]
with open(fpath_txt, 'r') as fp:
N = int(fp.readline().split(':')[1])
Gains = [int(x) for x in fp.readline().split(':')[1].split(',')]
Offsets = [int(x) for x in fp.readline().split(':')[1].split(',')]
FAdiv = float(fp.readline().split(':')[1])
fp.readline()
Ranges = [int(x) for x in fp.readline().split(':')[1].split(',')]
FArate = float(fp.readline().split(':')[1])
def Range(val):
ranges = {1: 1,
2: 10,
4: 100,
8: 1000,
16: 100087}
try:
ret = ranges[val]
except:
                raise ValueError(f'The value {val} must be one of {list(ranges.keys())}')
return ret
# 1566332720 366808768 -4197857 11013120 00
raw_data = np.fromfile(fpath, dtype=np.int32)
Ra = Range(Ranges[0])
Rb = Range(Ranges[1])
Rc = Range(Ranges[2])
Rd = Range(Ranges[3])
num_columns = 6
raw_data = raw_data.reshape((raw_data.size // num_columns, num_columns))
derived_data = np.zeros((raw_data.shape[0], raw_data.shape[1] - 1))
derived_data[:, 0] = raw_data[:, -2] + raw_data[:, -1] * 8.0051232 * 1e-9 # Unix timestamp with nanoseconds
derived_data[:, 1] = Ra * ((raw_data[:, 0] / FAdiv) - Offsets[0]) / Gains[0]
derived_data[:, 2] = Rb * ((raw_data[:, 1] / FAdiv) - Offsets[1]) / Gains[1]
derived_data[:, 3] = Rc * ((raw_data[:, 2] / FAdiv) - Offsets[2]) / Gains[2]
derived_data[:, 4] = Rd * ((raw_data[:, 3] / FAdiv) - Offsets[3]) / Gains[3]
self.df =
|
pd.DataFrame(data=derived_data, columns=['timestamp', 'i0', 'it', 'ir', 'iff'])
|
pandas.DataFrame
|
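# Sketch (synthetic numbers) of the final step above: each derived column
# becomes a named DataFrame column so downstream code can address
# timestamp/i0/it/ir/iff directly.
import numpy as np
import pandas as pd

derived_data = np.zeros((3, 5))
derived_data[:, 0] = [1.5663e9, 1.5663e9 + 0.01, 1.5663e9 + 0.02]   # timestamps
derived_data[:, 1:] = np.random.default_rng(0).normal(size=(3, 4))  # currents
df = pd.DataFrame(data=derived_data,
                  columns=['timestamp', 'i0', 'it', 'ir', 'iff'])
print(df.dtypes)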
# write_Crosswalk_USGS_WU_Coef.py (scripts)
# !/usr/bin/env python3
# coding=utf-8
"""
Create a crosswalk linking the USGS Water Use Coefficients
(for animals) to NAICS_12. Created by selecting unique
Activity Names and manually assigning to NAICS
"""
import pandas as pd
from flowsa.settings import datapath
from scripts.FlowByActivity_Crosswalks.common_scripts import unique_activity_names, order_crosswalk
def assign_naics(df):
"""
Function to assign NAICS codes to each dataframe activity
:param df: df, a FlowByActivity subset that contains unique activity names
:return: df with assigned Sector columns
"""
# assign sector source name
df['SectorSourceName'] = 'NAICS_2012_Code'
# cattle ranching and farming: 1121
# beef cattle ranching and farming including feedlots: 11211
df.loc[df['Activity'] == 'Beef and other cattle, including calves', 'Sector'] = '11211'
# dairy cattle and milk production: 11212
df.loc[df['Activity'] == 'Dairy cows', 'Sector'] = '11212'
# hog and pig farming: 1122
df.loc[df['Activity'] == 'Hogs and pigs', 'Sector'] = '1122'
# poultry and egg production: 1123
# chicken egg production: 11231
df.loc[df['Activity'] == 'Laying hens', 'Sector'] = '11231'
# broilers and other meat-type chicken production: 11232
df.loc[df['Activity'] == 'Broilers and other chickens', 'Sector'] = '11232'
# turkey production: 11233
df.loc[df['Activity'] == 'Turkeys', 'Sector'] = '11233'
# poultry hatcheries: 11234
# other poultry production: 11239, manually add row
df = df.append(
pd.DataFrame([['USGS_WU_Coef', 'Broilers and other chickens', 'NAICS_2012_Code', '11239']],
columns=['ActivitySourceName', 'Activity', 'SectorSourceName', 'Sector']
), ignore_index=True, sort=True)
# sheep and goat farming: 1124
# sheep farming: 11241
df.loc[df['Activity'] == 'Sheep and lambs', 'Sector'] = '11241'
# goat farming: 11242
df.loc[df['Activity'] == 'Goats', 'Sector'] = '11242'
# animal aquaculture: 1125
# other animal production: 1129
# apiculture: 11291
# horse and other equine production: 11292
df.loc[df['Activity'] == 'Horses (including ponies, mules, burrows, and donkeys)',
'Sector'] = '11292'
# fur-bearing animal and rabbit production: 11293, manually add row
df = df.append(
pd.DataFrame([['USGS_WU_Coef', 'Broilers and other chickens', 'NAICS_2012_Code', '11293']],
columns=['ActivitySourceName', 'Activity', 'SectorSourceName', 'Sector']
), ignore_index=True, sort=True)
# all other animal production: 11299, manually add row
df = df.append(pd.DataFrame([['USGS_WU_Coef', 'Sheep and lambs', 'NAICS_2012_Code', '11299']],
columns=['ActivitySourceName', 'Activity', 'SectorSourceName',
'Sector']), ignore_index=True, sort=True)
return df
if __name__ == '__main__':
# select unique activity names from file
years = ['2005']
# datasource
datasource = 'USGS_WU_Coef'
df_list = []
for y in years:
dfy = unique_activity_names(datasource, y)
df_list.append(dfy)
df =
|
pd.concat(df_list, ignore_index=True)
|
pandas.concat
|
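# DataFrame.append, used in assign_naics above, was removed in pandas 2.x.
# A hedged sketch of the same "manually add a crosswalk row" step rewritten
# with pd.concat (values taken from the rows added above):
import pandas as pd

df = pd.DataFrame({"ActivitySourceName": ["USGS_WU_Coef"],
                   "Activity": ["Turkeys"],
                   "SectorSourceName": ["NAICS_2012_Code"],
                   "Sector": ["11233"]})
extra = pd.DataFrame([["USGS_WU_Coef", "Broilers and other chickens",
                       "NAICS_2012_Code", "11239"]],
                     columns=["ActivitySourceName", "Activity",
                              "SectorSourceName", "Sector"])
df = pd.concat([df, extra], ignore_index=True)
print(df)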
"""
Tests for zipline/utils/pandas_utils.py
"""
from unittest import skipIf
import pandas as pd
from zipline.testing import parameter_space, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.pandas_utils import (
categorical_df_concat,
nearest_unequal_elements,
new_pandas,
skip_pipeline_new_pandas,
)
class TestNearestUnequalElements(ZiplineTestCase):
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements(self, tz):
dts = pd.to_datetime(
['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'],
).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-30', None, '2014-01-01'),
('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, '2014-01-05'),
('2014-01-02', '2014-01-01', '2014-01-05'),
('2014-01-03', '2014-01-01', '2014-01-05'),
('2014-01-04', '2014-01-01', '2014-01-05'),
('2014-01-05', '2014-01-01', '2014-01-06'),
('2014-01-06', '2014-01-05', '2014-01-09'),
('2014-01-07', '2014-01-06', '2014-01-09'),
('2014-01-08', '2014-01-06', '2014-01-09'),
('2014-01-09', '2014-01-06', None),
('2014-01-10', '2014-01-09', None),
('2014-01-11', '2014-01-09', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements_short_dts(self, tz):
# Length 1.
dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, None),
('2014-01-02', '2014-01-01', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
# Length 0
dts = pd.to_datetime([]).tz_localize(tz)
for dt, before, after in (('2013-12-31', None, None),
('2014-01-01', None, None),
('2014-01-02', None, None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
def test_nearest_unequal_bad_input(self):
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2014']),
pd.Timestamp('2014'),
)
self.assertEqual(str(e.exception), 'dts must be unique')
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2013']),
pd.Timestamp('2014'),
)
self.assertEqual(
str(e.exception),
'dts must be sorted in increasing order',
)
class TestCatDFConcat(ZiplineTestCase):
@skipIf(new_pandas, skip_pipeline_new_pandas)
def test_categorical_df_concat(self):
inp = [
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'c'], dtype='category'),
'B':
|
pd.Series([100, 102, 103], dtype='int64')
|
pandas.Series
|
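# Sketch of the behavior the categorical_df_concat test above exercises:
# plain pd.concat keeps the 'category' dtype only when the inputs share
# identical categories, which is presumably what the zipline helper handles.
import pandas as pd

a = pd.DataFrame({'A': pd.Series(['a', 'b'], dtype='category')})
b = pd.DataFrame({'A': pd.Series(['b', 'c'], dtype='category')})
print(pd.concat([a, b]).dtypes)   # object: categories differ, dtype is lost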
## Utilities to get stellar parameters
import os
import requests
import pandas as pd
import warnings
from astroquery.mast import Catalogs
from io import BytesIO
import utils
def get_tess_stars_from_sector(sector_num, datapath=utils.TESS_DATAPATH, force_redownload=False, verbose=True):
'''
Queries https://tess.mit.edu/observations/target-lists/ for the input catalog from TESS sector 'sector_num',
and for each target in that list, gets its data from astroquery and joins the two catalogs.
Arguments
---------
sector_num : int
The TESS sector number for which information is being requested.
datapath : str
The top-level path to which data should be stored.
verbose : bool
Whether to print statements on the script's progress.
Returns
-------
stars : pd.DataFrame
The joined TIC and target-list data.
'''
# sets up file paths and names
sector = str(sector_num).zfill(3)
if datapath is None:
datapath = os.getcwd()
subpath = "tesstargets" + os.path.sep + "TESS_targets_S{}.csv".format(sector)
fullpath = os.path.join(datapath, subpath)
noises_path = os.path.join(datapath, "tess_photometric_noise" + os.path.sep + "TESS_noise_S{}.csv".format(sector))
if (not os.path.exists(fullpath)) or force_redownload or utils.GLOBAL_FORCE_REDOWNLOAD:
# queries the target list
url = utils.get_sector_pointings(sector_num)
if verbose or utils.GLOBAL_VERBOSE:
print("Getting sector {0} observed targets from {1}.".format(sector_num, url))
req = requests.get(url)
if not req.ok:
raise requests.exceptions.HTTPError("Data from sector {} is not available.".format(sector_num))
observations = pd.read_csv(BytesIO(req.content), comment='#')[['TICID', 'Camera', 'CCD']] # MAST has Tmag, RA, Dec at higher precision
observed_ticids = observations['TICID'].values
# queries MAST for stellar data
if verbose or utils.GLOBAL_VERBOSE:
print("Querying MAST for sector {0} observed targets.".format(sector_num))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tic_data = Catalogs.query_criteria(catalog='Tic', ID=observed_ticids).to_pandas()
tic_data = tic_data.astype({"ID" : int})
merged_data = tic_data.merge(observations, left_on='ID', right_on='TICID')
if os.path.exists(noises_path):
merged_data = merged_data.merge(pd.read_csv(noises_path, index_col=0, comment='#'), on="ID")
else:
print("Noise values not found on path: change file location or download using get_tess_photometric_noise.py.")
merged_data = merged_data.rename({"ID" : "ticid"})
merged_data.to_csv(fullpath)
if verbose or utils.GLOBAL_VERBOSE:
print("Saved TIC data from TESS sector {0} to path {1}.".format(sector_num, fullpath))
return merged_data
else:
stellar_sector_data =
|
pd.read_csv(fullpath, index_col=0)
|
pandas.read_csv
|
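# Sketch of the join step get_tess_stars_from_sector performs (toy frames,
# column names taken from the code above): MAST TIC rows merged onto the
# sector target list by TIC ID.
import pandas as pd

observations = pd.DataFrame({"TICID": [101, 102], "Camera": [1, 2], "CCD": [3, 4]})
tic_data = pd.DataFrame({"ID": [101, 102], "Tmag": [9.1, 10.4]})
merged_data = tic_data.merge(observations, left_on='ID', right_on='TICID')
print(merged_data)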
import docx
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_BREAK
from docx.shared import Cm
import os
import math
import pandas as pd
import numpy as np
import re
from datetime import date
import streamlit as st
import json
import glob
from PIL import Image
import smtplib
import docx2pdf
import shutil
import zipfile
from datetime import datetime
import platform
import matplotlib.pyplot as plt
def User_validation():
f=open("Validation/Validation.json","r")
past=json.loads(f.read())
f.close()
now=datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M")
time_past=datetime.strptime(past['Acceso']["Hora"], "%d/%m/%Y %H:%M")
timesince = now - time_past
Time_min= int(timesince.total_seconds() / 60)
bool_negate = Time_min<120
if not bool_negate:
past['Acceso'].update({"Estado":"Negado"})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
bool_aprove= past['Acceso']["Estado"]=="Aprovado"
if not bool_aprove:
colums= st.columns([1,2,1])
with colums[1]:
#st.image("Imagenes/Escudo_unal.png")
st.subheader("Ingrese el usuario y contraseña")
Usuario=st.text_input("Usuario")
Clave=st.text_input("Contraseña",type="password")
Users=["Gestor Comercial"]
bool_user = Usuario in Users
bool_clave = (Clave)==("1234")
bool_user_email = past['Acceso']["User"] == Usuario
bool_time2 = Time_min<1000
bool_1 = bool_time2 and bool_user_email
bool_2 = bool_user and bool_clave
if not bool_user_email and bool_2:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
if not bool_2:
if (Usuario != "") and (Clave!=""):
with colums[1]:
st.warning("Usuario o contraseña incorrectos.\n\n Por favor intente nuevamente.")
elif bool_2 and not bool_1:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
EMAIL_ADDRESS = '<EMAIL>'
EMAIL_PASSWORD = '<PASSWORD>'
try:
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
subject = 'Acceso aplicacion Julia'
body = 'Acceso usuario ' + Usuario +' el '+dt_string
msg = f'Subject: {subject}\n\n{body}'
smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, msg)
except:
pass
with colums[1]:
st.button("Acceder a la aplicación")
elif bool_2:
past['Acceso'].update({"Estado":"Aprovado","Hora":dt_string,"User":Usuario})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
with colums[1]:
st.button("Acceder a la aplicación")
return bool_aprove
def Num_dias(leng):
if leng==1:
return "1 día"
else:
return str(leng) + " días"
def day_week(dia):
if dia ==0:
Dia="Lunes"
elif dia ==1:
Dia="Martes"
elif dia ==2:
Dia="Miércoles"
elif dia ==3:
Dia="Jueves"
elif dia ==4:
Dia="Viernes"
elif dia ==5:
Dia="Sábado"
elif dia ==6:
Dia="Domingo-Festivo"
return Dia
def remove_row(table, row):
tbl = table._tbl
tr = row._tr
tbl.remove(tr)
def Range_fecha(dates):
if len(dates)==1:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')
else:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')+" hasta "+ pd.to_datetime(dates[-1]).strftime('%Y-%m-%d')
def any2str(obj):
if isinstance(obj, str):
return obj
elif math.isnan(obj):
return ""
elif isinstance(obj, int):
return str(obj)
elif isinstance(obj, float):
return str(obj)
def dt_fechas(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["Fecha"]== dia]
data_dia_todos=data[data["Fecha"]==dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_2(data,data_user,Fechas,tipo_dia):
dt_Final=
|
pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
|
pandas.DataFrame
|
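# dt_fechas above grows its result with DataFrame.append inside a loop, which
# was removed in pandas 2.x. A sketch of the usual pattern (toy values):
# collect the per-day frames in a list and concatenate once at the end.
import pandas as pd

rows = []
for d_week, dia, req, resp in [("Lunes", "2021-01-04", 120, 80),
                               ("Martes", "2021-01-05", 90, 60)]:
    rows.append(pd.DataFrame([[d_week, dia, req, resp]],
                             columns=["Dia", "Fecha", "Requerimiento", "Respaldo"]))
dt_Final = pd.concat(rows, ignore_index=True)
print(dt_Final)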
import requests
import json
import pandas as pd
r = requests.get(
"https://api.opendota.com/api/players/113916764/matches?win=1&date=365&hero_id=5"
)
wins = json.loads(r.text)
df_wins =
|
pd.DataFrame(wins)
|
pandas.DataFrame
|
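# Sketch with an inlined response instead of the live API call above: the
# OpenDota matches endpoint returns a JSON list of objects (fields shown are
# representative), and pd.DataFrame turns it into one row per match.
import pandas as pd

wins = [{"match_id": 1, "kills": 7, "deaths": 2},
        {"match_id": 2, "kills": 11, "deaths": 4}]
df_wins = pd.DataFrame(wins)
print(df_wins.columns.tolist())   # ['match_id', 'kills', 'deaths']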
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
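# --- Hedged standalone sketch (not part of the original test class) ----------
# Re-states the anchored-offset behaviour asserted above with assumed imports:
# the inner context manager checks the PerformanceWarning, the outer one the
# TypeError raised once the object-dtype addition is attempted.
def _check_anchored_offsets_raise():
    import numpy as np
    import pandas as pd
    import pandas.util.testing as tm
    import pytest
    from pandas.errors import PerformanceWarning

    tdi = pd.TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
    anchored = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
    with pytest.raises(TypeError):
        with tm.assert_produces_warning(PerformanceWarning):
            tdi + anchored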
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi =
|
TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
|
pandas.TimedeltaIndex
|
# ActivitySim
# See full license in LICENSE.txt.
import os.path
import logging
import pytest
import pandas as pd
from .. import tracing
from .. import inject
def close_handlers():
loggers = logging.Logger.manager.loggerDict
for name in loggers:
logger = logging.getLogger(name)
logger.handlers = []
logger.propagate = True
logger.setLevel(logging.NOTSET)
def teardown_function(func):
inject.clear_cache()
inject.reinject_decorated_tables()
def add_canonical_dirs():
inject.clear_cache()
configs_dir = os.path.join(os.path.dirname(__file__), 'configs')
inject.add_injectable("configs_dir", configs_dir)
output_dir = os.path.join(os.path.dirname(__file__), 'output')
inject.add_injectable("output_dir", output_dir)
def test_config_logger(capsys):
add_canonical_dirs()
tracing.config_logger()
logger = logging.getLogger('activitysim')
file_handlers = [h for h in logger.handlers if type(h) is logging.FileHandler]
assert len(file_handlers) == 1
asim_logger_baseFilename = file_handlers[0].baseFilename
print("handlers:", logger.handlers)
logger.info('test_config_logger')
logger.info('log_info')
logger.warning('log_warn1')
out, err = capsys.readouterr()
# don't consume output
print(out)
assert "could not find conf file" not in out
assert 'log_warn1' in out
assert 'log_info' not in out
close_handlers()
logger = logging.getLogger(__name__)
logger.warning('log_warn2')
with open(asim_logger_baseFilename, 'r') as content_file:
content = content_file.read()
print(content)
assert 'log_warn1' in content
assert 'log_warn2' not in content
def test_print_summary(capsys):
add_canonical_dirs()
tracing.config_logger()
tracing.print_summary('label', df=pd.DataFrame(), describe=False, value_counts=False)
out, err = capsys.readouterr()
# don't consume output
print(out)
assert 'print_summary neither value_counts nor describe' in out
close_handlers()
def test_register_households(capsys):
add_canonical_dirs()
tracing.config_logger()
df = pd.DataFrame({'zort': ['a', 'b', 'c']}, index=[1, 2, 3])
inject.add_injectable('traceable_tables', ['households'])
inject.add_injectable("trace_hh_id", 5)
tracing.register_traceable_table('households', df)
out, err = capsys.readouterr()
# print out # don't consume output
assert "Can't register table 'households' without index name" in out
df.index.name = 'household_id'
tracing.register_traceable_table('households', df)
out, err = capsys.readouterr()
# print out # don't consume output
# should warn that household id not in index
assert 'trace_hh_id 5 not in dataframe' in out
close_handlers()
def test_register_tours(capsys):
add_canonical_dirs()
tracing.config_logger()
inject.add_injectable('traceable_tables', ['households', 'tours'])
# in case another test injected this
inject.add_injectable("trace_tours", [])
inject.add_injectable("trace_hh_id", 3) # need this or register_traceable_table is a nop
tours_df = pd.DataFrame({'zort': ['a', 'b', 'c']}, index=[10, 11, 12])
tours_df.index.name = 'tour_id'
tracing.register_traceable_table('tours', tours_df)
out, err = capsys.readouterr()
assert "can't find a registered table to slice table 'tours' index name 'tour_id'" in out
inject.add_injectable("trace_hh_id", 3)
households_df = pd.DataFrame({'dzing': ['a', 'b', 'c']}, index=[1, 2, 3])
households_df.index.name = 'household_id'
tracing.register_traceable_table('households', households_df)
tracing.register_traceable_table('tours', tours_df)
out, err = capsys.readouterr()
# print out # don't consume output
assert "can't find a registered table to slice table 'tours'" in out
tours_df['household_id'] = [1, 5, 3]
tracing.register_traceable_table('tours', tours_df)
out, err = capsys.readouterr()
print(out) # don't consume output
# should be tracing the tour belonging to trace_hh_id 3, i.e. tour_id 12
traceable_table_ids = inject.get_injectable('traceable_table_ids')
assert traceable_table_ids['tours'] == [12]
close_handlers()
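# Hedged pandas-only sketch (toy frames, helper name is hypothetical) of the
# slicing idea exercised above: a tour is traced when its household_id appears
# in the list of traced household ids.
def _slice_tours_example():
    tours = pd.DataFrame({'household_id': [1, 5, 3]},
                         index=pd.Index([10, 11, 12], name='tour_id'))
    traced_hh_ids = [3]
    return tours.index[tours['household_id'].isin(traced_hh_ids)].tolist()  # -> [12]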
def test_write_csv(capsys):
add_canonical_dirs()
tracing.config_logger()
# should complain if df not a DataFrame or Series
tracing.write_csv(df='not a df or series', file_name='baddie')
out, err = capsys.readouterr()
print(out) # don't consume output
assert "unexpected type" in out
close_handlers()
def test_slice_ids():
df =
|
pd.DataFrame({'household_id': [1, 2, 3]}, index=[11, 12, 13])
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# COERbuoy - Data Analyzer
# A small library to simplify the analysis of data produced with the COERbuoy platform;
# It is assumed that the standard naming scheme is used
# 2021 COER Laboratory, Maynooth University
# in cooperation with CorPower Ocean AB
#
# Author:
# <NAME>, <EMAIL>
#
import os;
import pandas;
import numpy as np;
import matplotlib.pyplot as plt;
import warnings;
def quartil (a, p):
a=np.abs(a);
b=np.sort(a);
aa=np.sum(a);
if (aa==0):
return 0;
return b[np.argmax(np.cumsum(b)/aa>p)];
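# Hedged usage sketch (toy input, not COERbuoy data): quartil(a, p) returns the
# magnitude below which a fraction p of the total absolute "mass" of a lies.
def _quartil_example():
    # for the ramp 0..9 the cumulative sorted mass first exceeds 75% at value 8
    return quartil(np.arange(10), 0.75)  # -> 8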
class Analyzer():
d={"p":"wave period [s]","h":"wave height [m]",
"wave":"wave_type","RAO":"RAO","ctrl":"control method",
"P":"Mean absorbed power [W]"};
table=None;
def read_file(self,file):
data0=pandas.read_csv(file).values.transpose();
s1=np.argmax(data0[:1,:]>=0)
return [data0[:,:s1],data0[:,s1:]];#return [transient data, steady-state data]
def read_folder(self,folder):#get a datatable from a folder
self.table=None;
for f in os.listdir(folder):#look for files in a folder
if f[-4:] == ".csv":#check if csv
info=f[:-4].split("_");
if len(info) == 8 and info[1]=="p" and info[3]=="h":#Check if it follows the naming convention
try:
data = self.read_file(os.path.join(folder,f))[1];
row={"P":[(data[-1,-1]-data[-1,0])/(data[0,-1])],
"z075":[quartil(data[2,:],0.75)],
"z095":[np.max(data[2,:])-np.min(data[2,:])],
"dz075":[quartil(data[3,:],0.75)],
"alpha075":[quartil(data[4,:],0.75)],
"dalpha075":[quartil(data[5,:],0.75)],
"RAO":[quartil(data[2,:],0.95)/float(info[4])],
"wtype":[info[0]],
"p":[float(info[2])],
"h":[float(info[4])*2],
"wave":[info[0]],
"ctrl":[info[5]],
"WEC":[info[6]],
"model":[info[7]],
"data":[data]};
if abs(data[2,-1])>1e2:#simulation became unstable
raise ValueError;
if not isinstance(self.table,pandas.DataFrame):
self.table=
|
pandas.DataFrame(row)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 13:12:22 2019
@author: shlomi
"""
from strat_paths import work_chaim
sound_path = work_chaim / 'sounding'
sean_tropopause_path = work_chaim / 'Sean - tropopause'
def read_ascii_randel(path, filename='h2o_all_timeseries_for_corr.dat'):
import pandas as pd
import numpy as np
with open(path / filename) as f:
content = f.readlines()
content = [x.strip() for x in content]
content = [x.split() for x in content]
content.pop(0)
# flatten list:
flat_content = [item for sublist in content for item in sublist]
# convert to float:
flat_content = [float(x) for x in flat_content]
# find first bad value:
pos = [i for i, x in enumerate(flat_content) if x == 1e36][0]
# separate into two lists:
dates = pd.to_datetime(flat_content[0:pos-1], origin='julian', unit='D')
start_date = str(dates.year[0]) + '-' + str(dates.month[0]) + '-' + '01'
dates_new = pd.date_range(start_date, freq='MS', periods=len(dates))
wv = flat_content[pos: -1]
df = pd.DataFrame(wv, index=dates_new)
df = df.replace(1e36, np.nan)
df.index.name = 'time'
df.columns = ['wv_anoms_HALOE_MLS']
ds = df.to_xarray()
da = ds.to_array(name='wv_anoms_HALOE_MLS').squeeze(drop=True)
return da
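# Hedged sketch (hypothetical day number, not Randel data) of the julian-date
# handling used in read_ascii_randel: origin='julian' with unit='D' converts
# julian day numbers to timestamps, which are then replaced by clean
# month-start dates via pd.date_range(freq='MS').
def _julian_dates_example():
    import pandas as pd
    dates = pd.to_datetime([2451545.0], origin='julian', unit='D')  # 2000-01-01 12:00
    start_date = str(dates.year[0]) + '-' + str(dates.month[0]) + '-' + '01'
    return pd.date_range(start_date, freq='MS', periods=3)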
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
import matplotlib.pyplot as plt
import numpy as np
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
textcolors=["black", "white"],
threshold=None, **textkw):
"""
A function to annotate a heatmap.
Parameters
----------
im
The AxesImage to be labeled.
data
Data used to annotate. If None, the image's data is used. Optional.
valfmt
The format of the annotations inside the heatmap. This should either
use the string format method, e.g. "$ {x:.2f}", or be a
`matplotlib.ticker.Formatter`. Optional.
textcolors
A list or array of two color specifications. The first is used for
values below a threshold, the second for those above. Optional.
threshold
Value in data units according to which the colors from textcolors are
applied. If None (the default) uses the middle of the colormap as
separation. Optional.
**kwargs
All other arguments are forwarded to each call to `text` used to create
the text labels.
"""
import matplotlib
import numpy as np
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Normalize the threshold to the images color range.
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max())/2.
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center",
verticalalignment="center")
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
texts.append(text)
return texts
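# Hedged usage sketch for the two helpers above (random toy matrix, label names
# made up): draw the heatmap, then write each cell's value on top of it.
def _heatmap_usage_example():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.random.rand(3, 4)
    fig, ax = plt.subplots()
    im, cbar = heatmap(data, ['r1', 'r2', 'r3'], ['c1', 'c2', 'c3', 'c4'],
                       ax=ax, cmap='YlGn', cbarlabel='correlation')
    annotate_heatmap(im, valfmt='{x:.2f}')
    return fig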
def load_cpt_models(lats=[-15, 15], plot=False):
"""load cold_point temperature from various models"""
import xarray as xr
import aux_functions_strat as aux
import pandas as pd
import matplotlib.pyplot as plt
def produce_anoms_from_model(path_and_filename, cpt='ctpt', lats=lats,
time_dim='time'):
if 'cfsr' in path_and_filename.as_posix():
ds = xr.open_dataset(path_and_filename, decode_times=False)
ds[time_dim] = pd.to_datetime(
ds[time_dim], origin='julian', unit='D')
else:
ds = xr.open_dataset(path_and_filename)
ds = ds.sortby('lat')
da = ds[cpt].sel(lat=slice(lats[0], lats[1])).mean('lat')
da_anoms = aux.deseason_xr(da, how='mean')
da_anoms = add_times_to_attrs(da_anoms)
# replace time with monthly means that start with 1-1 of each month:
start_date = pd.to_datetime(
da_anoms[time_dim][0].values).strftime('%Y-%m')
new_time = pd.date_range(
start_date,
periods=da_anoms[time_dim].size,
freq='MS')
da_anoms[time_dim] = new_time
return da_anoms
# era40:
era40_anoms = produce_anoms_from_model(
sean_tropopause_path / 'era40.tp.monmean.zm.nc')
era40_anoms.name = 'era40_cpt_anoms_eq'
# era interim:
erai_anoms = produce_anoms_from_model(
sean_tropopause_path / 'erai.tp.monmean.zm.nc')
erai_anoms.name = 'era_interim_cpt_anoms_eq'
# jra25:
jra25_anoms = produce_anoms_from_model(
sean_tropopause_path / 'jra25.tp.monmean.zm.nc')
jra25_anoms.name = 'jra25_cpt_anoms_eq'
# jra55:
jra55_anoms = produce_anoms_from_model(
sean_tropopause_path / 'jra55.monmean.zm.nc')
jra55_anoms.name = 'jra55_cpt_anoms_eq'
# merra:
merra_anoms = produce_anoms_from_model(
sean_tropopause_path / 'merra.tp.monmean.zm.nc')
merra_anoms.name = 'merra_cpt_anoms_eq'
# merra2:
merra2_anoms = produce_anoms_from_model(
sean_tropopause_path / 'merra2.tp.monmean.zm.nc')
merra2_anoms.name = 'merra2_cpt_anoms_eq'
# ncep:
ncep_anoms = produce_anoms_from_model(
sean_tropopause_path / 'ncep.tp.monmean.zm.nc')
ncep_anoms.name = 'ncep_cpt_anoms_eq'
# cfsr:
cfsr_anoms = produce_anoms_from_model(
sean_tropopause_path / 'cfsr.monmean.zm.nc')
cfsr_anoms.name = 'cfsr_cpt_anoms_eq'
# merge all:
cpt_models = xr.merge([era40_anoms,
erai_anoms,
jra25_anoms,
jra55_anoms,
merra_anoms,
merra2_anoms,
ncep_anoms,
cfsr_anoms])
if plot:
# fig, ax = plt.subplots(figsize=(11, 11), sharex=True)
df = cpt_models.to_dataframe()
model_names = [x.split('_')[0] for x in df.columns]
df = df[df.index > '1979']
for i, col in enumerate(df.columns):
df.iloc[:, i] += i*5.0
ax = df.plot(legend=False, figsize=(11, 11))
ax.grid()
ax.legend(model_names, loc='best', fancybox=True, framealpha=0.5) # , bbox_to_anchor=(0.85, 1.05))
ax.set_title('Cold-Point-Temperature anoms from various models')
return cpt_models
def add_times_to_attrs(da, time_dim='time', mm_only=True):
import pandas as pd
da_no_nans = da.dropna(time_dim)
dt_min = da_no_nans.time.values[0]
dt_max = da_no_nans.time.values[-1]
if mm_only:
dt_min_str = pd.to_datetime(dt_min).strftime('%Y-%m')
dt_max_str = pd.to_datetime(dt_max).strftime('%Y-%m')
else:
dt_min_str = pd.to_datetime(dt_min).strftime('%Y-%m-%d')
dt_max_str = pd.to_datetime(dt_max).strftime('%Y-%m-%d')
da.attrs['first_date'] = dt_min_str
da.attrs['last_date'] = dt_max_str
return da
def load_wv_data(lag=2, plot=False):
import xarray as xr
import numpy as np
import aux_functions_strat as aux
# first load swoosh:
swoosh = xr.open_dataset(work_chaim / 'swoosh_latpress-2.5deg.nc')
com_nofill = swoosh.combinedanomh2oq
com_nofill = com_nofill.sel(level=slice(83, 81)).squeeze(drop=True)
weights = np.cos(np.deg2rad(com_nofill['lat']))
swoosh_combined_near_global = (
weights.sel(lat=slice(-60, 60)) * com_nofill.sel(lat=slice(-60, 60))).sum('lat') / sum(weights)
swoosh_combined_equatorial = (
weights.sel(lat=slice(-15, 15)) * com_nofill.sel(lat=slice(-15, 15))).sum('lat') / sum(weights)
swoosh_anoms_near_global = aux.deseason_xr(
swoosh_combined_near_global, how='mean')
swoosh_anoms_near_global = add_times_to_attrs(swoosh_anoms_near_global)
swoosh_anoms_near_global.name = 'swoosh_anoms_near_global'
swoosh_anoms_equatorial = aux.deseason_xr(
swoosh_combined_equatorial, how='mean')
swoosh_anoms_equatorial.name = 'swoosh_anoms_equatorial'
swoosh_anoms_equatorial = add_times_to_attrs(swoosh_anoms_equatorial)
wv_anoms_randel = read_ascii_randel(cwd)
wv_anoms_randel.name = 'wv_anoms_near_global_from_randel'
wv_anoms_randel = add_times_to_attrs(wv_anoms_randel)
wv_anoms = xr.merge([wv_anoms_randel,
swoosh_anoms_near_global,
swoosh_anoms_equatorial])
print('loaded wv anoms data...')
if plot:
df = wv_anoms.to_dataframe()
#model_names = ['Randel near global', 'SWOOSH near global',
# 'SWOOSH equatorial']
for i, col in enumerate(df.columns):
df.iloc[:, i] += i*0.45
ax = df.plot(legend=True, figsize=(17, 5))
ax.grid()
ax.grid('on', which='minor', axis='x' )
# ax.legend(model_names, loc='best', fancybox=True, framealpha=0.5) # , bbox_to_anchor=(0.85, 1.05))
ax.set_title('Water Vapor anoms from Randel and SWOOSH')
# now add 2 month lag:
if lag is not None:
for da in wv_anoms.data_vars.values():
new_da = da.shift(time=-1 * lag)
new_da.name = da.name + '_' + str(lag) + 'm'
wv_anoms[new_da.name] = new_da
print('added {} month lag to anom data...'.format(lag))
return wv_anoms
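# Hedged sketch (toy array) of the lag step above: shifting along 'time' by
# -lag moves each value lag steps earlier and pads the tail with NaN.
def _lag_shift_example():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.arange(4.), dims='time')
    return da.shift(time=-2)  # values become [2., 3., nan, nan]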
def gantt_chart(ds):
import pandas as pd
from matplotlib.pyplot import cm
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
df = ds.to_dataframe()
x2 = df.index[-1].to_pydatetime()
x1 = df.index[0].to_pydatetime()
y = df.index.astype(int)
names = df.columns
labs, tickloc, col = [], [], []
# create color iterator for multi-color lines in gantt chart
color=iter(cm.Dark2(np.linspace(0,1,len(y))))
plt.figure(figsize=(8,10))
fig, ax = plt.subplots()
# generate a line and line properties for each station
for i in range(len(y)):
c=next(color)
plt.hlines(i+1, x1[i], x2[i], label=y[i], color=c, linewidth=2)
labs.append(names[i].title()+" ("+str(y[i])+")")
tickloc.append(i+1)
col.append(c)
plt.ylim(0,len(y)+1)
plt.yticks(tickloc, labs)
# create custom x labels
plt.xticks(np.arange(datetime(np.min(x1).year,1,1),np.max(x2)+timedelta(days=365.25),timedelta(days=365.25*5)),rotation=45)
plt.xlim(datetime(np.min(x1).year,1,1),np.max(x2)+timedelta(days=365.25))
plt.xlabel('Date')
plt.ylabel('USGS Official Station Name and Station Id')
plt.grid()
plt.title('USGS Station Measurement Duration')
# color y labels to match lines
gytl = plt.gca().get_yticklabels()
for i in range(len(gytl)):
gytl[i].set_color(col[i])
plt.tight_layout()
return
def correlate_wv_models_radio(
times1=['1993', '2017'], times2=['2005', '2017']):
from strato_soundings import calc_cold_point_from_sounding
from strato_soundings import get_cold_point_from_wang_sounding
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
radio_cold3_t1 = calc_cold_point_from_sounding(path=sound_path,
times=(
times1[0], times1[1]),
plot=False, return_mean=True)
radio_cold3_t2 = calc_cold_point_from_sounding(path=sound_path,
times=(
times2[0], times2[1]),
plot=False, return_mean=True)
radio_cold3_t1.name = 'radio_cpt_anoms_3_stations_randel'
radio_cold3_t2.name = 'radio_cpt_anoms_3_stations_randel'
ds = get_cold_point_from_wang_sounding()
radio_wang = ds.to_array().mean('variable')
radio_wang.name = 'radio_cpt_anoms_3_stations_wang'
wv_anoms = load_wv_data()
cpt_models = load_cpt_models()
to_compare1 = xr.merge([wv_anoms, cpt_models, radio_cold3_t1, radio_wang])
to_compare2 = xr.merge([wv_anoms, cpt_models, radio_cold3_t2, radio_wang])
to_compare1 = to_compare1.sel(time=slice(times1[0], times1[1]))
to_compare2 = to_compare2.sel(time=slice(times2[0], times2[1]))
corr1 = to_compare1.to_dataframe().corr()
corr2 = to_compare2.to_dataframe().corr()
mask = np.zeros_like(corr1)
mask[np.triu_indices_from(mask)] = True
df_mask = corr1.copy()
df_mask[:] = mask
df_mask = df_mask.astype(bool)
corr1[df_mask] = corr2[df_mask]
fig, ax = plt.subplots(figsize=(11, 11))
# mask = np.zeros_like(corr)
# mask[np.triu_indices_from(mask)] = True
h = sns.heatmap(corr1, annot=True, cmap="YlGn", ax=ax,
cbar=True)
h.set_xticklabels(
h.get_xticklabels(),
rotation=45,
horizontalalignment='right')
# im, cbar = heatmap(corr.values, corr.index.values, corr.columns, ax=ax,
# cmap="YlGn", cbarlabel="correlation")
# texts = annotate_heatmap(im, valfmt="{x:.2f}")
ax.hlines([6, 16], xmin=0, xmax=6, color='r')
ax.vlines([0, 6], ymin=6, ymax=16, color='r')
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
# for lab, annot in zip(ax.get_yticklabels(), ax.texts):
# text = lab.get_text()
# if text == 'radio_cpt_anoms_3_stations_randel': # lets highlight row 2
# # set the properties of the ticklabel
# # lab.set_weight('bold')
# # lab.set_size(20)
# lab.set_color('purple')
# # set the properties of the heatmap annot
# annot.set_weight('bold')
# annot.set_color('purple')
# # annot.set_size(20)
ax.set_title('Upper right heatmap: {} to {}, lower left heatmap: {} to {}.'.format(
times2[0], times2[1], times1[0], times1[1]), fontdict=font)
# fig.tight_layout()
# fig.colorbar(h.get_children()[0], ax=axes[1])
plt.subplots_adjust(left=0.3, bottom=0.25, right=0.95)
# plt.tight_layout()
plt.show()
# plt.subplots_adjust(left=0.35, bottom=0.4, right=0.95)
return fig
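# Hedged sketch (toy 3x3 frames, not the script's correlation matrices) of the
# triangle-merge trick used above: a boolean mask built from
# np.triu_indices_from lets the upper triangle of corr1 be overwritten by
# corr2 so a single heatmap can show both periods at once.
def _triangle_merge_example():
    import numpy as np
    import pandas as pd
    corr1 = pd.DataFrame(np.eye(3), columns=list('abc'), index=list('abc'))
    corr2 = corr1 * 0.5
    mask = np.zeros_like(corr1)
    mask[np.triu_indices_from(mask)] = True
    df_mask = corr1.copy()
    df_mask[:] = mask
    df_mask = df_mask.astype(bool)
    corr1[df_mask] = corr2[df_mask]
    return corr1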
def get_randel_corr(lats=[-10, 10], times=['1993', '2017']):
import numpy as np
import xarray as xr
import aux_functions_strat as aux
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from strato_soundings import calc_cold_point_from_sounding
# radio_cold = calc_cold_point_from_sounding(path=sound_path, times=('1993', '2017'),
# plot=False, return_cold=True)
radio_cold3 = calc_cold_point_from_sounding(path=sound_path,
times=(times[0], times[1]),
plot=False, return_mean=True)
radio_cold3.name = 'radiosonde_cold_point_anomalies_3_stations'
radio_smooth = radio_cold3.rolling(time=3, center=True).mean()
radio_smooth.name = 'radiosonde_smooth_3_months'
wv_anoms_HM = read_ascii_randel(cwd)
wv_anoms_HM_2M_lagged = wv_anoms_HM.shift(time=-2)
wv_anoms_HM_2M_lagged.name = wv_anoms_HM.name + ' + 2M lag'
swoosh = xr.open_dataset(work_chaim / 'swoosh_latpress-2.5deg.nc')
haloe = xr.open_dataset(work_chaim /
'swoosh-v02.6-198401-201812/swoosh-v02.6-198401-201812-latpress-2.5deg-L31.nc', decode_times=False)
haloe['time'] = swoosh.time
haloe_names = [x for x in haloe.data_vars.keys()
if 'haloe' in x and 'h2o' in x]
haloe = haloe[haloe_names].sel(level=slice(83, 81), lat=slice(lats[0],
lats[1]))
# com=swoosh.combinedanomfillanomh2oq
com = swoosh.combinedanomfillanomh2oq
com_nofill = swoosh.combinedanomh2oq
com = com.sel(level=slice(83, 81), lat=slice(lats[0], lats[1]))
com_nofill = com_nofill.sel(level=slice(83, 81), lat=slice(lats[0],
lats[1]))
weights = np.cos(np.deg2rad(com['lat'].values))
com_latmean = (weights * com).sum('lat') / sum(weights)
com_latmean_2M_lagged = com_latmean.shift(time=-2)
com_latmean_2M_lagged.name = com_latmean.name + ' + 2M lag'
com_nofill_latmean = (weights * com_nofill).sum('lat') / sum(weights)
com_nofill_latmean_2M_lagged = com_nofill_latmean.shift(time=-2)
com_nofill_latmean_2M_lagged.name = com_nofill_latmean.name + ' + 2M lag'
haloe_latmean = (weights * haloe.haloeanomh2oq).sum('lat') / sum(weights)
era40 = xr.open_dataarray(work_chaim / 'ERA40_T_mm_eq.nc')
era40 = era40.sel(level=100)
weights = np.cos(np.deg2rad(era40['lat'].values))
era40_latmean = (weights * era40).sum('lat') / sum(weights)
era40anom_latmean = aux.deseason_xr(era40_latmean, how='mean')
era40anom_latmean.name = 'era40_100hpa_anomalies'
era5 = xr.open_dataarray(work_chaim / 'ERA5_T_eq_all.nc')
cold_point = era5.sel(level=slice(150, 50)).min(['level', 'lat',
'lon'])
cold_point = aux.deseason_xr(cold_point, how='mean')
cold_point.name = 'cold_point_from_era5'
era5 = era5.mean('lon').sel(level=100)
weights = np.cos(np.deg2rad(era5['lat'].values))
era5_latmean = (weights * era5).sum('lat') / sum(weights)
era5anom_latmean = aux.deseason_xr(era5_latmean, how='mean')
era5anom_latmean.name = 'era5_100hpa_anomalies'
merra = xr.open_dataarray(work_chaim / 'T_regrided.nc')
merra['time'] =
|
pd.date_range(start='1979', periods=merra.time.size, freq='MS')
|
pandas.date_range
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 6 10:41:53 2018
@author: stevechen
"""
import pandas as pd
import numpy as np
import math
# Read file to get wanted variables from the Airbnb data set
def Get_Airbnb_Data(filename):
data=
|
pd.read_csv(filename,sep=',')
|
pandas.read_csv
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_prepModel.ipynb (unless otherwise specified).
__all__ = ['load_s3', 'merge_frame', 'dummies_and_scale', 'full_frame']
# Cell
import pandas as pd
import requests
import boto3
import json
from io import BytesIO
import joblib
import pickle
import numpy as np
|
pd.set_option('display.max_columns', None)
|
pandas.set_option
|
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team
from .team import get_event_team_by_name
from .submission import get_bagged_scores
from .submission import get_scores
from .submission import get_submission_max_ram
from .submission import get_time
width = -1 if LooseVersion(pd.__version__) < LooseVersion("1.0.0") else None
pd.set_option('display.max_colwidth', width)
def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
with_links=True):
"""Format the leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to build.
event_name : str
The name of the event.
with_links : bool
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : dataframe
The leaderboard in a dataframe format.
"""
record_score = []
event = session.query(Event).filter_by(name=event_name).one()
map_score_precision = {score_type.name: score_type.precision
for score_type in event.score_types}
for sub in submissions:
# take only max n bag
df_scores_bag = get_bagged_scores(session, sub.id)
highest_level = df_scores_bag.index.get_level_values('n_bag').max()
df_scores_bag = df_scores_bag.loc[(slice(None), highest_level), :]
df_scores_bag.index = df_scores_bag.index.droplevel('n_bag')
df_scores_bag = df_scores_bag.round(map_score_precision)
df_scores = get_scores(session, sub.id)
df_scores = df_scores.round(map_score_precision)
df_time = get_time(session, sub.id)
df_time = df_time.stack().to_frame()
df_time.index = df_time.index.set_names(['fold', 'step'])
df_time = df_time.rename(columns={0: 'time'})
df_time = df_time.sum(axis=0, level="step").T
df_scores_mean = df_scores.groupby('step').mean()
df_scores_std = df_scores.groupby('step').std()
# select only the validation and testing steps and rename them to
# public and private
map_renaming = {'valid': 'public', 'test': 'private'}
df_scores_mean = (df_scores_mean.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_std = (df_scores_std.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_bag = (df_scores_bag.rename(index=map_renaming)
.stack().to_frame().T)
df = pd.concat([df_scores_bag, df_scores_mean, df_scores_std], axis=1,
keys=['bag', 'mean', 'std'])
df.columns = df.columns.set_names(['stat', 'set', 'score'])
# change the multi-index into a stacked index
df.columns = df.columns.map(lambda x: " ".join(x))
# add the aggregated time information
df_time.index = df.index
df_time = df_time.rename(
columns={'train': 'train time [s]',
'valid': 'validation time [s]',
'test': 'test time [s]'}
)
df = pd.concat([df, df_time], axis=1)
if leaderboard_type == 'private':
df['submission ID'] = sub.basename.replace('submission_', '')
df['team'] = sub.team.name
df['submission'] = sub.name_with_link if with_links else sub.name
df['contributivity'] = int(round(100 * sub.contributivity))
df['historical contributivity'] = int(round(
100 * sub.historical_contributivity))
df['max RAM [MB]'] = get_submission_max_ram(session, sub.id)
df['submitted at (UTC)'] = pd.Timestamp(sub.submission_timestamp)
record_score.append(df)
# stack all the records
df =
|
pd.concat(record_score, axis=0, ignore_index=True, sort=False)
|
pandas.concat
|
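# Hedged illustrative sketch (toy frame, not RAMP data) of the column
# flattening used in _compute_leaderboard: a three-level column MultiIndex
# ('stat', 'set', 'score') is collapsed into single space-joined column names.
def _flatten_columns_example():
    import pandas as pd
    cols = pd.MultiIndex.from_tuples(
        [('bag', 'public', 'acc'), ('mean', 'private', 'acc')],
        names=['stat', 'set', 'score'])
    df = pd.DataFrame([[0.9, 0.8]], columns=cols)
    df.columns = df.columns.map(lambda x: " ".join(x))
    return list(df.columns)  # -> ['bag public acc', 'mean private acc']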
from unittest import TestCase
import pandas as pd
from datamatch.filters import DissimilarFilter, NonOverlappingFilter
class DissimilarFilterTestCase(TestCase):
def test_valid(self):
f = DissimilarFilter('agency')
index = ['agency', 'uid']
self.assertFalse(f.valid(
pd.Series(['slidell pd', '123'], index=index),
pd.Series(['slidell pd', '456'], index=index)
))
self.assertTrue(f.valid(
pd.Series(['gretna pd', '123'], index=index),
|
pd.Series(['slidell pd', '456'], index=index)
|
pandas.Series
|
import os
import sys
import glob
import logging
import psycopg2
import pandas as pd
from sql_queries import *
from typing import Callable
import psycopg2.extensions as psycopg2Ext
logger = logging.getLogger(__name__)
def process_song_file(cur: psycopg2Ext.cursor, filepath: str) -> None:
"""
Description: This method reads each song file to get the artist and
song observations and populate the `artists` and `songs` dim
tables.
Arguments:
cur (psycopg2Ext.cursor): the cursor object.
filepath (str): log data file path.
Returns:
None
"""
# open song file
try:
df = pd.read_json(filepath, lines=True)
except Exception:
msg = f"Error: Could not read JSON content of {filepath}"
logger.warning(msg)
return
# extract artist record from df
try:
artist_data = (
df[
[
"artist_id",
"artist_name",
"artist_location",
"artist_latitude",
"artist_longitude",
]
]
.values[0]
.tolist()
)
except Exception:
msg = "Error: Could not extract artist columns from df"
logger.warning(msg)
return
# insert artist record into artist table
try:
cur.execute(artist_table_insert, artist_data)
except psycopg2.Error as e:
msg = f"Error: Could not insert artist record in artist table"
logger.warning(msg, e)
return
# extract song record from df
try:
song_data = (
df[["song_id", "title", "artist_id", "year", "duration"]].values[0].tolist()
)
except Exception:
msg = "Error: Could not extract song columns from df"
logger.warning(msg)
return
# insert song record into song table
try:
cur.execute(song_table_insert, song_data)
except psycopg2.Error as e:
msg = f"Error: Could not insert song record in song table"
logger.warning(msg, e)
return
def process_log_file(cur: psycopg2Ext.cursor, filepath: str) -> None:
"""
Description: This method reads each log file to extract the
time and user observations and populate the `time` and
`users` dim tables. It then, queries `song_id` and
`artist_id` from `songs` and `artists` tables to insert
with other relevant information into the `songplays`
fact table.
Arguments:
cur (psycopg2Ext.cursor): the cursor object.
filepath (str): log data file path.
Returns:
None
"""
# open log file
try:
df = pd.read_json(filepath, lines=True)
except Exception:
msg = f"Error: Could not read JSON content of {filepath}"
logger.warning(msg)
return
# convert timestamp column to datetime
df["ts"] = pd.to_datetime(df["ts"], unit="ms")
# filter by specific page action
page = "NextSong"
t = df[df["page"] == page]
# insert time data records
time_data = (
t.ts,
t.ts.dt.hour,
t.ts.dt.day,
t.ts.dt.isocalendar().week,
t.ts.dt.month,
t.ts.dt.year,
t.ts.dt.weekday,
)
column_labels = ("start_time", "hour", "day", "week", "month", "year", "weekday")
data = {column_labels[i]: time_data[i] for i in range(len(column_labels))}
time_df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
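# Hedged sketch (synthetic epoch-millisecond timestamps; assumes pandas >= 1.1
# for .isocalendar()) of the time-table expansion above: parse with unit='ms',
# then split the timestamps into calendar parts.
def _time_parts_example():
    import pandas as pd
    ts = pd.to_datetime(pd.Series([1541105830796, 1541106106796]), unit="ms")
    return pd.DataFrame({"start_time": ts, "hour": ts.dt.hour, "day": ts.dt.day,
                         "week": ts.dt.isocalendar().week, "month": ts.dt.month,
                         "year": ts.dt.year, "weekday": ts.dt.weekday})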
from pyspark.sql import DataFrame
from imblearn.over_sampling import SMOTE
import pandas as pd
from sparksampling.core.job.base_job import BaseJob
from sparksampling.utilities.utilities import pandas_to_spark
class ImbSmoteSamplingJob(BaseJob):
type_map = {
'k_neighbors': int,
'drop_list': list,
'col_key': str
}
def __init__(self, k_neighbors=3, drop_list=None, col_key=None):
super(ImbSmoteSamplingJob, self).__init__()
if drop_list is None:
drop_list = []
self.k_neighbors = k_neighbors
self.drop_list = drop_list
self.col_key = col_key
self.check_type()
def _generate(self, df: DataFrame, *args, **kwargs) -> DataFrame:
df = df.toPandas()
y = df[[self.col_key]]
if self.col_key not in self.drop_list:
self.drop_list.append(self.col_key)
x = df.drop(self.drop_list, axis=1)
smote = SMOTE(k_neighbors=self.k_neighbors)
x_fit, y_fit = smote.fit_resample(x.values, y.values)
result_df = pd.concat([
|
pd.DataFrame(x_fit, columns=x.columns)
|
pandas.DataFrame
|
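# Hedged standalone sketch (toy imbalanced data) of the SMOTE resampling step
# used in ImbSmoteSamplingJob: fit_resample returns balanced feature/label
# arrays that are then reassembled into a single pandas DataFrame.
def _smote_example():
    import numpy as np
    import pandas as pd
    from imblearn.over_sampling import SMOTE
    x = pd.DataFrame({'f1': np.r_[np.zeros(20), np.ones(5)],
                      'f2': np.r_[np.arange(20.), np.arange(5.)]})
    y = pd.Series([0] * 20 + [1] * 5, name='label')
    x_fit, y_fit = SMOTE(k_neighbors=3).fit_resample(x.values, y.values)
    return pd.concat([pd.DataFrame(x_fit, columns=x.columns),
                      pd.Series(y_fit, name='label')], axis=1)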
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..screenip_exe import Screenip
test = {}
class TestScreenip(unittest.TestCase):
"""
Unit tests for screenip.
"""
print("screenip unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for screenip unittest.
:return:
"""
pass
# screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)
# setup the test as needed
# e.g. pandas to open screenip qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for screenip unittest.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_screenip_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty screenip object
screenip_empty = Screenip(df_empty, df_empty)
return screenip_empty
def test_screenip_unit_fw_bird(self):
"""
unittest for function screenip.fw_bird:
:return:
"""
expected_results = pd.Series([0.0162, 0.0162, 0.0162], dtype='float')
result = pd.Series([], dtype='float')
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
try:
# for i in range(0,3):
# result[i] = screenip_empty.fw_bird()
screenip_empty.no_of_runs = len(expected_results)
screenip_empty.fw_bird()
result = screenip_empty.out_fw_bird
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fw_mamm(self):
"""
unittest for function screenip.fw_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.172, 0.172, 0.172], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.no_of_runs = len(expected_results)
result = screenip_empty.fw_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_bird(self):
"""
unittest for function screenip.dose_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000000., 4805.50175, 849727.21122], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_bird * self.solubility)/(self.bodyweight_assessed_bird / 1000.)
screenip_empty.out_fw_bird = pd.Series([10., 0.329, 1.8349], dtype='float')
screenip_empty.solubility = pd.Series([100., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([1.0, 2.395, 0.98], dtype='float')
result = screenip_empty.dose_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_mamm(self):
"""
unittest for function screenip.dose_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([8000000., 48205.7595, 3808036.37889], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_mamm * self.solubility)/(self.bodyweight_assessed_mammal / 1000)
screenip_empty.out_fw_mamm = pd.Series([20., 12.843, 6.998], dtype='float')
screenip_empty.solubility = pd.Series([400., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([1., 9.32, 0.834], dtype='float')
result = screenip_empty.dose_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_bird(self):
"""
unittest for function screenip.at_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000., 687.9231, 109.3361], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_avian_water) * ((self.bodyweight_assessed_bird / self.bodyweight_tested_bird)**(self.mineau_scaling_factor - 1.))
screenip_empty.ld50_avian_water = pd.Series([2000., 938.34, 345.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([100., 39.49, 183.54], dtype='float')
screenip_empty.ld50_bodyweight_tested_bird = pd.Series([200., 73.473, 395.485], dtype='float')
screenip_empty.mineau_scaling_factor = pd.Series([2., 1.5, 2.5], dtype='float')
result = screenip_empty.at_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_mamm(self):
"""
unittest for function screenip.at_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([11.89207, 214.0572, 412.6864], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.ld50_mammal_water = pd.Series([10., 250., 500.], dtype='float')
screenip_empty.ld50_bodyweight_tested_mammal = pd.Series([200., 39.49, 183.54], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([100., 73.473, 395.485], dtype='float')
result = screenip_empty.at_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fi_bird(self):
"""
unittest for function screenip.fi_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results =
|
pd.Series([0.012999, 0.026578, 0.020412], dtype='float')
|
pandas.Series
|
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
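# (Inferred from the expected values below: with all rates at zero the fee is just the flat
# minimum of 300, so the moq = 0 purchase buys (20000 - 300) / 20 = 985 shares.)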
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
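# (Inferred from the expected values below: the fee is the larger of value * rate and the
# minimum fee, e.g. 984.93 * 20 * 0.0153 is about 301.39 > 300 at moq = 0, while the moq = 10
# and moq = 100 purchases fall back to the 300 minimum.)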
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slippage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slippage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slippage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
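# (As exercised below: a 2-tuple of ints is inferred as a 'discr' axis, a 2-tuple with a float
# bound as 'conti', longer tuples as 'enum', and unrecognised type strings such as 'foo'/'bar'
# also fall back to 'enum'.)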
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = list(zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]))
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == expected for ext_item, expected in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertIn(item, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)])
self.assertIn(item2, [1, 2, 3, 4])
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build ten subspaces around 10 of them
# check that each subspace is a Space and lies within s, extract a point set with interval 32, and verify the number of points generated
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
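# (closing_value compounds each investment to the last day at rate ir:
# 20000 * 1.1 ** 2 + 10000 = 34200 for cp1, matching the assertion below.)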
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
|
Timestamp('2024-12-31')
|
pandas.Timestamp
|
import os
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
datapath = os.path.join(datapath, 'covid_IDPH')
if __name__ == '__main__' :
filename = 'dash_EMS_trajectories_separate_sip_20200419.csv'
output_filename = '20200419_scen3_tracing_estimation_all.csv'
df = pd.read_csv(os.path.join(wdir,'simulation_output/_csv', filename))
df['date'] = pd.to_datetime(df['date'])
first_plot_day =
|
pd.Timestamp.today()
|
pandas.Timestamp.today
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import json
import numpy as np
from matplotlib import cm
def plot_history_ax(ax, history, mode, aggregation_type):
import seaborn as sns
def find_best(arr, ismin=True):
arr = np.array(arr)
if ismin:
best_loss_idx_train = np.where(arr == np.amin(arr))[0][0]
else:
best_loss_idx_train = np.where(arr == np.amax(arr))[0][0]
return best_loss_idx_train, arr[best_loss_idx_train]
sns.lineplot(data=history, ax=ax, x="epoch", y=mode + "_train", label='train', legend="brief")
sns.lineplot(data=history, ax=ax, x="epoch", y=mode + "_valid", label='validation', legend="brief")
if aggregation_type is not "all":
idx, val = find_best(history[mode + "_valid"], mode == 'loss')
ax.plot(idx, val, 'o', color='black')
if mode == 'loss':
ax.set_ylim(bottom=-0.001, top=0.5)
if mode == 'balanced_accuracy':
ax.set_ylim(bottom=-0.001, top=1.1)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels,
bbox_to_anchor=(0.5, -0.14),
loc='upper center',
ncol=2, fontsize=18
)
title=mode
if "_" in title:
title=title.replace("_", " ")
# ax.set_title(title)
ax.tick_params(axis="x", labelsize=18)
ax.tick_params(axis="y", labelsize=18)
plt.xlabel("epoch", fontsize=18)
plt.ylabel(title, fontsize=18)
def plot_results_ax(ax, results, columns):
def reshape_results(results, columns):
reshaped_columns = ["mode", "metric", "value"]
reshaped_df = pd.DataFrame(columns=reshaped_columns)
for col in list(results[columns].columns):
for idx, row in results.iterrows():
new_row = [[row["mode"], col, row[col]]]
row_df =
|
pd.DataFrame(new_row, columns=reshaped_columns)
|
pandas.DataFrame
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import unittest
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from mlos.Optimizers.RegressionModels.Prediction import Prediction
from mlos.Optimizers.RegressionModels.RegressionEnhancedRandomForestModel import \
RegressionEnhancedRandomForestRegressionModel, \
RegressionEnhancedRandomForestRegressionModelConfig
from mlos.Optimizers.RegressionModels.SklearnLassoRegressionModelConfig import SklearnLassoRegressionModelConfig
from mlos.Optimizers.RegressionModels.SklearnRandomForestRegressionModelConfig import\
SklearnRandomForestRegressionModelConfig
from mlos.Spaces import SimpleHypergrid, ContinuousDimension
import mlos.global_values as global_values
class TestRegressionEnhancedRandomForestRegressionModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
global_values.declare_singletons()
def setUp(self):
# Let's create a simple quadratic response function
self.input_space = SimpleHypergrid(
name="2d_X_search_domain",
dimensions=[
ContinuousDimension(name="x1", min=0.0, max=5.0),
ContinuousDimension(name="x2", min=0.0, max=5.0)
]
)
self.output_space = SimpleHypergrid(
name="degree2_polynomial",
dimensions=[
ContinuousDimension(name="degree2_polynomial_y", min=-10 ** 15, max=10 ** 15)
]
)
lasso_model_config = SklearnLassoRegressionModelConfig.DEFAULT
rf_model_config = SklearnRandomForestRegressionModelConfig.DEFAULT
self.model_config = \
RegressionEnhancedRandomForestRegressionModelConfig(
max_basis_function_degree=2,
min_abs_root_model_coef=0.02,
boosting_root_model_name=SklearnLassoRegressionModelConfig.__name__,
boosting_root_model_config=lasso_model_config,
random_forest_model_config=rf_model_config,
perform_initial_root_model_hyper_parameter_search=True,
perform_initial_random_forest_hyper_parameter_search=True)
# @unittest.expectedFailure # The configs don't belong to their respective config spaces
def test_lasso_feature_discovery(self):
rerf = RegressionEnhancedRandomForestRegressionModel(model_config=self.model_config,
input_space=self.input_space,
output_space=self.output_space)
num_x = 100
np.random.seed(17)
x = np.random.uniform(0, 5, [num_x, len(self.input_space.dimensions)])
x_df = pd.DataFrame(x, columns=['x1', 'x2'])
# y = 1 -3*X_1 -4*X_2 -0.5*X_1**2 -2*X_2**2
y_coef_true = np.array([1, -3, -4, -0.5, 0.0, -2.0])
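# (PolynomialFeatures(degree=2) on two features orders terms as [1, x1, x2, x1**2, x1*x2, x2**2],
# so the non-zero, non-intercept coefficients above sit at indices 1, 2, 3 and 5,
# i.e. the expected_fit_model_terms checked below.)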
poly_reg = PolynomialFeatures(degree=2)
poly_terms_x = poly_reg.fit_transform(x)
y = np.matmul(poly_terms_x, y_coef_true)
y_df = pd.DataFrame(y, columns=['degree2_polynomial_y'])
# fit model with same degree as true y
# rerf = RegressionEnhancedRandomForest(lasso_degree=2)
rerf.fit(x_df, y_df)
# test if expected non-zero terms were found
expected_fit_model_terms = {1, 2, 3, 5}
expected_symm_diff_found = expected_fit_model_terms - set(rerf.detected_feature_indices_)
num_diffs = len(list(expected_symm_diff_found))
assert num_diffs == 0
# @unittest.expectedFailure # The configs don't belong to their respective config spaces
def test_lasso_coefficients(self):
rerf = RegressionEnhancedRandomForestRegressionModel(
model_config=self.model_config,
input_space=self.input_space,
output_space=self.output_space
)
num_x = 1000
np.random.seed(23)
x = np.random.uniform(0, 5, [num_x, len(self.input_space.dimensions)])
x_df = pd.DataFrame(x, columns=['x1', 'x2'])
# y = 1 -3*X_1 -4*X_2 -0.5*X_1**2 -2*X_2**2
y_coef_true = np.array([1, -3, -4, -0.5, 0.0, -2.0])
poly_reg = PolynomialFeatures(degree=2)
poly_terms_x = poly_reg.fit_transform(x)
y = np.matmul(poly_terms_x, y_coef_true)
y_df = pd.DataFrame(y, columns=['degree2_polynomial_y'])
# fit model with same degree as true y
rerf.fit(x_df, y_df)
# test fit coef match known coef
epsilon = 10 ** -2
expected_non_zero_coef = y_coef_true[np.where(y_coef_true != 0.0)[0]]
fit_poly_coef = [rerf.base_regressor_.intercept_]
fit_poly_coef.extend(rerf.base_regressor_.coef_)
incorrect_terms = np.where(np.abs(fit_poly_coef - expected_non_zero_coef) > epsilon)[0]
num_incorrect_terms = len(incorrect_terms)
assert num_incorrect_terms == 0
# @unittest.expectedFailure # The configs don't belong to their respective config spaces
def test_polynomial_gradient(self):
print(self.model_config)
rerf = RegressionEnhancedRandomForestRegressionModel(model_config=self.model_config,
input_space=self.input_space,
output_space=self.output_space)
num_x = 100
np.random.seed(13)
x = np.random.uniform(0, 5, [num_x, len(self.input_space.dimensions)])
x_df =
|
pd.DataFrame(x, columns=['x1', 'x2'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from pathlib import Path
import pandas as pd, numpy as np
from itertools import combinations
from scipy.spatial.distance import pdist, squareform
from skbio import DistanceMatrix
from skbio.stats.distance import permanova
script_folder = Path.cwd()
outputs_folder = script_folder.parent / 'Outputs'
fname = outputs_folder / 'Seurat_integration_PCA_cell_embeddings.txt'
pca = pd.read_csv(fname, sep='\t', header=0, index_col=0, encoding='utf-8')
pca = pca.iloc[:, :18]
fname = outputs_folder / 'Seurat_integration_SNN_clusters.txt'
clusters = pd.read_csv(fname, sep='\t', header=0, index_col=0, encoding='utf-8')
fname = outputs_folder / 'WT_and_KO_cells_celltypes.txt'
celltypes =
|
pd.read_csv(fname, sep='\t', header=0, index_col=0, encoding='utf-8')
|
pandas.read_csv
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
author: <NAME>
email: <EMAIL>
license: Apache License 2.0
"""
import numpy as np
import pandas as pd
import itertools
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
class Feature(object):
def __init__(self, data, target, features):
"""
Args:
data: A dataset you wanna play with.
target: The target name of your dataset.
features: The feature names of your dataset.
"""
self.data = data
self.target = target
self.features = features
self.dup = []
self.int_lst, self.float_lst, self.object_lst, self.non_obj = [], [], [], []
self.corr_lst = []
self.new_data_corr = pd.DataFrame()
self.result_COM = pd.DataFrame()
self.result_LDA =
|
pd.DataFrame()
|
pandas.DataFrame
|
import shutil
import warnings
import numpy as np
import pandas as pd
import plotly.express as px
import streamlit as st
import umap
from category_encoders.one_hot import OneHotEncoder
# from cyvcf2 import VCF
from MulticoreTSNE import MulticoreTSNE as TSNE
from sklearn.decomposition import PCA
from sklearn.impute import KNNImputer
from sklearn.neighbors import KNeighborsClassifier
from snps import SNPs
warnings.filterwarnings("ignore")
st.set_option('deprecation.showfileUploaderEncoding', False)
def main():
# Render the readme as markdown using st.markdown.
readme_text = st.markdown(get_file_content_as_string("intro.md"))
st.title("Project Video")
st.video(None)
# Once we have the dependencies, add a selector for the app mode on the sidebar.
st.sidebar.title("Model Settings")
# select which set of SNPs to explore
aisnp_set = st.sidebar.radio(
"Set of model type:",
("Facial classification", "Text classification"),
)
if aisnp_set == "Facial classification":
resize = st.sidebar.number_input(label="Enter an image resize size. Ex. 256", step=1, min_value=1, value=256)
crop = st.sidebar.number_input(label="Enter an image center crop size. Ex. 256 (enter 1 for None)", step=1,
min_value=1, value=256)
if crop > resize:
st.sidebar.error("crop size should be less than or equal to resize size")
normalize = st.sidebar.text_input(label="Enter your desired normalization in the format: [[0.5,0.5,0.5],[0.5,"
"0.5,0.5]]", value="[[0.5,0.5,0.5],[0.5,0.5,0.5]]")
elif aisnp_set == "Text classification":
tokenizer = st.sidebar.radio(
"Tokenizer:", ("T5", "Bert")
)
max_length = st.sidebar.number_input(label="Enter text max length", step=1,
min_value=1, value=256)
classes = st.sidebar.text_input(label="Enter the output classes in a comma separated ascending social class "
"order. Ex. \"1,2,0,3\" ")
# upload the file
user_file = st.sidebar.file_uploader("Upload your model in .pth format:")
# Collapsable user AISNPs DataFrame
if user_file is not None:
try:
with st.spinner("Uploading your model..."):
with open("user_snps_file.txt", "w") as file:
user_file.seek(0)
shutil.copyfileobj(user_file, file)
except Exception as e:
st.error(
f"Sorry, there was a problem processing your model file.\n {e}"
)
st.sidebar.button("Submit")
# filter and encode the user record
# user_record, aisnps_1kg = filter_user_genotypes(userdf, aisnps_1kg)
# user_encoded = encoder.transform(user_record)
# X_encoded = np.concatenate((X_encoded, user_encoded))
# del userdf
#
# # impute the user record and reduce the dimensions
# user_imputed = impute_missing(X_encoded)
# user_reduced = reducer.transform([user_imputed])
# # fit the knn before adding the user sample
# knn.fit(X_reduced, dfsamples[population_level])
#
# # concat the 1kg and user reduced arrays
# X_reduced = np.concatenate((X_reduced, user_reduced))
# dfsamples.loc["me"] = ["me"] * 3
#
# # plot
# plotly_3d = plot_3d(X_reduced, dfsamples, population_level)
# st.plotly_chart(plotly_3d, user_container_width=True)
#
# # predict the population for the user sample
# user_pop = knn.predict(user_reduced)[0]
# st.subheader(f"Your predicted {population_level}")
# st.text(f"Your predicted population using KNN classifier is {user_pop}")
# # show the predicted probabilities for each population
# st.subheader(f"Your predicted {population_level} probabilities")
# user_pop_probs = knn.predict_proba(user_reduced)
# user_probs_df = pd.DataFrame(
# [user_pop_probs[0]], columns=knn.classes_, index=["me"]
# )
# st.dataframe(user_probs_df)
#
# show_user_gts = st.sidebar.checkbox("Show Your Genotypes")
# if show_user_gts:
# user_table_title = "Genotypes of Ancestry-Informative SNPs in Your Sample"
# st.subheader(user_table_title)
# st.dataframe(user_record)
#
# else:
# # plot
# plotly_3d = plot_3d(X_reduced, dfsamples, population_level)
# st.plotly_chart(plotly_3d, user_container_width=True)
# Collapsable 1000 Genomes sample table
# show_1kg = st.sidebar.checkbox("Show 1k Genomes Genotypes")
# if show_1kg is True:
# table_title = (
# "Genotypes of Ancestry-Informative SNPs in 1000 Genomes Project Samples"
# )
# with st.spinner("Loading 1k Genomes DataFrame"):
# st.subheader(table_title)
# st.dataframe(aisnps_1kg)
# Render the readme as markdown using st.markdown.
readme_text = st.markdown(get_file_content_as_string("details.md"))
@st.cache
def get_file_content_as_string(mdfile):
"""Convenience function to convert file to string
:param mdfile: path to markdown
:type mdfile: str
:return: file contents
:rtype: str
"""
mdstring = ""
with open(mdfile, "r") as f:
for line in f:
mdstring += line
return mdstring
def get_1kg_samples():
"""Download the sample information for the 1000 Genomes Project
:return: DataFrame of sample-level population information
:rtype: pandas DataFrame
"""
onekg_samples = "data/integrated_call_samples_v3.20130502.ALL.panel"
dfsamples = pd.read_csv(onekg_samples, sep="\t")
dfsamples.set_index("sample", inplace=True)
dfsamples.drop(columns=["Unnamed: 4", "Unnamed: 5"], inplace=True)
dfsamples.columns = ["population", "super population", "gender"]
return dfsamples
@st.cache(show_spinner=True)
def encode_genotypes(df):
"""One-hot encode the genotypes
:param df: A DataFrame of samples with genotypes as columns
:type df: pandas DataFrame
:return: pandas DataFrame of one-hot encoded columns for genotypes and OHE instance
:rtype: pandas DataFrame, OneHotEncoder instance
"""
ohe = OneHotEncoder(cols=df.columns, handle_missing="return_nan")
X = ohe.fit_transform(df)
return pd.DataFrame(X, index=df.index), ohe
def dimensionality_reduction(X, algorithm="PCA"):
"""Reduce the dimensionality of the AISNPs
:param X: One-hot encoded 1kG AISNPs.
:type X: pandas DataFrame
:param algorithm: The type of dimensionality reduction to perform.
One of {PCA, UMAP, t-SNE}
:type algorithm: str
:returns: The transformed X DataFrame, reduced to 3 components by <algorithm>,
and the dimensionality reduction Transformer object.
"""
n_components = 3
if algorithm == "PCA":
reducer = PCA(n_components=n_components)
elif algorithm == "t-SNE":
reducer = TSNE(n_components=n_components, n_jobs=4)
elif algorithm == "UMAP":
reducer = umap.UMAP(
n_components=n_components, min_dist=0.2, metric="dice", random_state=42
)
else:
return None, None
X_reduced = reducer.fit_transform(X.values)
return pd.DataFrame(X_reduced, columns=["x", "y", "z"], index=X.index), reducer
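# Usage sketch (illustrative only, variable names are hypothetical):
# X_reduced, reducer = dimensionality_reduction(X_encoded, algorithm="UMAP")
# X_reduced is then a DataFrame with columns ["x", "y", "z"], indexed like the input.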
@st.cache(show_spinner=True)
def filter_user_genotypes(userdf, aisnps_1kg):
"""Filter the user's uploaded genotypes to the AISNPs
:param userdf: The user's DataFrame from SNPs
:type userdf: pandas DataFrame
:param aisnps_1kg: The DataFrame containing snps for the 1kg project samples
:type aisnps_1kg: pandas DataFrame
:return: The user's DataFrame of AISNPs as columns, The 1kg DataFrame with user appended
:rtype: pandas DataFrame
"""
user_record =
|
pd.DataFrame(index=["your_sample"], columns=aisnps_1kg.columns)
|
pandas.DataFrame
|
#####
import matplotlib
matplotlib.use('Agg') ## set backend here
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42 # this keeps most text as actual text in PDFs, not outlines
import sys
import os
import math
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import seaborn as sns
import pandas as pd
from Bio import SeqIO
from Bio.Seq import Seq
import argparse
import importlib
|
pd.set_option('display.max_columns', 40)
|
pandas.set_option
|
'''
.plot() has several optional parameters. Most notably, the kind parameter accepts eleven different string values and determines which kind of plot you’ll create:
"area" is for area plots.
"bar" is for vertical bar charts.
"barh" is for horizontal bar charts.
"box" is for box plots.
"hexbin" is for hexbin plots.
"hist" is for histograms.
"kde" is for kernel density estimate charts.
"density" is an alias for "kde".
"line" is for line graphs.
"pie" is for pie charts.
"scatter" is for scatter plots.
'''
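# A minimal, self-contained sketch (not from the original script) of how the `kind` parameter
# selects the chart type; the DataFrame below is made up purely for illustration.
import pandas as pd
import matplotlib.pyplot as plt
_demo = pd.DataFrame({"x": list(range(10)), "y": [v ** 2 for v in range(10)]})
_demo.plot(kind="line", x="x", y="y")      # line graph
_demo.plot(kind="scatter", x="x", y="y")   # scatter plot
_demo["y"].plot(kind="hist")               # histogram
plt.close("all")                           # close the demo figures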
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import altair as alt
# import data
shelters = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-12-01/shelters.csv')
# see summary of data
shelters.info()
# convert datetime column
shelters['occupancy_date'] =
|
pd.to_datetime(shelters['occupancy_date'])
|
pandas.to_datetime
|
import os
import numpy as np
import pandas as pd
import plotnine
from plotnine import * # Provides a ggplot-like interface to matplotlib.
## CHANGE THESE AS NEEDED - default parameter values for snippets.
DATASET = 'aou-res-curation-output-prod.R2019Q1R2'
MEASUREMENT_OF_INTEREST = 'hemoglobin'
# Tip: the next four parameters could be set programmatically using one row from
# the result of measurements_of_interest_summary.sql
MEASUREMENT_CONCEPT_ID = 3000963 # Hemoglobin
UNIT_CONCEPT_ID = 8713 # gram per deciliter
MEASUREMENT_NAME = '<this should be the measurement name>'
UNIT_NAME = '<this should be the unit name>'
## BigQuery setup.
BILLING_PROJECT_ID = os.environ['GOOGLE_PROJECT']
## Plot setup.
theme_set(theme_minimal()) # Default theme for plots.
def get_boxplot_fun_data(df):
"""Returns a data frame with a y position and a label, for use annotating ggplot boxplots.
Args:
df: A data frame.
Returns:
A data frame with column y as max and column label as length.
"""
d = {'y': max(df), 'label': f'N = {len(df)}'}
return(
|
pd.DataFrame(data=d, index=[0])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression # used to build the OLS model
dataset =
|
pd.read_csv('resin.csv', index_col=0, header=0)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from math import pi, sqrt, exp
import time
import os
os.sys.path.append(os.path.dirname(os.path.abspath('.'))); from meta.Console import console
# Some methods extracted from:
# Mechanisms of metal-silicate equilibration in the terrestrial magma ocean
# <NAME> a;, <NAME> b, <NAME> a, <NAME> a, <NAME> b
class gravity:
"""
Calculates the gravity a body is subjected to as a function of depth.
"""
def __init__(self):
pass
class move_particle:
def __init__(self, body_type, system_params):
self.body_type = body_type
self.system_params = system_params
def viscosity(self, material, pressure, temperature):
"""
A calculation of viscosity using the diffusion coefficient. Diffusion is an act of Gibbs Free Energy minimization,
where atoms diffuse down a concentration gradient to minimum energy configuration. Diffusion is related to
the viscosity of the material.
:param material: the name of the material, as listed in 'physical_parameters.csv' in this file's working directory
:return: viscosity, Pa*s = (N*s)/m^2=kg/(s*m)
"""
material_properties =
|
pd.read_csv("dynamics/physical_parameters.csv", index_col='Material')
|
pandas.read_csv
|
# app/robo_advisor.py
import requests
import json
import datetime
import csv
from dotenv import load_dotenv
import os
#import statistics
import pandas as pd
load_dotenv()
API_KEY = os.environ["ALPHAVANTAGE_API_KEY"]
#using requests package to access the API
####if symbol numeric/inappropriate length prompt "Oh, expecting a properly-formed stock symbol like 'MSFT'. Please try again."
def get_response(symbol):
request_url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={input_symbol}&outputsize=compact&apikey={API_KEY}"
response = requests.get(request_url)
parsed_response = json.loads(response.text)
if symbol.isdigit():
print("Oh, input symbol shouldn't be a number, enter a stock symbol like 'MSFT'. Please try again.")
exit()
elif "Error Message" in parsed_response:
print("Sorry, couldn't find any trading data for that symbol")
exit()
return parsed_response
#1998-12-23': {'1. open': '140.3800', '2. high': '143.8100', '3. low': '139.3800', '4. close': '143.5600', '5. volume': '8735000'}
def transform_response(parsed_response):
x=parsed_response["Time Series (Daily)"]
rows = []
for date, daily_prices in x.items():
row = {
"timestamp": date,
"open": float(daily_prices["1. open"]),
"high": float(daily_prices["2. high"]),
"low": float(daily_prices["3. low"]),
"close": float(daily_prices["4. close"]),
"volume": int(daily_prices["5. volume"])
}
rows.append(row)
return rows
def write_to_csv(rows, csv_filepath):
csv_headers = ["timestamp", "open", "high", "low", "close", "volume"]
with open(csv_filepath, "w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=csv_headers)
writer.writeheader() # uses fieldnames set above
for row in rows:
writer.writerow(row)
return True
def to_usd(my_price):
# utility function to convert float or integer to usd-formatted string (for printing)
return "${0:,.2f}".format(my_price) #> $12,000.71
#main program
if __name__ == "__main__":
time_now = datetime.datetime.now()
input_symbol = input("Please specify stock symbol (e.g AMZN) and press enter: ")
parsed_response = get_response(input_symbol)
tsd = transform_response(parsed_response)
latest_day = tsd[0]['timestamp']
last_close = tsd[0]['close']
high =[]
low =[]
for p in tsd:
high.append(p['high'])
low.append(p['low'])
recent_high = max(high)
recent_low = min(low)
#BUY-SELL Logic
df_high = pd.DataFrame(high)
df_low =
|
pd.DataFrame(low)
|
pandas.DataFrame
|
# This script creates the instrument for IV for the road networks and pollution project
# Importing required modules
import pandas as pd
from glob import glob
import re
# Defining username and filepaths
username = ''
filepath = 'C:/Users/' + username + '/Documents/Data/road_networks/raw_closures_data/'
# Initializing data structures
df = pd.DataFrame()
dates = []
ids = []
lanes_affected = []
severity = []
coords = []
locations = []
binvals = []
countvals = []
# Defining a list of all files to read
to_read = [x[len(x)-13:len(x)-4] for x in glob(filepath + '*')]
# Main loop
for read_it in to_read:
# Visualize progress
print(read_it)
# Read in the file
with open(filepath + read_it + '.txt') as f:
text = f.readlines()
text = text[0]
# Initializing lists for storing data for this file
# Retrieve the data from the file
flag = True
while flag == True:
start_id = text.find('orci:datagateway_oid')
if start_id == -1:
flag = False
else:
dates.append(read_it[:4])
text = text[start_id+23:]
end1 = text.find('"')
ids.append(text[:end1])
start_id = text.find('orci:lanes_affected')
text = text[start_id+22:]
end1 = text.find('"')
lanes_affected.append(text[:end1])
start_id = text.find('orci:severity')
text = text[start_id+16:]
end1 = text.find('"')
severity.append(text[:end1])
start_id = text.find('gml:pos')
text = text[start_id+10:]
end1 = text.find('"')
coords.append(text[:end1])
start_id = text.find('orci:route_name')
text = text[start_id+17:]
end1 = text.find('(')
end2 = text.find(')')
locations.append(text[end1+1:end2])
# Cleaning locations data
counties = ['AlbemarleCounty', 'ArlingtonCounty', 'CarolineCounty', 'CarrollCounty',
'CharlesCityCounty', 'ChesterfieldCounty', 'CulpeperCounty', 'FairfaxCounty',
'FauquierCounty', 'FrederickCounty', 'GilesCounty', 'HanoverCounty',
'HenricoCounty', 'KingWilliamCounty', 'LoudounCounty', 'MadisonCounty',
'PageCounty', 'PrinceEdwardCounty', 'PrinceWilliamCounty', 'RoanokeCounty',
'RockbridgeCounty', 'RockinghamCounty', 'StaffordCounty', 'WarrenCounty', 'WytheCounty']
cities = ['Alexandria', 'Fredericksburg', 'Hampton', 'Hopewell', 'Lynchburg', 'NewportNews',
'Norfolk', 'Richmond', 'Roanoke', 'Salem', 'Suffolk', 'VirginiaBeach', 'Winchester']
def cc_fx(string):
done = False
while done == False:
for c in counties + cities + ['termination_clause']:
if c in string:
new_string = c
done = True
break
elif c == 'termination_clause':
new_string = ''
done = True
return new_string
locs = [re.sub('[^a-zA-Z]+', '', l) for l in locations]
locations = [cc_fx(l) for l in locs]
# Making a dataframe
dates = pd.Series(dates, name = 'Date')
ids = pd.Series(ids, name = 'CLOSURE_ID')
lanes_affected = pd.Series(lanes_affected, name = 'Lanes_Affected')
severity = pd.Series(severity, name = 'Severity')
coords = pd.Series(coords, name = 'Coordinates')
locations = pd.Series(locations, name = 'Locations')
df = pd.concat([df, ids, dates, coords, locations, lanes_affected, severity], axis = 1)
# Cleaning the data and removing duplicates
dup_id = [df.CLOSURE_ID[x] + df.Date[x] for x in range(len(df))]
dup_id = pd.Series(dup_id, name = 'dup_id')
df = pd.concat([df, dup_id], axis = 1)
df = df.drop_duplicates(subset = 'dup_id', keep = 'first').reset_index(drop = True)
df = df.drop(columns = ['dup_id'])
df = df[df.Locations != ''].reset_index(drop = True)
# Creating the true IV variables
days = list(df.Date.unique())
final_col_names = [c + '_bin' for c in counties + cities] + [c + '_count' for c in counties + cities] + [c + '_lanes' for c in counties + cities]
iv = pd.DataFrame()
for cat in range(3):
for c in counties + cities:
col = []
tmp = df[df.Locations == c]
for d in days:
tmpx = tmp[tmp.Date == d]
if cat == 0:
col.append(int(len(tmpx) > 0))
elif cat == 1:
col.append(len(tmpx))
else:
la = [int(x) if '-' not in x else 0 for x in list(tmpx.Lanes_Affected)]
col.append(sum(la))
iv = pd.concat([iv,
|
pd.Series(col)
|
pandas.Series
|
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
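# (The expected weighted returns below follow directly: on 2015-01-04 the CL1 return is
# 0.5 * 0.05 + 0.5 * 0.1 = 0.075, while single-contract dates pass their return through unchanged.)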
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
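# Editor's note (arithmetic check, not part of the original test): for 2015-01-04 the CL2
# generic mixes CLG5 and CLH5 with equal weights, so the expected value is
# 0.5 * 0.1 + 0.5 * 0.8 == 0.45.
assert abs(0.5 * 0.1 + 0.5 * 0.8 - 0.45) < 1e-12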
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(
|
TS('2015-01-04')
|
pandas.Timestamp
|
import datetime as dt
import pytest
from distutils.version import LooseVersion
import numpy as np
try:
import pandas as pd
from pandas._testing import (
makeCustomDataframe, makeMixedDataFrame, makeTimeDataFrame
)
except ImportError:
pytestmark = pytest.mark.skip('pandas not available')
from bokeh.models.widgets.tables import (
NumberFormatter, IntEditor, NumberEditor, StringFormatter,
SelectEditor, DateFormatter, DataCube, CellEditor,
SumAggregator, AvgAggregator, MinAggregator
)
from panel.depends import bind
from panel.widgets import Button, DataFrame, Tabulator, TextInput
pd_old = pytest.mark.skipif(LooseVersion(pd.__version__) < '1.3',
reason="Requires latest pandas")
def test_dataframe_widget(dataframe, document, comm):
table = DataFrame(dataframe)
model = table.get_root(document, comm)
index_col, int_col, float_col, str_col = model.columns
assert index_col.title == 'index'
assert isinstance(index_col.formatter, NumberFormatter)
assert isinstance(index_col.editor, CellEditor)
assert int_col.title == 'int'
assert isinstance(int_col.formatter, NumberFormatter)
assert isinstance(int_col.editor, IntEditor)
assert float_col.title == 'float'
assert isinstance(float_col.formatter, NumberFormatter)
assert isinstance(float_col.editor, NumberEditor)
assert str_col.title == 'str'
assert isinstance(float_col.formatter, StringFormatter)
assert isinstance(float_col.editor, NumberEditor)
def test_dataframe_widget_no_show_index(dataframe, document, comm):
table = DataFrame(dataframe, show_index=False)
model = table.get_root(document, comm)
assert len(model.columns) == 3
int_col, float_col, str_col = model.columns
assert int_col.title == 'int'
assert float_col.title == 'float'
assert str_col.title == 'str'
table.show_index = True
assert len(model.columns) == 4
index_col, int_col, float_col, str_col = model.columns
assert index_col.title == 'index'
assert int_col.title == 'int'
assert float_col.title == 'float'
assert str_col.title == 'str'
def test_dataframe_widget_datetimes(document, comm):
table = DataFrame(makeTimeDataFrame())
model = table.get_root(document, comm)
dt_col, _, _, _, _ = model.columns
assert dt_col.title == 'index'
assert isinstance(dt_col.formatter, DateFormatter)
assert isinstance(dt_col.editor, CellEditor)
def test_dataframe_editors(dataframe, document, comm):
editor = SelectEditor(options=['A', 'B', 'C'])
table = DataFrame(dataframe, editors={'str': editor})
model = table.get_root(document, comm)
model_editor = model.columns[-1].editor
    assert model_editor is not editor
assert isinstance(model_editor, SelectEditor)
assert model_editor.options == ['A', 'B', 'C']
def test_dataframe_formatter(dataframe, document, comm):
formatter = NumberFormatter(format='0.0000')
table = DataFrame(dataframe, formatters={'float': formatter})
model = table.get_root(document, comm)
model_formatter = model.columns[2].formatter
assert model_formatter is not formatter
assert isinstance(model_formatter, NumberFormatter)
assert model_formatter.format == formatter.format
def test_dataframe_triggers(dataframe):
events = []
def increment(event, events=events):
events.append(event)
table = DataFrame(dataframe)
table.param.watch(increment, 'value')
table._process_events({'data': {'str': ['C', 'B', 'A']}})
assert len(events) == 1
def test_dataframe_does_not_trigger(dataframe):
events = []
def increment(event, events=events):
events.append(event)
table = DataFrame(dataframe)
table.param.watch(increment, 'value')
table._process_events({'data': {'str': ['A', 'B', 'C']}})
assert len(events) == 0
def test_dataframe_selected_dataframe(dataframe):
table = DataFrame(dataframe, selection=[0, 2])
pd.testing.assert_frame_equal(table.selected_dataframe, dataframe.iloc[[0, 2]])
def test_dataframe_process_selection_event(dataframe):
table = DataFrame(dataframe, selection=[0, 2])
table._process_events({'indices': [0, 2]})
pd.testing.assert_frame_equal(table.selected_dataframe, dataframe.iloc[[0, 2]])
def test_dataframe_process_data_event(dataframe):
df = dataframe.copy()
table = DataFrame(dataframe, selection=[0, 2])
table._process_events({'data': {'int': [5, 7, 9]}})
df['int'] = [5, 7, 9]
pd.testing.assert_frame_equal(table.value, df)
table._process_events({'data': {'int': {1: 3, 2: 4, 0: 1}}})
df['int'] = [1, 3, 4]
pd.testing.assert_frame_equal(table.value, df)
def test_dataframe_duplicate_column_name(document, comm):
df = pd.DataFrame([[1, 1], [2, 2]], columns=['col', 'col'])
with pytest.raises(ValueError):
table = DataFrame(df)
df = pd.DataFrame([[1, 1], [2, 2]], columns=['a', 'b'])
table = DataFrame(df)
with pytest.raises(ValueError):
table.value = table.value.rename(columns={'a': 'b'})
df = pd.DataFrame([[1, 1], [2, 2]], columns=['a', 'b'])
table = DataFrame(df)
table.get_root(document, comm)
with pytest.raises(ValueError):
table.value = table.value.rename(columns={'a': 'b'})
def test_hierarchical_index(document, comm):
df = pd.DataFrame([
('Germany', 2020, 9, 2.4, 'A'),
('Germany', 2021, 3, 7.3, 'C'),
('Germany', 2022, 6, 3.1, 'B'),
('UK', 2020, 5, 8.0, 'A'),
('UK', 2021, 1, 3.9, 'B'),
('UK', 2022, 9, 2.2, 'A')
], columns=['Country', 'Year', 'Int', 'Float', 'Str']).set_index(['Country', 'Year'])
table = DataFrame(value=df, hierarchical=True,
aggregators={'Year': {'Int': 'sum', 'Float': 'mean'}})
model = table.get_root(document, comm)
assert isinstance(model, DataCube)
assert len(model.grouping) == 1
grouping = model.grouping[0]
assert len(grouping.aggregators) == 2
agg1, agg2 = grouping.aggregators
assert agg1.field_ == 'Int'
assert isinstance(agg1, SumAggregator)
assert agg2.field_ == 'Float'
assert isinstance(agg2, AvgAggregator)
table.aggregators = {'Year': 'min'}
agg1, agg2 = grouping.aggregators
print(grouping)
assert agg1.field_ == 'Int'
assert isinstance(agg1, MinAggregator)
assert agg2.field_ == 'Float'
assert isinstance(agg2, MinAggregator)
def test_none_table(document, comm):
table = DataFrame(value=None)
assert table.indexes == []
model = table.get_root(document, comm)
assert model.source.data == {}
def test_tabulator_selected_dataframe():
df = makeMixedDataFrame()
table = Tabulator(df, selection=[0, 2])
|
pd.testing.assert_frame_equal(table.selected_dataframe, df.iloc[[0, 2]])
|
pandas.testing.assert_frame_equal
|
import unittest
import pandas as pd
import numpy as np
from autopandas_v2.ml.featurization.featurizer import RelationGraph
from autopandas_v2.ml.featurization.graph import GraphEdge, GraphEdgeType, GraphNodeType, GraphNode
from autopandas_v2.ml.featurization.options import GraphOptions
get_node_type = GraphNodeType.get_node_type
class TestRelationGraphFeaturizer(unittest.TestCase):
def test_basic_max(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
input_20 = GraphNode("I0", '[2,0]', get_node_type(input_df.iat[2, 0]))
input_21 = GraphNode("I0", '[2,1]', get_node_type(input_df.iat[2, 1]))
output_df =
|
pd.DataFrame([[2, 3]])
|
pandas.DataFrame
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training scripts for MolTrans backbone."""
from load_data_pairwise import *
from itertools import combinations
import itertools
from random import *
import paddle
from paddle.io import Dataset
import pdb
import paddle.nn.functional as F
import paddle.nn as nn
import paddle.distributed as dist
from lifelines.utils import concordance_index
import functools
import random
import time
from double_towers import MolTransModel
import numpy as np
import pandas as pd
from preprocess import drug_encoder, target_encoder
import argparse
print = functools.partial(print, flush=True)
# from config import *
np.random.seed(1)
paddle.seed(88)
def group_by(data, qid_index):
"""
    Group documents by query id.
    :param data: input data containing multiple queries and their corresponding documents
    :param qid_index: the column number where the qid is located in the input data
    :return: a dict mapping each qid to the row indices of its documents
"""
qid_doc_map = {}
idx = 0
#print(data)
for record in data:
#print(type(record[qid_index]))
qid_doc_map.setdefault(record[qid_index], [])
qid_doc_map[record[qid_index]].append(idx)
idx += 1
return qid_doc_map
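# Editor's sketch (hypothetical records, not part of the original script): with the qid in
# column 0, group_by maps each qid to the row indices of its documents, in input order.
_demo_map = group_by([("q1", 0.3), ("q2", 0.1), ("q1", 0.9)], 0)
assert _demo_map == {"q1": [0, 2], "q2": [1]}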
def sample_index(pairs,sampling_method = None):
'''
pairs: the score pairs for train or test
return:
index of x1 and x2
'''
x1_index = []
x2_index = []
for i_data in pairs:
if sampling_method == '500 times':
sampled_data = pd.DataFrame(i_data).sample(n=500,replace=True)
if sampling_method == None:
sampled_data = pd.DataFrame(i_data)
x1_index.append(sampled_data.iloc[:,0].values)
x2_index.append(sampled_data.iloc[:,1].values)
return x1_index, x2_index
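# Editor's sketch (hypothetical pairs, not part of the original script): each query's pairs
# become a DataFrame, and columns 0 and 1 hold the indices of the two documents in a pair.
_x1, _x2 = sample_index([[(0, 1, 0.5, 2), (1, 0, -0.5, 2)]])
assert list(_x1[0]) == [0, 1] and list(_x2[0]) == [1, 0]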
def get_pairs(scores,K,eps=0.2,seed=0):
"""
    Compute the ordered pairs whose first doc has a higher value than the second one.
    :param scores: given score list of documents for a particular query
    :param K: times of sampling
    :param eps: minimum absolute score difference for a pair to be kept
    :param seed: random seed used for sampling
    :return: ordered pairs as a list of tuples (i, j, score_diff, n_docs)
"""
pairs = []
random.seed(seed)
for i in range(len(scores)):
#for j in range(len(scores)):
# sampling K times
for _ in range(K):
idx = random.randint(0, len(scores) - 1)
score_diff = float(scores[i]) - float(scores[idx])
if abs(score_diff) > eps:
pairs.append((i, idx, score_diff, len(scores)))
return pairs
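# Editor's sketch (hypothetical scores, not part of the original script): every document is
# compared against K randomly drawn documents, and only pairs whose absolute score
# difference exceeds eps are kept as (i, j, score_diff, n_docs) tuples.
_demo_pairs = get_pairs([3.0, 1.0, 2.0], K=2, eps=0.2, seed=0)
assert all(abs(p[2]) > 0.2 and p[3] == 3 for p in _demo_pairs)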
def split_pairs(order_pairs, true_scores):
"""
    Split the pairs into two lists, named relevant_doc and irrelevant_doc.
    relevant_doc[i] is ranked above irrelevant_doc[i].
    :param order_pairs: ordered pairs of all queries
    :param true_scores: scores of docs for each query
    :return: relevant_doc, irrelevant_doc, score_diff and N_smiles
"""
relevant_doc = []
irrelevant_doc = []
score_diff = []
N_smiles = []
doc_idx_base = 0
query_num = len(order_pairs)
for i in range(query_num):
pair_num = len(order_pairs[i])
docs_num = len(true_scores[i])
for j in range(pair_num):
d1, d2, score, N = order_pairs[i][j]
d1 += doc_idx_base
d2 += doc_idx_base
relevant_doc.append(d1)
irrelevant_doc.append(d2)
score_diff.append(score)
N_smiles.append(N)
doc_idx_base += docs_num
return relevant_doc, irrelevant_doc, score_diff, N_smiles
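# Editor's sketch (hypothetical inputs, not part of the original script): document indices of
# later queries are offset by the number of documents in earlier queries, so doc 0 of the
# second query becomes global index 2.
_rel, _irr, _diff, _n = split_pairs([[(0, 1, 0.5, 2)], [(0, 1, 0.3, 3)]],
                                    [[0.9, 0.4], [0.8, 0.5, 0.1]])
assert _rel == [0, 2] and _irr == [1, 3]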
def filter_pairs(data,order_paris,threshold):
    # filter out the pairs whose score diff is less than the threshold
order_paris_filtered = []
for i_pairs in order_paris:
pairs1_score = data[
|
pd.DataFrame(i_pairs)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import yfinance as yf
import investpy
import numpy as np
df_main = pd.read_excel(r'RawData.xlsx')
df_main = df_main[:-2]  # remove the last 2 rows so that the data can update even when there are no new rows; this ensures the code runs when there is a transfer from GEM to the main board
### Gather data & clean from IPO
page="http://www.aastocks.com/en/stocks/market/ipo/listedipo.aspx?s=3&o=0&page=" + str (1)
dfs = pd.read_html(page)
df = dfs [16]
df = df [:-3]
df = df.iloc [:,1:]
name = df.columns [0]
df2 = df [name]
df2 = df2.map(lambda x: x.rstrip('Sink Below Listing Price'))
df_code = df2.map(lambda x: x[-7:])
df_name = df2.map(lambda x: x[:-8])
df [name] = df_code
df.insert(0, 'Name', df_name)
df = df.rename(columns = {name:'Code'})
df_IPO = df[~df['Code'].isin(df_main['Code'])]
### Gather sponsor data
page= 'http://www.aastocks.com/en/stocks/market/ipo/sponsor.aspx?s=1&o=0&s2=0&o2=0&f1=&f2=&page=' + str(1) + '#sponsor'
dfs = pd.read_html(page)
df = dfs [17]
df = df.iloc[:-2,0:7]
df ['Name▼ / Code▼'] = df_code
df.insert(0, 'Name', df_name)
df = df.rename(columns = {'Name▼ / Code▼':'Code'})
df_sponsor = df[df['Code'].isin(df_IPO['Code'])]
df_sponsor = df_sponsor.drop(columns = ['Name/Code', 'List Date', 'Acc. % Chg.▼', '% Chg. onDebut1▼', 'Name' ],axis = 1)
### merge newly gathered data
df_new = df_IPO.merge(df_sponsor, on = ['Code'], how = 'left')
df_new = df_new.rename( columns={'Industry':'AA Stocks Industry'})
### gather Chinese name data
page="http://www.aastocks.com/sc/stocks/market/ipo/listedipo.aspx?s=3&o=0&page=" + str (1)
dfs =
|
pd.read_html(page)
|
pandas.read_html
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas.compat as compat
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
UInt64Index, isna)
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
def test_pickle_compat_construction(self):
# need an object to create with
msg = (r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)")
with pytest.raises(TypeError, match=msg):
self._holder()
def test_to_series(self):
# assert that we are creating a copy of the index
idx = self.create_index()
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_to_series_with_arguments(self):
# GH18699
# index kwarg
idx = self.create_index()
s = idx.to_series(index=idx)
assert s.values is not idx.values
assert s.index is idx
assert s.name == idx.name
# name kwarg
idx = self.create_index()
s = idx.to_series(name='__test')
assert s.values is not idx.values
assert s.index is not idx
assert s.name != idx.name
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_to_frame_datetime_tz(self):
# GH 25809
idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D')
idx = idx.tz_localize('UTC')
result = idx.to_frame()
expected = pd.DataFrame(idx, index=idx)
tm.assert_frame_equal(result, expected)
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = "Not supported for type {}".format(type(idx).__name__)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = 'foo'
result = pd.Index(expected)
tm.assert_index_equal(result, expected)
result = pd.Index(expected, name='bar')
expected.name = 'bar'
tm.assert_index_equal(result, expected)
else:
expected.names = ['foo', 'bar']
result = pd.Index(expected)
tm.assert_index_equal(
result, Index(Index([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'),
names=['foo', 'bar']))
result = pd.Index(expected, names=['A', 'B'])
tm.assert_index_equal(
result,
Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
dtype='object'), names=['A', 'B']))
def test_numeric_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
1 * idx
div_err = "cannot perform __truediv__"
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = div_err.replace(' __', ' __r')
with pytest.raises(TypeError, match=div_err):
1 / idx
with pytest.raises(TypeError, match="cannot perform __floordiv__"):
idx // 1
with pytest.raises(TypeError, match="cannot perform __rfloordiv__"):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match='cannot perform all'):
idx.all()
with pytest.raises(TypeError, match='cannot perform any'):
idx.any()
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
with pytest.raises(ValueError, match='The truth value of a'):
if idx:
pass
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match='Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_get_indexer_consistency(self):
# See GH 16819
for name, index in self.indices.items():
if isinstance(index, IntervalIndex):
continue
if index.is_unique or isinstance(index, CategoricalIndex):
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert '...' not in str(idx)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
for name, index in compat.iteritems(self.indices):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
for name, index in compat.iteritems(self.indices):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_argsort(self):
for k, ind in self.indices.items():
# separately tested
if k in ['catIndex']:
continue
result = ind.argsort()
expected = np.array(ind).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
for k, ind in self.indices.items():
result = np.argsort(ind)
expected = ind.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(ind), (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, kind='mergesort')
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, order=('a', 'b'))
def test_take(self):
indexer = [4, 3, 0, 2]
for k, ind in self.indices.items():
# separate
if k in ['boolIndex', 'tuples', 'empty']:
continue
result = ind.take(indexer)
expected = ind[indexer]
assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
with pytest.raises(AttributeError):
ind.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode='clip')
def test_repeat(self):
rep = 2
i = self.create_index()
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize("method", ["intersection", "union",
"difference", "symmetric_difference"])
def test_set_ops_error_cases(self, case, method):
for name, idx in
|
compat.iteritems(self.indices)
|
pandas.compat.iteritems
|
import pandas as pd
import numpy as np
import datetime
import calendar
from math import e
from brightwind.analyse import plot as plt
# noinspection PyProtectedMember
from brightwind.analyse.analyse import dist_by_dir_sector, dist_12x24, coverage, _convert_df_to_series
from ipywidgets import FloatProgress
from IPython.display import display
from IPython.display import clear_output
import re
import warnings
pd.options.mode.chained_assignment = None
__all__ = ['Shear']
class Shear:
class TimeSeries:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', max_plot_height=None,
maximise_data=False):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for each timestamp
of a wind series.
:param wspds: pandas DataFrame, list of pandas.Series or list of wind speeds to be used for calculating shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:param maximise_data: If maximise_data is True, calculations will be carried out on all data where two or
more anemometers readings exist for a timestamp. If False, calculations will only be
carried out on timestamps where readings exist for all anemometers.
:type maximise_data: Boolean
:return TimeSeries object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype TimeSeries object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights, maximise_data=True)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law',
max_plot_height=120)
# Get the alpha or roughness values calculated
timeseries_power_law.alpha
timeseries_log_law.roughness
# View plot
timeseries_power_law.plot
timeseries_log_law.plot
# View input anemometer data
timeseries_power_law.wspds
timeseries_log_law.wspds
# View other information
pprint.pprint(timeseries_power_law.info)
pprint.pprint(timeseries_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed, maximise_data=maximise_data)
if calc_method == 'power_law':
alpha_c = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
alpha = pd.Series(alpha_c.iloc[:, 0], name='alpha')
self._alpha = alpha
elif calc_method == 'log_law':
slope_intercept = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_log_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness_coefficient = pd.Series(Shear._calc_roughness(slope=slope, intercept=intercept),
name='roughness_coefficient')
self._roughness = roughness_coefficient
clear_output()
avg_plot = Shear.Average(wspds=wspds, heights=heights, calc_method=calc_method,
max_plot_height=max_plot_height)
self.origin = 'TimeSeries'
self.calc_method = calc_method
self.wspds = wspds
self.plot = avg_plot.plot
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
""""
Applies shear calculated to a wind speed time series and scales wind speed from one height to
another for each matching timestamp.
:param self: TimeSeries object to use when applying shear to the data.
:type self: TimeSeries object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
            :param height: height at which the above wspds were measured.
:type height: float
:param shear_to: height to which wspds should be scaled to.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeseries_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeseries_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class TimeOfDay:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', by_month=True, segment_start_time=7,
segments_per_day=24, plot_type='line'):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by time of the day and (optionally by) month, depending on the user's inputs. The alpha/roughness
coefficient values are calculated based on the average wind speeds at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
            :param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param by_month: If True, calculate alpha or roughness coefficient values for each daily segment and month.
If False, average alpha or roughness coefficient values are calculated for each daily
segment across all months.
:type by_month: Boolean
:param segment_start_time: Starting time for first segment.
:type segment_start_time: int
            :param segments_per_day: Number of segments into which each 24 hour period is split. Must be a divisor of 24.
:type segments_per_day: int
:param plot_type: Type of plot to be generated. Options include 'line', 'step' and '12x24'.
:type plot_type: str
:return: TimeOfDay object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype: TimeOfDay object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
                timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights, segments_per_day=2, segment_start_time=7)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law', by_month=False)
# Get alpha or roughness values calculated
timeofday_power_law.alpha
timeofday_log_law.roughness
# View plot
timeofday_power_law.plot
timeofday_log_law.plot
# View input data
timeofday_power_law.wspds
timeofday_log_law.wspds
# View other information
pprint.pprint(timeofday_power_law.info)
pprint.pprint(timeofday_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
# initialise empty series for later use
start_times =
|
pd.Series([])
|
pandas.Series
|
import sys
import numpy as np
import pandas as pd
from napari_clusters_plotter._utilities import (
add_column_to_layer_tabular_data,
get_layer_tabular_data,
set_features,
)
sys.path.append("../")
def test_feature_setting(make_napari_viewer):
viewer = make_napari_viewer()
label = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 2, 2, 2],
[3, 3, 0, 0, 0, 0, 0],
[0, 0, 4, 4, 0, 5, 5],
[6, 6, 6, 6, 0, 5, 0],
[0, 7, 7, 0, 0, 0, 0],
]
)
label_layer = viewer.add_labels(label)
some_features =
|
pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
|
pandas.DataFrame
|
import json
import requests
from datetime import datetime
import talib
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_finance import candlestick_ohlc
from matplotlib.pylab import date2num
from matplotlib.dates import AutoDateLocator, DateFormatter
# Request timeout (seconds)
MARKET_TIMEOUT = 120
class PaperTrading():
"""模拟交易"""
def __init__(self, url: str = "", port: str = "", token: str = None, info: dict = None):
"""构造函数"""
if url and port:
self.home = ':'.join([url, port])
else:
raise ConnectionError("地址或者端口不能为空")
        # Connect to the simulated market
        result, msg = self.connect()
        if not result:
            raise ConnectionError(msg)
        if token:
            # Log in to the account
            status, account = self.login(token)
            if status:
                # Bind the account
                self.account_bind(account)
            else:
                raise ValueError("Account does not exist")
        else:
            # Create an account and log in
            status, account = self.creat(info)
            if status:
                # Bind the account
                self.account_bind(account)
            else:
                raise ValueError(account)
    @property
    def token(self):
        """Return the account token."""
        return self.__token
    @property
    def captial(self):
        """Return the account's starting capital."""
        return self.__capital
    def get_url(self, method_name:str):
        """Build the full request URL for a given endpoint."""
        return "/".join([self.home, method_name])
    def connect(self):
        """Check the connection to the paper trading service."""
        url = self.get_url("")
        r = requests.get(url, timeout=MARKET_TIMEOUT)
        if r.status_code == requests.codes.ok:
            return True, ""
        else:
            return False, "Failed to connect to the paper trading service"
    def url_request(func):
        """Decorator for request methods: unpacks the JSON response into (status, data)."""
        def wrapper(self, *args, **kwargs):
            connected, _ = self.connect()   # connect() returns a (bool, msg) tuple
            if not connected:
                return False, "Failed to connect to the paper trading service"
            r = func(self, *args, **kwargs)
            if r.status_code == requests.codes.ok:
                d = json.loads(r.text)
                if d["status"]:
                    return True, d["data"]
                else:
                    return False, d["data"]
            else:
                return False, "Unexpected HTTP response status"
        return wrapper
    def account_bind(self, account):
        """Bind account information to this client."""
if isinstance(account, dict):
self.__token = account['account_id']
self.__capital = account['capital']
self.__cost = account['cost']
self.__tax = account['tax']
self.__slippoint = account['slippoint']
else:
raise ValueError(account)
@url_request
def login(self, token: str):
"""
        Log in to an account.
        :return: (status, data); (bool, dict) on success, (bool, str) on error
"""
url = self.get_url("login")
data = {'token': token}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def creat(self, info: dict):
"""
        Create a paper trading account.
        :param info: account information; account parameters such as cost or sex can be customised here
        :return: (status, data); (bool, dict) on success, (bool, str) on error
        """
        url = self.get_url("creat")
        if not isinstance(info, dict):
            raise ValueError("Account info must be a dict")
info = json.dumps(info)
info.encode("utf-8")
data = {'info': info}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def delete(self):
"""
        Delete the paper trading account.
        :return: (status, data); (bool, str) in both the success and error cases
"""
url = self.get_url("delete")
data = {'token': self.__token}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def get_list(self):
"""
        Query the list of accounts.
        :return: (status, data); (bool, list) on success, (bool, str) on error
"""
url = self.get_url("list")
r = requests.get(url, timeout=MARKET_TIMEOUT)
return r
@url_request
def account(self):
"""
        Query account information.
        :return: (status, data); (bool, dict) on success, (bool, str) on error
"""
url = self.get_url("account")
data = {'token': self.__token}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def pos(self):
"""
        Query position information.
        :return: (status, data); (bool, list) on success, (bool, str) on error
"""
url = self.get_url("pos")
data = {'token': self.__token}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def orders(self):
"""
        Query the settlement (trade) records.
        :return: (status, data); (bool, list) on success, (bool, str) on error
"""
url = self.get_url("orders")
data = {'token': self.__token}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def orders_today(self):
"""
        Query today's settlement (trade) records.
        :return: (status, data); (bool, list) on success, (bool, str) on error
"""
url = self.get_url("orders_today")
data = {'token': self.__token}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def order_send(self, order):
"""
        Submit an order.
        :param order: order data as a dict
        :return: (status, data); (bool, str) in both the success and error cases
"""
if isinstance(order, dict):
order = json.dumps(order)
order.encode("utf-8")
url = self.get_url("send")
data = {"order": order}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def order_cancel(self, order_id):
"""
        Cancel an order.
        :param order_id: order ID
        :return: (status, data); (bool, str) in both the success and error cases
"""
url = self.get_url("cancel")
data = {'token': self.__token, "order_id": order_id}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def order_status(self, order_id):
"""
        Query the status of an order.
        :param order_id: order ID
        :return: (status, data); (bool, str) in both the success and error cases
"""
url = self.get_url("status")
data = {'token': self.__token, "order_id": order_id}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def liquidation(self, check_date: str, price_dict: dict):
"""
        Run liquidation for a given date.
        :param check_date: liquidation date
        :param price_dict: prices used to value open positions at liquidation
        :return: (status, data); (bool, str) in both the success and error cases
"""
price_dict_data = json.dumps(price_dict)
url = self.get_url("liquidation")
data = {'token': self.__token, 'check_date': check_date, "price_dict": price_dict_data.encode("utf-8")}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def data_persistance(self):
"""
        Persist the account data on the server.
:return:
"""
url = self.get_url("persistance")
data = {'token': self.__token}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def replenish_captial(self):
"""补充资本"""
pass
@url_request
def return_captial(self):
"""归还资本"""
pass
@url_request
def account_record(self, start: str, end: str):
"""
        Query the account's day-by-day records.
        :param start: start date of the data
        :param end: end date of the data
        :return: (status, data); (bool, list) on success, (bool, str) on error
"""
url = self.get_url("account_record")
data = {'token': self.__token, 'start': start, 'end': end}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
@url_request
def pos_record(self, start: str, end: str):
"""
        Query the day-by-day position records.
        :param start: start date of the data
        :param end: end date of the data
        :return: (status, data); (bool, list) on success, (bool, str) on error
"""
url = self.get_url("pos_record")
data = {'token': self.__token, 'start': start, 'end': end}
r = requests.post(url, data, timeout=MARKET_TIMEOUT)
return r
def get_assets_record(self, start, end, save_data=False):
"""
        Fetch the day-by-day asset records.
        :param start:
        :param end:
        :param save_data:
        :return: a pandas DataFrame of asset records
"""
status, assets_record = self.account_record(start, end)
if status:
if isinstance(assets_record, list):
assets_df = pd.DataFrame(assets_record)
                # compute net_pnl relative to the starting capital
assets_df['net_pnl'] = assets_df['assets'] - self.__capital
assets_df = assets_df[['check_date', 'assets', 'available', 'market_value', 'net_pnl', 'account_id']]
if save_data:
self.downloader(assets_df, start, end, "account.xls")
return assets_df
else:
raise ValueError(assets_record)
else:
raise ValueError(assets_record)
def get_pos_record(self, start, end, save_data=False):
"""
        Fetch the day-by-day position records.
:param start:
:param end:
:param save_data:
:return:
"""
status, pos_record = self.pos_record(start, end)
if status:
if isinstance(pos_record, list):
pos_df = pd.DataFrame(pos_record)
pos_df = pos_df[['pt_symbol', 'max_vol', 'first_buy_date','last_sell_date', 'buy_price_mean', 'sell_price_mean', 'profit', 'is_clear', 'account_id']]
if save_data:
self.downloader(pos_df, start, end, "pos.xls")
return pos_df
else:
raise ValueError(pos_record)
else:
raise ValueError(pos_record)
def get_trade_record(self, start, end, save_data=False):
"""
        Fetch the trade records.
:param start:
:param end:
:param save_data:
:return:
"""
status, trade_record = self.orders()
if status:
if isinstance(trade_record, list):
trade_df = pd.DataFrame(trade_record)
trade_df = trade_df[trade_df['status'] == "全部成交"]
trade_df['commission'] = 0.
                # compute commission for each trade
for i, row in trade_df.iterrows():
commission = 0.
if row['order_type'] == "buy":
commission = row['traded'] * row['trade_price'] * self.__cost
elif row['order_type'] == "sell":
commission = row['traded'] * row['trade_price'] * (self.__cost + self.__tax)
else:
pass
trade_df.loc[i, 'commission'] = commission
trade_df = trade_df[['order_date', 'order_time', 'pt_symbol', 'order_type', 'price_type', 'order_price', 'trade_price', 'volume', 'traded', 'status', 'commission', 'status', 'trade_type','account_id', 'error_msg']]
if save_data:
self.downloader(trade_df, start, end, "orders.xls")
return trade_df
else:
raise ValueError(trade_record)
else:
raise ValueError(trade_record)
def data_statistics(self, assets_df, pos_df, trade_df, save_data=False):
"""交易结果分析"""
# 初始资金
start_date = assets_df.iloc[0]['check_date']
end_date = assets_df.iloc[-1]['check_date']
total_days = len(assets_df)
profit_days = len(assets_df[assets_df["net_pnl"] > 0])
loss_days = len(assets_df[assets_df["net_pnl"] < 0])
end_balance = float(assets_df.iloc[-1].assets)
max_drawdown = self.max_drapdown_cal(assets_df)
max_ddpercent = round((max_drawdown / assets_df['assets'].max()) * 100, 2)
total_net_pnl = round((end_balance - self.__capital), 2)
total_commission = float(trade_df['commission'].sum())
total_slippage = 0
total_turnover = float(trade_df['volume'].sum())
total_trade_count = len(trade_df)
win_num = len(pos_df[pos_df.profit > 0])
loss_num = len(pos_df[pos_df.profit <= 0])
win_rate = round((win_num / (win_num + loss_num) * 100), 2)
total_return = round(((end_balance / self.__capital - 1) * 100), 2)
annual_return = round((total_return / total_days * 240), 2)
return_mean = pos_df['profit'].mean()
return_std = pos_df['profit'].std()
if return_std:
sharpe_ratio = float(return_mean / return_std * np.sqrt(240))
else:
sharpe_ratio = 0
statistics = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"captial": self.__capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"total_net_pnl": total_net_pnl,
"total_commission": total_commission,
"total_slippage": total_slippage,
"total_turnover": total_turnover,
"total_trade_count": total_trade_count,
"win_num": win_num,
"loss_num": loss_num,
"win_rate": win_rate,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": return_mean,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
}
if save_data:
self.downloader(statistics, start_date, end_date, "report.xls")
return statistics
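    # Editor's note (hypothetical numbers, not part of the original class): with a starting
    # capital of 1,000,000, an end balance of 1,100,000 and 120 trading days, the formulas in
    # data_statistics give total_return = (1.1 - 1) * 100 = 10.0% and
    # annual_return = 10.0 / 120 * 240 = 20.0%; max_ddpercent expresses the largest drawdown
    # as a percentage of the peak asset value.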
def get_report(self, start: str, end: str):
"""获取交易报告"""
trade_df = self.get_trade_record(start, end)
if not len(trade_df):
print("成交记录为空,无法计算")
return {}
# 展示账户曲线
assets_df = self.get_assets_record(start, end)
# 展示持仓记录
pos_df = self.get_pos_record(start, end)
# 计算分析结果
statistics_result = self.data_statistics(assets_df, pos_df, trade_df)
return statistics_result
def show_report(self, start: str, end: str, save_data=False):
"""显示分析报告"""
trade_df = self.get_trade_record(start, end, save_data)
if not len(trade_df):
return False, "成交记录为空,无法计算"
# 展示账户曲线
assets_df = self.get_assets_record(start, end, save_data)
self.show_account_line(assets_df)
# 展示持仓记录
pos_df = self.get_pos_record(start, end, save_data)
# 计算分析结果
statistics_result = self.data_statistics(assets_df, pos_df, trade_df)
# 展示分析结果
self.show_statistics(statistics_result)
def show_statistics(self, report_dict: dict):
"""显示报告"""
self.output("-" * 30)
self.output(f"首个交易日:\t{report_dict['start_date']}")
self.output(f"最后交易日:\t{report_dict['end_date']}")
self.output(f"总交易日:\t{report_dict['total_days']}")
self.output(f"盈利交易日:\t{report_dict['profit_days']}")
self.output(f"亏损交易日:\t{report_dict['loss_days']}")
self.output(f"起始资金:\t{report_dict['captial']:,.2f}")
self.output(f"结束资金:\t{report_dict['end_balance']:,.2f}")
self.output(f"总收益率:\t{report_dict['total_return']:,.2f}%")
self.output(f"年化收益:\t{report_dict['annual_return']:,.2f}%")
self.output(f"最大回撤:\t{report_dict['max_drawdown']:,.2f}")
self.output(f"百分比最大回撤:\t{report_dict['max_ddpercent']:,.2f}%")
self.output(f"总盈亏:\t{report_dict['total_net_pnl']:,.2f}")
self.output(f"总手续费:\t{report_dict['total_commission']:,.2f}")
self.output(f"总滑点:\t{report_dict['total_slippage']:,.2f}")
self.output(f"总成交金额:\t{report_dict['total_turnover']:,.2f}")
self.output(f"总成交笔数:\t{report_dict['total_trade_count']}")
self.output(f"盈利个股数量:\t{report_dict['win_num']:,.2f}")
self.output(f"亏损个股数量:\t{report_dict['loss_num']:,.2f}")
self.output(f"胜率:\t{report_dict['win_rate']:,.2f}%")
self.output(f"平均收益:\t{report_dict['daily_return']:,.2f}")
self.output(f"收益标准差:\t{report_dict['return_std']:,.2f}%")
self.output(f"Sharpe Ratio:\t{report_dict['sharpe_ratio']:,.2f}")
def show_account_line(self, assets_df):
"""显示资产曲线"""
assets_df.sort_values(by='check_date', ascending=True, inplace=True)
assets_df.index = assets_df['check_date']
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.figure(figsize=(16, 5))
plt.title("总资产曲线")
plt.xlabel("交易日期")
plt.ylabel("资产")
plt.plot(assets_df['assets'])
plt.show()
    # show the position records
    def show_pos_record(self, pos_df):
        """Display the position records."""
        pos_df.sort_values(by=['first_buy_date'], ascending=True, inplace=True)
        pos_df = pos_df[['pt_symbol', 'first_buy_date', 'last_sell_date', 'max_vol', 'buy_price_mean', 'sell_price_mean', 'profit']]
        pos_df.columns = ['Symbol', 'First buy date', 'Last sell date', 'Total bought', 'Avg buy price', 'Avg sell price', 'P&L']
print(pos_df)
def show_orders_record(self, order_df):
"""显示订单记录"""
order_df.sort_values(by=['order_id'], ascending=True, inplace=True)
order_df = order_df[
['order_date', 'order_time', 'order_type', 'order_price', 'trade_price', 'volume']]
order_df.columns = ['日期', '时间', '类型', '委托价格', '成交价格', '成交数量']
print(order_df)
def show_order_kline(self, kline_data, order_df):
"""显示K线并标记买入卖出点"""
# TODO 暂时不能通用只能识别pytdx 的get_k_data函数功能
# 设置mpl样式
mpl.style.use('ggplot')
#转换kline_data index类型
kline_data.date = pd.to_datetime(kline_data.date)
kline_data.index = kline_data.date
        # load the indicators used by the strategy; at most three moving averages are supported
kline_data['ma3'] = talib.SMA(kline_data.close, 3)
kline_data['ma5'] = talib.SMA(kline_data.close, 5)
kline_data['ma14'] = talib.SMA(kline_data.close, 14)
        # draw the first figure
fig = plt.figure()
fig.set_size_inches((16, 16))
ax_canddle = fig.add_axes((0, 0.7, 1, 0.3))
ax_vol = fig.add_axes((0, 0.45, 1, 0.2))
data_list = list()
for date, row in kline_data[['open', 'high', 'low', 'close']].iterrows():
t = date2num(date)
open ,high, low, close = row[:]
d = (t, open, high, low, close)
data_list.append(d)
        # draw the candlesticks
candlestick_ohlc(ax_canddle, data_list, colorup='r', colordown='green', alpha=0.7, width=0.8)
        # set the x axis to dates
ax_canddle.xaxis_date()
ax_canddle.plot(kline_data.index, kline_data['ma3'], label="ma3")
ax_canddle.plot(kline_data.index, kline_data['ma5'], label="ma5")
ax_canddle.plot(kline_data.index, kline_data['ma14'], label="ma14")
ax_canddle.legend()
        # draw the volume bars
ax_vol.bar(kline_data.index, kline_data.volume/1000000)
ax_vol.set_ylabel("millon")
ax_vol.set_xlabel("date")
        # mark the order points
order_df.order_date = pd.to_datetime(order_df.order_date)
for i, row in order_df.iterrows():
if row['status'] == "全部成交":
order_date = row['order_date']
if row['order_type'] == "buy":
ax_canddle.annotate("B",
xy=(order_date, kline_data.loc[order_date].low),
xytext=(order_date, kline_data.loc[order_date].low - 1),
arrowprops=dict(facecolor="r",
alpha=0.3,
headlength=10,
width=10))
else:
ax_canddle.annotate("S",
xy=(order_date, kline_data.loc[order_date].high),
xytext=(order_date, kline_data.loc[order_date].high + 1),
arrowprops=dict(facecolor="g",
alpha=0.3,
headlength=10,
width=10))
def show_pos(self, pos_list: list):
"""显示持仓情况"""
if pos_list:
pos_df = pd.DataFrame(pos_list)
pos_df.sort_values(by=['profit'], ascending=False, inplace=True)
pos_df = pos_df[
['pt_symbol', 'buy_date', 'volume', 'available', 'buy_price', 'now_price',
'profit']]
            pos_df.columns = ['Symbol', 'Buy date', 'Total volume', 'Available volume', 'Avg buy price', 'Current price', 'P&L']
print(pos_df)
else:
print("无持仓")
def downloader(self, data, start_date, end_date, file_name):
"""测试结果下载"""
# 获取地址
file_name = "_".join([start_date, end_date, file_name])
file_path = self.get_folder_path(file_name)
if isinstance(data, dict):
data_list = list()
data_list.append(data)
df = pd.DataFrame(data_list)
else:
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# Natural Language Processing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset_train = pd.read_csv('train_data.txt', delimiter = '\t', quoting = 1)
X_test = pd.read_csv('test_data.txt', delimiter = '\t', quoting = 1).iloc[:, [0, 1, 2, 3, 4]]
X_train = dataset_train.iloc[:, [2,3,4,5]]
y_train = dataset_train.iloc[:, [1]].values
df_train = pd.DataFrame(X_train)
df_test = pd.DataFrame(X_test)
df_y =
|
pd.DataFrame(y_train)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from sklearn.externals import joblib
class predictor():
def __init__(self, model_file = 'LogModel.pkl'):
self.model = joblib.load(model_file)
self.graph = None
self.X = None
def prob(self, X):
"""
Predicts the probability of feature vectors being of each class
:param X: a 2-D numpy array containing the feature vectors
"""
return self.model.predict_proba(X)
def predict(self, X):
"""
Predicts the classes for a numpy array of feature vectors
:param X: a 2-D numpy array containing the feature vectors
"""
return self.model.predict(X)
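    # Editor's note (assumption, not stated in the original class): LogModel.pkl is expected to
    # be a fitted scikit-learn estimator, so predict() returns class labels and prob() returns
    # one probability column per class, ordered as in model.classes_.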
def import_file(self, file, graph_file = 'rel_max.emb.gz', map_file = 'map.csv'):
"""
        Imports all necessary files needed to take curie ids and extract their feature vectors.
:param file: A string containing the filename or path of a csv containing the source and target curie ids to make predictions on (If set to None will just import the graph and map files)
:param graph_file: A string containing the filename or path of the emb file containing the feature vectors for each node
:param map_file: A string containing the filename or path of the csv mapping the curie ids to the integer ids used in emb generation
"""
graph = pd.read_csv(graph_file, sep = ' ', skiprows=1, header = None, index_col=None)
self.graph = graph.sort_values(0).reset_index(drop=True)
self.map_df = pd.read_csv(map_file, index_col=None)
if file is not None:
data =
|
pd.read_csv(file, index_col=None)
|
pandas.read_csv
|
"""Construct the clean data set"""
import pandas as pd
from pathlib import PurePath
import numpy as np
import datetime as dt
from pandas.tseries.holiday import USFederalHolidayCalendar
from scipy.interpolate import interp1d
from sklearn.svm import SVR
#========================================================================#
# interpolation functions #
#========================================================================#
def det_interp(x, kind='linear'):
""" A helper function for deterministic time seres interpolation
Args
----
x -- a dummy variable for the dataframe's columns
kwargs
------
kind -- one of scipy.interpolate.inter1d kwargs
return
------
interpolated values of the whole time series
"""
index = pd.Series(np.arange(x.shape[0]), index=x.index)
notnull = pd.notnull(x)
t = index[notnull]
y = x[notnull]
f = interp1d(t.values, y.values, kind=kind)
return pd.Series(f(index), index=x.index, name=x.name)
def ml_interp(x, model, **model_kwargs):
""" A helper function for ML time seres interpolation
Args
----
x -- a dummy variable for the dataframe's columns
model -- a scikit learn model class
model_kwargs -- tuple of kwargs to pass to the model constructor
return
------
interpolated values of the whole time series
"""
index = pd.Series(np.arange(x.shape[0]), index=x.index)
notnull = pd.notnull(x)
t = index[notnull].values.reshape(-1, 1)
y = x[notnull]
regr = model(**model_kwargs)
regr.fit(t, y)
yhat = regr.predict(index.values.reshape(-1, 1))
return pd.Series(yhat, index=x.index, name=x.name)
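# Editor's sketch (hypothetical series, not part of the original module): both helpers fill
# gaps by interpolating over integer positions; det_interp recovers the missing middle
# value of this three-point series exactly.
_s = pd.Series([1.0, np.nan, 3.0], index=pd.date_range('2020-01-01', periods=3, freq='D'))
assert det_interp(_s).iloc[1] == 2.0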
def main():
#=====================================================================#
# Data import #
#=====================================================================#
root = PurePath()
raw_data = root / 'raw_data'
sentiment = root / 'sentiment_analysis'
# files
economics_file = 'economics.csv'
yields_file = 'FED-SVENY.csv'
sentiment_file = root / 'daily_sentiment_score.csv'
# import data
economics = pd.read_csv(raw_data / economics_file)
yields = pd.read_csv(raw_data / yields_file)
sent = pd.read_csv(sentiment / sentiment_file)
#=====================================================================#
# clean data #
#=====================================================================#
economics.index = pd.to_datetime(economics['sasdate'],
format="%m/%d/%Y")
economics = economics.iloc[:,1:] # drop date column
# nan strategy is to drop as of now
economics = economics[~(economics.apply(np.isnan)).apply(any, axis=1)]
economics = economics.iloc[:-9,:] # done by inspection
yields.index = pd.to_datetime(yields['Date'], format="%Y-%m-%d")
yield_col = ['SVENY10']
yields = yields[yield_col]
sent.index = pd.to_datetime(sent['date'], format="%Y-%m-%d")
sent = sent.drop('date', axis=1)
#=====================================================================#
# Join data by date #
#=====================================================================#
# Right now we will move econ dates forward to next trading date
bday_us = pd.offsets.CustomBusinessDay(
calendar=USFederalHolidayCalendar())
economics.index = economics.index + bday_us
# 04/01/1999 gets moved to 04/02/1999 when it shouldn't be; put it back
as_list = economics.index.tolist()
idx = as_list.index(dt.datetime(1999, 4, 2))
as_list[idx] = dt.datetime(1999, 4, 1)
economics.index = as_list
full =
|
pd.concat([economics, yields], axis=1, join="outer")
|
pandas.concat
|
import ifcopenshell as ifc
import plotly.express as px
import pandas as pd
def get_attr_of_pset(_id, ifc_file):
""" Get all attributes of an instance by given Id
param _id: id of instance
return: dict of attributes (property name -> value)
"""
dict_psets = {}
try:
defined_by_type = [x.RelatingType for x in ifc_file[_id].IsDefinedBy if x.is_a("IfcRelDefinesByType")]
defined_by_properties = [x.RelatingPropertyDefinition for x in ifc_file[_id].IsDefinedBy if
x.is_a("IfcRelDefinesByProperties")]
except:
dict_psets.update({ifc_file[_id].GlobalId: "No Attributes found"})
else:
for x in defined_by_type:
if x.HasPropertySets:
for y in x.HasPropertySets:
for z in y.HasProperties:
dict_psets.update({z.Name: z.NominalValue.wrappedValue})
for x in defined_by_properties:
if x.is_a("IfcPropertySet"):
for y in x.HasProperties:
if y.is_a("IfcPropertySingleValue"):
dict_psets.update({y.Name: y.NominalValue.wrappedValue})
# this could be useful for multilayered walls in Allplan
if y.is_a("IfcComplexProperty"):
for z in y.HasProperties:
dict_psets.update({z.Name: z.NominalValue.wrappedValue})
if x.is_a("IfcElementQuantity"):
for y in x.Quantities:
dict_psets.update({y[0]: y[3]})
finally:
dict_psets.update({"IfcGlobalId": ifc_file[_id].GlobalId})
return dict_psets
def get_structural_storey(_id, ifc_file):
""" Get structural (IfcBuilgingStorey) information of an instance by given Id
param _id: id of instance
return: dict of attributes
"""
dict_structural = {}
instance = ifc_file[_id]
try:
structure = instance.ContainedInStructure
storey = structure[0].RelatingStructure.Name
except:
dict_structural.update({"Storey": "No Information found"})
else:
dict_structural.update({"Storey": storey})
finally:
return dict_structural
def movecol(df, cols_to_move=[], ref_col='', place='After'):
cols = df.columns.tolist()
if place == 'After':
seg1 = cols[:list(cols).index(ref_col) + 1]
seg2 = cols_to_move
if place == 'Before':
seg1 = cols[:list(cols).index(ref_col)]
seg2 = cols_to_move + [ref_col]
seg1 = [i for i in seg1 if i not in seg2]
seg3 = [i for i in cols if i not in seg1 + seg2]
return (df[seg1 + seg2 + seg3])
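# Illustrative usage sketch (not part of the original module; the toy frame is made up).
# movecol only reorders columns; this mirrors how parser() below pulls 'IfcType' and
# 'Storey' toward the front of the exported table.
def _demo_movecol():
    demo = pd.DataFrame({'Name': ['Wall-01'], 'Area': [12.5],
                         'IfcType': ['IfcWall'], 'Storey': ['EG']})
    # -> columns ordered as ['Name', 'IfcType', 'Storey', 'Area']
    return movecol(demo, cols_to_move=['IfcType', 'Storey'], ref_col='Name', place='After')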
def parser(contents):
ifc_file = ifc.open(contents)
rooms = ifc_file.by_type("IfcSpace")
instances = ifc_file.by_type("IfcBuildingElement")
project = ifc_file.by_type("IfcProject")[0].Name
for room in rooms:
instances.append(room)
excel_list = []
for inst in instances:
info_pset = get_attr_of_pset(inst.id(), ifc_file=ifc_file)
info = inst.get_info()
for x in inst.IsDefinedBy:
if x.is_a("IfcRelDefinesByType") == True:
info_pset.update({"Type_Name": x.get_info()["RelatingType"].Name})
else:
pass
info_pset.update({"Name": inst.Name})
info_pset.update({"IfcType": info["type"]})
info_pset.update({"Project": project})
if inst.is_a("IfcSpace") == True:
info_structural = inst.Decomposes[0].RelatingObject.Name
info_pset.update({"Storey": info_structural})
else:
info_structural = get_structural_storey(inst.id(), ifc_file=ifc_file)
info_pset.update(info_structural)
excel_list.append(info_pset)
df1 = pd.DataFrame(excel_list)
df2 = movecol(df1,
cols_to_move=['IfcType', 'Storey'],
ref_col=df1.columns[0],
place='Before')
return df2
def all_divide(df2, path):
worterbuch = {}
for item in df2.IfcType.unique():
DF = df2[df2['IfcType'].str.contains(item, na=False)]
DF = DF.dropna(axis='columns', how='all')
worterbuch[item] = DF
with pd.ExcelWriter(path) as writer:
for i in worterbuch.keys():
worterbuch[i].to_excel(writer, sheet_name=i)
def unique(df, path):
names = []
data = []
for column in df.columns:
name = column
value = list(df[name].unique())
names.append(name)
data.append(value)
df2 = pd.DataFrame(data, names)
df2 = df2.transpose()
df2.to_excel(path)
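# Illustrative usage sketch (not part of the original module; 'unique_demo.xlsx' is a made-up path):
#   df = pd.DataFrame({'IfcType': ['IfcWall', 'IfcWall', 'IfcSlab'], 'Storey': ['EG', 'OG', 'EG']})
#   unique(df, 'unique_demo.xlsx')   # writes one column per attribute listing its distinct values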
def unique_csv(df, path):
names = []
data = []
for column in df.columns:
name = column
value = list(df[name].unique())
names.append(name)
data.append(value)
df2 = pd.DataFrame(data, names)
df2 = df2.transpose()
df2.to_csv(path, encoding='utf-8')
def unique_divide(df, path):
worterbuch = {}
dfs = dict(tuple(df.groupby('IfcType')))
for key in dfs.keys():
df = dfs[key]
names = []
data = []
for column in df.columns:
name = column
value = list(df[name].unique())
names.append(name)
data.append(value)
df2 =
|
pd.DataFrame(data, names)
|
pandas.DataFrame
|
import copy
import numpy as np
import pandas as pd
import time
import datetime
from itertools import product
from copy import deepcopy
import os
import sys
import inspect
from collections import namedtuple, defaultdict
from tabulate import tabulate
from pprint import pprint, pformat
import traceback
import argparse
import clify
import dps
from dps import cfg
from dps.config import DEFAULT_CONFIG
from dps.utils import gen_seed, Config, ExperimentStore, edit_text, NumpySeed
from dps.train import training_loop
from dps.parallel import Job, ReadOnlyJob
from dps.train import FrozenTrainingLoopData
from dps.hyper.parallel_session import submit_job, ParallelSession
class HyperSearch(object):
""" Interface to a directory storing a hyper-parameter search.
Approximately a `frozen`, read-only handle for a directory created by ParallelSession.
"""
def __init__(self, path):
self.path = path
job_path = os.path.join(path, 'results.zip')
if not os.path.exists(job_path):
job_path = os.path.join(path, 'orig.zip')
assert os.path.exists(job_path)
self.job = ReadOnlyJob(job_path)
@property
def objects(self):
return self.job.objects
def dist_keys(self):
""" The keys that were searched over. """
distributions = self.objects.load_object('metadata', 'distributions')
if isinstance(distributions, list):
keys = set()
for d in distributions:
keys |= set(d.keys())
keys = list(keys)
else:
distributions = Config(distributions)
keys = list(distributions.keys())
keys.append('idx')
return sorted(set(keys))
def dist(self):
return self.objects.load_object('metadata', 'distributions')
def sampled_configs(self):
pass
@property
def experiment_paths(self):
experiments_dir = os.path.join(self.path, 'experiments')
exp_dirs = os.listdir(experiments_dir)
return [os.path.join(experiments_dir, ed) for ed in exp_dirs]
def extract_stage_data(self, fields=None, bare=False):
""" Extract stage-by-stage data about the training runs.
Parameters
----------
bare: boolean
If True, only returns the data. Otherwise, additionally returns the stage-by-stage config and meta-data.
Returns
-------
A nested data structure containing the requested data.
{param-setting-key: {(repeat, seed): (pd.DataFrame(), [dict()], dict())}}
"""
stage_data = defaultdict(dict)
if isinstance(fields, str):
fields = fields.split()
config_keys = self.dist_keys()
KeyTuple = namedtuple(self.__class__.__name__ + "Key", config_keys)
for exp_path in self.experiment_paths:
try:
exp_data = FrozenTrainingLoopData(exp_path)
md = {}
md['host'] = exp_data.host
for k in config_keys:
md[k] = exp_data.get_config_value(k)
sc = []
records = []
for stage in exp_data.history:
record = stage.copy()
if 'best_path' in record:
del record['best_path']
if 'final_path' in record:
del record['final_path']
sc.append(record['stage_config'])
del record['stage_config']
# Fix and filter keys
_record = {}
for k, v in record.items():
if k.startswith("best_"):
k = k[5:]
if (fields and k in fields) or not fields:
_record[k] = v
records.append(_record)
key = KeyTuple(*(exp_data.get_config_value(k) for k in config_keys))
repeat = exp_data.get_config_value("repeat")
seed = exp_data.get_config_value("seed")
if bare:
stage_data[key][(repeat, seed)] = pd.DataFrame.from_records(records)
else:
stage_data[key][(repeat, seed)] = (pd.DataFrame.from_records(records), sc, md)
except Exception:
print("Exception raised while extracting stage data for path: {}".format(exp_path))
traceback.print_exc()
return stage_data
def extract_step_data(self, mode, fields=None, stage=None):
""" Extract per-step data across all experiments.
Parameters
----------
mode: str
Data-collection mode to extract data from.
fields: str
Names of fields to extract data for. If not supplied, data for all
fields is returned.
stage: int or slice or tuple
Specification of the stages to collect data for. If not supplied, data
from all stages is returned.
Returns
-------
A nested data structure containing the requested data.
{param-setting-key: {(repeat, seed): pd.DataFrame()}}
"""
step_data = defaultdict(dict)
if isinstance(fields, str):
fields = fields.split()
config_keys = self.dist_keys()
KeyTuple = namedtuple(self.__class__.__name__ + "Key", config_keys)
for exp_path in self.experiment_paths:
exp_data = FrozenTrainingLoopData(exp_path)
_step_data = exp_data.step_data(mode, stage)
if fields:
try:
_step_data = _step_data[fields]
except KeyError:
print("Valid keys are: {}".format(_step_data.keys()))
raise
key = KeyTuple(*(exp_data.get_config_value(k) for k in config_keys))
repeat = exp_data.get_config_value("repeat")
seed = exp_data.get_config_value("seed")
step_data[key][(repeat, seed)] = _step_data
return step_data
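# Illustrative usage sketch (not part of the original class; the path below is made up):
#   search = HyperSearch('/experiments/my_search')
#   stage = search.extract_stage_data(fields='loss accuracy')
#   step = search.extract_step_data('val', fields='loss', stage=-1)
#   # both return {param-setting-key: {(repeat, seed): DataFrame, ...}, ...} as documented above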
def print_summary(self, print_config=True, verbose=False, criteria=None, maximize=False):
""" Get all completed ops, get their outputs. Summarize em. """
print("Summarizing search stored at {}.".format(os.path.realpath(self.path)))
criteria_key = criteria if criteria else "stopping_criteria"
if not criteria:
config = self.objects.load_object('metadata', 'config')
criteria_key, max_str = config['stopping_criteria'].split(',')
maximize = max_str == "max"
keys = self.dist_keys()
stage_data = self.extract_stage_data()
best = []
all_keys = set()
# For each parameter setting, identify the stage where it got the lowest/highest value for `criteria_key`.
for i, (key, value) in enumerate(sorted(stage_data.items())):
_best = []
for (repeat, seed), (df, sc, md) in value.items():
try:
idx = df[criteria_key].idxmax() if maximize else df[criteria_key].idxmin()
except KeyError:
idx = -1
record = dict(df.iloc[idx])
if criteria_key not in record:
record[criteria_key] = -np.inf if maximize else np.inf
for k in keys:
record[k] = md[k]
all_keys |= record.keys()
_best.append(record)
_best = pd.DataFrame.from_records(_best)
_best = _best.sort_values(criteria_key)
sc = _best[criteria_key].mean()
best.append((sc, _best))
best = sorted(best, reverse=not maximize, key=lambda x: x[0])
best = [df for _, df in best]
_column_order = [criteria_key, 'seed', 'reason', 'n_steps', 'host']
column_order = [c for c in _column_order if c in all_keys]
remaining = [k for k in all_keys if k not in column_order and k not in keys]
column_order = column_order + sorted(remaining)
with
|
pd.option_context('display.max_rows', None, 'display.max_columns', None)
|
pandas.option_context
|
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform
from hydroDL import kPath, utils
import os
import time
import pandas as pd
import numpy as np
import json
"""
functions for reading raw data and writing it to the caseFolder
"""
__all__ = ['wrapData', 'readSiteTS', 'extractVarMtd', 'label2var']
varTLst = ['datenum', 'sinT', 'cosT']
def caseFolder(caseName):
saveFolder = os.path.join(kPath.dirWQ, 'trainDataFull', caseName)
return saveFolder
def wrapData(caseName, siteNoLst, nFill=5, freq='D',
sdStr='1979-01-01', edStr='2019-12-31',
varF=gridMET.varLst+ntn.varLst+GLASS.varLst,
varQ=usgs.varQ, varG=gageII.varLst, varC=usgs.newC):
# gageII
tabG = gageII.readData(varLst=varG, siteNoLst=siteNoLst)
tabG = gageII.updateCode(tabG)
tR = pd.date_range(np.datetime64(sdStr), np.datetime64(edStr))
fLst, qLst, gLst, cLst = [list() for x in range(4)]
t0 = time.time()
for i, siteNo in enumerate(siteNoLst):
t1 = time.time()
varLst = varQ+varF+varC
df = readSiteTS(siteNo, varLst=varLst, freq=freq)
# streamflow
tempQ = pd.DataFrame({'date': tR}).set_index('date').join(df[varQ])
qLst.append(tempQ.values)
# forcings
tempF = pd.DataFrame({'date': tR}).set_index('date').join(df[varF])
tempF = tempF.interpolate(
limit=nFill, limit_direction='both', limit_area='inside')
fLst.append(tempF.values)
# # water quality
tempC = pd.DataFrame({'date': tR}).set_index('date').join(df[varC])
cLst.append(tempC.values)
# geog
gLst.append(tabG.loc[siteNo].values)
t2 = time.time()
print('{} on site {} reading {:.3f} total {:.3f}'.format(
i, siteNo, t2-t1, t2-t0))
f = np.stack(fLst, axis=-1).swapaxes(1, 2).astype(np.float32)
q = np.stack(qLst, axis=-1).swapaxes(1, 2).astype(np.float32)
c = np.stack(cLst, axis=-1).swapaxes(1, 2).astype(np.float32)
g = np.stack(gLst, axis=-1).swapaxes(0, 1).astype(np.float32)
# save
saveDataFrame(caseName, c=c, q=q, f=f, g=g, varC=varC, varQ=varQ,
varF=varF, varG=varG, sdStr=sdStr, edStr=edStr,
freq=freq, siteNoLst=siteNoLst)
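# Illustrative usage sketch (not part of the original module; the case name and site ids are made up):
#   wrapData('exampleCase', ['01013500', '01030500'], nFill=5, freq='D')
#   # stacks forcing/flow/quality/geo arrays per site and hands them to saveDataFrame(),
#   # which writes data.npz and info.json under trainDataFull/exampleCase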
def saveDataFrame(caseName, *, c, q, f, g, varC, varQ, varF, varG,
sdStr, edStr, freq, siteNoLst):
# save
saveFolder = caseFolder(caseName)
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
np.savez_compressed(os.path.join(saveFolder, 'data'), c=c, q=q, f=f, g=g)
dictData = dict(name=caseName, varC=varC, varQ=varQ, varF=varF, varG=varG,
sd=sdStr, ed=edStr, freq=freq, siteNoLst=siteNoLst)
with open(os.path.join(saveFolder, 'info')+'.json', 'w') as fp:
json.dump(dictData, fp, indent=4)
def initSubset(caseName):
saveFolder = caseFolder(caseName)
subsetFile = os.path.join(saveFolder, 'subset.json')
dictSubset = dict(
all=dict(sd=None, ed=None, siteNoLst=None, mask=False))
with open(subsetFile, 'w') as fp:
json.dump(dictSubset, fp, indent=4)
maskFolder = os.path.join(saveFolder, 'mask')
if not os.path.exists(maskFolder):
os.mkdir(maskFolder)
def readSiteTS(siteNo, varLst, freq='D', area=None,
sd=np.datetime64('1979-01-01'),
ed=np.datetime64('2019-12-31'),
rmFlag=True):
# read data
td =
|
pd.date_range(sd, ed)
|
pandas.date_range
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import sys, getopt
import pandas
import csv
#import statsmodels.formula.api as smf
from sklearn import preprocessing
import math
import time
from heapq import *
import operator
sys.path.append('./')
sys.path.append('../')
from similarity_calculation.category_similarity_matrix import *
from similarity_calculation.category_network_embedding import *
from utils import *
from constraint_definition.LocalRegressionConstraint import *
DEFAULT_RESULT_PATH = './input/query_res.csv'
DEFAULT_QUESTION_PATH = './input/user_question.csv'
DEFAULT_CONSTRAINT_PATH = './input/CONSTRAINTS'
EXAMPLE_NETWORK_EMBEDDING_PATH = './input/NETWORK_EMBEDDING'
EXAMPLE_SIMILARITY_MATRIX_PATH = './input/SIMILARITY_DEFINITION'
DEFAULT_AGGREGATE_COLUMN = 'count'
DEFAULT_CONSTRAINT_EPSILON = 0.05
TOP_K = 5
def build_local_regression_constraint(data, column_index, t, con, epsilon, agg_col, regression_package):
"""Build local regression constraint from Q(R), t, and global regression constraint
Args:
data: result of Q(R)
column_index: index for values in each column
t: target tuple in Q(R)
con: con[0] is the list of fixed attributes in Q(R), con[1] is the list of variable attributes in Q(R)
epsilon: threshold for local regression constraint
regression_package: which package is used to compute regression
Returns:
A LocalRegressionConstraint object whose model is trained on \pi_{con[1]}(Q_{t[con[0]]}(R))
"""
tF = get_F_value(con[0], t)
local_con = LocalRegressionConstraint(con[0], tF, con[1], agg_col, epsilon)
train_data = {agg_col: []}
for v in con[1]:
train_data[v] = []
# for index, row in data['df'].iterrows():
# if get_F_value(con[0], row) == tF:
# for v in con[1]:
# train_data[v].append(row[v])
# train_data[agg_col].append(row[agg_col])
for idx in column_index[con[0][0]][tF[0]]:
row = data['df'].loc[data['df']['index'] == idx]
row = row.to_dict('records')[0]
#print row
if get_F_value(con[0], row) == tF:
for v in con[1]:
train_data[v].append(row[v])
train_data[agg_col].append(row[agg_col])
if regression_package == 'scikit-learn':
train_x = {}
for v in con[1]:
if v in data['le']:
train_data[v] = data['le'][v].transform(train_data[v])
train_data[v] = data['ohe'][v].transform(train_data[v].reshape(-1, 1))
#print data['ohe'][v].transform(train_data[v].reshape(-1, 1))
train_x[v] = train_data[v]
else:
if v != agg_col:
train_x[v] = np.array(train_data[v]).reshape(-1, 1)
train_y = np.array(train_data[agg_col]).reshape(-1, 1)
train_x = np.concatenate(list(train_x.values()), axis=-1)
local_con.train_sklearn(train_x, train_y)
else:
#train_data = pandas.DataFrame(train_data)
formula = agg_col + ' ~ ' + ' + '.join(con[1])
print
local_con.train(train_data, formula)
return local_con
def validate_local_regression_constraint(data, local_con, t, dir, agg_col, regression_package):
"""Check the validicity of the user question under a local regression constraint
Args:
data: data['df'] is the data frame storing Q(R)
data['le'] is the label encoder, data['ohe'] is the one-hot encoder
local_con: a LocalRegressionConstraint object
t: target tuple in Q(R)
dir: whether user thinks t[agg(B)] is high or low
agg_col: the column of aggregated value
regression_package: which package is used to compute regression
Returns:
the actual direction that t[agg(B)] compares to its expected value, and the expected value from local_con
"""
test_tuple = {}
for v in local_con.var_attr:
test_tuple[v] = [t[v]]
if regression_package == 'scikit-learn':
for v in local_con.var_attr:
if v in data['le']:
test_tuple[v] = data['le'][v].transform(test_tuple[v])
test_tuple[v] = data['ohe'][v].transform(test_tuple[v].reshape(-1, 1))
else:
test_tuple[v] = np.array(test_tuple[v]).reshape(-1, 1)
test_tuple = np.concatenate(list(test_tuple.values()), axis=-1)
predictY = local_con.predict_sklearn(test_tuple)
else:
predictY = local_con.predict(pandas.DataFrame(test_tuple))
if t[agg_col] < (1-local_con.epsilon) * predictY[0]:
return -dir, predictY[0]
elif t[agg_col] > (1+local_con.epsilon) * predictY[0]:
return dir, predictY[0]
else:
return 0, predictY[0]
def tuple_similarity(t1, t2, var_attr, cat_sim, num_dis_norm, agg_col):
"""Compute the similarity between two tuples t1 and t2 on their attributes var_attr
Args:
t1, t2: two tuples
var_attr: variable attributes
cat_sim: the similarity measure for categorical attributes
num_dis_norm: normalization terms for numerical attributes
agg_col: the column of aggregated value
Returns:
the Gower similarity between t1 and t2
"""
sim = 0.0
cnt = 0
for col in var_attr:
if t1[col] is None or t2[col] is None:
continue
if cat_sim.is_categorical(col):
s = cat_sim.compute_similarity(col, t1[col], t2[col], agg_col)
sim += s
cnt += 1  # count categorical attributes too so the Gower average is taken over all compared columns
else:
if col != agg_col and col != 'index':
temp = abs(t1[col] - t2[col]) / num_dis_norm[col]['range']
sim += 1-temp
cnt += 1
return sim / cnt
def find_best_user_questions(data, cons, cat_sim, num_dis_norm, cons_epsilon, agg_col, regression_package):
"""Find explanations for user questions
Args:
data: data['df'] is the data frame storing Q(R)
data['le'] is the label encoder, data['ohe'] is the one-hot encoder
cons: list of fixed attributes and variable attributes of global constraints
cat_sim: the similarity measure for categorical attributes
num_dis_norm: normalization terms for numerical attributes
cons_epsilon: threshold for local regression constraints
agg_col: the column of aggregated value
regression_package: which package is used to compute regression
Returns:
the top-k list of explanations for each user question
"""
index_building_time = 0
constraint_building_time = 0
question_validating_time = 0
score_computing_time = 0
result_merging_time = 0
start = time.clock()
column_index = dict()
for column in data['df']:
column_index[column] = dict()
for index, row in data['df'].iterrows():
for column in data['df']:
val = row[column]
if not val in column_index[column]:
column_index[column][val] = []
column_index[column][val].append(index)
end = time.clock()
index_building_time += end - start
psi = []
# local_cons = []
# start = time.clock()
# for i in range(len(cons)):
# local_cons.append(build_local_regression_constraint(data, column_index, t, cons[i], cons_epsilon, agg_col, regression_package))
# local_cons[i].print_fit_summary()
# end = time.clock()
# constraint_building_time += end - start
explanation_type = 0
max_support = []
candidates = []
for i in range(0, len(cons)):
psi.append(0)
max_support.append([])
f_indexes = dict()
print(cons[i])
for index, row in data['df'].iterrows():
t = get_F_value(cons[i][0], row)
if ','.join(t) in f_indexes:
continue
con_index = None
for j in range(len(cons[i][0])):
idx_j = column_index[cons[i][0][j]][t[j]]
# print(idx_j)
# print(data['df']['index'].isin(idx_j))
if con_index is None:
con_index =
|
pandas.Index(idx_j)
|
pandas.Index
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 21:05:00 2020
Revised on Thur Mar 18 16:04:00 2021
@author: Starlitnightly
New Version 1.2.3
"""
import itertools
import numpy as np
import pandas as pd
from upsetplot import from_memberships
from upsetplot import plot
def FindERG(data, depth=2, sort_num=20, verbose=False, figure=False):
'''
Find out endogenous reference gene
Parameters
----------
data: pandas.DataFrame
DataFrame of data points with each entry in the form: ['gene_id', 'sample1', ...]
depth: int
Accuracy of the endogenous reference gene; must be at least 2.
The larger the number, the fewer genes are screened out, improving accuracy.
sort_num: int
The size of the endogenous reference gene filter.
When the sample is large, it is recommended to increase the value.
verbose: bool
Make the function noisy, writing times and results.
Returns
-------
result:list
a list of endogenous reference gene
'''
lp=[]
if verbose:
import time,datetime
start = time.time()
if depth==1:
print('the depth must be at least 2')
return
if len(data.columns)<=2:
print('the number of samples must be larger than 2')
return
if depth>(len(data.columns)):
print('depth is larger than the number of samples')
return
count=0
result=[]#result
bucket_size = 1000
for i in itertools.combinations(data.columns[0:depth], 2):
if verbose:
start = time.time()
count=count+1
test=data.replace(0,np.nan).dropna()
last_std=pd.DataFrame()
for k in range(0 ,len(data), bucket_size):
test1=test[i[0]].iloc[k:k + bucket_size]
test2=test[i[1]].iloc[k:k + bucket_size]
data_len=len(test1.values)
table1=np.array(test1.values.tolist()*data_len).reshape(data_len,data_len)
table2=
|
pd.DataFrame(table1.T/table1)
|
pandas.DataFrame
|
import sys
sys.path.insert(1, './tad_detection/')
sys.path.insert(1, './tad_detection/preprocessing/')
sys.path.insert(1, './tad_detection/model/')
sys.path.insert(1, './tad_detection/evaluation/')
import seaborn as sns
import numpy as np
import pandas as pd
import os
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score, homogeneity_score, completeness_score, v_measure_score, calinski_harabasz_score, davies_bouldin_score, accuracy_score, precision_score, roc_auc_score, f1_score
from torch_geometric.utils import to_dense_adj
from model.gnn_explainer import GNNExplainer
from utils_general import load_parameters, dump_parameters, set_up_logger
import argparse
from model.utils_model import calculation_graph_matrix_representation
def update_metrics_unsupervised(metrics_n_clust, n_clust, metrics_all_chromosomes):
'''
Function updates the dataframe metrics_all_chromosomes with all metrics from one training of a MinCutTAD model for one n_clust.
:param metrics_n_clust: metrics for unsupervised training for a specific n_clust.
:param n_clust: number of clusters the TAD regions are supposed to be clustered into by the model.
:param metrics_all_chromosomes: data frame with the metrics for unsupervised training (n_clust, silhouette_score, calinski_harabasz_score, davies_bouldin_score, epoch)
:return metrics_all_chromosomes:
'''
metrics_all_chromosomes = metrics_all_chromosomes.append({"n_clust": n_clust,
"silhouette_score": max(metrics_n_clust["silhouette_score"]),
"calinski_harabasz_score": max(metrics_n_clust["calinski_harabasz_score"]),
"davies_bouldin_score": max(metrics_n_clust["davies_bouldin_score"]),
"epoch": np.where(metrics_n_clust["silhouette_score"] == max(metrics_n_clust["silhouette_score"]))[0][0]
},ignore_index=True)
return metrics_all_chromosomes
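# Illustrative sketch (not part of the original module): a minimal call of
# update_metrics_unsupervised with made-up per-epoch metrics for a single n_clust.
# Assumes the pandas version this module targets (DataFrame.append was removed in pandas 2.0).
def _demo_update_metrics_unsupervised():
    metrics_n_clust = {"silhouette_score": [0.2, 0.5, 0.4],
                       "calinski_harabasz_score": [10.0, 30.0, 25.0],
                       "davies_bouldin_score": [1.5, 0.9, 1.1]}
    metrics_all = pd.DataFrame(columns=["n_clust", "silhouette_score",
                                        "calinski_harabasz_score",
                                        "davies_bouldin_score", "epoch"])
    # best silhouette (0.5) was reached at epoch 1, so that epoch index is recorded
    return update_metrics_unsupervised(metrics_n_clust, 8, metrics_all)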
def apply_gnnexplainer(parameters, model, device, dataloader):
'''
Function is a wrapper for the functionality of the GNNExplainer (https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.GNNExplainer). It applies the GNNExplainer to all nodes for one specific chromosome, saves the results and creates visualizations for the importance of the node annotations.
:param parameters: dictionary with parameters set in parameters.json file
:param model: PyTorch model with trained weights
:param device: device (cuda/ cpu)
:param dataloader: dataloader testing
'''
explainer = GNNExplainer(model, epochs=parameters["num_epochs_gnn_explanations"], return_type='log_prob')
node_feat_mask_all = []
for graph_test_batch in dataloader:
if graph_test_batch.source_information[0].split("-")[1] != "X":
X, edge_index, edge_attr, y, source_information = graph_test_batch.x, graph_test_batch.edge_index, graph_test_batch.edge_attr, graph_test_batch.y, graph_test_batch.source_information
break
nodes = list(range(X.shape[0]))
y_nodes = y[nodes].cpu().detach().numpy()
if parameters["generate_graph_matrix_representation"] == True:
edge_index = calculation_graph_matrix_representation(parameters, edge_index)
X, edge_index, edge_attr, y = X.to(device), edge_index.to(device), edge_attr.to(device), y.to(device)
for node_idx in nodes:
node_feat_mask, edge_mask = explainer.explain_node(node_idx, x=X, edge_index=edge_index, edge_attr=edge_attr)
node_feat_mask_all.append(node_feat_mask.cpu().detach().numpy())
#ax, G = explainer.visualize_subgraph(node_idx, edge_index, edge_mask, y=y)
#plt.show()
node_feat_mask_all = np.array(node_feat_mask_all).T
dict_node_feat_mask_all = {}
for index, genomic_annotation in enumerate(parameters["genomic_annotations"]):
dict_node_feat_mask_all[genomic_annotation] = node_feat_mask_all[index]
dict_node_feat_mask_all['label'] = y_nodes
df_node_feat_mask_all = pd.DataFrame(data=dict_node_feat_mask_all, index=nodes)
save_gnn_explanations(parameters, df_node_feat_mask_all)
visualize_gnnexplanations(parameters, df_node_feat_mask_all)
def visualize_gnnexplanations(parameters, df_node_feat_mask_all):
'''
Function generates visualizations of explanations for each node annotation (CTCF, RAD21, SMC3, Housekeeping genes).
:param parameters: dictionary with parameters set in parameters.json file
:param df_node_feat_mask_all: dataframe with importance scores for each node annotation (CTCF, RAD21, SMC3, Housekeeping genes) for each node
'''
for genomic_annotation in parameters["genomic_annotations"]:
plt.hist(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 0][genomic_annotation]), 50, alpha=0.5, label='No-TAD')
plt.hist(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 1][genomic_annotation]), 50, alpha=0.5, label='TAD')
min_ylim, max_ylim = plt.ylim()
plt.axvline(np.mean(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 0][genomic_annotation])), color='k', linestyle='dashed', linewidth=1)
plt.text(np.mean(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 0][genomic_annotation])) * 1.01, max_ylim * 0.9, 'Mean: {:.2f}'.format(np.mean(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 0][genomic_annotation]))))
plt.axvline(np.mean(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 1][genomic_annotation])), color='k', linestyle='dashed', linewidth=1)
plt.text(np.mean(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 1][genomic_annotation])) * 1.01, max_ylim * 0.8, 'Mean: {:.2f}'.format( np.mean(list(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 1][genomic_annotation]))))
plt.legend(loc='upper right')
plt.ylabel("Prevalence")
plt.xlabel(f"Importance scores for {genomic_annotation}")
plt.title(f"Explanations for {genomic_annotation}")
#plt.show()
plt.savefig(os.path.join(parameters["output_directory"], parameters["dataset_name"], "node_explanations", "node_explanations_distribution_" + genomic_annotation + ".png"))
plt.close()
df_node_feat_data = {}
for genomic_annotation in parameters["genomic_annotations"]:
df_node_feat_data[genomic_annotation] = [np.mean(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 0][genomic_annotation]), np.mean(df_node_feat_mask_all[df_node_feat_mask_all['label'] == 1][genomic_annotation])]
df_node_feat = pd.DataFrame(index=["No-TAD", "TAD"], data=df_node_feat_data)
df_node_feat.plot(kind='bar', title="Mean importance scores of genomic annotations TAD/ No-TAD", ylabel="Mean importance score", rot=0)
plt.legend(loc='lower right', fontsize="small")
# plt.show()
plt.savefig(os.path.join(parameters["output_directory"], parameters["dataset_name"], "node_explanations", "node_explanations_tad_vs_no_tad.png"))
plt.close()
def save_gnn_explanations(parameters, df_node_feat_mask_all):
'''
Function saves explanations by GNNexplainer.
:param parameters: dictionary with parameters set in parameters.json file
:param df_node_feat_mask_all: dataframe with importance scores for each node annotation (CTCF, RAD21, SMC3, Housekeeping genes) for each node
'''
pd.to_pickle(df_node_feat_mask_all, os.path.join(parameters["output_directory"], parameters["dataset_name"], "node_explanations", "node_explanations.pickle"))
def calculate_classification_metrics(labels_all_chromosomes, scores_all_chromosomes, y_all_chromosomes):
'''
Function calculates metrics used for supervised prediction (Accuracy, Precision, AUROC, F1-Score)
:param labels_all_chromosomes: labels for genomic bins generated by model
:param scores_all_chromosomes: prediction confidence scores for labels for genomic bins generated by model
:param y_all_chromosomes: true labels for genomic bins
:return accuracy: accuracy score
:return precision: precision score
:return roc_auc: AUROC
:return f1: F1 score
'''
accuracy = accuracy_score(y_all_chromosomes, labels_all_chromosomes)
precision = precision_score(y_all_chromosomes, labels_all_chromosomes)
roc_auc = roc_auc_score(y_all_chromosomes, scores_all_chromosomes)
f1 = f1_score(y_all_chromosomes, labels_all_chromosomes)
return accuracy, precision, roc_auc, f1
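# Illustrative sketch (not part of the original module): a tiny made-up example of the
# metric helper above; the label and score arrays are assumptions, not real model output.
def _demo_calculate_classification_metrics():
    y_true = np.array([0, 0, 1, 1])           # true TAD / No-TAD labels
    y_pred = np.array([0, 1, 1, 1])           # predicted labels
    scores = np.array([0.1, 0.6, 0.8, 0.9])   # predicted confidence scores
    # -> accuracy 0.75, precision 2/3, AUROC 1.0, F1 0.8
    return calculate_classification_metrics(y_pred, scores, y_true)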
def save_classification_metrics(parameters, scores_all_chromosomes, y_all_chromosomes):
'''
Function saves metrics generated by model when using "supervised" as the task type.
:param parameters: dictionary with parameters set in parameters.json file
:param scores_all_chromosomes: prediction confidence scores for labels for genomic bins generated by model
:param y_all_chromosomes: true labels for genomic bins
'''
metrics_df =
|
pd.DataFrame(data={'predicted_scores': scores_all_chromosomes, 'true_labels': y_all_chromosomes})
|
pandas.DataFrame
|
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
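# Illustrative sketch (not part of the library): how the two lookup directions of
# parse_horizontal behave on a tiny, made-up horizontal mapping.
def _demo_parse_horizontal():
    all_series = {'series_a': 'model_1', 'series_b': 'model_2', 'series_c': 'model_1'}
    assert mosaic_or_horizontal(all_series) == 'horizontal'
    by_model = parse_horizontal(all_series, model_id='model_1')     # ['series_a', 'series_c']
    by_series = parse_horizontal(all_series, series_id='series_b')  # ['model_2']
    return by_model, by_series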
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecasts_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
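# Illustrative sketch (not a call to BestNEnsemble itself): shows only the weighted-mean
# arithmetic the ensemble applies per forecast frame; the values and weights are made up.
def _demo_bestn_weighting():
    idx = pd.date_range('2022-01-01', periods=3)
    f1 = pd.DataFrame({'y': [10.0, 10.0, 10.0]}, index=idx)
    f2 = pd.DataFrame({'y': [4.0, 4.0, 4.0]}, index=idx)
    weights = {'m1': 2.0, 'm2': 1.0}
    ens = (f1 * weights['m1'] + f2 * weights['m2']) / sum(weights.values())
    return ens  # every value is (2*10 + 1*4) / 3 = 8.0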
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
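# Illustrative sketch (not part of the library): horizontal_classifier on toy data.
# Three series have known model assignments and the fourth gets an estimate from the
# GaussianNB fitted on per-series summary statistics; all names below are made up.
def _demo_horizontal_classifier():
    rng = np.random.default_rng(0)
    df_train = pd.DataFrame(rng.normal(size=(30, 4)), columns=['a', 'b', 'c', 'd'])
    known = {'a': 'model_x', 'b': 'model_y', 'c': 'model_x'}
    return horizontal_classifier(df_train, known)  # adds an estimated model for 'd'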
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = pd.concat([upload, missing_rows])
X = fill_median(
(summarize_series(df_train).transpose()).merge(
upload, left_index=True, right_on="series_id"
)
)
X.set_index("series_id", inplace=True) # .drop(columns=['series_id'], inplace=True)
to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
X = X[~X['model_id'].isna()]
Y = X['model_id']
Xf = X.drop(columns=['model_id'])
# from sklearn.linear_model import RidgeClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(Xf, Y)
predicted = clf.predict(to_predict)
result = pd.concat(
[to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
axis=1,
)
cols_needed = ['model_id', 'series_id', 'forecast_period']
final = pd.concat(
[X.reset_index(drop=False)[cols_needed], result[cols_needed]], sort=True, axis=0
)
final['forecast_period'] = final['forecast_period'].astype(str)
final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
try:
final = final[df_train.columns]
if final.isna().to_numpy().sum() > 0:
raise KeyError("NaN in mosaic generalization")
except KeyError as e:
raise ValueError(
f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
)
return final
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (list): models actually available
full_models (list): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
# here split for mosaic or horizontal
if mosaic_or_horizontal(known_matches) == "mosaic":
# make it a dataframe
mosaicy = pd.DataFrame.from_dict(known_matches)
# remove unavailable models
mosaicy = pd.DataFrame(mosaicy[mosaicy.isin(available_models)])
# so we can fill some missing by just using a forward fill, should be good enough
mosaicy.fillna(method='ffill', limit=5, inplace=True)
mosaicy.fillna(method='bfill', limit=5, inplace=True)
if mosaicy.isna().any().any() or mosaicy.shape[1] != df_train.shape[1]:
if full_models is not None:
k2 = pd.DataFrame(mosaicy[mosaicy.isin(full_models)])
else:
k2 = mosaicy.copy()
final = mosaic_classifier(df_train, known=k2)
return final.to_dict()
else:
return mosaicy.to_dict()
else:
# remove any unavailable models
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
# test if generalization is needed
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
startTime = datetime.datetime.now()
# this is meant to fill in any failures
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for horizontal generalization!")
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {mod_id} {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
forecast_df = forecast_df.reindex(columns=org_idx)
u_forecast_df = u_forecast_df.reindex(columns=org_idx)
l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
try:
ens_runtime = sum(list(forecasts_runtime.values()), datetime.timedelta())
except Exception:
ens_runtime = datetime.timedelta(0)
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df2 = pd.concat([l_forecast_df2, c_fore], axis=1)
forecast_df = pd.concat(
[forecast_df.head(dist_n), forecast_df2.tail(dist_last)], axis=0
)
u_forecast_df = pd.concat(
[u_forecast_df.head(dist_n), u_forecast_df2.tail(dist_last)], axis=0
)
l_forecast_df = pd.concat(
[l_forecast_df.head(dist_n), l_forecast_df2.tail(dist_last)], axis=0
)
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in list(mod_dic.values()):
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def EnsembleForecast(
ensemble_str,
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Return PredictionObject for given ensemble method."""
ens_model_name = ensemble_params['model_name'].lower().strip()
s3list = ['best3', 'best3horizontal', 'bestn']
if ens_model_name in s3list:
ens_forecast = BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name == 'dist':
ens_forecast = DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name in horizontal_aliases:
ens_forecast = HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == "mosaic":
ens_forecast = MosaicEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == 'hdist':
ens_forecast = HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
else:
raise ValueError("Ensemble model type not recognized.")
def _generate_distance_ensemble(dis_frac, forecast_length, initial_results):
"""Constructs a distance ensemble dictionary."""
dis_frac = 0.5
first_bit = int(np.ceil(forecast_length * dis_frac))
last_bit = int(np.floor(forecast_length * (1 - dis_frac)))
not_ens_list = initial_results.model_results[
initial_results.model_results['Ensemble'] == 0
]['ID'].tolist()
ens_per_ts = initial_results.per_timestamp_smape[
initial_results.per_timestamp_smape.index.isin(not_ens_list)
]
first_model = ens_per_ts.iloc[:, 0:first_bit].mean(axis=1).idxmin()
last_model = (
ens_per_ts.iloc[:, first_bit : (last_bit + first_bit)].mean(axis=1).idxmin()
)
ensemble_models = {}
best3 = (
initial_results.model_results[
initial_results.model_results['ID'].isin([first_model, last_model])
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
ensemble_models = best3.to_dict(orient='index')
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'Dist',
'model_count': 2,
'model_metric': 'smape',
'models': ensemble_models,
'dis_frac': dis_frac,
'FirstModel': first_model,
'SecondModel': last_model,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
def _generate_bestn_dict(
best,
model_name: str = 'BestN',
model_metric: str = "best_score",
model_weights: dict = None,
):
ensemble_models = best.to_dict(orient='index')
model_parms = {
'model_name': model_name,
'model_count': best.shape[0],
'model_metric': model_metric,
'models': ensemble_models,
}
if model_weights is not None:
model_parms['model_weights'] = model_weights
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(model_parms),
'TransformationParameters': '{}',
'Ensemble': 1,
}
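# Illustrative sketch (not part of the library): a minimal 'best' frame for
# _generate_bestn_dict; the IDs, model names and empty parameter strings are
# placeholders, not real template entries.
def _demo_generate_bestn_dict():
    best = pd.DataFrame({'Model': ['ETS', 'ARIMA'],
                         'ModelParameters': ['{}', '{}'],
                         'TransformationParameters': ['{}', '{}']},
                        index=['id_a', 'id_b'])
    return _generate_bestn_dict(best, model_name='BestN', model_metric='demo')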
def EnsembleTemplateGenerator(
initial_results,
forecast_length: int = 14,
ensemble: str = "simple",
score_per_series=None,
):
"""Generate class 1 (non-horizontal) ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
ens_temp = initial_results.model_results.drop_duplicates(subset='ID')
# filter out horizontal ensembles
ens_temp = ens_temp[ens_temp['Ensemble'] <= 1]
if 'simple' in ensemble:
# best 3, all can be of same model type
best3nonunique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3nonunique.shape[0]
if n_models == 3:
best3nu_params = pd.DataFrame(
_generate_bestn_dict(
best3nonunique, model_name='BestN', model_metric="best_score"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3nu_params], axis=0)
# best 3, by SMAPE, RMSE, SPL
bestsmape = ens_temp.nsmallest(1, columns=['smape_weighted'])
bestrmse = ens_temp.nsmallest(2, columns=['rmse_weighted'])
bestmae = ens_temp.nsmallest(3, columns=['spl_weighted'])
best3metric = pd.concat([bestsmape, bestrmse, bestmae], axis=0)
best3metric = (
best3metric.drop_duplicates()
.head(3)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
n_models = best3metric.shape[0]
if n_models == 3:
best3m_params = pd.DataFrame(
_generate_bestn_dict(
best3metric, model_name='BestN', model_metric="mixed_metric"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3m_params], axis=0)
# best 3, all must be of different model types
ens_temp = (
ens_temp.sort_values('Score', ascending=True, na_position='last')
.groupby('Model')
.head(1)
.reset_index(drop=True)
)
best3unique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3unique.shape[0]
if n_models == 3:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
best3unique, model_name='BestN', model_metric="best_score_unique"
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
if 'distance' in ensemble:
dis_frac = 0.2
distance_params = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params], axis=0, ignore_index=True
)
dis_frac = 0.5
distance_params2 = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params2], axis=0, ignore_index=True
)
# in previous versions per_series metrics were only captured if 'horizontal' was passed
if 'simple' in ensemble:
if score_per_series is None:
per_series = initial_results.per_series_mae
else:
per_series = score_per_series
per_series = per_series[per_series.index.isin(ens_temp['ID'].tolist())]
# make it ranking based! Need bigger=better for weighting
per_series_ranked = per_series.rank(ascending=False)
# choose best n based on score per series
n = 3
chosen_ones = per_series_ranked.sum(axis=1).nlargest(n)
bestn = ens_temp[ens_temp['ID'].isin(chosen_ones.index.tolist())].set_index(
"ID"
)[['Model', 'ModelParameters', 'TransformationParameters']]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric="bestn_horizontal",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
# cluster and then make best model per cluster
if per_series.shape[1] > 4:
try:
from sklearn.cluster import AgglomerativeClustering
max_clusters = 8
n_clusters = round(per_series.shape[1] / 3)
n_clusters = max_clusters if n_clusters > max_clusters else n_clusters
X = per_series_ranked.transpose()
clstr = AgglomerativeClustering(n_clusters=n_clusters).fit(X)
series_labels = clstr.labels_
for cluster in np.unique(series_labels).tolist():
current_ps = per_series_ranked[
per_series_ranked.columns[series_labels == cluster]
]
n = 3
chosen_ones = current_ps.sum(axis=1).nlargest(n)
bestn = ens_temp[
ens_temp['ID'].isin(chosen_ones.index.tolist())
].set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric=f"cluster_{cluster}",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params],
axis=0,
ignore_index=True,
)
except Exception as e:
print(f"cluster-based simple ensemble failed with {repr(e)}")
mods = pd.Series()
per_series_des = per_series.copy()
n_models = 3
# choose best per series, remove those series, then choose next best
for x in range(n_models):
n_dep = 5 if x < 2 else 10
n_dep = (
n_dep if per_series_des.shape[0] > n_dep else per_series_des.shape[0]
)
models_pos = []
tr_df = pd.DataFrame()
for _ in range(n_dep):
cr_df = pd.DataFrame(per_series_des.idxmin()).transpose()
tr_df = pd.concat([tr_df, cr_df], axis=0)
models_pos.extend(per_series_des.idxmin().tolist())
per_series_des[per_series_des == per_series_des.min()] = np.nan
cur_mods = pd.Series(models_pos).value_counts()
cur_mods = cur_mods.sort_values(ascending=False).head(1)
mods = mods.combine(cur_mods, max, fill_value=0)
rm_cols = tr_df[tr_df.isin(mods.index.tolist())]
rm_cols = rm_cols.dropna(how='all', axis=1).columns
per_series_des = per_series.copy().drop(mods.index, axis=0)
per_series_des = per_series_des.drop(rm_cols, axis=1)
if per_series_des.shape[1] == 0:
per_series_des = per_series.copy().drop(mods.index, axis=0)
best3 = (
initial_results.model_results[
initial_results.model_results['ID'].isin(mods.index.tolist())
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
best3_params = pd.DataFrame(
_generate_bestn_dict(best3, model_name='BestN', model_metric="horizontal"),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3_params], axis=0, ignore_index=True
)
if 'subsample' in ensemble:
try:
import random
if score_per_series is None:
per_series = initial_results.per_series_mae
else:
per_series = score_per_series
per_series = per_series[per_series.index.isin(ens_temp['ID'].tolist())]
            # make it ranking-based: bigger = better for weighting
per_series_ranked = per_series.rank(ascending=False)
# subsample and then make best model per group
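            # draw up to 2 * num_series (capped at 100) random column subsets and build a
            # BestN candidate from a random sample of each subset's top-ranked models,
            # sometimes rank-weighted and sometimes unweighted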
num_series = per_series.shape[1]
n_samples = num_series * 2
max_deep_ensembles = 100
n_samples = (
n_samples if n_samples < max_deep_ensembles else max_deep_ensembles
)
col_min = 1 if num_series < 3 else 2
col_max = round(num_series / 2)
col_max = num_series if col_max > num_series else col_max
for samp in range(n_samples):
n_cols = random.randint(col_min, col_max)
current_ps = per_series_ranked.sample(n=n_cols, axis=1)
n_largest = random.randint(9, 16)
n_sample = random.randint(2, 5)
            # randomly sample a few of the top-ranked models
chosen_ones = current_ps.sum(axis=1).nlargest(n_largest)
n_sample = (
n_sample
if n_sample < chosen_ones.shape[0]
else chosen_ones.shape[0]
)
chosen_ones = chosen_ones.sample(n_sample).sort_values(ascending=False)
bestn = ens_temp[
ens_temp['ID'].isin(chosen_ones.index.tolist())
].set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
model_weights = random.choice([chosen_ones.to_dict(), None])
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric=f"subsample_{samp}",
model_weights=model_weights,
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
except Exception as e:
print(f"subsample ensembling failed with error: {repr(e)}")
return ensemble_templates
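# Rough usage sketch (an assumption, not taken from this file): per_series is expected to be
# a DataFrame of per-series error scores indexed by model ID with one column per series, and
# model_results the flat results table, e.g.
#     horizontal_template = HorizontalTemplateGenerator(
#         per_series=initial_results.per_series_mae,
#         model_results=initial_results.model_results,
#         forecast_length=14,
#         ensemble='horizontal-max',
#         subset_flag=False,
#     )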
def HorizontalTemplateGenerator(
per_series,
model_results,
forecast_length: int = 14,
ensemble: str = "horizontal",
subset_flag: bool = True,
per_series2=None,
):
"""Generate horizontal ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
ensy = ['horizontal', 'probabilistic', 'hdist']
if any(x in ensemble for x in ensy):
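        # '-max' variants: assign every series the single model with its lowest
        # per-series score, with no filtering of the candidate pool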
if ('horizontal-max' in ensemble) or ('probabilistic-max' in ensemble):
mods_per_series = per_series.idxmin()
mods = mods_per_series.unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'Horizontal' if 'horizontal' in ensemble else 'Probabilistic'
metric = 'Score-max' if 'horizontal' in ensemble else 'SPL'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'model_metric': metric,
'models': best5.to_dict(orient='index'),
'series': mods_per_series.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
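        # 'hdist' (only when not subsetting): pair two per-series assignments, one from
        # per_series and one from per_series2, blended with a fixed dis_frac of 0.3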
if 'hdist' in ensemble and not subset_flag:
mods_per_series = per_series.idxmin()
mods_per_series2 = per_series2.idxmin()
mods = pd.concat([mods_per_series, mods_per_series2]).unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'hdist'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'models': best5.to_dict(orient='index'),
'dis_frac': 0.3,
'series1': mods_per_series.to_dict(),
'series2': mods_per_series2.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
if ('horizontal' in ensemble) or ('probabilistic' in ensemble):
# first generate lists of models by ID that are in shared and no_shared
no_shared_select = model_results['Model'].isin(no_shared)
shared_mod_lst = model_results[~no_shared_select]['ID'].tolist()
no_shared_mod_lst = model_results[no_shared_select]['ID'].tolist()
lowest_score_mod = [
model_results.iloc[model_results['Score'].idxmin()]['ID']
]
per_series[per_series.index.isin(shared_mod_lst)]
# remove those where idxmin is in no_shared
shared_maxes = per_series.idxmin().isin(shared_mod_lst)
shr_mx_cols = shared_maxes[shared_maxes].index
per_series_shareds = per_series.filter(shr_mx_cols, axis=1)
# select best n shared models (NEEDS improvement)
n_md = 5
use_shared_lst = (
per_series_shareds.median(axis=1).nsmallest(n_md).index.tolist()
)
# combine all of the above as allowed mods
allowed_list = no_shared_mod_lst + lowest_score_mod + use_shared_lst
per_series_filter = per_series[per_series.index.isin(allowed_list)]
# first select a few of the best shared models
# Option A: Best overall per model type (by different metrics?)
# Option B: Best per different clusters...
# Rank position in score for EACH series
# Lowest median ranking
# Lowest Quartile 1 of rankings
# Normalize and then take Min, Median, or IQ1
# then choose min from series of these + no_shared
# make sure no models are included that don't match to any series
# ENSEMBLE and NO_SHARED (it could be or it could not be)
# need to TEST cases where all columns are either shared or no_shared!
# concern: choose lots of models, slower to run initial
mods_per_series = per_series_filter.idxmin()
mods = mods_per_series.unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'Horizontal' if 'horizontal' in ensemble else 'Probabilistic'
metric = 'Score' if 'horizontal' in ensemble else 'SPL'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'model_metric': metric,
'models': best5.to_dict(orient='index'),
'series': mods_per_series.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
            best5_params = pd.DataFrame(best5_params, index=[0])
            ensemble_templates = pd.concat(
                [ensemble_templates, best5_params], axis=0, ignore_index=True
            )
    return ensemble_templates
import pandas as pd
from covid import ascertainment as asc
import datetime as dt
import us
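# floor_to_sat maps a date onto a Saturday: Mon/Tue and Sun step back to the previous
# Saturday, while Wed-Fri step forward to the next one (days_back is negative there)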
def floor_to_sat(date: dt.date):
wd = date.weekday()
days_back = 0
if 0 <= wd <= 1:
days_back = wd + 2
if 2 <= wd <= 4:
days_back = -5 + wd
if wd == 6:
days_back = 1
return date - dt.timedelta(days=days_back)
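# CountyRisk loads NYT county-level case counts and the COVIDhub ensemble forecast,
# renames the forecast's location/value columns to fips/cases, and pre-computes the
# date strings (today, one week back, two and three weeks ahead) used elsewhere.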
class CountyRisk:
def __init__(self):
print('reading counties')
self.counties = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv')
print('getting ensemble forecasts')
self.forecasts = pd.read_csv("https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master/data-processed/COVIDhub-ensemble/2021-02-15-COVIDhub-ensemble.csv")
today_d = pd.to_datetime(self.counties.date).max()
        today_d -= dt.timedelta(days=today_d.weekday() % 5)  ## subtract weekday % 5 days to get back to Saturday (weekday 5)
self.today = dt.datetime.strftime(today_d, '%Y-%m-%d')
self.past = dt.datetime.strftime(today_d - dt.timedelta(days=7), '%Y-%m-%d')
self.three_weeks = dt.datetime.strftime(today_d + dt.timedelta(days=21), '%Y-%m-%d')
self.two_weeks = dt.datetime.strftime(today_d + dt.timedelta(days=14), '%Y-%m-%d')
#s = self.counties.set_index('date')
self.fore = self.forecasts
self.fore.rename(columns={"location": "fips", "value": "cases"}, inplace=True)
self.all_fips = self.fore.fips.dropna().unique()
#self.active_cases = dict((fips, s[s.fips == fips].cases[self.today] - s[s.fips == fips].cases[self.past]) for fips in self.all_fips)
#for fips in self.all_fips:
# print(fips)
# print(s[s.fips == fips])
# print(s[s.fips == fips].cases[self.three_weeks])
# print(s[s.fips == fips].cases[self.two_weeks])
# self.active_cases = dict((fips, s[s.fips == fips].cases[self.three_weeks] - s[s.fips == fips].cases[self.two_weeks]) for fips in self.all_fips)
print('reading county populations')
        pop_csv = pd.read_csv('https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/co-est2019-alldata.csv', encoding='latin1')
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
import sys
import os
import time
import ntpath
import json
import calendar
from helper.mongod import Mongodb
from datetime import datetime
from datetime import date
from pprint import pprint
from bson import ObjectId
from helper.common import Common
from helper.jaccs import Config
import pandas as pd
import xlsxwriter
common = Common()
base_url = common.base_url()
wff_env = common.wff_env(base_url)
mongodb = Mongodb(MONGODB="worldfone4xs", WFF_ENV=wff_env)
_mongodb = Mongodb(MONGODB="_worldfone4xs", WFF_ENV=wff_env)
now = datetime.now()
subUserType = 'LO'
collection = common.getSubUser(subUserType, 'Reminder_letter_report')
lnjc05_collection = common.getSubUser(subUserType, 'LNJC05')
zaccf_collection = common.getSubUser(subUserType, 'ZACCF_report')
sbv_collection = common.getSubUser(subUserType, 'SBV')
investigation_collection = common.getSubUser(subUserType, 'Investigation_file')
account_collection = common.getSubUser(subUserType, 'List_of_account_in_collection')
report_release_sale_collection = common.getSubUser(subUserType, 'Report_release_sale')
diallist_detail_collection = common.getSubUser(subUserType, 'Diallist_detail')
user_collection = common.getSubUser(subUserType, 'User')
product_collection = common.getSubUser(subUserType, 'Product')
report_due_date_collection = common.getSubUser(subUserType, 'Report_due_date')
stored_collection = common.getSubUser(subUserType, 'SBV_Stored')
log = open(base_url + "cronjob/python/Loan/log/Reminder_letter_log.txt","a")
log.write(now.strftime("%d/%m/%Y, %H:%M:%S") + ': Start Import' + '\n')
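# Overall flow: rebuild today's Reminder_letter_report rows from LNJC05 loan accounts and
# SBV card accounts matching the current due-date group, insert them into MongoDB, then
# aggregate them back out and shape a DataFrame of the report columns (fileOutput names the
# target xlsx for the export).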
try:
data = []
insertData = []
now = datetime.now()
today = date.today()
# today = datetime.strptime('28/03/2020', "%d/%m/%Y").date()
day = today.day
month = today.month
year = today.year
weekday = today.weekday()
lastDayOfMonth = calendar.monthrange(year, month)[1]
todayString = today.strftime("%d/%m/%Y")
todayTimeStamp = int(time.mktime(time.strptime(str(todayString + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
endTodayTimeStamp = int(time.mktime(time.strptime(str(todayString + " 23:59:59"), "%d/%m/%Y %H:%M:%S")))
startMonth = int(time.mktime(time.strptime(str('01/' + str(month) + '/' + str(year) + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
endMonth = int(time.mktime(time.strptime(str(str(lastDayOfMonth) + '/' + str(month) + '/' + str(year) + " 23:59:59"), "%d/%m/%Y %H:%M:%S")))
mongodb.remove_document(MONGO_COLLECTION=collection, WHERE={'createdAt': {'$gte': todayTimeStamp, '$lte': endTodayTimeStamp} })
i = 1
last_five_day = todayTimeStamp - (86400*5)
dueDayOfMonth = mongodb.getOne(MONGO_COLLECTION=report_due_date_collection, WHERE={'for_month': str(month), 'due_date' : last_five_day})
if dueDayOfMonth != None:
due_date = datetime.fromtimestamp(dueDayOfMonth['due_date'])
d2 = due_date.strftime('%d/%m/%Y')
due_date = datetime.strptime(d2, "%d/%m/%Y").date()
day = due_date.day
if day >= 12 and day <= 15:
dept_group = '01'
if day >= 22 and day <= 25:
dept_group = '02'
if (day >= 28 and day <= 31) or (day >= 1 and day <= 5):
dept_group = '03'
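        # dept_group buckets the due date into cycle '01' (12th-15th), '02' (22nd-25th) or
        # '03' (28th-5th); days outside these windows leave dept_group unset. The pipeline
        # below pulls the key LNJC05 fields for that group, excluding accounts whose
        # group_id is exactly 'A' + dept_group.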
aggregate_pipeline = [
{
"$project":
{
'account_number': 1,
'current_balance': 1,
'overdue_amount_this_month': 1,
'mobile_num': 1,
'due_date': 1,
'group_id': 1
# 'dateDifference' :{"$divide" : [{ "$subtract" : [todayTimeStamp,'$due_date']}, 86400]}
}
},{
"$match" :
{
'group_id': {'$regex': dept_group, '$nin': [ 'A'+dept_group ]}
# '$or' : [ {'dateDifference': {"$eq": 35} }, {'dateDifference': {"$eq": 65} }, {'dateDifference': {"$eq": 95} }, {'dateDifference': {"$eq": 185} }]
}
}
]
dataLnjc05 = mongodb.aggregate_pipeline(MONGO_COLLECTION=lnjc05_collection,aggregate_pipeline=aggregate_pipeline)
if dataLnjc05 != None:
for row in dataLnjc05:
zaccf = mongodb.getOne(MONGO_COLLECTION=zaccf_collection, WHERE={'account_number': str(row['account_number']) })
if zaccf != None:
temp = {
'index' : i,
'account_number' : row['account_number'],
'name' : zaccf['name'],
'address' : zaccf['ADDR_1']+ ', '+zaccf['ADDR_2']+', '+zaccf['ADDR_3'],
'contract_date' : zaccf['CIF_CR8'],
'approved_amt' : int(float(zaccf['APPROV_LMT'])),
'cur_bal' : row['current_balance'],
'overdue_amt' : row['overdue_amount_this_month'],
'phone' : row['mobile_num'],
'due_date' : row['due_date'],
'overdue_date' : zaccf['OVER_DY'],
'group' : row['group_id'],
'product_code' : zaccf['PRODGRP_ID'],
'outstanding_bal' : row['current_balance'],
'pic' : '',
'product_name' : zaccf['PROD_NM'],
'dealer_name' : zaccf['WRK_BRN'],
'license_no' : zaccf['LIC_NO'],
'cif_birth_date' : '',
'license_date' : '',
'brand' : '',
'model' : '',
'engine_no' : '',
'chassis_no' : '',
'color' : '',
'license_plates' : '',
'production_time' : '',
'createdBy' : 'system',
'createdAt' : todayTimeStamp
}
if int(zaccf['cif_birth_date']) > 0 :
if len(str(zaccf['cif_birth_date'])) == 7:
zaccf['cif_birth_date'] = '0'+str(zaccf['cif_birth_date'])
cif_birth_date = str(zaccf['cif_birth_date'])
d1 = cif_birth_date[0:2]+'/'+cif_birth_date[2:4]+'/'+cif_birth_date[4:9]
temp['cif_birth_date'] = int(time.mktime(time.strptime(str(d1 + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
if int(zaccf['LIC_DT8']) > 0 :
if len(str(zaccf['LIC_DT8'])) == 7:
zaccf['LIC_DT8'] = '0'+str(zaccf['LIC_DT8'])
LIC_DT8 = str(zaccf['LIC_DT8'])
d1 = LIC_DT8[0:2]+'/'+LIC_DT8[2:4]+'/'+LIC_DT8[4:8]
temp['license_date'] = int(time.mktime(time.strptime(str(d1 + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
product = mongodb.getOne(MONGO_COLLECTION=product_collection, WHERE={'code': str(zaccf['PRODGRP_ID'])},SELECT=['name'])
if product != None:
temp['product_code'] = product['name']
lnjc05Info1 = mongodb.getOne(MONGO_COLLECTION=lnjc05_collection, WHERE={'dealer_no': str(zaccf['WRK_BRN'])},
SELECT=['dealer_name'])
if lnjc05Info1 != None:
temp['dealer_name'] = lnjc05Info1['dealer_name']
contract_date = zaccf['CIF_CR8']
temp['day'] = contract_date[0:2]
temp['month'] = contract_date[2:4]
temp['year'] = contract_date[4:8]
diallistInfo = mongodb.getOne(MONGO_COLLECTION=diallist_detail_collection, WHERE={'account_number': str(row['account_number']), 'createdAt': {'$gte' : todayTimeStamp,'$lte' : endTodayTimeStamp} },SELECT=['assign'])
if diallistInfo != None:
if 'assign' in diallistInfo.keys():
temp['pic'] = diallistInfo['assign']
user = _mongodb.getOne(MONGO_COLLECTION=user_collection, WHERE={'extension': str(diallistInfo['assign'])},SELECT=['agentname'])
if user != None:
temp['pic'] += '-' + user['agentname']
releaseInfo = mongodb.getOne(MONGO_COLLECTION=report_release_sale_collection, WHERE={'account_number': str(row['account_number'])},SELECT=['cus_name','temp_address','address'])
if releaseInfo != None:
temp['name'] = releaseInfo['cus_name']
if releaseInfo['temp_address'] != '' and releaseInfo['temp_address'] != '0':
temp['address'] = releaseInfo['temp_address']
else:
temp['address'] = releaseInfo['address']
if temp['address'] == '0' or temp['address'] == '':
temp['address'] = zaccf['ADDR_1']+ ', '+zaccf['ADDR_2']+', '+zaccf['ADDR_3']
investigationInfo = mongodb.getOne(MONGO_COLLECTION=investigation_collection, WHERE={'contract_no': str(row['account_number'])},SELECT=['brand','model','engine_no','chassis_no','license_plates_no'])
if investigationInfo != None:
temp['brand'] = investigationInfo['brand']
temp['model'] = investigationInfo['model']
temp['engine_no'] = investigationInfo['engine_no']
temp['chassis_no'] = investigationInfo['chassis_no']
temp['license_plates'] = investigationInfo['license_plates_no']
insertData.append(temp)
print(i)
i += 1
# sbv
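        # SBV card accounts: first collect contract numbers from SBV_Stored whose kydue
        # matches the dept_group and whose overdue_indicator is not 'A', then pull the
        # matching rows from the collection-accounts list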
aggregate_stored = [
{
"$match" :
{
'kydue': dept_group,
'overdue_indicator': {'$nin': ['A']}
}
},{
"$group" :
{
"_id": 'null',
"acc_arr": {'$addToSet': '$contract_no'}
}
}
]
sbvStored = mongodb.aggregate_pipeline(MONGO_COLLECTION=stored_collection,aggregate_pipeline=aggregate_stored)
acc_arr = []
if sbvStored != None:
for row in sbvStored:
acc_arr = row['acc_arr']
aggregate_pipeline = [
{
"$match" :
{
'account_number': {'$in': acc_arr},
}
},{
"$project":
{
'account_number': 1,
'overdue_amt': 1,
'phone': 1,
'overdue_date': 1,
'cur_bal': 1,
# 'dateDifference' :{"$divide" : [{ "$subtract" : [todayTimeStamp,'$overdue_date']}, 86400]}
}
},
# { "$match" : {'$or' : [ {'dateDifference': {"$eq": 35} }, {'dateDifference': {"$eq": 65} }, {'dateDifference': {"$eq": 95} }, {'dateDifference': {"$eq": 185} }]} }
]
dataAccount = mongodb.aggregate_pipeline(MONGO_COLLECTION=account_collection,aggregate_pipeline=aggregate_pipeline)
if dataAccount != None:
for row in dataAccount:
group = ''
storedInfo = mongodb.getOne(MONGO_COLLECTION=stored_collection, WHERE={'contract_no': str(row['account_number']) })
if storedInfo != None:
group = storedInfo['overdue_indicator'] + storedInfo['kydue']
sbv = mongodb.getOne(MONGO_COLLECTION=sbv_collection, WHERE={'contract_no': str(row['account_number']) })
if sbv != None:
temp = {
'index' : i,
'account_number' : row['account_number'],
'name' : sbv['name'],
'address' : sbv['address'],
'contract_date' : sbv['open_card_date'],
'approved_amt' : sbv['approved_limit'],
'cur_bal' : row['cur_bal'],
'overdue_amt' : row['overdue_amt'],
'phone' : row['phone'],
'due_date' : row['overdue_date'],
'overdue_date' : sbv['overdue_days_no'],
'group' : group,
'product_code' : sbv['card_type'],
'outstanding_bal' : row['cur_bal'],
'pic' : '',
'product_name' : '',
'dealer_name' : '',
'license_no' : sbv['license_no'],
'cif_birth_date' : sbv['cif_birth_date'],
'license_date' : '',
'brand' : '',
'model' : '',
'engine_no' : '',
'chassis_no' : '',
'color' : '',
'license_plates' : '',
'production_time' : '',
'createdBy' : 'system',
'createdAt' : todayTimeStamp
}
                    if int(sbv['card_type']) < 100:
                        temp['product_code'] = '301 - Credit Card'
                        temp['product_name'] = 'Credit Card'
                    else:
                        temp['product_code'] = '302 - Cash Card'
                        temp['product_name'] = 'Cash Card'
contract_date = sbv['open_card_date']
temp['day'] = contract_date[0:2]
temp['month'] = contract_date[2:4]
temp['year'] = contract_date[4:8]
if int(sbv['license_date']) > 0 :
if len(str(sbv['license_date'])) == 7:
sbv['license_date'] = '0'+str(sbv['license_date'])
license_date = str(sbv['license_date'])
d1 = license_date[0:2]+'/'+license_date[2:4]+'/'+license_date[4:8]
temp['license_date'] = int(time.mktime(time.strptime(str(d1 + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
diallistInfo = mongodb.getOne(MONGO_COLLECTION=diallist_detail_collection, WHERE={'account_number': str(row['account_number']), 'createdAt': {'$gte' : todayTimeStamp,'$lte' : endTodayTimeStamp} },SELECT=['assign'])
if diallistInfo != None:
if 'assign' in diallistInfo.keys():
temp['pic'] = diallistInfo['assign']
user = _mongodb.getOne(MONGO_COLLECTION=user_collection, WHERE={'extension': str(diallistInfo['assign'])},SELECT=['agentname'])
if user != None:
temp['pic'] += '-' + user['agentname']
releaseInfo = mongodb.getOne(MONGO_COLLECTION=report_release_sale_collection, WHERE={'account_number': str(row['account_number'])},SELECT=['cus_name','temp_address','address'])
if releaseInfo != None:
temp['name'] = releaseInfo['cus_name']
if releaseInfo['temp_address'] != '':
temp['address'] = releaseInfo['temp_address']
else:
temp['address'] = releaseInfo['address']
zaccfInfo = mongodb.getOne(MONGO_COLLECTION=zaccf_collection, WHERE={'LIC_NO': str(sbv['license_no'])},SELECT=['WRK_BRN'])
if zaccfInfo != None:
temp['dealer_name'] = zaccfInfo['WRK_BRN']
lnjc05Info1 = mongodb.getOne(MONGO_COLLECTION=lnjc05_collection, WHERE={'dealer_no': str(zaccfInfo['WRK_BRN'])},
SELECT=['dealer_name'])
if lnjc05Info1 != None:
temp['dealer_name'] = lnjc05Info1['dealer_name']
insertData.append(temp)
pprint(i)
i += 1
if len(insertData) > 0:
# mongodb.remove_document(MONGO_COLLECTION=collection)
mongodb.batch_insert(MONGO_COLLECTION=collection, insert_data=insertData)
fileOutput = base_url + 'upload/loan/export/Reminder Letter Report_'+ today.strftime("%d%m%Y") +'.xlsx'
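        # Re-read today's inserted rows (dropping _id), convert the epoch timestamps to
        # dd-mm-YYYY strings, and collect them into dataReport for the spreadsheet below.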
aggregate_acc = [
{
"$match":
{
"createdAt" : {'$gte' : todayTimeStamp,'$lte' : endTodayTimeStamp},
}
},
{
"$project":
{
"_id": 0,
}
}
]
data = mongodb.aggregate_pipeline(MONGO_COLLECTION=collection,aggregate_pipeline=aggregate_acc)
dataReport = []
for row in data:
temp = row
# if 'loan_overdue_amount' in row.keys():
# temp['loan_overdue_amount'] = '{:,.2f}'.format(float(row['loan_overdue_amount']))
# if 'current_balance' in row.keys():
# temp['current_balance'] = '{:,.2f}'.format(float(row['current_balance']))
# if 'outstanding_principal' in row.keys():
# try:
# temp['outstanding_principal'] = '{:,.2f}'.format(float(row['outstanding_principal']))
# except Exception as e:
# temp['outstanding_principal'] = row['outstanding_principal']
try:
if 'due_date' in row.keys():
date_time = datetime.fromtimestamp(int(row['due_date']))
temp['due_date'] = date_time.strftime('%d-%m-%Y')
except Exception as e:
temp['due_date'] = row['due_date']
try:
if 'cif_birth_date' in row.keys():
if row['cif_birth_date'] != None:
date_time = datetime.fromtimestamp(row['cif_birth_date'])
temp['cif_birth_date'] = date_time.strftime('%d-%m-%Y')
else:
temp['cif_birth_date'] = ''
except Exception as e:
temp['cif_birth_date'] = row['cif_birth_date']
try:
if 'license_date' in row.keys():
if row['license_date'] != None:
date_time = datetime.fromtimestamp(row['license_date'])
temp['license_date'] = date_time.strftime('%d-%m-%Y')
except Exception as e:
temp['license_date'] = row['license_date']
if 'createdAt' in row.keys():
if row['createdAt'] != None:
date_time = datetime.fromtimestamp(row['createdAt'])
temp['createdAt'] = date_time.strftime('%d-%m-%Y')
dataReport.append(temp)
        df = pd.DataFrame(dataReport, columns=['index','account_number','name','address','contract_date','day','month','year','approved_amt','cur_bal','overdue_amt','phone','createdAt','due_date','overdue_date','group','product_code','outstanding_bal','pic','product_name','dealer_name','brand','model','engine_no','chassis_no','color','license_plates','production_time','license_no','cif_birth_date','license_date'])
# filter_aligned_datasets.py
#
# Name: <NAME>
#
# Usage: python filter_aligned_datasets.py <xml_filename> <exp_csv_filename>
#
# <xml_filename>: filename of the xml file obtained by efetch for many SRA Experiments.
# <exp_csv_filename>: filename of the SRA_Exps csv file originally generated from this corresponding xml file.
#
#import pdb; pdb.set_trace() # Uncomment to debug code using pdb (like gdb)
import sys
import numpy as np
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import pandas as pd
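# A run counts as "aligned" when the xml contains a Table named PRIMARY_ALIGNMENT under it;
# the XPath climbs three levels back up from that Table to the element carrying the run's
# 'accession' attribute.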
def get_all_aligned_runs(root):
aligned_runs = []
runs = root.findall(".//Table[@name='PRIMARY_ALIGNMENT']/../../..")
for run in runs:
aligned_runs.append(run.get('accession'))
return aligned_runs
def get_unaligned_datasets(root, exp_df):
aligned_runs = get_all_aligned_runs(root)
print("# Aligned RUNs in the full xml file:", len(aligned_runs))
aligned_flag = exp_df['SRA_Run'].isin(aligned_runs)
unaligned_exp_df = exp_df[aligned_flag == False]
print("Unaligned SRA Experiments dataframe:")
print(unaligned_exp_df.info())
aligned_exp_df = exp_df[aligned_flag == True]
print("Aligned SRA Experiments dataframe:")
print(aligned_exp_df.info())
return unaligned_exp_df, aligned_exp_df
if __name__ == "__main__":
xml_file = sys.argv[1]
exp_csv_file = sys.argv[2]
tree = ET.parse(xml_file)
root = tree.getroot()
exp_df = pd.read_csv(exp_csv_file, sep = ",", engine='python')
print("Original SRA Experiments dataframe:")
print(exp_df.info())
unaligned_exp_df, aligned_exp_df = get_unaligned_datasets(root, exp_df)
f_name = exp_csv_file[0:-4]
    pd.DataFrame.to_csv(unaligned_exp_df, path_or_buf=f_name+"_unaligned.csv", index=False)
    pd.DataFrame.to_csv(aligned_exp_df, path_or_buf=f_name+"_aligned.csv", index=False)