| prompt (string, length 19-1.03M) | completion (string, length 4-2.12k) | api (string, length 8-90) |
|---|---|---|
import pandas as pd
import numpy as np
import math
schoolPosition = {'Longitude':-2.233771, 'Latitude':53.46679}
rc = 6378.137
rj = 6356.725
def ToRad(deg):
return deg * math.pi / 180
def GetR(lat):
return rj + (rc - rj) * (90-lat) / 90
def One2OneDistanceSquare(a, b):
degree = a['Latitude']
r = GetR(degree)
sr = r * math.cos(ToRad(degree))
deltaLatitude = ToRad(a['Latitude'] - b['Latitude'])
deltaLongitude = ToRad(a['Longitude'] - b['Longitude'])
return (deltaLatitude * r)**2 + (deltaLongitude * sr)**2
def One2OneDistance(a, b):
return math.sqrt(One2OneDistanceSquare(a, b))
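# Example usage (hypothetical listing coordinates; distances come out in km because the
# Earth radii above are given in km):
# sampleListing = {'Longitude': -2.244, 'Latitude': 53.4808}
# One2OneDistance(sampleListing, schoolPosition) # roughly 1.7 km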
def OneDistanceSqure(point, targetsdf):
result = np.zeros(len(targetsdf))
for index, row in targetsdf.iterrows():
result[index] = One2OneDistanceSquare(point, row)
return result
def OneDistance(point, targets):
result = OneDistanceSqure(point, targets)
return np.sqrt(result)
def LoadScore(crime, key):
result = np.zeros(len(crime))
for index, row in crime.iterrows():
result[index] = row[key]
return result
def GetCrimeScore(airbnb, crime):
print('build radius data')
result = np.zeros(len(airbnb))
areas = np.zeros(len(airbnb))
minindexs = [0]*len(airbnb)
score = LoadScore(crime, 'score_0')
area = LoadScore(crime, 'area')
meanscore = np.mean(score)
k = 50/meanscore
print('load score data')
for index, row in airbnb.iterrows():
if index % 100 == 0:
print("{}/{}".format(index, len(airbnb)))
distance = OneDistanceSqure(row, crime)
minindex = distance == np.min(distance)
values = score[minindex] * k
result[index] = np.sum(values)
for i in range(len(minindex)):
if minindex[i]:
minindexs[index] = i
break
areas[index] = area[minindexs[index]]
return result, areas, minindexs
def CommuteScoreFunc(distance):
if distance < 0.1:
return 100
elif distance < 0.4:
return 80
elif distance < 1:
return 70
elif distance < 3:
return 50
else:
return 50 * 9 / (distance**2)
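# Distances are in km, so e.g. a listing 2 km from the school scores 50,
# while one 5 km away scores 50 * 9 / 25 = 18.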
def GetCommuteScore(airbnb, school):
result = np.zeros(len(airbnb))
distance = [0]*len(airbnb)
for index, row in airbnb.iterrows():
distance[index] = One2OneDistance(row, school)
result[index] = CommuteScoreFunc(distance[index])
return result, distance
def GetBusDistance(airbnb, bus):
result = np.zeros(len(airbnb))
tempdf = []
for index, row in airbnb.iterrows():
if index % 10 == 0:
print("{}/{}".format(index, len(airbnb)))
distance = OneDistance(row, bus)
result[index] = np.min(distance)
row['bus distance'] = result[index]
tempdf.append(row)
pd.DataFrame(tempdf).to_csv("distance.csv")
return result
def GetBusScore(airbnb, bus):
distance = GetBusDistance(airbnb, bus)
#print(distance)
allScore = 50*(0.1)/distance
allScore[allScore>=50] = 50
return allScore, distance
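# Example: a listing whose nearest bus stop is 0.5 km away scores 50 * 0.1 / 0.5 = 10;
# anything 0.1 km or closer gets the maximum score of 50.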
def GetRoomScore(airbnb):
result = np.zeros(len(airbnb))
for index, row in airbnb.iterrows():
result[index] = row['bed_score'] * 100
return result
def GetPrice(airbnb):
result = [0]*len(airbnb)
for index, row in airbnb.iterrows():
result[index] = row['price']
return result
def GetPriceScore(airbnb):
price = GetPrice(airbnb)
#print('price {}'.format(price))
delta = np.max(price) - np.min(price)
#print('delta {}'.format(delta))
score = price - np.min(price)
#print('score {}'.format(score))
score = score / delta
#print('score {}'.format(score))
score = 1 - score
return score * 100
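# Example: prices [50, 100, 150] map to scores [100.0, 50.0, 0.0] (the cheapest listing gets 100).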
def GetPriceBonus(airbnb, minindexs):
pricesCount = {}
prices = GetPrice(airbnb)
for index in range(len(prices)):
minIndex = minindexs[index]
if minIndex not in pricesCount:
pricesCount [minIndex] = [0,0]
pricesCount[minIndex][0] += prices[index]
pricesCount[minIndex][1] += 1
for key in pricesCount:
pricesCount[key][0] /= pricesCount[key][1]
result = [0]*len(prices)
average = [0]*len(prices)
for index,row in airbnb.iterrows():
minIndex = minindexs[index]
average[index] = pricesCount[minIndex][0]
price = prices[index]
if price > average[index]:
result[index] = max(30 - (price - average[index]), 0)
else:
result[index] = min(30 + (average[index] - price), 50)
return result, average
def GetRating(airbnb):
rating = np.zeros(len(airbnb))
for index, row in airbnb.iterrows():
rating[index] = row['rating']
return rating
def GetSelfScore(airbnb, bus):
busScore = GetBusScore(airbnb, bus)
print('load bus score')
rating = np.zeros(len(airbnb))
bed = np.zeros(len(airbnb))
price = np.zeros(len(airbnb))
for index, row in airbnb.iterrows():
rating[index] = row['rating']
bed[index] = row['bed_score']
price[index] = row['price']
return rating * busScore * bed / price
airbnbkeys = ['beds','bed_type','bed_score','rating','price','Latitude','Longitude','name','host_id']
def MergeScore(airbnb, scores):
result = []
for index, row in airbnb.iterrows():
result.append(row)
for key in scores:
result[index][key] = scores[key][index]
return pd.DataFrame(result)
print("start")
airbnbData = | pd.read_csv("airbnb.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import random
import networkx as nx
import math
import time, math
import json
import glob
import os
import pickle
from datetime import datetime, timedelta, date
from collections import Counter
import networkx as nx
"""Helper Functions"""
def convert_datetime(dataset, verbose):
"""
Description:
Input:
Output:
"""
if verbose:
print('Converting strings to datetime objects...', end='', flush=True)
try:
dataset['nodeTime'] = pd.to_datetime(dataset['nodeTime'], unit='s')
except:
try:
dataset['nodeTime'] = pd.to_datetime(dataset['nodeTime'], unit='ms')
except:
dataset['nodeTime'] = pd.to_datetime(dataset['nodeTime'])
dataset['nodeTime'] = dataset['nodeTime'].dt.tz_localize(None)
if verbose:
print(' Done')
return dataset
def create_dir(x_dir):
if not os.path.exists(x_dir):
os.makedirs(x_dir)
print("Created new dir. %s"%x_dir)
else:
print("Dir. already exists")
def get_parent_uids(df, parent_node_col="parentID", node_col="nodeID", root_node_col="rootID", user_col="nodeUserID"):
"""
:return: adds parentUserID column with user id of the parent if it exits in df_
if it doesn't exist, uses the user id of the root instead
if both doesn't exist: NaN
"""
df_ = df.copy()
tweet_uids = pd.Series(df_[user_col].values, index=df_[node_col]).to_dict()
df_['parentUserID'] = df_[parent_node_col].map(tweet_uids)
df_.loc[(df_[root_node_col] != df_[node_col]) & (df_['parentUserID'].isnull()), 'parentUserID'] = df_[(df_[root_node_col] != df_[node_col]) & (df_['parentUserID'].isnull())][root_node_col].map(tweet_uids)
df_ = df_[df_['nodeUserID'] != df_['parentUserID']]
return df_
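# Example (toy data): for
# df = pd.DataFrame({'nodeID': ['t1', 't2', 't3'],
# 'parentID': ['t1', 't1', 't9'], # t9 is not present in df
# 'rootID': ['t1', 't1', 't1'],
# 'nodeUserID': ['alice', 'bob', 'carol']})
# get_parent_uids(df) keeps only t2 and t3, both with parentUserID 'alice':
# t2's parent t1 belongs to alice, t3 falls back to the root user, and t1 itself is
# dropped because its parentUserID equals its own user.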
def get_random_id():
hash = random.getrandbits(64)
return "%16x"%hash
def get_random_ids(size):
return [get_random_id() for i in range(size)]
def get_random_id_new_user():
hash = random.getrandbits(64)
return "new_%16x"%hash
def get_random_new_user_ids(size):
return [get_random_id_new_user() for i in range(size)]
def getActProbSetOldUsers(df_, users):
"""
df: dataframe of simulation outputs for a particular infoid
users: list of users to get probability from
return:
user_dict: list of old users keyed per topic
prob_dict: list of probabilities based on act. level keyed per topic
"""
df = df_.copy()
### Remove new users in cascade ouputs from probability
df = df.loc[df["nodeUserID"].isin(users)].reset_index(drop=True)
### Count the number of total activities per topic
total_acts = df.groupby("informationID")["nodeID"].nunique().reset_index(name="total_count")
### Count old user activities per topic
df_act = df.groupby(["informationID", "nodeUserID"])["nodeID"].nunique().reset_index(name="count")
df_act = pd.merge(df_act, total_acts, on="informationID", how="left")
df_act["prob"] = df_act["count"]/df_act["total_count"]
df_act = df_act.sort_values(["informationID", "prob"], ascending=[True, False]).reset_index(drop=True)
user_dict = df_act.groupby("informationID")["nodeUserID"].apply(list)
user_dict = dict(zip(user_dict.index, user_dict.values))
prob_dict = df_act.groupby("informationID")["prob"].apply(list)
prob_dict = dict(zip(prob_dict.index, prob_dict.values))
return user_dict, prob_dict
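# Example (toy data): with
# sim = pd.DataFrame({'informationID': ['topicA'] * 4,
# 'nodeUserID': ['u1', 'u1', 'u2', 'new_x'],
# 'nodeID': ['n1', 'n2', 'n3', 'n4']})
# getActProbSetOldUsers(sim, ['u1', 'u2']) returns approximately
# ({'topicA': ['u1', 'u2']}, {'topicA': [0.667, 0.333]}),
# since u1 authored 2 of the 3 posts made by old users and u2 authored 1.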
"""End of Helper Functions"""
"""User Replacement Helper Functions"""
def newuser_replacement(df_, df_nusers_, infoid, platform="", seedType="", responseType="", tmp_path="", conflict_path=""):
"""
df_: cascade output file
df_nusers_: dataframe for predicted values
"""
conflict_dict = {}
### Global variables to hold df outputs
concat_to_finaldf = []
df = df_.copy()
df_nusers = df_nusers_.copy()
n = infoid
### Get simulation periods
periods = sorted(list(set(df_nusers["nodeTime"])))
### Get records for platform
df = df.query("platform==@platform").reset_index(drop=True)
### Get new user predictions for platform and informationID
df_nusers = df_nusers.query("platform==@platform and informationID==@n").reset_index(drop=True)
df_nusers = df_nusers.set_index("nodeTime")
### Extract seeds records for informationID
df_seeds = df.query("actionType==@seedType and informationID==@n").reset_index(drop=True)
### Extract response records for informationID
df_responses = df.query("actionType==@responseType and informationID==@n").reset_index(drop=True)
### Iterate in a timely-based manner
for period in periods:
### Obtain predicted number of new users at particular period
num_nu = int(df_nusers.loc[period]["new_users"])
### Obtain records pertain to new users already in cascade responses outputs
df_nusers_cas = df_responses.query("nodeTime==@period").reset_index(drop=True)
df_nusers_cas = df_nusers_cas.loc[df_nusers_cas["nodeUserID"].str.match(r'^new_')==True].reset_index(drop=True)
### Get list of new users in cascade responses
list_nu_cas = list(df_nusers_cas["nodeUserID"].unique())
### Get number of new users in cascade responses
num_nu_cas = int(df_nusers_cas["nodeUserID"].nunique())
### Obtain records in cascade responses without new users (i.e., only old records)
df_ousers_cas = df_responses.query("nodeTime==@period").reset_index(drop=True)
df_ousers_cas = df_ousers_cas.loc[~df_ousers_cas["nodeUserID"].isin(list_nu_cas)].reset_index(drop=True)
### Get difference between predicted new users and new users already in system
diff_nusers = num_nu - num_nu_cas
if diff_nusers == 0: ### There are no conflicts
print("InfoID: {0}, Predicted new users and new users already in the system are equal...".format(n))
### Append again all records in response cascades
concat_to_finaldf.append(df_ousers_cas)
concat_to_finaldf.append(df_nusers_cas)
elif diff_nusers < 0: ### There are more new users in the system than predicted
print("InfoID: {0}, Predicted new users is LESS than new users already in the system...".format(n))
key = "Prediction < Cur New Users"
conflict_dict.setdefault(key, 0)
conflict_dict[key] += 1
### Number of users we need to replace with old identity
num_replace = int(abs(diff_nusers))
### Rank new users cascade outputs by cascade size on a particular time
rank_cas = df_nusers_cas.groupby(["parentUserID", "parentID", "rootUserID", "rootID"])["nodeID"].nunique().reset_index(name="cascade_size")
df_nusers_cas = pd.merge(df_nusers_cas, rank_cas, on=["parentUserID", "parentID", "rootUserID", "rootID"], how="left")
### Replace new users with old user identities from smaller cascades first
df_nusers_cas = df_nusers_cas.sort_values("cascade_size", ascending=True).reset_index(drop=True)
df_nusers_cas.loc[0:num_replace-1, "nodeUserID"] = "old_" + df_nusers_cas["nodeUserID"]
### Drop cascade size attribute
df_nusers_cas = df_nusers_cas.drop(columns=["cascade_size"])
### Append again all records in response cascades
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ousers_cas)
else: ### There are more new users predicted than there are in the system, so we need to add more
print("InfoID: {0}, Predicted new users is GREATER than new users already in the system...".format(n))
key = "Prediction < Cur New Users"
conflict_dict.setdefault(key, 0)
conflict_dict[key] += 1
num_add = int(abs(diff_nusers))
### Retrieve most recent cascades
recent_cas = df_responses.query("nodeTime<=@period").reset_index(drop=True)
### If there are previous records, we can attach new users to previous cascades
if len(recent_cas)!=0: ### Previous response records exist; attach the extra new users to them
print("InfoID: {0}, Attaching extra new users to the most recent cascades...".format(n))
key = "Only Prev Cascade Output Records"
conflict_dict.setdefault(key, 0)
conflict_dict[key] += 1
### Add completely new records to most recent and larger cascade
# rank cascades and retrieve parent and root information
rank_cas = recent_cas.groupby(["parentUserID", "parentID", "rootUserID", "rootID"])["nodeID"].nunique().reset_index(name="cascade_size")
recent_cas = pd.merge(recent_cas, rank_cas, on=["parentUserID", "parentID", "rootUserID", "rootID"], how="left")
recent_cas = recent_cas.sort_values(["nodeTime","cascade_size"], ascending=[False,False]).reset_index(drop=True)
recent_cas = recent_cas.loc[0:0]
if num_add > 1:
recent_cas = recent_cas.append([recent_cas]*(num_add-1),ignore_index=True)
nodeuserids = get_random_new_user_ids(num_add)
new_nodeids = get_random_ids(num_add)
### Change proper columns
recent_cas["nodeUserID"] = nodeuserids
recent_cas["actionType"] = responseType
recent_cas["nodeID"] = new_nodeids
recent_cas["nodeTime"] = period
recent_cas = recent_cas.drop(columns=["cascade_size"])
concat_to_finaldf.append(recent_cas)
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ousers_cas)
else: ### There are no previous records, so we need to add completely new records
print("InfoID: {0},{1}, Cascade Outputs have no records at all...".format(n, period))
key = "No Cascade Output Records"
conflict_dict.setdefault(key, 0)
conflict_dict[key] += 1
dict_records = dict()
new_nodeids = get_random_ids(num_add)
new_rootids = new_nodeids
new_parentids = new_nodeids
nodeuserids = get_random_new_user_ids(num_add)
rootuserids = nodeuserids
parentuserids = nodeuserids
dict_records['rootID'] = new_rootids
dict_records['parentID'] = new_parentids
dict_records['nodeID'] = new_nodeids
dict_records['nodeUserID'] = nodeuserids
dict_records['parentUserID']=parentuserids
dict_records['rootUserID'] = rootuserids
dict_records['informationID'] = [n]*num_add
dict_records['platform'] = [platform]*num_add
dict_records['nodeTime'] = [period]*num_add
dict_records['actionType'] = [responseType]*num_add
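### NOTE: `columns` below is assumed to be a module-level list fixing the output column
### order; it is not defined within this snippet.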
sample_records = pd.DataFrame(dict_records, columns=columns)
concat_to_finaldf.append(sample_records)
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ousers_cas)
final_df = pd.concat(concat_to_finaldf, ignore_index=True, sort=True)
final_df = pd.concat([df_seeds, final_df], ignore_index=True, sort=True)
final_df = final_df.sort_values('nodeTime').reset_index(drop=True)
filename = n.replace('/', '-')
final_df.to_pickle(tmp_path+'_'+filename+'_'+platform+'.pkl.gz')
if conflict_path != "":
with open(conflict_path+filename+'_newuser.pkl.gz', "wb") as f:
pickle.dump(conflict_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
del final_df
del conflict_dict
def olduser_replacement(df_, df_ousers_, infoid, platform="", seedType="", responseType="", tmp_path="", conflict_path=""):
"""
df_: simulation cascade output file
df_ousers_: dataframe with old users predictions
"""
conflict_dict = {}
df = df_.copy()
df_ousers = df_ousers_.copy()
n = infoid
### Global variables to hold df outputs
concat_to_finaldf = []
### Get simulation periods
periods = sorted(list(set(df_ousers["nodeTime"])))
### Get records for platform
df = df.query("platform==@platform and informationID==@n").reset_index(drop=True)
### Get new user predictions for platform and informationID
df_ousers = df_ousers.query("platform==@platform and informationID==@n").reset_index(drop=True)
df_ousers = df_ousers.set_index("nodeTime")
for period in periods:
### Extract all new users records from df in particular period
df_nusers_cas = df.query("nodeTime==@period").reset_index(drop=True)
df_nusers_cas = df_nusers_cas.loc[df_nusers_cas["nodeUserID"].str.match(r'^new_')==True].reset_index(drop=True)
### Extract all old users records from df in particular period
df_ousers_cas = df.query("nodeTime==@period").reset_index(drop=True)
df_ousers_cas = df_ousers_cas.loc[df_ousers_cas["nodeUserID"].str.match(r'^new_')==False].reset_index(drop=True)
### Extract seeds from old users
df_ou_seeds = df_ousers_cas.query("actionType==@seedType").reset_index(drop=True)
### Extract responses from old users
df_ou_responses = df_ousers_cas.query("actionType==@responseType").reset_index(drop=True)
### Obtain predicted number of new users at particular period
num_ou = int(df_ousers.loc[period]["old_users"])
### Check number of old users in seeds already in system
num_ou_seeds = int(df_ou_seeds["nodeUserID"].nunique())
### Check number of old users in responses
num_ou_reponses = int(df_ou_responses["nodeUserID"].nunique())
### Difference between predicted old users and old users in seeds
diff_n = num_ou - num_ou_seeds
if diff_n == 0: ### There are enough oldies in seeds already
print("InfoID: {0},{1}, There are enough old users in seeds...".format(n, period))
k = "Old users in seed equal to predictions"
conflict_dict.setdefault(k, 0)
conflict_dict[k] +=1
### Replace all oldies in responses with seed users based on activity probability
list_ou_seeds = list(df_ou_seeds["nodeUserID"].unique())
user_dict, prob_dict = getActProbSetOldUsers(df, list_ou_seeds)
users_list = user_dict[n]
prob_list = prob_dict[n]
### Get list of users we need to replace
list_ou_responses = list(df_ou_responses["nodeUserID"].unique())
### Draw with replacement
new_user_ids = random.choices(users_list, weights=prob_list, k=len(list_ou_responses))
### Map old users in responses to new old identities
new_user_ids_map = dict(zip(list_ou_responses, new_user_ids))
df_ou_responses["nodeUserID"] = df_ou_responses["nodeUserID"].map(new_user_ids_map).fillna(df_ou_responses['nodeUserID'])
df_nusers_cas["parentUserID"] = df_nusers_cas["parentUserID"].map(new_user_ids_map).fillna(df_nusers_cas["parentUserID"])
df_nusers_cas["rootUserID"] = df_nusers_cas["rootUserID"].map(new_user_ids_map).fillna(df_nusers_cas["rootUserID"])
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ou_seeds)
concat_to_finaldf.append(df_ou_responses)
elif diff_n < 0: ### There are too many oldies in seeds, we need to trim and replace all user identities for responses
print("InfoID: {0},{1}, Need to TRIM old users in seeds...".format(n, period))
k = "Old users in seed > than predictions"
conflict_dict.setdefault(k, 0)
conflict_dict[k] +=1
### Replace those users with low cascade activity
list_ou_seeds = list(df_ou_seeds["nodeUserID"].unique())
rank_seed_users = df.query("nodeTime==@period").reset_index(drop=True)
### Obtain cascade size for each seed user
rank_seed_users =rank_seed_users.loc[rank_seed_users["rootUserID"].isin(list_ou_seeds)].reset_index(drop=True)
rank_seed_users=rank_seed_users.groupby("rootUserID")["nodeID"].nunique().reset_index(name="size")
rank_seed_users = rank_seed_users.sort_values("size", ascending=False).reset_index(drop=True)
### Pick the users in tail to replace
replace_users = list(rank_seed_users.tail(int(abs(diff_n)))["rootUserID"])
keep_users = list(rank_seed_users.loc[~rank_seed_users["rootUserID"].isin(replace_users)]["rootUserID"])
### Get activity probability of these users to keep
user_dict, prob_dict = getActProbSetOldUsers(df, keep_users)
users_list = user_dict[n]
prob_list = prob_dict[n]
### Get new ids for users to replace
new_seeds_ids = np.random.choice(users_list, size=int(abs(diff_n)), replace=True, p=prob_list)
### Get mapping
new_user_ids_map = dict(zip(replace_users, new_seeds_ids))
df_ou_seeds["nodeUserID"] = df_ou_seeds["nodeUserID"].map(new_user_ids_map).fillna(df_ou_seeds['nodeUserID'])
df_ou_seeds["parentUserID"] = df_ou_seeds["parentUserID"].map(new_user_ids_map).fillna(df_ou_seeds['parentUserID'])
df_ou_seeds["rootUserID"] = df_ou_seeds["rootUserID"].map(new_user_ids_map).fillna(df_ou_seeds['rootUserID'])
### Change responses identities
### Get list of users we need to replace
list_ou_responses = list(df_ou_responses["nodeUserID"].unique())
### Draw with replacement
new_user_ids = random.choices(users_list, weights=prob_list, k=len(list_ou_responses))
### Map old users in responses to new old identities
new_user_ids_resp_map = dict(zip(list_ou_responses, new_user_ids))
df_ou_responses["nodeUserID"] = df_ou_responses["nodeUserID"].map(new_user_ids_resp_map).fillna(df_ou_responses['nodeUserID'])
df_ou_responses["parentUserID"] = df_ou_responses["parentUserID"].map(new_user_ids_map).fillna(df_ou_responses['parentUserID'])
df_ou_responses["rootUserID"] = df_ou_responses["rootUserID"].map(new_user_ids_map).fillna(df_ou_responses['rootUserID'])
df_nusers_cas["parentUserID"] = df_nusers_cas["parentUserID"].map(new_user_ids_map).fillna(df_nusers_cas["parentUserID"])
df_nusers_cas["rootUserID"] = df_nusers_cas["rootUserID"].map(new_user_ids_map).fillna(df_nusers_cas["rootUserID"])
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ou_responses)
concat_to_finaldf.append(df_ou_seeds)
else: ### There are more old users predicted than there are old seed users
print("InfoID: {0},{1}, There are more old users predicted than seeds...".format(n, period))
k = "Old users in seed < than predictions"
conflict_dict.setdefault(k, 0)
conflict_dict[k] +=1
### Check difference between old users predicted vs. old users in responses
diff_m = diff_n - num_ou_reponses
if diff_m == 0: ### Old users in cascade outputs and predictions match
print("InfoID: {0},{1}, There are enough old users in responses...".format(n, period))
k = "Old users in responses equal to predictions"
conflict_dict.setdefault(k, 0)
conflict_dict[k] +=1
### Only need to remove those users with old_ tag in responses if any
old_tag_users_df = df_ou_responses.loc[df_ou_responses["nodeUserID"].str.match(r'^old_')==True].reset_index(drop=True)
if len(old_tag_users_df) > 0:
old_tag_users = list(old_tag_users_df["nodeUserID"].unique())
### Get activity probability of all old users not already in outputs
users = set(df.loc[(df["nodeUserID"].str.match(r'^old_')==False)&(df["nodeUserID"].str.match(r'^new_')==False)]["nodeUserID"].unique())
users = users - set(df_ousers_cas["nodeUserID"].unique())
user_dict, prob_dict = getActProbSetOldUsers(df, users)
users_list = user_dict[n]
prob_list = prob_dict[n]
### Get old users without replacement
new_user_ids = np.random.choice(users_list, size=len(old_tag_users), replace=False, p=prob_list)
new_user_ids_map = dict(zip(old_tag_users, new_user_ids))
df_ou_responses["nodeUserID"] = df_ou_responses["nodeUserID"].map(new_user_ids_map).fillna(df_ou_responses["nodeUserID"])
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ou_seeds)
concat_to_finaldf.append(df_ou_responses)
elif diff_m < 0: ### We need to reduce responses since there are more old users than predicted
print("InfoID: {0},{1}, There are more old users in system than predicted in responses...".format(n, period))
k = "Old users in responses > than predictions"
conflict_dict.setdefault(k, 0)
conflict_dict[k] +=1
### Replace all old users in responses with new old user identities
old_users_to_replace = list(df_ou_responses["nodeUserID"].unique())
### Take out users in seeds from pool
users = set(df.loc[(df["nodeUserID"].str.match(r'^old_')==False)&(df["nodeUserID"].str.match(r'^new_')==False)]["nodeUserID"].unique())
users = users - set(df_ou_seeds["nodeUserID"].unique())
user_dict, prob_dict = getActProbSetOldUsers(df, users)
users_list = user_dict[n]
prob_list = prob_dict[n]
### Get old users without replacement (only the amount of users we need from predictions)
new_user_ids = np.random.choice(users_list, size=int(diff_n), replace=False, p=prob_list)
### Now assign to all response user one of this old users
new_user_ids = np.random.choice(new_user_ids, size=len(old_users_to_replace), replace=True)
new_user_ids_map = dict(zip(old_users_to_replace, new_user_ids))
df_ou_responses["nodeUserID"] = df_ou_responses["nodeUserID"].map(new_user_ids_map).fillna(df_ou_responses["nodeUserID"])
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ou_seeds)
concat_to_finaldf.append(df_ou_responses)
else: ### There are more old users predicted than in current responses
print("InfoID: {0},{1}, There are more old users predicted than in system...".format(n, period))
k = "Old users in responses < than predictions"
conflict_dict.setdefault(k, 0)
conflict_dict[k] +=1
### First make sure to assign old_user tags with old_users identities if any
old_tag_users_df = df_ou_responses.loc[df_ou_responses["nodeUserID"].str.match(r'^old_')==True].reset_index(drop=True)
old_tag_users = list(old_tag_users_df["nodeUserID"].unique())
### Get activity probability of all old users not already in outputs
users = set(df.loc[(df["nodeUserID"].str.match(r'^old_')==False)&(df["nodeUserID"].str.match(r'^new_')==False)]["nodeUserID"].unique())
users = users - set(df_ousers_cas["nodeUserID"].unique())
if len(old_tag_users_df) > 0:
user_dict, prob_dict = getActProbSetOldUsers(df, users)
users_list = user_dict[n]
prob_list = prob_dict[n]
### Get old users without replacement
new_user_ids = np.random.choice(users_list, size=len(old_tag_users), replace=False, p=prob_list)
new_user_ids_map = dict(zip(old_tag_users, new_user_ids))
df_ou_responses["nodeUserID"] = df_ou_responses["nodeUserID"].map(new_user_ids_map).fillna(df_ou_responses["nodeUserID"])
### Now introduce the remaining old users needed to most recent largest cascade (ignore those newly introduced old users)
users = users - set(df_ou_responses["nodeUserID"])
user_dict, prob_dict = getActProbSetOldUsers(df, users)
users_list = user_dict[n]
prob_list = prob_dict[n]
new_old_user_ids = np.random.choice(users_list, size=int(diff_m), replace=False, p=prob_list)
### Retrieve most recent cascades
recent_cas = df.query("nodeTime<=@period").reset_index(drop=True)
if len(recent_cas) != 0: ### Attach old users to largest cascade
rank_cas = recent_cas.groupby(["parentUserID", "parentID", "rootUserID", "rootID"])["nodeID"].nunique().reset_index(name="cascade_size")
recent_cas = pd.merge(recent_cas, rank_cas, on=["parentUserID", "parentID", "rootUserID", "rootID"], how="left")
recent_cas = recent_cas.sort_values(["nodeTime","cascade_size"], ascending=[False,False]).reset_index(drop=True)
recent_cas = recent_cas.loc[0:0]
if diff_m > 1:
recent_cas = recent_cas.append([recent_cas]*(diff_m-1),ignore_index=True)
new_nodeids = get_random_ids(diff_m)
### Change proper columns
recent_cas["nodeUserID"] = new_old_user_ids
recent_cas["actionType"] = responseType
recent_cas["nodeID"] = new_nodeids
recent_cas["nodeTime"] = period
recent_cas = recent_cas.drop(columns=["cascade_size"])
concat_to_finaldf.append(recent_cas)
concat_to_finaldf.append(df_ou_responses)
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ou_seeds)
else: ### Introduce completely new records
dict_records = dict()
new_nodeids = get_random_ids(diff_m)
new_rootids = new_nodeids
new_parentids = new_nodeids
nodeuserids = new_old_user_ids
rootuserids = nodeuserids
parentuserids = nodeuserids
dict_records['rootID'] = new_rootids
dict_records['parentID'] = new_parentids
dict_records['nodeID'] = new_nodeids
dict_records['nodeUserID'] = nodeuserids
dict_records['parentUserID']=parentuserids
dict_records['rootUserID'] = rootuserids
dict_records['informationID'] = [n]*diff_m
dict_records['platform'] = [platform]*diff_m
dict_records['nodeTime'] = [period]*diff_m
dict_records['actionType'] = [responseType]*diff_m
sample_records = pd.DataFrame(dict_records, columns=columns)
concat_to_finaldf.append(sample_records)
concat_to_finaldf.append(df_nusers_cas)
concat_to_finaldf.append(df_ou_responses)
concat_to_finaldf.append(df_ou_seeds)
final_df = pd.concat(concat_to_finaldf, ignore_index=True, sort=True)
final_df = final_df.sort_values('nodeTime').reset_index(drop=True)
filename = n.replace('/', '-')
final_df.to_pickle(tmp_path+'_'+filename+'_'+platform+'.pkl.gz')
if conflict_path != "":
with open(conflict_path+filename+'_olduser.pkl.gz', "wb") as f:
pickle.dump(conflict_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
del final_df
del conflict_dict
"""End of user replacement helper functions"""
def newuser_replacement_v2(df_, df_nusers_, infoid, platform="", seedType="", responseType="", tmp_path="", conflict_path=""):
"""
df_: cascade output file
df_nusers_: dataframe for predicted values
"""
conflict_dict = {}
### Global variables to hold df outputs
concat_to_finaldf = []
df = df_.copy()
df_nusers = df_nusers_.copy()
n = infoid
### Get simulation periods
periods = sorted(list(set(df_nusers["nodeTime"])))
### Get records for platform
df = df.query("platform==@platform and informationID==@n").reset_index(drop=True)
### Get new user predictions for platform and informationID
df_nusers = df_nusers.query("platform==@platform and informationID==@n").reset_index(drop=True)
df_nusers = df_nusers.set_index("nodeTime")
### Extract seeds records for informationID
df_seeds = df.query("actionType==@seedType").reset_index(drop=True)
### Extract response records for informationID
df_responses = df.query("actionType==@responseType").reset_index(drop=True)
### Iterate in a timely-based manner
for period in periods:
### Get seeds at this period
df_seeds_cas = df_seeds.query("nodeTime==@period").reset_index(drop=True)
### Get responses at this period
df_responses_cas = df_responses.query("nodeTime==@period").reset_index(drop=True)
### Obtain predicted number of new users at particular period
num_nu = int(df_nusers.loc[period]["new_users"])
### No new users predicted
if num_nu == 0:
print("InfoID: {0},{1}, No new users predicted...".format(n, period))
concat_to_finaldf.append(df_seeds_cas)
concat_to_finaldf.append(df_responses_cas)
continue
### Conflict if number of new users predicted exceeds responses on this day
if num_nu > len(df_responses_cas):
print("InfoID: {0},{1}, New Users predicted is greater than number of records...".format(n, period))
### 1. replace all users in responses with new identities
### 2. Introduce new users proportionally to high in-degree users in previous cascades, if any
### 3. Introduce completely new users and records
new_userids = get_random_new_user_ids(int(len(df_responses_cas)))
df_responses_cas["nodeUserID"] = new_userids
### Difference
diff_nu = num_nu - len(df_responses_cas)
### Introduce new users
df_prev = df_responses.query("nodeTime<=@period").reset_index(drop=True)
df_add = addNewUsers(df_prev, diff_nu, n, period,responseType, platform)
### Concat to final df
concat_to_finaldf.append(df_seeds_cas)
concat_to_finaldf.append(df_responses_cas)
concat_to_finaldf.append(df_add)
### Conflict, there are enough responses to introduce new users by replacing old users
elif num_nu <= len(df_responses_cas):
print("InfoID: {0},{1}, New Users predicted is less than number of records...".format(n, period))
n_users = int(df["nodeUserID"].nunique())
in_deg_parent_df = df_responses_cas.groupby("parentUserID")["nodeUserID"].nunique().reset_index(name="in_deg")
in_deg_parent_df = dict(zip(in_deg_parent_df["parentUserID"], in_deg_parent_df["in_deg"]))
# user_out_degree_df = df_responses_cas.groupby("nodeUserID")["nodeUserID"].nunique().reset_index(name="out_deg")
# user_out_degree_df = dict(zip(user_out_degree_df["nodeUserID"], user_out_degree_df["out_deg"]))
df_responses_cas["in_deg"] = df_responses_cas["parentUserID"].map(in_deg_parent_df)
# df_responses_cas["out_deg"] = df_responses_cas["nodeUserID"].map(user_out_degree_df)
### Sort by parentUser in-degree and then replace old with new users
df_responses_cas=df_responses_cas.sort_values("in_deg", ascending=False).reset_index(drop=True)
new_userids = get_random_new_user_ids(num_nu)
df_responses_cas.loc[0:num_nu-1, "nodeUserID"] = new_userids
df_responses_cas=df_responses_cas.drop(columns=["in_deg"])
### concat to final df
concat_to_finaldf.append(df_seeds_cas)
concat_to_finaldf.append(df_responses_cas)
final_df = | pd.concat(concat_to_finaldf, ignore_index=True, sort=True) | pandas.concat |
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
#-------------------------------------------------------------------------------------------------------------------------------
# By <NAME> (July 2018)
#
# Generate input files
#
# Dataset: Pseudomonas aeruginosa gene expression compendium referenced in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5069748/
#
# Use map_file to group samples into phenotype groups (condition A and B) based on experimental design annotations
# Example: control vs treatment with antibiotics
#
# Then group samples into training and test sets
#
# Generate offset vector using gene expression data in the original space (train_offset_original):
# average gene expression for condition A - average gene expression for condition B using all genes/dimensions
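#
# A minimal sketch of that offset computation (assuming train_A and train_B are
# samples-by-genes DataFrames holding the condition A and condition B training samples):
# train_offset_original = train_A.mean(axis=0) - train_B.mean(axis=0)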
#-------------------------------------------------------------------------------------------------------------------------------
import os
import pandas as pd
import numpy as np
from scipy.stats import variation
import seaborn as sns
import matplotlib.pyplot as plt
randomState = 123
from numpy.random import seed
seed(randomState)
# In[3]:
# load arguments
data_file = os.path.join(os.path.dirname(os.getcwd()), "data", "all-pseudomonas-gene-normalized.zip") # repo file is zipped
map_file = os.path.join(os.path.dirname(os.getcwd()), "metadata", "mapping_PA1673.txt")
# output
train_max_file = os.path.join(os.path.dirname(os.getcwd()), "data", "PA1673", "train_maxExp.txt")
train_min_file = os.path.join(os.path.dirname(os.getcwd()), "data", "PA1673", "train_minExp.txt")
train_input_file = os.path.join(os.path.dirname(os.getcwd()), "data", "PA1673", "train_model_input.txt.xz")
original_offset_file = os.path.join(os.path.dirname(os.getcwd()), "data", "PA1673", "train_offset_original.txt")
# In[4]:
# read in data
data = pd.read_table(data_file, header = 0, sep = '\t', index_col = 0, compression='zip')
X = data.transpose()
X.head(5)
# In[5]:
# read in metadata file containing grouping of each sample into training/test and phenotypic group
grp = pd.read_table(map_file, header=0, sep='\t', index_col=None)
grp
# In[6]:
# Group samples into training and test sets
# Training: min and max levels of O2
# Test: all intermediate levels
maxO2 = | pd.DataFrame() | pandas.DataFrame |
# # Planning
# ## Challenge
# This is an open-ended challenge to find something interesting and useful (with a business case!) from a dataset of New York City's restaurant health inspections. The inspections are performed by the Department of Health and Mental Hygiene (DOHMH). Some suggestions include identifying trends or actionable insights, or providing recommendations. The audience could be restaurant customers, inspectors, or restauranteurs.
# I came up with some questions I was interested in answering:
# 1. What factors contribute to inspection failures?
# 2. Is there any evidence of geographic bias in inspections?
# 3. Is there any evidence of cuisine bias in inspections?
# 4. Is there any evidence of inspection timing affecting results?
# ## Approach
# I cleaned, plotted, and examined the data. Documentation describing the inspection process suggested two possible outcome variables to look into: 1) initial inspection failure and 2) closure after reinspection. I wanted to investigate both, but started with initial inspection failure.
# I investigated both logistic regression and random forest classification models. I chose to focus on the logistic regression results because I wanted to be able to interpret the coefficients and odds ratios. I tuned hyperparameters and evaluated the model using AUC ROC, because it is a good overall summary of model performance, considering all cells of the confusion matrix. A logistic regression model with L2 (ridge) regression and a penalty of 0.1 classifies initial inspection failures with an AUC of 0.932.
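# A minimal sketch of that modelling step (hypothetical names: `X` is the one-hot encoded
# feature matrix and `y` the initial-inspection outcome labels, both constructed further below):
#
#     logreg = LogisticRegression(penalty='l2', C=0.1, max_iter=1000)
#     cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
#     scores = cross_validate(logreg, X, y, cv=cv, scoring='roc_auc')
#     scores['test_score'].mean()  # the ~0.93 AUC quoted above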
# ## Results
# ### 1. What factors contribute to inspection failures?
# Looking at the odds ratios for each of the features in the logistic regression model, here are some of the most important factors affecting initial inspection failure.
# - Features associated with lower odds of passing initial inspection:
# - Violation codes related to the presence of mice, rats, cockroaches, or flies
# - Violation codes related to lack of washing facilities, lack of food safety plan, improper food storage temperature, and lack of a required certificate
# - The borough Queens
# - Many kinds of cuisine, including Bangladeshi, Indian, Moroccan, Asian, Malaysian, Spanish, African, Turkish, Latin, Chinese, Mediterranean, Hawaiian, Egyptian, Thai, etc.
# - The number of violations cited
# - Features associated with higher odds of passing initial inspection:
# - Violation codes with lower stakes issues, such as violation of a recently-introduced ban on styrofoam, improper lighting or ventilation, or reuse of single use items
# - The borough Staten Island
# - Many kinds of cuisine including ice cream, hot dogs, donuts, soups/sandwiches, hamburgers, Continental, cafe/coffee/tea shops, juices/smoothies, Ethiopian, steak, sandwiches, bakeries, bagel/pretzel shops, etc. Many of these seem to be shops that would have less food prep and smaller facilities to maintain, so they make sense.
# - Increasing day of the week
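# A short sketch of how those odds ratios can be read off the fitted model (hypothetical names:
# `logreg` is the fitted LogisticRegression and `feature_names` the matching column names):
#
#     odds_ratios = pd.Series(np.exp(logreg.coef_.ravel()), index=feature_names)
#     odds_ratios.sort_values()  # values below 1 lower the odds of the positive class, values above 1 raise them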
# ### 2. Is there any evidence of geographic bias in inspections?
# Yes, there is some evidence for Queens establishments having lower odds of passing the initial inspection and for Staten Island establishments having higher odds of passing. It's difficult to answer this question without a more sophisticated version of logistic regression to use.
# ### 3. Is there any evidence of cuisine bias in inspections?
# Yes, the cuisine types with the lowest odds of passing the initial inspection include many of the "ethnic" cuisines. Other information is needed to determine if this is a cause or an effect.
# ### 4. Is there any evidence of inspection timing affecting results?
# There might be a slight increase in odds of passing the initial inspection for inspections happening later in the week, but it was slight and of unknown significance. There is no evidence of any effect of the time of year (month) on the odds of passing inspection.
# ## Takeaways
# - Restauranteurs in Queens or those running establishments serving at-risk cuisines (e.g. Bangladeshi, Indian, Moroccan, Malaysian, etc.) should be extra vigilant before inspections.
# - Restauranteurs should pay special attention to the violations most associated with lower odds of passing the inspection, such as presence of vermin, lack of washing facilities, improper food storage temperature, and lack of required certficiations or food safety plans.
# - NYC food inspectors should carefully examine their inspection process to see if it is being affected by bias against certain cuisines.
# - Aspiring restauranteurs could open an ice cream, hot dog, donut, soup & sandwich, or coffee & tea shop to start out with lower odds of failing the initial food saftey inspection.
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import seaborn as sns
from datetime import datetime
from IPython.display import display
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve
from sklearn.model_selection import cross_validate, GridSearchCV, train_test_split, StratifiedKFold
from sklearn.preprocessing import MultiLabelBinarizer, OneHotEncoder
from treeinterpreter import treeinterpreter as ti
sns.set(style="whitegrid", font_scale=1.25)
plt.figure(figsize=(12.8, 9.6), dpi=400)
# -
# +
data_dir = '~/devel/insight-data-challenges/05-nyc-restaurant-inspections/data'
output_dir = '~/devel/insight-data-challenges/05-nyc-restaurant-inspections/output'
# -
# ## Read in and clean the user data
# +
inspections = pd.read_csv(
os.path.join(os.path.expanduser(data_dir), 'DOHMH_New_York_City_Restaurant_Inspection_Results.csv'),
parse_dates=['INSPECTION DATE', 'GRADE DATE', 'RECORD DATE']
)
display(inspections.info())
display(inspections.head(15))
# -
# ### Fix data types
# Find the categorical variables
# +
# Are there any that look categorical based on number of unique values?
values_per_variable = inspections.apply('nunique', 0)
variable_dtypes = inspections.dtypes.apply(lambda x: x.name)
variable_info = pd.DataFrame({'n_categories': values_per_variable,
'dtype': variable_dtypes,
'variable': values_per_variable.index}).reset_index(drop=True)
display(variable_info)
# Convert columns to categorical
cat_threshold = 110 # If n unique values is below this, it's probably categorical
known_cat_cols = [
'ACTION', 'BORO', 'GRADE', 'INSPECTION TYPE', 'CRITICAL FLAG', 'CUISINE DESCRIPTION',
'VIOLATION CODE', 'VIOLATION DESCRIPTION', 'Community Board', 'Council District'
]
variable_info['to_category'] = (variable_info['n_categories'] < cat_threshold)\
& (~variable_info['dtype'].isin(('datetime64[ns]', )))
display(variable_info)
# Are there any known categorical variables missing? Or vice versa?
set(variable_info['variable'].loc[variable_info['to_category']].to_list()) - set(known_cat_cols)
set(known_cat_cols) - set(variable_info['variable'].loc[variable_info['to_category']].to_list())
for v in variable_info['variable'].loc[variable_info['to_category']]:
inspections[v] = inspections[v].astype('category')
display(inspections.info())
variable_info['dtype'] = inspections.dtypes.apply(lambda x: x.name).to_numpy()
# -
# ### Convert zipcode to an int
# +
display(inspections['ZIPCODE'].describe())
display(inspections['ZIPCODE'].isna().sum()) # 5500 NaN values, which is why it's not an int. Leave it for now.
# -
# ### Fix missing value codes
# +
inspections['BORO'] = inspections['BORO'].replace('0', np.NaN)
for v in inspections.select_dtypes(include='category').columns:
print('_' * 20)
print(v)
display(inspections[v].value_counts(dropna=False))
new_establishment_inspection_date = datetime(1900, 1, 1)
inspections['INSPECTION DATE'] = inspections['INSPECTION DATE'].replace(new_establishment_inspection_date, pd.NaT)
for v in inspections.select_dtypes(include='datetime').columns:
print('_' * 20)
print(v)
display(inspections[v].value_counts(dropna=False))
display(inspections.select_dtypes(include='number').describe())
variable_info['n_missing'] = inspections.apply(lambda x: x.isna().sum()).to_numpy()
# -
# ### Make a map from violation code to violation description
# +
# Check if there's more than one description per violation code, to see if it will work to select the first one
display(
inspections[['VIOLATION CODE', 'VIOLATION DESCRIPTION']].groupby(
'VIOLATION CODE').aggregate('nunique')['VIOLATION DESCRIPTION'].value_counts()
)
# -
# There are 15 violation codes without any matching description.
# +
inspections['VIOLATION CODE'].nunique()
violation_descriptions = inspections[['VIOLATION CODE', 'VIOLATION DESCRIPTION']].groupby(
'VIOLATION CODE').aggregate('first')
with pd.option_context('display.max_rows', 200):
display(violation_descriptions)
# -
# ## Add some derived variables
# ### Use documentation instructions to label gradeable/ungradeable inspections
# +
gradeable_inspection_types = (
'Cycle Inspection / Initial Inspection',
'Cycle Inspection / Re-Inspection',
'Pre-Permit (Operational) / Initial Inspection',
'Pre-Permit (Operational)/Re-Inspection',
)
gradeable_actions = (
'Violations were cited in the following area(s).',
'No violations were recorded at the time of this inspection.',
'Establishment Closed by DOHMH.',
)
gradeable_inspection_date_min = datetime(2010, 7, 27)
inspections['INSPECTION TYPE'].isin(gradeable_inspection_types).sum()
inspections['ACTION'].isin(gradeable_actions).sum()
np.sum(inspections['INSPECTION DATE'] >= gradeable_inspection_date_min)
inspections['is_gradeable'] = ((inspections['INSPECTION TYPE'].isin(gradeable_inspection_types))
& (inspections['ACTION'].isin(gradeable_actions))
& (inspections['INSPECTION DATE'] >= gradeable_inspection_date_min)
)
display(inspections['is_gradeable'].value_counts(dropna=False))
# -
# ### Add variables for what kind of inspection it was
# +
inspections['INSPECTION TYPE'].value_counts()
inspections['is_cycle_inspection'] = inspections['INSPECTION TYPE'].str.contains('Cycle')
inspections['is_opening_inspection'] = inspections['INSPECTION TYPE'].str.contains(
'Pre-Permit (Operational)', case=False, regex=False)
inspections['is_initial_inspection'] = inspections['INSPECTION TYPE'].str.contains('Initial')
inspections['is_reinspection'] = inspections['INSPECTION TYPE'].str.contains('Re-Inspection', case=False)
inspections['is_compliance_inspection'] = inspections['INSPECTION TYPE'].str.contains('Compliance')
# -
# ### Add variables for date components
# +
inspections['inspection_year'] = inspections['INSPECTION DATE'].dt.year.astype('category')
inspections['inspection_month'] = inspections['INSPECTION DATE'].dt.month.astype('category')
inspections['inspection_day'] = inspections['INSPECTION DATE'].dt.day
inspections['inspection_dayofyear'] = inspections['INSPECTION DATE'].dt.dayofyear
inspections['inspection_dayofweek'] = inspections['INSPECTION DATE'].dt.dayofweek.astype('category')
inspections['inspection_isweekday'] = inspections['inspection_dayofweek'].isin(range(5))
inspections['inspection_week'] = inspections['INSPECTION DATE'].dt.week.astype('category')
display(inspections.info())
# -
# ## Plot everything
# +
# Try the Pandas built in histogram function, even though it's mediocre
inspections.select_dtypes(exclude='bool').hist(figsize=(20, 15))
plt.show()
# And it fails on boolean columns!
# -
# ### Histograms of the numeric variables
# +
g = sns.FacetGrid(
inspections.select_dtypes(include='number').melt(), col='variable', col_wrap=4,
sharex=False, sharey=False, height=4
)
g.map(plt.hist, 'value', color='steelblue', bins=20)
plt.show()
# -
# ### Barplots of the categorical & boolean variables
# Individual plots for variables with too many categories
# +
cat_col_n_values = inspections.select_dtypes(include='category').apply('nunique', 0)
many_values_cat_vars = cat_col_n_values.loc[cat_col_n_values > 20].index
other_cat_vars = cat_col_n_values.loc[cat_col_n_values <= 20].index
# for v in many_values_cat_vars:
# g = sns.countplot(data=inspections, x=v)
# g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
# plt.tight_layout()
# plt.show()
# The best is really just a sorted table of value counts.
for v in many_values_cat_vars:
print('_' * 20)
print(v)
with pd.option_context('display.max_rows', cat_threshold):
display(inspections[v].value_counts(dropna=False))
# -
# A facet grid for those with fewer categories
# +
# tmp = inspections[other_cat_vars].melt()
# tmp['value_trunc'] = tmp['value'].str.slice(stop=25)
# g = sns.catplot(
# data=tmp, col='variable', col_wrap=3,
# x='value_trunc', kind='count',
# facet_kws={'sharex': False, 'sharey': False},
# margin_titles=False
# )
# for ax in g.axes.flat:
# for label in ax.get_xticklabels():
# label.set_rotation(70)
# plt.show()
# I can't get the sharex/sharey arguments to work properly. God do I miss ggplot!
for v in other_cat_vars:
g = sns.countplot(data=inspections, x=v)
g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
plt.tight_layout()
plt.show()
# -
# ### Histograms of the datetime variables
# +
g = sns.FacetGrid(
inspections.select_dtypes(include='datetime').melt(), col='variable', col_wrap=3,
sharex=False, sharey=False, height=4
)
g.map(plt.hist, 'value', color='steelblue', bins=20)
plt.show()
# -
# ### Head and tail of the object variables
# +
for v in inspections.select_dtypes(include='object').columns:
print('_' * 20)
print(v)
display(inspections[v].head(15))
display(inspections[v].tail(15))
# -
# ## Filter to most important core inspection types
# +
core_inspections = inspections.loc[(inspections['is_cycle_inspection'] | inspections['is_opening_inspection'])
& (inspections['is_initial_inspection'] | inspections['is_reinspection']), ]
# Make sure it's sorted by ascending inspection date
core_inspections = core_inspections.sort_values('INSPECTION DATE', ascending=True)
# -
# ## Summary of inspections
# ### Summary by business
# +
business_summary = core_inspections.groupby('CAMIS').aggregate(
n_rows=('CAMIS', 'count'),
n_inspections=('INSPECTION DATE', 'nunique'),
avg_inspection_frequency=('INSPECTION DATE', lambda x: np.mean(np.diff(x.unique())).astype('timedelta64[D]'))
)
business_summary['avg_inspection_frequency'] = business_summary['avg_inspection_frequency'].dt.days
display(business_summary.info())
g = sns.FacetGrid(
business_summary.melt(), col='variable',
sharex=False, sharey=False, height=4
)
g.map(plt.hist, 'value', color='steelblue', bins=20)
plt.show()
# -
# ### Summary of initial inspection failures
# +
passing_grades = ('A', )
nonpassing_grades = ('B', 'C', )
pending_grades = ('N', 'Z', 'P', )
# Since there are NaNs in both gradeable and ungradeable, I'm going to infer that GRADE of NaN means non-passing
core_inspections.loc[core_inspections['is_gradeable'], 'GRADE'].value_counts(dropna=False)
core_inspections.loc[~core_inspections['is_gradeable'], 'GRADE'].value_counts(dropna=False)
# When using categorical variables in a groupby, Pandas will by default plan to have NaN values for each empty
# group as well, and that led to an array allocation error here. Using observed=True fixed it.
initial_inspections = core_inspections.loc[core_inspections['is_initial_inspection'], ].groupby(
['CAMIS', 'BORO', 'INSPECTION DATE', 'inspection_month', 'inspection_dayofweek',
'CUISINE DESCRIPTION', 'INSPECTION TYPE'], observed=True).aggregate(
passed=('GRADE', lambda x: x.iloc[0] == 'A'),
grade=('GRADE', 'first'),
has_critical_flag=('CRITICAL FLAG', lambda x: np.any(x == 'Y')),
n_violations=('VIOLATION CODE', lambda x: x.loc[~x.isna()].nunique()),
violation_codes=('VIOLATION CODE', lambda x: x.loc[~x.isna()].to_list())
).reset_index()
for v in ['passed', 'grade', 'has_critical_flag', 'n_violations']:
g = sns.countplot(data=initial_inspections, x=v)
g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
plt.tight_layout()
plt.show()
# Add one-hot encoding for each violation code, BORO, and CUISINE DESCRIPTION
initial_inspections['violation_codes']
mlb = MultiLabelBinarizer()
expanded_violation_codes = mlb.fit_transform(initial_inspections['violation_codes'])
initial_inspections_violation_code_vars = 'violation_' + mlb.classes_
expanded_violation_codes = pd.DataFrame(expanded_violation_codes, columns=initial_inspections_violation_code_vars)
initial_inspections = pd.concat([initial_inspections, expanded_violation_codes], axis=1)
ohe = OneHotEncoder(sparse=False)
boro_encoding = ohe.fit_transform(initial_inspections['BORO'].to_numpy().reshape(-1, 1))
initial_inspections_boro_vars = 'BORO_' + ohe.categories_[0]
boro_encoding = pd.DataFrame(boro_encoding, columns=initial_inspections_boro_vars)
initial_inspections = | pd.concat([initial_inspections, boro_encoding], axis=1) | pandas.concat |
from bs4 import BeautifulSoup
from django.utils.text import slugify
import requests
import random
import csv
import pandas as pd
import os,time
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.142 Safari/535.19',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:8.0.1) Gecko/20100101 Firefox/8.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.151 Safari/535.19',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0'
]
def get_requests_single_hero(url, hero):
headers={'User-Agent':user_agents[random.randint(0,8)]}
r = requests.get(url, headers=headers)
html = r.text.encode('utf8')
soup = BeautifulSoup(html, 'lxml')
ex = soup.find('table', attrs={'class':"sortable"})
table_rows = ex.findAll('tr')
final_csv_row = []
for table_row in table_rows[1:]:
row = table_row.findAll('td')
hero_name = row[1].text
win_rate = row[2]['data-value']
final_csv_row.append(dict(name=hero_name, win_rate=win_rate))
final_csv_row.append(dict(name=str(hero,'utf-8'), win_rate=0))
final_csv_row = sorted(final_csv_row, key=lambda hero: hero['name'])
returnable_list = []
for data in final_csv_row:
returnable_list.append(data['win_rate'])
return returnable_list
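# Example (hypothetical hero): get_requests_single_hero(
# "http://www.dotabuff.com/heroes/axe/matchups?date=week", b"Axe")
# returns one win-rate value per hero, sorted alphabetically by hero name,
# with a placeholder win rate of 0 for the hero itself.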
def readcsv_and_update():
df = pd.DataFrame()
#getting herolist
with open('hero_list.txt', 'rb') as file:
for thing in file:
try:
time.sleep(2)
name = thing[:len(thing)-1]
final_url = "http://www.dotabuff.com/heroes/" + slugify(name) + "/matchups?date=week"
current_attribute = get_requests_single_hero(final_url, name)
df[name] = 4
df[name] = | pd.Series(current_attribute) | pandas.Series |
import pandas as pd
import FLUCCOplus.utils as utils
import FLUCCOplus.config as config
import FLUCCOplus.web as web
import FLUCCOplus.transform as traffo
from pathlib import Path
PEEXCEL_PATH = config.DATA_PROCESSED / Path("peexcel_normalized.csv")
WEB_PATH = config.DATA_PROCESSED / Path("WEB_normalized.csv")
SPOT_PATH = config.DATA_PROCESSED / Path("DrexelCO2/Signale_CO2mix_SpotMarket.xlsx")
MANUTZ_PATH = config.DATA_PROCESSED / Path("MANutz/maxnutz_normalized.csv")
def load_peexcel():
return pd.read_csv(PEEXCEL_PATH, index_col=0, parse_dates=True)
def load_web(year=None):
df = web.read(WEB_PATH, decimal=",")
if year:
df = df[df.index.year==year]
return df
def load_spotprice(year=None):
import numpy as np
dates = np.arange(f"{year}-01-01", f"{year+1}-01-01 00:00", dtype="datetime64[h]")
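# e.g. year=2019 yields 8760 hourly timestamps (8784 in a leap year); the Excel sheet is
# assumed to have exactly one row per hour so the index assignment below lines up.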
df = pd.read_excel(SPOT_PATH, index_col=(0))
df.index = dates
return df
def load_pypsa_avg(year=None):
utils.maxnutz()
import numpy as np
dates = np.arange(f"{year}-01-01", f"{year+1}-01-01 00:00", dtype="datetime64[h]")
df = pd.read_csv(MANUTZ_PATH, sep=";", decimal=",", index_col=(0))
df.index = dates
return df
def signal_properties(df, separator:float):
disc = traffo.discretize(df=df, separator=separator)
sig = pd.DataFrame(disc.where(disc > 0))
anzahl = sig.count()
df_step = | pd.DataFrame() | pandas.DataFrame |
# © Copyright 2021, PRISMA’s Authors
import ipywidgets as wdg
import pandas as pd
import functools
from .views.fitpeaks import ViewFitPeaks
from .views.load import ViewLoad
from .views.plots import PlotPeaks
import prisma.parsers
import prisma.fitpeaks
AVAILABLE_PARSERS = {'Single .csv': {'multiple files': False,'file format':'.csv'},
'Single .txt (Bruker)': {'multiple files': False,'file format':'.txt'},
'Multiple .txt': {'multiple files': True, 'file format':'*'}}
class PeakFitting:
def __init__(self):
self.interface = None
self.subapps = {}
self.spectra = {}
self.__load_subapps()
self.__assemble_interface()
self.control_events()
# -------------------- GUI APPEARANCE ------------------
def __load_subapps(self):
self.subapps = {'Load':ViewLoad(AVAILABLE_PARSERS),
'FitPeaks':ViewFitPeaks(),
'Plot Peaks': PlotPeaks(x_label='Raman shift [cm-1]',y_label='Counts [a.u.]',title='Raw')}
def __assemble_interface(self):
plot_box = wdg.HBox([self.subapps['Load'].interface,
self.subapps['Plot Peaks'].interface])
self.interface = wdg.VBox([plot_box,self.subapps['FitPeaks'].interface])
# ----------------- Auxiliary Functions ---------------
def aux_get_payload_from_file_upload(self, upload):
parser_name = self.subapps['Load'].current_parser_name
if AVAILABLE_PARSERS[parser_name]['multiple files']:
payload = {key:value['content'] for key, value in upload.items()}
else:
filename = list(upload.keys())[0]
payload = upload[filename]['content']
return payload, parser_name
def aux_run_available_parser(self, payload, parser):
if parser == 'Single .csv':
return prisma.parsers.single_csv(payload)
elif parser == 'Single .txt (Bruker)':
return prisma.parsers.single_txt_bruker(payload)
elif parser == 'Multiple .txt':
return prisma.parsers.multiple_txt(payload)
else:
raise KeyError('The parser is not defined')
def aux_refresh_plots(self,label):
try:
self.subapps['Plot Peaks'].update_marks(spectrum_original = self.spectra[label]['root'],
spectrum_fit = self.spectra[label]['fitted'])
except KeyError:
self.subapps['Plot Peaks'].update_marks(spectrum_original =self.spectra[label]['root'],
spectrum_fit = None)
def aux_fit_spectrum(self,label):
peakfit_parameters = self.subapps['FitPeaks'].inputs
self.spectra[label]['fitted'] = prisma.fitpeaks.fit_peaks(self.spectra[label]['root'],
peak_bounds = peakfit_parameters['Bounds'],
guess_widths = peakfit_parameters['Widths'],
lineshape = peakfit_parameters['Lineshape'])
self.subapps['FitPeaks'].render_parameter_table(self.spectra[label]['fitted'])
def aux_update_download_payload(self, label):
        individual_dataframes = [pd.DataFrame(self.spectra[label]['root'].counts, columns=['Original'], index=self.spectra[label]['root'].indexes)]
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
import pandas as pd
import pytest
from mars.dataframe import DataFrame, Series, ArrowStringDtype
from mars.tests.core import require_cudf
@pytest.mark.parametrize(
'distinct_opt',
['0'] if sys.platform.lower().startswith('win') else ['0', '1']
)
def test_sort_values_execution(setup, distinct_opt):
os.environ['PSRS_DISTINCT_COL'] = distinct_opt
df = pd.DataFrame(np.random.rand(100, 10), columns=['a' + str(i) for i in range(10)])
# test one chunk
mdf = DataFrame(df)
result = mdf.sort_values('a0').execute().fetch()
expected = df.sort_values('a0')
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a6', 'a7'], ascending=False).execute().fetch()
expected = df.sort_values(['a6', 'a7'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
# test psrs
mdf = DataFrame(df, chunk_size=10)
result = mdf.sort_values('a0').execute().fetch()
expected = df.sort_values('a0')
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a3', 'a4']).execute().fetch()
expected = df.sort_values(['a3', 'a4'])
pd.testing.assert_frame_equal(result, expected)
# test ascending=False
result = mdf.sort_values(['a0', 'a1'], ascending=False).execute().fetch()
expected = df.sort_values(['a0', 'a1'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a7'], ascending=False).execute().fetch()
expected = df.sort_values(['a7'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
# test multiindex
df2 = df.copy(deep=True)
df2.columns = pd.MultiIndex.from_product([list('AB'), list('CDEFG')])
mdf = DataFrame(df2, chunk_size=5)
result = mdf.sort_values([('A', 'C')]).execute().fetch()
expected = df2.sort_values([('A', 'C')])
pd.testing.assert_frame_equal(result, expected)
# test rechunk
mdf = DataFrame(df, chunk_size=3)
result = mdf.sort_values('a0').execute().fetch()
expected = df.sort_values('a0')
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a3', 'a4']).execute().fetch()
expected = df.sort_values(['a3', 'a4'])
pd.testing.assert_frame_equal(result, expected)
# test other types
raw = pd.DataFrame({'a': np.random.rand(10),
'b': np.random.randint(1000, size=10),
'c': np.random.rand(10),
'd': [np.random.bytes(10) for _ in range(10)],
'e': [pd.Timestamp(f'201{i}') for i in range(10)],
'f': [pd.Timedelta(f'{i} days') for i in range(10)]
},)
mdf = DataFrame(raw, chunk_size=3)
for label in raw.columns:
result = mdf.sort_values(label).execute().fetch()
expected = raw.sort_values(label)
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a', 'b', 'e'], ascending=False).execute().fetch()
expected = raw.sort_values(['a', 'b', 'e'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
# test nan
df = pd.DataFrame({
'col1': ['A', 'A', 'B', 'B', 'D', 'C'],
'col2': [2, 1, 9, np.nan, 7, 4],
'col3': [0, 1, 9, 4, 2, 3],
})
mdf = DataFrame(df)
result = mdf.sort_values(['col2']).execute().fetch()
expected = df.sort_values(['col2'])
pd.testing.assert_frame_equal(result, expected)
mdf = DataFrame(df, chunk_size=3)
result = mdf.sort_values(['col2']).execute().fetch()
expected = df.sort_values(['col2'])
pd.testing.assert_frame_equal(result, expected)
# test None (issue #1885)
df = pd.DataFrame(np.random.rand(1000, 10))
df[0][df[0] < 0.5] = 'A'
df[0][df[0] != 'A'] = None
mdf = DataFrame(df)
result = mdf.sort_values([0, 1]).execute().fetch()
expected = df.sort_values([0, 1])
pd.testing.assert_frame_equal(result, expected)
mdf = DataFrame(df, chunk_size=100)
result = mdf.sort_values([0, 1]).execute().fetch()
expected = df.sort_values([0, 1])
pd.testing.assert_frame_equal(result, expected)
# test ignore_index
df = pd.DataFrame(np.random.rand(10, 3), columns=['a' + str(i) for i in range(3)])
mdf = DataFrame(df, chunk_size=3)
result = mdf.sort_values(['a0', 'a1'], ignore_index=True).execute().fetch()
try: # for python3.5
expected = df.sort_values(['a0', 'a1'], ignore_index=True)
except TypeError:
expected = df.sort_values(['a0', 'a1'])
expected.index = pd.RangeIndex(len(expected))
pd.testing.assert_frame_equal(result, expected)
# test inplace
mdf = DataFrame(df)
mdf.sort_values('a0', inplace=True)
result = mdf.execute().fetch()
df.sort_values('a0', inplace=True)
pd.testing.assert_frame_equal(result, df)
# test unknown shape
df = pd.DataFrame({'a': list(range(10)),
'b': np.random.random(10)})
mdf = DataFrame(df, chunk_size=4)
filtered = mdf[mdf['a'] > 2]
result = filtered.sort_values(by='b').execute().fetch()
pd.testing.assert_frame_equal(result, df[df['a'] > 2].sort_values(by='b'))
# test empty dataframe
df = pd.DataFrame({'a': list(range(10)),
'b': np.random.random(10)})
mdf = DataFrame(df, chunk_size=4)
filtered = mdf[mdf['b'] > 100]
result = filtered.sort_values(by='b').execute().fetch()
pd.testing.assert_frame_equal(result, df[df['b'] > 100].sort_values(by='b'))
# test chunks with zero length
df = pd.DataFrame({'a': list(range(10)),
'b': np.random.random(10)})
df.iloc[4:8, 1] = 0
mdf = DataFrame(df, chunk_size=4)
filtered = mdf[mdf['b'] != 0]
result = filtered.sort_values(by='b').execute().fetch()
pd.testing.assert_frame_equal(result, df[df['b'] != 0].sort_values(by='b'))
# test Series.sort_values
raw = pd.Series(np.random.rand(10))
series = Series(raw)
result = series.sort_values().execute().fetch()
expected = raw.sort_values()
pd.testing.assert_series_equal(result, expected)
series = Series(raw, chunk_size=3)
result = series.sort_values().execute().fetch()
expected = raw.sort_values()
pd.testing.assert_series_equal(result, expected)
series = Series(raw, chunk_size=2)
result = series.sort_values(ascending=False).execute().fetch()
expected = raw.sort_values(ascending=False)
pd.testing.assert_series_equal(result, expected)
# test empty series
series = pd.Series(list(range(10)), name='a')
mseries = Series(series, chunk_size=4)
filtered = mseries[mseries > 100]
result = filtered.sort_values().execute().fetch()
pd.testing.assert_series_equal(result, series[series > 100].sort_values())
# test series with None
series = pd.Series(np.arange(1000,))
series[series < 500] = 'A'
series[series != 'A'] = None
mseries = Series(series, chunk_size=100)
result = mseries.sort_values().execute().fetch()
expected = series.sort_values()
pd.testing.assert_series_equal(result.reset_index(drop=True), expected.reset_index(drop=True))
def test_sort_index_execution(setup):
raw = pd.DataFrame(np.random.rand(100, 20), index=np.random.rand(100))
mdf = DataFrame(raw)
result = mdf.sort_index().execute().fetch()
expected = raw.sort_index()
pd.testing.assert_frame_equal(result, expected)
mdf = DataFrame(raw)
mdf.sort_index(inplace=True)
result = mdf.execute().fetch()
expected = raw.sort_index()
pd.testing.assert_frame_equal(result, expected)
mdf = DataFrame(raw, chunk_size=30)
result = mdf.sort_index().execute().fetch()
expected = raw.sort_index()
pd.testing.assert_frame_equal(result, expected)
mdf = DataFrame(raw, chunk_size=20)
result = mdf.sort_index(ascending=False).execute().fetch()
expected = raw.sort_index(ascending=False)
    pd.testing.assert_frame_equal(result, expected)
#!/usr/bin/env python
# coding: utf-8
# In[102]:
get_ipython().system('pip install category_encoders')
get_ipython().system('pip install xgboost')
get_ipython().system('pip install plotly_express')
get_ipython().system('pip install dash')
# In[103]:
import pandas as pd
import plotly_express as px
import numpy as np
df = pd.read_csv('https://raw.githubusercontent.com/GitNick88/GitNick88.github.io/master/insurance.csv')
# Display more columns
pd.set_option('display.max_rows', 500)
print(df.shape)
df.head(25)
# In[104]:
df['smoker'] = df['smoker'].astype(str)
# In[105]:
# Types of data in the df
df.dtypes
# In[106]:
df.describe()
# In[107]:
# Checking for high cardinality columns
df.describe(exclude='number').T.sort_values(by='unique')
# In[108]:
# Train/test split 80/20
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size=0.2)
print(train.shape)
test.shape
# In[109]:
# Train/val split 80/20
from sklearn.model_selection import train_test_split
train, val = train_test_split(train, test_size=0.2)
print(train.shape)
print(val.shape)
test.shape
# In[110]:
from sklearn.linear_model import LinearRegression
target = 'charges'
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]
model = LinearRegression()
print(X_train.shape)
print(X_val.shape)
print(X_test.shape)
print(y_val.shape)
print(y_train.shape)
y_test.shape
# In[111]:
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import category_encoders as ce
from xgboost import XGBRegressor
gb = make_pipeline(
ce.OrdinalEncoder(),
StandardScaler(),
XGBRegressor(n_estimators=200, objective='reg:squarederror', n_jobs=-1)
)
gb.fit(X_train, y_train)
y_pred = gb.predict(X_val)
print('Gradient Boosting R^2', r2_score(y_val, y_pred))
# In[112]:
# BASELINE
guess = df['charges'].mean()
print(f'Just guessing, we would predict that insurance will cost ${guess:,.2f} per customer. This is our model baseline.')
# In[113]:
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae_train = mean_absolute_error(y_train, y_pred)
print(f'Train Error: ${mae_train:.2f}')
# Test Error
y_pred = [guess] * len(y_test)
mae_test = mean_absolute_error(y_test, y_pred)
print(f'Test Error: ${mae_test:.2f}')
print('The train and test error are better than our baseline:')
# In[114]:
# What would be the average cost if I didn't insure anyone who smokes?
df_no_smoke= df.loc[df['smoker'] == 'no']
df_no_smoke.head()
# In[115]:
# New baseline to my costs if I do not insure smokers
no_smoke_y_pred = df_no_smoke['charges'].mean()
no_smoke_y_pred
# In[116]:
# Let's see what happens to average costs when we keep only people with a bmi below 26
df_obese = df.loc[df['bmi'] < 26]
df_obese.head()
# In[117]:
# Average healthcare costs for non obese people
df_obese_cost = df_obese['charges'].mean()
df_obese_cost
# In[118]:
# Remove obese (bmi > 30) and smokers to see total charges
# Baseline prediction of avg total charges was $13,152.18
df_goodbmi_nosmoke = df.loc[df['smoker'] == 'no']
df_goodbmi_nosmoke.head(25)
# In[119]:
df_goodbmi_nosmoke = df_goodbmi_nosmoke[df_goodbmi_nosmoke['bmi'] < 26]
df_goodbmi_nosmoke.head(25)
# In[120]:
# Average cost to the insurance company without obese (bmi > 30) and without smoker is $7977.
# What if we dropped the bmi parameter down to 26 (still considered overweight)?
df_goodbmi_nosmoke['charges'].mean()
# In[121]:
df.head()
# In[122]:
# Who is more expensive? Men or women?
df_male = df.loc[df['sex'] == 'male']
df_female = df.loc[df['sex'] == 'female']
# In[123]:
df_male['charges'].mean()
# In[124]:
df_female['charges'].mean()
# In[125]:
df.describe()
# In[126]:
gb.fit(X_train, y_train)
# In[127]:
# Predict on age sex bmi children smoker region charges
# Apply the model to new data
age = 27
sex = 1
bmi = 24
children = 2
smoker = 1
region = 1
# Note: sex/smoker/region should use the same categories as the training data
# (e.g. 'male'/'female', 'yes'/'no'); the numeric stand-ins are kept from the original draft.
X_test1 = pd.DataFrame([[age, sex, bmi, children, smoker, region]], columns=features)
y_pred1 = gb.predict(X_test1)
print(y_pred1)
# In[ ]:
# Visual Ideas:
# At least one visual of my code
# Feature importances chart
# A visual of the r^2 score
# A visual for the mean of costs with and without smokers
# In[128]:
# Get feature importances
rf = gb.named_steps['xgbregressor']
importances = pd.Series(rf.feature_importances_, X_train.columns)
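# The exported notebook is truncated at this point; a typical follow-up step (illustrative
# only, not from the original notebook) would be to rank and plot the importances:
# importances.sort_values().plot.barh(title='XGBoost feature importances')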
import scipy.signal
import numpy as np
import math
import pandas as pd
import csv
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Cut out extremely high or low values, as they are probably measuring errors
def removeArtifacts(data_input, events_input, upper_limit_one, upper_limit_two):
data = []
events = []
u = 0
for i in data_input:
extreme_value_found = False
for x in i:
c = 0
while c < 2:
if c == 0:
if x[c] == upper_limit_one:
extreme_value_found = True
break
else:
if x[c] == upper_limit_two:
extreme_value_found = True
break
c += 1
if not extreme_value_found:
data.append(i)
events.append(events_input[u])
u += 1
return data, events
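# Illustrative call (variable names and sensor saturation limits are assumptions,
# not taken from the original recording setup):
# clean_data, clean_events = removeArtifacts(raw_epochs, raw_events,
#                                            upper_limit_one=1023, upper_limit_two=1023)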
# Define bandpass filter functions, which will be used to filter the data to different frequencies
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
sos = scipy.signal.butter(order, [low, high], analog=False, btype='band', output='sos')
return sos
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
sos = butter_bandpass(lowcut, highcut, fs, order=order)
y = scipy.signal.sosfilt(sos, data)
return y
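# Illustrative check of the filter (hypothetical variable names; the EEG intervals below
# are sampled at 100 Hz, hence fs=100): band-pass one synthetic channel to the alpha band.
# _demo_channel = np.random.randn(200)
# _demo_alpha = butter_bandpass_filter(_demo_channel - np.mean(_demo_channel),
#                                      lowcut=8, highcut=12, fs=100)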
# Define Tapering function
# Each interval consists of 200 elements. The first and last elements are not as relevant as the
# elements in the middle of the interval. In many cases these marginal values are very high or low,
# which distorts the computation of mean, standard deviation, etc. This is why tapering is needed.
w = np.hamming(200)
tapering_function = []
for i in w:
    tapering_function.append(i * 1)  # --> in production this only needs to run once... [TODO]
def applyTapering(data, zeros):
res = []
for x in data:
c = 0
res_row = []
res_row_mini = []
zero_list = []
for y in x:
for i in y:
res_row_mini.append(i * tapering_function[c])
res_row.append(res_row_mini)
c += 1
res.append(res_row)
return res
# Define function for extracting features that describe the 200 datapoints of an interval as a whole.
# This function extracts arrithmetic mean, standard deviation, the highest or lowest value of an interval (= top_val),
# the greatest differences between two datapoints on the positive and negative side (= baseline_difference_top,
# baseline_difference_bottom) and each of these values after the interval runs through a Fourier transformation.
def computeFeatures(data, temp_top_val):
mean_row = []
std_row = []
temp_baseline_difference_bottom = 0
temp_baseline_difference_top = 0
for i in data:
i = float(i)
if temp_baseline_difference_bottom == 0:
temp_baseline_difference_bottom = math.sqrt(i ** 2)
else:
if math.sqrt(i ** 2) < temp_baseline_difference_bottom:
temp_baseline_difference_bottom = math.sqrt(i ** 2)
if math.sqrt(i ** 2) > temp_baseline_difference_top:
temp_baseline_difference_top = math.sqrt(i ** 2)
if math.sqrt(i ** 2) > temp_top_val:
temp_top_val = math.sqrt(i ** 2)
mean_row.append(math.sqrt(i ** 2))
std_row.append(i)
if math.sqrt(i ** 2) > temp_top_val:
temp_top_val = math.sqrt(i ** 2)
return [mean_row, std_row, temp_baseline_difference_bottom, temp_baseline_difference_top, temp_top_val]
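# For a single filtered channel, computeFeatures returns
# [absolute values, raw values, smallest |x|, largest |x|, updated running top value].
# Illustrative example: computeFeatures([0.5, -2.0, 1.0], 0)
# -> [[0.5, 2.0, 1.0], [0.5, -2.0, 1.0], 0.5, 2.0, 2.0]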
def getFeatures(lowcut, highcut, input_data):
means = []
std = []
top_val = []
temp_top_val = []
baseline_difference = []
# Apply fourier transform to get energy distribution on different frequencies
means_fft = []
std_fft = []
top_val_fft = []
temp_top_val_fft = []
baseline_difference_fft = []
#print("INPUT DATA [0][0]: ", input_data[0][0])
for i in input_data[0][0]:
#print("COURIOUS: ", i)
temp_top_val.append(0)
temp_top_val_fft.append(0)
#print("FIRST EPOCH INPUT DATA: ", input_data[0])
for epoch in input_data:
c = 0
means_row = []
std_row = []
top_val_row = []
baseline_difference_row = []
means_fft_row = []
std_fft_row = []
top_val_fft_row = []
temp_top_val_fft_row = []
baseline_difference_fft_row = []
for x in np.transpose(epoch):
#print("X IN EPOCH TRANSPOSED: ", x)
filtered = butter_bandpass_filter(x - np.mean(x), lowcut=lowcut, highcut=highcut, fs=100)
filtered_fft = np.fft.fftn(filtered)
res = computeFeatures(filtered, temp_top_val[c])
res_fft = computeFeatures(filtered_fft, temp_top_val_fft[c])
baseline_difference_row.append(res[3] - res[2])
baseline_difference_fft_row.append(res_fft[3] - res_fft[2])
top_val_row.append(res[4])
top_val_fft_row.append(res_fft[4])
means_row.append(np.average(res[0]))
means_fft_row.append(np.average(res_fft[0]))
std_row.append(np.std(res[1]))
std_fft_row.append(np.std(res_fft[1]))
c += 1
baseline_difference.append(baseline_difference_row)
baseline_difference_fft.append(baseline_difference_fft_row)
top_val.append(top_val_row)
top_val_fft.append(top_val_fft_row)
means.append(means_row)
means_fft.append(means_fft_row)
std.append(std_row)
std_fft.append(std_fft_row)
return [means, std, top_val, baseline_difference, means_fft, std_fft, top_val_fft, baseline_difference_fft]
# Define function to get averaged datapoints for the different event classes (in this case hand up or down).
# This will be used to measure distances between a given interval and the averaged intervals for the event classes
# to determine which class is nearest to the given interval.
def getAverages(data, events):
# data: [ [ [x,y], [x,y], [x,y], ... ], [ [x,y], [x,y], [x,y], ... ], ... ]
average_up = []
average_down = []
c = 0
for i in data:
if events[c] == 1:
average_up.append(i)
else:
average_down.append(i)
c += 1
average_up_transpose = np.transpose(average_up)
average_down_transpose = np.transpose(average_down)
average_up_res = []
average_down_res = []
    for sensor in average_up_transpose:
        average_up_res.append(np.average(sensor))
    for sensor in average_down_transpose:
        average_down_res.append(np.average(sensor))
return average_up_res, average_down_res
def getAveragesMain(data):
average = []
average_transpose = np.transpose(data)
average_res = []
    for sensor in average_transpose:
        average_res.append(np.average(sensor))
return average_res
# Define functions to find extreme points in the intervals, average them for the different events and measure the distance
# from a given interval to the averaged extreme points from the different classes.
def findLocalExtremes(up, down, scaler):
minima_up = []
maxima_up = []
minima_down = []
maxima_down = []
i = 0
while i < len(up):
minima_up.append(np.min(up[i:i+scaler]))
maxima_up.append(np.max(up[i:i+scaler]))
minima_down.append(np.min(down[i:i+scaler]))
maxima_down.append(np.max(down[i:i+scaler]))
i += scaler
return minima_up, maxima_up, minima_down, maxima_down
def findLocalExtremesMain(data, scaler):
# [[x,y], [x,y], [x,y], ...]
minima = []
maxima = []
i = 0
for i in np.transpose(data):
minima_row, maxima_row = findLocalExtremesRow(i, scaler)
minima.append(minima_row)
maxima.append(maxima_row)
return minima, maxima
def findLocalExtremesRow(row, scaler):
minima = []
maxima = []
i = 0
while i < len(row):
minima.append(np.min(row[i:i+scaler]))
maxima.append(np.max(row[i:i+scaler]))
i += scaler
return minima, maxima
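# Illustrative example: with scaler=2, findLocalExtremesRow([3, 1, 4, 1, 5, 9], 2)
# returns ([1, 1, 5], [3, 4, 9]): the local minima and maxima over blocks of two samples.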
def extremePointsCorrelation(data, events, scaler):
    # first Sensor 1, then Sensor 2...
avg_up, avg_down = getAverages(data, events)
# compute extreme points for averaged data
minima_up, maxima_up, minima_down, maxima_down = findLocalExtremes(avg_up, avg_down, scaler)
corr_res_minima = []
corr_res_maxima = []
minima_array = []
maxima_array = []
for epoch in data:
corr_res_maxima_row = []
minima_array_row = []
maxima_array_row = []
for i in np.transpose(epoch):
minima, maxima = findLocalExtremesRow(i, scaler)
minima_array_row.append(minima) # Consists of local minima per epoch --> onedimensional
maxima_array_row.append(maxima) # Consists of local maxima per epoch --> onedimensional
minima_array.append(minima_array_row) # Consists of local minima per epoch --> multidimensional --> Just reduced data array
maxima_array.append(maxima_array_row) # Consists of local maxima per epoch --> multidimensional
minima_res = []
maxima_res = []
for epoch in np.transpose(minima_array):
c = 0
for i in epoch:
minima_res.append(epoch[c])
c+=1
for epoch in np.transpose(maxima_array):
c = 0
append = False
for i in epoch:
#if math.sqrt(np.corrcoef(i, events)[0][1] ** 2) > 0.1:
maxima_res.append(epoch[c])
c+=1
return minima_res, maxima_res
def extremePointsCorrelationMain(data, scaler):
minima, maxima = findLocalExtremesMain(data, scaler)
return minima, maxima
# CORRELATIONS FOR EPOCHS AS A WHOLE
def getFrequencies(min, max, data):
corr = []
corr_tapered = []
freqs = []
i = min
limit = max
while i < limit - 1:
min = i
c = i + 1
while c < limit:
max = c
corr.append(getFeatures(min, max, data))
freqs.append([i,c])
c += 1
i += 1
cores_real_numbers = []
for frequency in corr:
for sensor in np.transpose(frequency):
for attribute in np.transpose(sensor):
cores_real_numbers.append(attribute)
return cores_real_numbers
def getFrequenciesPredefined(data):
corr = []
#data = applyTapering(data,0)
#corr.append(getFeatures(1, 4, applyTapering(data,0)))
#corr.append(getFeatures(8, 12, applyTapering(data,0)))
#corr.append(getFeatures(4, 8, applyTapering(data,0)))
#corr.append(getFeatures(12, 35, applyTapering(data,0)))
#corr.append(getFeatures(13, 32, applyTapering(data,0)))
corr.append(getFeatures(1, 4, data))
corr.append(getFeatures(8, 12, data))
corr.append(getFeatures(4, 8, data))
corr.append(getFeatures(12, 35, data))
corr.append(getFeatures(13, 32, data))
cores_real_numbers = []
for frequency in corr:
for sensor in np.transpose(frequency):
for attribute in np.transpose(sensor):
cores_real_numbers.append(attribute)
return cores_real_numbers
def generateTrainingSet(input_data, events):
    return pd.DataFrame(input_data)
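# SelectKBest and chi2 are imported above but not used in this fragment; presumably the
# feature table built here feeds a selection step roughly like the sketch below (the
# transpose assumes the table comes out features-by-samples, and chi2 additionally
# requires non-negative inputs; both are assumptions, not statements about the original code):
# features_df = generateTrainingSet(getFrequenciesPredefined(data), events)
# best_features = SelectKBest(chi2, k=10).fit_transform(features_df.T, events)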
# encoding: utf-8
from opendatatools.common import RestAgent
from bs4 import BeautifulSoup
import json
import pandas as pd
import datetime
index_map={
'SSEC' : '上证综合指数',
'SZSC1' : '深证成份指数(价格)',
'FTXIN9' : '富时中国A50指数',
'DJSH' : '道琼斯上海指数',
'HSI' : '香港恒生指数 (CFD)',
'DJI' : '道琼斯工业平均指数',
'SPX' : '美国标准普尔500指数 (CFD)',
'IXIC' : '纳斯达克综合指数',
'RUT' : '美国小型股2000 (CFD)',
'VIX' : 'VIX恐慌指数 (CFD)',
'GSPTSE' : '加拿大多伦多S&P/TSX 综合指数 (CFD)',
'BVSP' : '巴西IBOVESPA股指',
'MXX' : 'S&P/BMV IPC',
'GDAXI' : '德国DAX30指数 (CFD)',
'FTSE' : '英国富时100指数 (CFD)',
'FCHI' : '法国CAC40指数',
'STOXX50E' : '欧洲斯托克(Eurostoxx)50指数 (CFD)',
'AEX' : '荷兰AEX指数',
'IBEX' : '西班牙IBEX35指数 (CFD)',
'FTMIB' : '意大利富时MIB指数 (CFD)',
'SSMI' : '瑞士SWI20指数 (CFD)',
'PSI20' : '葡萄牙PSI20指数',
'BFX' : '比利时BEL20指数 (CFD)',
'ATX' : 'ATX',
'OMXS30' : '瑞典OMX斯德哥尔摩30指数',
'IMOEX' : '俄罗斯MOEX Russia指数',
'IRTS' : '俄罗斯交易系统市值加权指数',
'WIG20' : '波兰华沙WIG20指数',
'BUX' : '匈牙利股票交易指数',
'XU100' : '土耳其伊斯坦堡100指数',
'TA35' : 'TA 35',
'TASI' : '沙特阿拉伯TASI指数',
'N225' : '日经225指数 (CFD)',
'AXJO' : '澳大利亚S&P/ASX200指数',
'TWII' : '台湾加权指数',
'SETI' : 'SET Index',
'KS11' : '韩国KOSPI指数',
'JKSE' : '印尼雅加达综合指数',
'NSEI' : '印度S&P CNX NIFTY指数',
'BSESN' : '印度孟买30指数',
'HNX30' : 'HNX 30',
'CSE' : '斯里兰卡科伦坡指数',
}
index_map_inv = {v:k for k, v in index_map.items()}
class YingWeiAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.add_headers({'Referer': 'https://cn.investing.com/indices/shanghai-composite', 'X-Requested-With': 'XMLHttpRequest'})
def get_index_list(self):
url = "https://cn.investing.com/indices/major-indices"
response = self.do_request(url)
soup = BeautifulSoup(response, "html5lib")
tables = soup.find_all('table')
data_list = []
for table in tables:
if table.has_attr('id') and table['id'] == 'cr_12':
trs = table.findAll("tr")
for tr in trs:
if tr.has_attr('id'):
tds = tr.findAll('td')
time = datetime.datetime.fromtimestamp(int(tds[7]['data-value'])).strftime("%Y-%m-%d %H:%M:%S")
data_list.append({'index_name_cn': tr.a['title'],
'index_name': index_map_inv[tr.a['title']] if tr.a['title'] in index_map_inv else '',
'country' : tds[0].span['title'],
'last': tds[2].text,
'high': tds[3].text,
'low': tds[4].text,
'price_change': tds[5].text,
'percent_change': tds[6].text,
'time' : time,
})
df = pd.DataFrame(data_list)
return df, ''
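    # Illustrative usage (hypothetical names; the result depends on the live investing.com page):
    #   agent = YingWeiAgent()
    #   df_indices, msg = agent.get_index_list()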
def _get_id(self, symbol):
url = "https://cn.investing.com/indices/major-indices"
response = self.do_request(url)
soup = BeautifulSoup(response, "html5lib")
tables = soup.find_all('table')
for table in tables:
if table.has_attr('id') and table['id'] == 'cr_12':
rows = table.findAll("tr")
for row in rows:
if row.has_attr('id'):
if row.a['title'] == symbol:
return row['id'][5:]
return None
def get_index_data(self, symbol, interval, period):
symbol = index_map[symbol]
id = self._get_id(symbol)
if id is None:
return None, '暂不支持该指数'
url = "https://cn.investing.com/common/modules/js_instrument_chart/api/data.php"
param = {
'pair_id': id,
'pair_id_for_news': id,
'chart_type': 'area',
'pair_interval': interval,
'candle_count': 120,
'events': 'yes',
'volume_series': 'yes',
'period': period,
}
response = self.do_request(url, param=param, encoding='gzip')
if response is not None:
jsonobj = json.loads(response)
            df = pd.DataFrame(jsonobj['candles'])
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
                    # cannot compare NaN != NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
    @unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() does not support parameters in the old-style implementation")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
    @unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() does not support parameters in the old-style implementation")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
    @unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() does not support parameters in the old-style implementation")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_assert1(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3])
S2 = pd.Series([6., 21., 3., 5.])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_assert2(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6., 21., 3., 5.])
S2 = pd.Series([1, 2, 3])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_integer(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 16)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 3, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_different_types(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
S2 = pd.Series([1, 2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_integer_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 17, -5, 4])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_apply1(self):
def test_impl(S):
return S.apply(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_abs1(self):
def test_impl(S):
return S.abs()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, -2., 3., 0.5E-01, 0xFF, 0o7, 0b101])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_cov1(self):
def test_impl(S1, S2):
return S1.cov(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_corr1(self):
def test_impl(S1, S2):
return S1.corr(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_str_len1(self):
def test_impl(S):
return S.str.len()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'abc', 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str2str(self):
str2str_methods = ('capitalize', 'lower', 'lstrip', 'rstrip',
'strip', 'swapcase', 'title', 'upper')
for method in str2str_methods:
func_text = "def test_impl(S):\n"
func_text += " return S.str.{}()\n".format(method)
test_impl = _make_func_from_text(func_text)
hpat_func = hpat.jit(test_impl)
S = pd.Series([' \tbbCD\t ', 'ABC', ' mCDm\t', 'abc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_append1(self):
def test_impl(S, other):
return S.append(other).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
# Test single series
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_append2(self):
def test_impl(S1, S2, S3):
return S1.append([S2, S3]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
S3 = pd.Series([1.0])
# Test series tuple
np.testing.assert_array_equal(hpat_func(S1, S2, S3),
test_impl(S1, S2, S3))
def test_series_isin_list1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = [1, 2, 5, 7, 8]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = [1., 2., 5., 7., 8.]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'q', 'w', 'c', 'd', 'e', 'r'])
values = ['a', 'q', 'c', 'd', 'e']
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = {1, 2, 5, 7, 8}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = {1., 2., 5., 7., 8.}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
@unittest.skip('TODO: requires hashable unicode strings in Numba')
def test_series_isin_set3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'c', 'd', 'e'] * 2)
values = {'b', 'c', 'e'}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3., np.inf])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull1(self):
def test_impl(S):
return S.isnull()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull_full(self):
def test_impl(series):
return series.isnull()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_numeric + [test_global_input_data_unicode_kind4]:
series = pd.Series(data * 3)
ref_result = test_impl(series)
jit_result = hpat_func(series)
pd.testing.assert_series_equal(ref_result, jit_result)
def test_series_notna1(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_notna_noidx_float(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_int(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_num(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_notna_noidx_str(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
        S = pd.Series(input_data)
        result_ref = test_impl(S)
        result_jit = hpat_func(S)
        pd.testing.assert_series_equal(result_jit, result_ref)
import torch
from transformer.nn_transformer import TRANSFORMER
from downstream.model import example_classifier, example_regression, AvecModel
from downstream.solver import get_optimizer
from downstream.dataloader_ds import AvecDataset, AvecDatasetFull
import pandas as pd
from torch.utils.data import DataLoader
import re
import numpy as np
import torch.nn as nn
import sys
import os
from sklearn.metrics import mean_squared_error
from torch.nn import init
from audtorch.metrics.functional import concordance_cc
from scipy.stats import pearsonr
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#config = {
#'mode' : 'regression',
#'sample_rate' : 1,
#'hidden_size' : 64,
#'pre_linear_dims' : [32], 'post_linear_dims': [32],'drop':0.1,
#'concat': 1, 'layers': 3, 'linear': False,
#'t_local': 45, 't_global': 150
#}
#classifier = AvecModel(28, 1, config, 3).to(device)
#features = torch.randn(8,1500, 28)
#labels = torch.randn(8)
#loss, result, correct, valid = classifier.forward(features, labels)
seeds = list(np.random.randint(0,1000,3))
def get_path(participant_ids, processed_path):
output = []
for participant_id in participant_ids:
file_name = str(participant_id) + "_VIDEO.npy"
output.append(os.path.join(processed_path, file_name))
return output
def concordance_correlation_coefficient(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
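    """Lin's concordance correlation coefficient (CCC) between y_true and y_pred.
    Note: sample_weight and multioutput are accepted only for signature
    compatibility with sklearn-style metrics; they are not used here.
    """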
cor=np.corrcoef(y_true,y_pred)[0][1]
mean_true=np.mean(y_true)
mean_pred=np.mean(y_pred)
var_true=np.var(y_true)
var_pred=np.var(y_pred)
sd_true=np.std(y_true)
sd_pred=np.std(y_pred)
numerator=2*cor*sd_true*sd_pred
denominator=var_true+var_pred+(mean_true-mean_pred)**2
return numerator/denominator
output = []
subset = ["gpau"]
model_name_flm = "result/result_transformer/flm_base/states-250000.ckpt"
#model_name_au = "result/result_transformer/au_base/states-250000.ckpt"
#model_name_gp = "result/result_transformer/gp_base/states-250000.ckpt"
model_name_au = "result/result_transformer/au_aalbert_3L/states-200000.ckpt"
model_name_gp = "result/result_transformer/gp_base_aalbert/states-200000.ckpt"
model_name_gpau = "result/result_transformer/gpau_aalbert_3L/states-200000.ckpt"
model_name_dict = {"flm":model_name_flm, "au":model_name_au, "gp":model_name_gp, "gpau":model_name_gpau}
for seed in seeds:
torch.manual_seed(seed)
batch_size = 8
n_steps = 4000
eval_every = 40
max_len = 5000
norm_label = False
train_info, dev_info, test_info = "data/train_split.csv", "data/dev_split.csv", "data/test_split.csv"
regression_col_name = "PHQ_Score"
regression_col = list(pd.read_csv(test_info).columns).index(regression_col_name)
processed_npy_path = "/shares/perception-working/minh/avec_processed_three_fps/"
train_paths = get_path(pd.read_csv(train_info).values[:,0], processed_npy_path)
train_scores = pd.read_csv(train_info).values[:,regression_col]
dev_paths = get_path(pd.read_csv(dev_info).values[:,0], processed_npy_path)
    dev_scores = pd.read_csv(dev_info).values[:,regression_col]
#!/usr/bin/python3
import pandas as pd
import pickle
import sys
import base64
import re
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)
# Read the model, deserialize and unpickle it.
# model = ???
# Here we keep input data to Dataframe constructor
rows = []
# Iterate over standard input
for line in sys.stdin:
# Parse here your input and append result into rows
# ???
rows.append(line_dict)
# Initialize a dataframe from the list
df = pd.DataFrame(rows)
import pymongo
from PyQt5 import QtCore
import pandas as pd
import time
from bson.objectid import ObjectId
from nspyre.utils import get_mongo_client
import traceback
class DropEvent():
"""Represents a drop of a collection in a certain database"""
def __init__(self, db, col):
self.db, self.col = db, col
def modify_df(df, change):
# print(change)
if change['operationType'] == 'drop':
return DropEvent(change['ns']['db'], change['ns']['coll']), None
key = change['documentKey']['_id']
if change['operationType'] == 'update':
for k, val in change['updateDescription']['updatedFields'].items():
ks = k.split('.')
if len(ks) == 1:
df.loc[key, k] = val
elif len(ks) == 2:
# TODO: Figure out a more reliable way of doing this
if ks[1].isdigit():
# Assume an array here... Will see if we can get away with this
df.loc[key,ks[0]][int(ks[1])] = val
else:
df.loc[key,ks[0]][ks[1]] = val
else:
                raise NotImplementedError('Cannot use a depth of more than 2 in the documents')
elif change['operationType'] == 'insert':
doc = change['fullDocument']
_id = doc.pop('_id')
s = pd.Series(doc, name=_id)
df = df.append(s)
else:
raise NotImplementedError('Cannot modify df with operationType: {}'.format(change['operationType']))
return df, df.loc[key]
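# Example of the change-stream events handled above (hypothetical _id and field name):
# an 'update' event such as
#     {'operationType': 'update',
#      'documentKey': {'_id': some_id},
#      'updateDescription': {'updatedFields': {'level': 3.14}}}
# patches one cell of df in place, while an 'insert' event appends the new
# document as a row keyed by its _id.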
class Mongo_Listenner(QtCore.QThread):
"""Qt Thread which monitors for changes to qither a collection or a database and emits a signal when something happens"""
updated = QtCore.pyqtSignal(object)
def __init__(self, db_name, col_name=None, mongodb_addr=None):
super().__init__()
self.db_name = db_name
self.col_name = col_name
self.mongodb_addr = mongodb_addr
self.exit_flag = False
def run(self):
self.exit_flag = False
# Connect
client = get_mongo_client(self.mongodb_addr)
mongo_obj = client[self.db_name]
if not self.col_name is None:
mongo_obj = mongo_obj[self.col_name]
with mongo_obj.watch() as stream:
while stream.alive:
doc = stream.try_next()
if doc is not None:
self.updated.emit(doc)
if self.exit_flag:
return
if not self.exit_flag:
self.run() #This takes care of the invalidate event which stops the change_stream cursor
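# Minimal usage sketch (database/collection names are made up):
#     listener = Mongo_Listenner('experiment_db', col_name='runs')
#     listener.updated.connect(lambda change: print(change['operationType']))
#     listener.start()
#     ...
#     listener.exit_flag = True  # ask the watch loop to stop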
class Synched_Mongo_Collection(QtCore.QObject):
updated_row = QtCore.pyqtSignal(object) # Emit the updated row
# mutex = QtCore.QMutex()
def __init__(self, db_name, col_name, mongodb_addr=None):
super().__init__()
self.watcher = Mongo_Listenner(db_name, col_name=col_name, mongodb_addr=mongodb_addr)
self.col = get_mongo_client(mongodb_addr)[db_name][col_name]
self.refresh_all()
self.watcher.start()
self.watcher.updated.connect(self._update_df)
def refresh_all(self):
col = list(self.col.find())
if col == []:
self.df = None
else:
self.df = pd.DataFrame(col)
self.df.set_index('_id', inplace=True)
def get_df(self):
# self.mutex.lock()
return self.df
@QtCore.pyqtSlot(object)
def _update_df(self, change):
        if self.df is None:
self.refresh_all()
# print(change)
try:
self.df, row = modify_df(self.df, change)
self.updated_row.emit(row)
except:
traceback.print_exc()
print('Refreshing the entire collection')
self.refresh_all()
# self.refresh_all() #I will make this a little more efficient later on
def __del__(self):
self.watcher.exit_flag = True
class Synched_Mongo_Database(QtCore.QObject):
updated_row = QtCore.pyqtSignal(object, object) # Emit the updated row in the format (col_name, row)
col_added = QtCore.pyqtSignal(object) # Emit the name of the collection which was added
col_dropped = QtCore.pyqtSignal(object) # Emit the name of the collection which was dropped
db_dropped = QtCore.pyqtSignal() #Emitted when the database is dropped
def __init__(self, db_name, mongodb_addr=None):
super().__init__()
self.watcher = Mongo_Listenner(db_name, col_name=None, mongodb_addr=mongodb_addr)
self.db = get_mongo_client(mongodb_addr)[db_name]
self.refresh_all()
self.watcher.start()
self.watcher.updated.connect(self._update)
def refresh_all(self):
self.dfs = dict()
for col in self.db.list_collection_names():
col_data = list(self.db[col].find())
if not col_data == []:
self.dfs[col] = pd.DataFrame(col_data)
self.dfs[col].set_index('_id', inplace=True)
def get_df(self, col_name, timeout=0.1):
try:
if not self.dfs[col_name] is None:
return self.dfs[col_name]
finally:
t = time.time()
while time.time()-t<timeout:
if col_name in self.dfs:
return self.dfs[col_name]
@QtCore.pyqtSlot(object)
def _update(self, change):
# print(change)
try:
if change['operationType'] == 'dropDatabase':
self.dfs = dict()
self.db_dropped.emit()
return
elif change['operationType'] == 'invalidate':
return
col = change['ns']['coll']
if col in self.dfs:
df, row = modify_df(self.dfs[col], change)
if isinstance(df, DropEvent):
self.dfs.pop(col)
self.col_dropped.emit(col)
return
self.dfs[col] = df
self.updated_row.emit(col, row)
else:
doc = change['fullDocument']
row = pd.Series(doc)
                self.dfs[col] = pd.DataFrame([row])
from __future__ import absolute_import, division, print_function
import utool
import pandas as pd
import numpy as np
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[pdh]')
from ibeis.model.hots.hstypes import VEC_DIM, INTEGER_TYPE
class LazyGetter(object):
def __init__(self, getter_func):
self.getter_func = getter_func
def __getitem__(self, index):
return self.getter_func(index)
def __call__(self, index):
return self.getter_func(index)
#def lazy_getter(getter_func):
# def lazy_closure(*args):
# return getter_func(*args)
# return lazy_closure
class DataFrameProxy(object):
"""
pandas is actually really slow. This class emulates it so
I don't have to change my function calls, but without all the slowness.
"""
def __init__(self, ibs):
self.ibs = ibs
def __getitem__(self, key):
if key == 'kpts':
return LazyGetter(self.ibs.get_annot_kpts)
elif key == 'vecs':
return LazyGetter(self.ibs.get_annot_desc)
elif key == 'labels':
return LazyGetter(self.ibs.get_annot_class_labels)
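# Usage sketch (hypothetical ibs controller and annotation id `aid`):
#     annots = DataFrameProxy(ibs)
#     kpts = annots['kpts'][aid]   # lazily forwards to ibs.get_annot_kpts(aid)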
@profile
def Int32Index(data, dtype=np.int32, copy=True, name=None):
    return pd.Index(data, dtype=dtype, copy=copy, name=name)
import pandas as pd
import numpy as np
from pyquantfinance.metrics import drawdown, max_drawdown, skewness, kurtosis, is_normal
def test_drawdown():
# test 100% percent returns
drawdown_test1 = drawdown([1, 1, 1, 1, 1])
assert drawdown_test1["Wealth"].equals(pd.Series([2, 4, 8, 16, 32]))
assert drawdown_test1["Previous Peak"].equals(pd.Series([2, 4, 8, 16, 32]))
assert drawdown_test1["Drawdown"].equals(
pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]))
# test all zero returns
drawdown_test2 = drawdown([0, 0, 0, 0, 0])
    assert drawdown_test2["Wealth"].equals(pd.Series([1, 1, 1, 1, 1]))
# -*- coding: utf-8 -*-
"""NWS website scraping for river guage observations and forecasts.
This module will create local csv database of readings and forecasts.
Also the raw_data from scraping the NWS is saved to a text file for later examination.
Two datetimes are recorded. ScrapeTime and ForecastTime/ObservationTime.
Data is tuple of level and flowrate. (currently no flow data is published)
This program should be run daily (by cron for example).
A seperate program runs to analyze data and tweet when there is info to share.
If tweeting reports rising water then additional runs of scraping routine can be triggered.
"""
# import standard library modules
import os
from time import sleep
# import custom modules
from pathlib import Path
from bs4 import BeautifulSoup, Comment
import datetime as dt
from numpy import datetime64
import pytz
from tqdm import tqdm
from dateparser.search import search_dates
from loguru import logger
# this section imports code from the pypi repository (CFSIV-utils-Conradical) of my own utilities.
import cfsiv_utils.WebScraping as ws
import cfsiv_utils.filehandling as fh
import cfsiv_utils.time_strings as ts
import cfsiv_utils.log_handling as lh
RUNTIME_NAME = Path(__file__)
# These are the USGS identification numbers for river monitoring gauges on the Ohio River
RIVER_GUAGE_IDS = [
141893,
143063,
144287,
142160,
145137,
143614,
141268,
144395,
143843,
142481,
143607,
145086,
142497,
151795,
152657,
141266,
145247,
143025,
142896,
144670,
145264,
144035,
143875,
143847,
142264,
152144,
143602,
144126,
146318,
141608,
144451,
144523,
144877,
151578,
142935,
142195,
146116,
143151,
142437,
142855,
142537,
142598,
152963,
143203,
143868,
144676,
143954,
143995,
143371,
153521,
153530,
143683,
]
USGS_WEBSITE_HEAD_URL = 'https://water.weather.gov//ahps2/river.php?wfo=lmk&wfoid=18699&riverid=204624&pt[]='
USGS_WEBSITE_TAIL_URL = '&allpoints=150960&data[]=obs&data[]=xml'
# build urls...
USGS_URLS = []
for site in RIVER_GUAGE_IDS:
USGS_URLS.append(f'{USGS_WEBSITE_HEAD_URL}{site}{USGS_WEBSITE_TAIL_URL}')
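# e.g. for gauge id 141893 the constructed URL is:
# https://water.weather.gov//ahps2/river.php?wfo=lmk&wfoid=18699&riverid=204624&pt[]=141893&allpoints=150960&data[]=obs&data[]=xml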
# TODO need gauge location and relative elevation data dictionary
# TODO visualize data from gauges to illustrate how a 'hump' of water moves down the river,
# possibly by graphing the gauges as a flat surface and the water elevation above that imagined flat river.
# TODO predict the time of arrival of the 'hump' at various points. (machine learning?)
OUTPUT_ROOT = "CSV_DATA/"
@logger.catch
def extract_date(text_list):
date_list = ts.extract_date(text_list)
for date in date_list:
if date != None:
logger.debug(f'{date } Date found in {text_list}')
return date
logger.debug(f'No parseable date found in: {text_list}')
logger.warning('No parseable date found.')
return ts.UTC_NOW()
@logger.catch
def pull_details(soup):
"""return specific parts of the scrape.
Args:
soup (bs4.BeautifulSoup): NWS guage scrape
"""
guage_id = soup.h1["id"]
guage_name_string = soup.h1.string
# find the comments.
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
# convert the findAll.ResultSet into a plain list.
c_list = [c for c in comments]
# Search the comments for a date of this scrape.
# (the NWS webscrape contains exactly 1 date/timestamp of the scrape found inside of a comment).
scrape_date = extract_date(c_list)
logger.info(f"Scrape date: {scrape_date}")
# find all of the observation and forecast data
nws_class = soup.find(class_="obs_fores")
nws_obsfores_contents = nws_class.contents
return (nws_obsfores_contents, guage_id, guage_name_string, scrape_date)
@logger.catch
def get_NWS_web_data(site, cache=False):
"""Return a BeautifulSoup (BS4) object from the Nation Weater Service (NWS)
along with the ID# and TEXT describing the guage data.
If CACHE then place the cleaned HTML into local storage for later processing by other code.
"""
clean_soup = ws.retrieve_cleaned_html(site, cache)
return pull_details(clean_soup)
@logger.catch
def FixDate(s, scrape_date, time_zone="UTC"):
"""Split date from time timestamp provided by NWS and add timezone label as well as correct year.
Unfortunately, NWS chose not to include the year in their observation/forecast data.
This will be problematic when forecast dates are into the next year.
If Observation dates are in December, Forecast dates must be checked and fixed for roll over into next year.
    # NOTE: forecast dates will appear to be in the past as compared to the scraping date if they are actually supposed to be next year.
"""
    # TODO make more robust string splitting
date_string, time_string = s.split()
hours, minutes = time_string.split(":")
# build a datetime time object
timestamp = dt.time(int(hours), int(minutes))
# recover the month and day
month_digits, day_digits = date_string.split("/")
if len(month_digits) + len(day_digits) != 4:
raise AssertionError("Month or Day string not correctly extracted.")
# use the available information to determine what year the webscrape data belongs to.
corrected_year = ts.apply_logical_year_value_to_monthday_pair(date_string, scrape_date)
# now place the timestamp back into the date object.
corrected_datetime = dt.datetime.combine(corrected_year, timestamp)
return corrected_datetime.replace(tzinfo=pytz.UTC)
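# Worked example (illustrative values): for a scrape dated 2022-01-02 (UTC), an
# observation stamped "12/30 18:00" should resolve to 2021-12-30 18:00 UTC, while a
# forecast stamped "01/04 06:00" stays in 2022; the year choice is delegated to
# ts.apply_logical_year_value_to_monthday_pair().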
@logger.catch
def sort_and_label_data(web_data, guage_id, guage_string, scrape_date):
readings = []
labels = ["datetime", "level", "flow"]
for i, item in enumerate(web_data):
if i >= 1: # zeroth item is an empty list. skip it.
# locate the name of this section (observed / forecast)
section = item.find(class_="data_name").contents[0]
sect_name = section.split()[0]
# Initialize a new dictionary.
row_dict = {"guage": guage_id, "type": sect_name}
# extract all readings from this section
section_data_list = item.find_all(class_="names_infos")
# organize the readings and add details
for i, data in enumerate(section_data_list):
element = data.contents[0]
pointer = i % 3 # each reading contains 3 unique data points
if pointer == 0: # this is the element for date/time
# This element needs modification
date = FixDate(element, scrape_date)
element = ts.timefstring(date)
# Add the element to the dictionary
row_dict[labels[pointer]] = element
if pointer == 2: # end of this reading
readings.append(row_dict) # add to the compilation
# reset the dict for next reading
row_dict = {"guage": guage_id, "type": sect_name}
return readings
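# Each element of `readings` is a flat dict such as (values illustrative only):
#     {'guage': '<guage_id>', 'type': 'Observed',
#      'datetime': '<timestamp string>', 'level': '21.3', 'flow': '0'}
# one dict per observation/forecast row; Main() below writes each one out as a CSV record.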
@logger.catch
def Main():
# for point in POINTS_OF_INTEREST:
for point in USGS_URLS:
logger.debug(f'Scraping point: {point}')
time_now_string = ts.UTC_NOW_STRING()
raw_data, guage_id, friendly_name, scrape_date = get_NWS_web_data(point, cache=True)
# TODO verify webscraping success
        # DONE, store raw_data for the ability to work on the dates problem over the new-year transition.
        # It will be helpful to have 12/28 to January 4 scrapes for repeated test processing.
# NOTE: cache=True above is used to make a local copy in the CWD of the original HTML scrape.
data_list = sort_and_label_data(raw_data, guage_id, friendly_name, scrape_date)
# TODO verify successful conversion of data
for item in tqdm(data_list, desc=friendly_name):
logger.debug(item)
date, time = time_now_string.split("_") # split date from time
yy, mm, dd = date.split("-")
OP = f"{yy}/{mm}/{dd}/"
OD = f"{OUTPUT_ROOT}{OP}"
FN = f"{time_now_string}"
fh.write_csv([item], filename=FN, directory=OD)
sleep(1) # guarantee next point of interest gets a new timestamp.
# some scrapes process in under 1 second and result in data collision.
logger.info(time_now_string)
return True
@logger.catch
def display_cached_data(number_of_scrapes):
"""process html collected previously and output to console.
Args:
number_of_scrapes (int) : number of scrapes to process from newest towards oldest
"""
logger.debug(f'Reviewing {number_of_scrapes} previous webscrapes.')
root = Path(Path.cwd(), "raw_web_scrapes")
files = list(root.glob("*.rawhtml")) # returns files ending with '.rawhtml'
# sort the list oldest to newest
files.sort(key=lambda fn: fn.stat().st_mtime, reverse=True)
# recover the scrapes
sample = []
for i in range(number_of_scrapes):
sample.append(files[i])
logger.debug(f'Loaded {len(sample)} scrapes.')
data_sample = []
for fl in sample:
data_list = []
with open(fl, "r") as txtfile:
raw_html = txtfile.read()
soup = BeautifulSoup(raw_html, "html.parser")
raw_data, guage_id, friendly_name, scrape_date = pull_details(soup)
data_list = sort_and_label_data(raw_data, guage_id, friendly_name, scrape_date)
data_list = data_list[::-1]
for i in range(9):
data_sample.append(data_list[i])
logger.debug(f'Processed {len(data_sample)} samples.')
for point in data_sample[::-1]:
logger.debug(f'Examining: {point}')
datestamp = point["datetime"]
if type(datestamp) == str:
full_date = datestamp[:10]
else:
full_date = datestamp.strftime("%Y/%m/%d")
_dummy = ts.apply_logical_year_value_to_monthday_pair(full_date, scrape_date)
logger.info(f"Correct observation date: {_dummy}, original full date: {full_date}")
return
import pandas as pd
@logger.catch
def display_cached_forecast_data(number_of_scrape_data_events):
logger.debug(f'Reviewing {number_of_scrape_data_events} previous webscrapes.')
    files = fh.get_files(Path(OUTPUT_ROOT), pattern='*')  # My files don't all have .csv extensions for some dumb reason
# sort the list oldest to newest
files.sort(key=lambda fn: fn.stat().st_mtime, reverse=True)
# recover the scrapes
logger.debug(f'Loaded {len(files)} scrapes.')
data_sample = []
# logger.debug(files)
for fl in files:
if fl.is_file():
df = pd.read_csv(fl)
data_sample.append(df)
# logger.debug(data_sample)
# extract only forecast data
forecasts_data = []
for sample_df in data_sample:
forecasts = sample_df[sample_df.type == 'Forecast']
if len(forecasts.index) > 0:
# sort forecasts by highest level to lowest
sorted_df = forecasts.sort_values(by=['level'], ascending=False)
sorted_df.reset_index(drop=True, inplace=True)
# logger.debug(f'\n{sorted_df}')
forecasts_data.append(sorted_df)
# logger.debug(forecasts_data)
# display only highest level and date
highest_forecasts = []
for sample_df in forecasts_data:
highest = sample_df[:1]
logger.debug(f'\n{highest[:1]}')
highest_forecasts.append(highest)
for itm in highest_forecasts:
logger.info(f'\n{itm}')
pass
return
# from datetime import datetime
@logger.catch
def display_cached_forecast_data2(number_of_scrape_data_events):
logger.debug(f'Reviewing {number_of_scrape_data_events} previous data gathered.')
    files = fh.get_files(Path(OUTPUT_ROOT), pattern='*')  # My files don't all have .csv extensions for some dumb reason
# remove the NOT files entries
files = [file for file in files if file.is_file()]
# sort the list oldest to newest
files.sort(key=lambda fn: fn.stat().st_mtime, reverse=True)
# recover the scrapes
logger.debug(f'Loaded {len(files)} scrapes.')
# logger.debug(files)
scrapes = files[:number_of_scrape_data_events]
logger.debug(f'filtered {len(scrapes)} scrapes.')
data_sample = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
import requests
import pandas as pd
def lcls_archiver_restore(pvlist, isotime='2018-08-11T10:40:00.000-07:00', verbose=True):
"""
Returns a dict of {'pvname':val} given a list of pvnames, at a time in ISO 8601 format, using the EPICS Archiver Appliance:
https://slacmshankar.github.io/epicsarchiver_docs/userguide.html
"""
url="http://lcls-archapp.slac.stanford.edu/retrieval/data/getDataAtTime?at="+isotime+"&includeProxies=true"
headers = {'Content-Type':'application/json'}
if verbose:
print('Requesting:', url)
data = pvlist
r = requests.post(url, headers=headers, json=data)
res = r.json()
d = {}
for k in pvlist:
if k not in res:
if verbose:
print('Warning: Missing PV:', k)
else:
d[k] = res[k]['val']
return d
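# Usage sketch (PV names are placeholders, not real channels):
#     snapshot = lcls_archiver_restore(['SOME:PV:NAME', 'ANOTHER:PV:NAME'],
#                                      isotime='2018-08-11T10:40:00.000-07:00')
#     # -> {'SOME:PV:NAME': 1.23, 'ANOTHER:PV:NAME': 4.56}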
def lcls_archiver_history(pvname, start='2018-08-11T10:40:00.000-07:00', end='2018-08-11T11:40:00.000-07:00', verbose=True):
"""
Get time series data from a PV name pvname,
with start and end times in ISO 8601 format, using the EPICS Archiver Appliance:
https://slacmshankar.github.io/epicsarchiver_docs/userguide.html
Returns tuple:
secs, vals
where secs is the UNIX timestamp, seconds since January 1, 1970, and vals are the values at those times.
Seconds can be converted to a datetime object using:
import datetime
datetime.datetime.utcfromtimestamp(secs[0])
"""
url="http://lcls-archapp.slac.stanford.edu/retrieval/data/getData.json?"
url += "pv="+pvname
url += "&from="+start
url += "&to="+end
#url += "&donotchunk"
#url="http://lcls-archapp.slac.stanford.edu/retrieval/data/getData.json?pv=VPIO:IN20:111:VRAW&donotchunk"
print(url)
r = requests.get(url)
data = r.json()
secs = [x['secs'] for x in data[0]['data']]
vals = [x['val'] for x in data[0]['data']]
return secs, vals
def lcls_archiver_history_dataframe(pvname, **kwargs):
"""
Same as lcls_archiver_history, but returns a dataframe with the index as the time.
"""
secs, vals = lcls_archiver_history(pvname, **kwargs)
# Get time series
ser = pd.to_datetime(pd.Series(secs), unit='s' )
    df = pd.DataFrame({'time':ser, pvname:vals})
    df = df.set_index('time')
    return df
# -*- coding: utf-8 -*-
"""Requests all forecasts (danger levels and problems) from the forecast api and writes to .csv file or plot."""
import datetime as dt
from varsomdata import getforecastapi as gf
from varsomdata import varsomclasses as vc
from varsomdata import getvarsompickles as gvp
from varsomdata import getmisc as gm
import logging as lg
import setenvironment as se
import pandas
__author__ = 'kmunve'
def test_AvalancheDanger_to_dict():
region_ids = [3022] # Trollheimen
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2018, 12, 5)
warnings_ = gf.get_avalanche_warnings_deprecated(region_ids, from_date, to_date, lang_key=1)
_d = warnings_[0].to_dict()
k = 'm'
def test_AvalancheDanger_as_df():
"""
Put class data into a pandas.DataFrame
:return:
"""
region_ids = [3022] # Trollheimen
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2018, 12, 6)
warnings_ = gf.get_avalanche_warnings_deprecated(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame.from_dict(warnings_)
df.to_csv(r'../localstorage/aval_danger.csv', header=True)
k = 'm'
def test_MountainWeather_class():
"""
Requires "forecast_api_version" : "v4.0.1" in /config/api.json
"""
region_ids = [3022] # Trollheimen
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2018, 12, 4)
warnings_as_json = gf.get_avalanche_warnings_as_json(region_ids, from_date, to_date, lang_key=1)
warnings_ = gf.get_avalanche_warnings_deprecated(region_ids, from_date, to_date, lang_key=1)
w = warnings_as_json[0]
mw = gf.MountainWeather()
mw.from_dict(w['MountainWeather'])
k = 'm'
def test_AvalancheWarning_class():
"""
Requires "forecast_api_version" : "v4.0.1" in /config/api.json
"""
region_ids = [3003]
from_date = dt.date(2018, 12, 3)
to_date = dt.date(2018, 12, 7)
warnings_as_json = gf.get_avalanche_warnings_as_json(region_ids, from_date, to_date, lang_key=1)
warnings_ = []
for w in warnings_as_json:
_aw = gf.AvalancheWarning()
_aw.from_dict(w)
warnings_.append(_aw)
print(warnings_[0])
k = 'm'
def test_get_avalanche_warnings():
region_ids = [3003]
from_date = dt.date(2018, 12, 3)
to_date = dt.date(2018, 12, 7)
aw = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=False)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/test_aw_dict.csv', index_label='index')
k = 'm'
def get_season_17_18():
region_ids = [3003,3007,3009,3010,3011,3012,3013,3014,3015,3016,3017,3022,3023,3024,3027,3028,3029,3031,3032,3034,3035]
from_date = dt.date(2017, 12, 1)
to_date = dt.date(2018, 5, 31)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/norwegian_avalanche_warnings_season_17_18.csv', index_label='index')
def get_season_18_19():
region_ids = [3003,3007,3009,3010,3011,3012,3013,3014,3015,3016,3017,3022,3023,3024,3027,3028,3029,3031,3032,3034,3035]
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2019, 3, 11)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/norwegian_avalanche_warnings_season_18_19.csv', index_label='index')
def get_svalbard_regional_forecasts():
region_ids = [3001, 3002, 3003, 3004]
from_date = dt.date(2016, 12, 1)
to_date = dt.date(2019, 4, 30)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/svalbard_forecasts.csv', index_label='index')
def get_svalbard_regional_forecasts_2015():
region_ids = [130]
from_date = dt.date(2014, 12, 1)
to_date = dt.date(2015, 5, 31)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
    df = pandas.DataFrame(aw_dict)
import inspect
import os
import datetime
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal, assert_frame_equal
from numpy.testing import assert_allclose
from pvlib import tmy
from pvlib import pvsystem
from pvlib import clearsky
from pvlib import irradiance
from pvlib import atmosphere
from pvlib import solarposition
from pvlib.location import Location
from conftest import needs_numpy_1_10, requires_scipy
latitude = 32.2
longitude = -111
tus = Location(latitude, longitude, 'US/Arizona', 700, 'Tucson')
times = pd.date_range(start=datetime.datetime(2014,1,1),
end=datetime.datetime(2014,1,2), freq='1Min')
ephem_data = solarposition.get_solarposition(times,
latitude=latitude,
longitude=longitude,
method='nrel_numpy')
am = atmosphere.relativeairmass(ephem_data.apparent_zenith)
irrad_data = clearsky.ineichen(ephem_data['apparent_zenith'], am,
linke_turbidity=3)
aoi = irradiance.aoi(0, 0, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
meta = {'latitude': 37.8,
'longitude': -122.3,
'altitude': 10,
'Name': 'Oakland',
'State': 'CA',
'TZ': -8}
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(tmy)))
tmy3_testfile = os.path.join(pvlib_abspath, 'data', '703165TY.csv')
tmy2_testfile = os.path.join(pvlib_abspath, 'data', '12839.tm2')
tmy3_data, tmy3_metadata = tmy.readtmy3(tmy3_testfile)
tmy2_data, tmy2_metadata = tmy.readtmy2(tmy2_testfile)
def test_systemdef_tmy3():
expected = {'tz': -9.0,
'albedo': 0.1,
'altitude': 7.0,
'latitude': 55.317,
'longitude': -160.517,
'name': '"SAND POINT"',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy3_metadata, 0, 0, .1, 5, 5)
def test_systemdef_tmy2():
expected = {'tz': -5,
'albedo': 0.1,
'altitude': 2.0,
'latitude': 25.8,
'longitude': -80.26666666666667,
'name': 'MIAMI',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy2_metadata, 0, 0, .1, 5, 5)
def test_systemdef_dict():
expected = {'tz': -8, ## Note that TZ is float, but Location sets tz as string
'albedo': 0.1,
'altitude': 10,
'latitude': 37.8,
'longitude': -122.3,
'name': 'Oakland',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 5}
assert expected == pvsystem.systemdef(meta, 5, 0, .1, 5, 5)
@needs_numpy_1_10
def test_ashraeiam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.ashraeiam(thetas, .05)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_ashraeiam():
module_parameters = pd.Series({'b': 0.05})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.ashraeiam(thetas)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_physicaliam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.physicaliam(thetas, 1.526, 0.002, 4)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_physicaliam():
    module_parameters = pd.Series({'K': 4, 'L': 0.002, 'n': 1.526})
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 10:31:31 2021
@author: Administrator
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 22 11:25:22 2021
@author: Administrator
"""
import h5py
# from pyram.PyRAM import PyRAM
from scipy import interpolate
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import glob
import os
import sys
os.chdir(r'D:\passive_acoustics\propagation_modelling')
import gsw
from netCDF4 import Dataset
import pandas as pd
import cartopy
import cartopy.crs as ccrs
from scipy.ndimage import gaussian_filter
import arlpy.uwapm as pm
modelfrec=500
# load data and slice out region of interest
# read mapdata
latlim=[-62,-56]
lonlim=[-(46+5),-(46-5)]
spacer=1
gebcofile=r"C:\Users\a5278\Documents\gebco_2020_netcdf\GEBCO_2020.nc"
gebco = Dataset(gebcofile, mode='r')
g_lons = gebco.variables['lon'][:]
g_lon_inds = np.where((g_lons>=lonlim[0]) & (g_lons<=lonlim[1]))[0]
# jump over entries to reduce data
g_lon_inds=g_lon_inds[::spacer]
g_lons = g_lons[g_lon_inds]
g_lats = gebco.variables['lat'][:]
g_lat_inds = np.where((g_lats>=latlim[0]) & (g_lats<=latlim[1]))[0]
# jump over entries to reduce data
g_lat_inds=g_lat_inds[::spacer]
g_lats = g_lats[g_lat_inds]
d = gebco.variables['elevation'][g_lat_inds, g_lon_inds]
gebco.close()
#%% get bathymetry slices
import pyresample
lo,la=np.meshgrid(g_lons, g_lats)
grid = pyresample.geometry.GridDefinition(lats=la, lons=lo)
m_loc=[-( 45+57.548/60) , -(60+24.297/60)]
from pyproj import Geod
geod = Geod("+ellps=WGS84")
bearings=np.arange(360)
bathy_dict={}
points_lat=pd.DataFrame()
points_lon=pd.DataFrame()
for b in bearings:
print(b)
points = geod.fwd_intermediate(lon1=m_loc[0],lat1=m_loc[1],azi1=b,npts=500,del_s=1000 )
p_lon=points[3]
p_lat=points[4]
points_lat=pd.concat( [points_lat,pd.DataFrame(p_lat)],ignore_index=True,axis=1 )
points_lon=pd.concat( [points_lon,pd.DataFrame(p_lon)],ignore_index=True,axis=1 )
swath = pyresample.geometry.SwathDefinition(lons=p_lon, lats=p_lat)
# Determine nearest (w.r.t. great circle distance) neighbour in the grid.
_, _, index_array, distance_array = pyresample.kd_tree.get_neighbour_info(
source_geo_def=grid, target_geo_def=swath, radius_of_influence=500000,
neighbours=1)
# get_neighbour_info() returns indices in the flattened lat/lon grid. Compute
# the 2D grid indices:
index_array_2d = np.unravel_index(index_array, grid.shape)
value = d[index_array_2d[0],index_array_2d[1]]
dvec=np.arange(0,1000*500,1000)
bb=np.transpose(np.array([dvec,-value.data]))
bathy_dict[b]= bb.tolist()
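# bathy_dict[b] holds [range_m, depth_m] pairs along bearing b; this Nx2 list is the
# range-dependent bathymetry format accepted by arlpy.uwapm.create_env2d(depth=...).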
timevec = pd.Series( pd.date_range(start=pd.Timestamp('2016-01-01'),end=pd.Timestamp('2017-01-01'),freq='M') )
#%%
datestr=timevec[7]
tl_mat_dict={}
tl_map_dict={}
for datestr in timevec:
ncfile=r"D:\copernicus_data\\" + datestr.strftime('%Y-%m-%d') + r"_ocean_reanalysis.nc"
nc = Dataset(ncfile)
la,lo=np.meshgrid(nc['latitude'][:].data, nc['longitude'][:].data)
grid = pyresample.geometry.GridDefinition(lats=la, lons=lo)
m_loc=[-( 45+57.548/60) , -(60+24.297/60)]
geod = Geod("+ellps=WGS84")
bearings=np.arange(360)
z_ss_dict={}
rp_ss_dict={}
cw_dict={}
points_lat=pd.DataFrame()
points_lon=pd.DataFrame()
tl_mat_ray=pd.DataFrame()
lat_mat_ray=pd.DataFrame()
lon_mat_ray=pd.DataFrame()
for b in bearings:
print(b)
points = geod.fwd_intermediate(lon1=m_loc[0],lat1=m_loc[1],azi1=b,npts=500,del_s=1000 )
p_lon=points[3]
p_lat=points[4]
points_lat=pd.concat( [points_lat,pd.DataFrame(p_lat)],ignore_index=True,axis=1 )
points_lon=pd.concat( [points_lon,pd.DataFrame(p_lon)],ignore_index=True,axis=1 )
swath = pyresample.geometry.SwathDefinition(lons=p_lon, lats=p_lat)
# Determine nearest (w.r.t. great circle distance) neighbour in the grid.
_, _, index_array, distance_array = pyresample.kd_tree.get_neighbour_info(
source_geo_def=grid, target_geo_def=swath, radius_of_influence=500000,
neighbours=1)
# get_neighbour_info() returns indices in the flattened lat/lon grid. Compute
# the 2D grid indices:
index_array_2d = np.unravel_index(index_array, grid.shape)
temp = nc['thetao'][:][0,:,index_array_2d[1],index_array_2d[0]]
sal = nc['so'][:][0,:,index_array_2d[1],index_array_2d[0] ]
depth=nc['depth'][:]
depth_mat=np.tile( depth, [sal.shape[0],1] )
# depth.shape
sound_speed = gsw.sound_speed(sal,temp,depth_mat)
sound_speed = pd.DataFrame( sound_speed.data )
sound_speed=sound_speed.fillna(axis=1,method='ffill')
# fig=plt.figure(num=6)
# plt.clf()
# plt.imshow(np.transpose(sound_speed.values[:,:]),aspect='auto')
# plt.pcolormesh(dvec,-depth,np.transpose(sound_speed.values))
# plt.boxplot((sound_speed.values))
dvec=np.arange(0,1000*500,1000)
# ssp2 = sound_speed.astype('int')
sspdic={}
i=0
dd=dvec.copy()
dd[-1]=dvec[-1]*10
for rang in dd:
sspdic[rang]= sound_speed.iloc[i,:].values
i=i+1
ssp2=pd.DataFrame(sspdic)
depth=nc['depth'][:]
dd=np.array(bathy_dict[b])[:,1].max()
ixx=depth<dd
ssp3=ssp2.iloc[ixx,:]
dssp=depth.data.astype('int').copy()[ixx]
dssp[0]=0
dssp[-1]=dd
ssp3.index=dssp
# ssp2 = pd.DataFrame({
# 0: [1540, 1530, 1532, 1533], # profile at 0 m range
# 100: [1540, 1535, 1530, 1533], # profile at 100 m range
# 200: [1530, 1520, 1522, 1525] }, # profile at 200 m range
# index=[0, 10, 20, 30]) # depths of the profile entries in m
env = pm.create_env2d(
depth= bathy_dict[b],
soundspeed=ssp3,
bottom_soundspeed=1450,
bottom_density=1200,
bottom_absorption=1.0,
tx_depth=200,
frequency=modelfrec,
min_angle = -45,
max_angle= 45)
ddarr=np.array(bathy_dict[b])
env['rx_range'] = ddarr[:,0]
# env['rx_range'] = np.linspace(0, 1000*299, 1000)
env['rx_depth'] = 15
# tloss = pm.compute_transmission_loss(env,mode='incoherent',debug=True)
tloss = pm.compute_transmission_loss(env,mode='incoherent')
tloss_dB= 20*np.log10( tloss.abs() )
lats=points_lat.iloc[:,b]
lons=points_lon.iloc[:,b]
lat_mat_ray= | pd.concat( [lat_mat_ray,lats],axis=1,ignore_index=True ) | pandas.concat |
import pandas as pd
def create_txt(ratio=0.9):
    df = pd.read_csv('train.csv') # read the data
    val = pd.DataFrame() # held-out validation split
train = | pd.DataFrame() | pandas.DataFrame |
import hashlib
import json
import logging
import os
import typing
import uuid
from collections import Counter, defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
import numpy
import numpy as np
import pandas as pd
def load_json(fname):
if os.path.isfile(fname):
with open(fname) as f:
return json.loads(f.read())
else:
return None
def save_json(fname, data):
assert fname.endswith(".json")
with open(fname, "w") as outfile:
json.dump(data, outfile)
def load_jsonl(jsonl_fname, to_df=True):
if os.path.isfile(jsonl_fname):
data = []
with open(jsonl_fname) as f:
for line in f:
data.append(json.loads(line))
if to_df:
data = pd.DataFrame(data)
return data
else:
return None
def save_jsonl(fname, data):
assert fname.endswith(".jsonl")
with open(fname, "w") as outfile:
for entry in data:
json.dump(entry, outfile)
outfile.write("\n")
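def _example_jsonl_roundtrip(path="example_records.jsonl"):
    # Illustrative sketch only (not part of the original module): write two records
    # with save_jsonl, then read them back as a DataFrame with load_jsonl.
    # The file name is an arbitrary placeholder.
    records = [{"id": 1, "text": "hello"}, {"id": 2, "text": "world"}]
    save_jsonl(path, records)
    return load_jsonl(path, to_df=True)  # DataFrame with columns "id" and "text"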
def mkf(*file_path):
"""Return file path, make sure the parent dir exists"""
file_path = [str(x) for x in file_path]
d = os.path.join(*file_path[:-1])
os.makedirs(d, exist_ok=True)
f = os.path.join(*file_path)
return f
def stem(fname, include_suffix=False):
"""/blah/my_file.json.gz --> my_file"""
path = Path(fname)
stem = path.stem
# If a filename has multiple suffixes, take them all off.
stem = stem[: stem.index(".")] if "." in stem else stem
if include_suffix:
stem = stem + "".join(path.suffixes)
return stem
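def _example_stem_usage():
    # Illustrative sketch only: stem() drops the directory and every suffix unless
    # include_suffix=True, matching the docstring example above.
    assert stem("/blah/my_file.json.gz") == "my_file"
    assert stem("/blah/my_file.json.gz", include_suffix=True) == "my_file.json.gz"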
def generate_md5_hash(data: str):
"""Generate a md5 hash of the input str
:param data: the input str
:return: the md5 hash string
"""
return hashlib.md5(data.encode()).hexdigest()
class PrettyDefaultDict(defaultdict):
"""An wrapper around defaultdict so the print out looks like
a normal dict."""
__repr__ = dict.__repr__
def gen_uuid():
return str(uuid.uuid4())
def file_len(fname):
try:
i = -1
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
except Exception as e:
logging.error(e)
return -1
def safe_getattr(object, attr):
try:
return getattr(object, attr)
except AttributeError:
return None
def list_to_textarea(ls: List[str]):
"""Converts a list of strings to a string joined by newlines.
This is meant to be used when rendering into a textarea.
"""
return "\n".join(ls)
def textarea_to_list(text: str):
"""Converts a textarea into a list of strings, assuming each line is an
item. This is meant to be the inverse of `list_to_textarea`.
"""
res = [x.strip() for x in text.split("\n")]
res = [x for x in res if len(x) > 0]
return res
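def _example_textarea_roundtrip():
    # Illustrative sketch only: list_to_textarea and textarea_to_list invert each
    # other for non-empty, already-stripped lines.
    items = ["alpha", "beta", "gamma"]
    text = list_to_textarea(items)  # "alpha\nbeta\ngamma"
    assert textarea_to_list(text) == items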
def json_lookup(json_data, key):
"""
Give a key "a.b.c", look up json_data['a']['b']['c']
Returns None if any of the keys were not found.
"""
sofar = json_data
for k in key.split("."):
try:
sofar = sofar[k]
except:
return None
return sofar
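def _example_json_lookup():
    # Illustrative sketch only: a dotted key walks nested dicts; any missing key
    # returns None instead of raising.
    data = {"a": {"b": {"c": 42}}}
    assert json_lookup(data, "a.b.c") == 42
    assert json_lookup(data, "a.b.missing") is None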
def build_counter(annos: List[Optional[int]]):
"""
Input is a list of annotation values \in {-1, 0, 1, nan}.
We ignore 0 and nan, and return a Counter of {-1, 1}.
"""
# Ignore all the elements that are 0 or nan.
annos = [x for x in annos if x != 0 and not | pd.isna(x) | pandas.isna |
import logging
from operator import itemgetter
from logging.config import dictConfig
from datetime import datetime, timedelta, date
from math import ceil
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
from chinese_calendar import get_holidays
import plotly.graph_objects as go
import numpy as np
from keysersoze.models import (
Deal,
Asset,
AssetMarketHistory,
)
from keysersoze.utils import (
get_accounts_history,
get_accounts_summary,
)
from keysersoze.apps.app import APP
from keysersoze.apps.utils import make_card_component
LOGGER = logging.getLogger(__name__)
dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(filename)s:%(lineno)s: %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"stream": "ext://sys.stdout",
},
},
'loggers': {
'__main__': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'keysersoze': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
pd.options.mode.chained_assignment = 'raise'
COLUMN_MAPPINGS = {
'code': '代码',
'name': '名称',
'ratio': '占比',
'return_rate': '收益率',
'cost': '投入',
'avg_cost': '成本',
'price': '价格',
'price_date': '价格日期',
'amount': '份额',
'money': '金额',
'return': '收益',
'action': '操作',
'account': '账户',
'date': '日期',
'time': '时间',
'fee': '费用',
'position': '仓位',
'day_return': '日收益',
}
FORMATS = {
'价格日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'时间': {'type': 'datetime', 'format': Format(nully='N/A')},
'占比': {'type': 'numeric', 'format': Format(scheme='%', precision=2)},
'收益率': {'type': 'numeric', 'format': Format(nully='N/A', scheme='%', precision=2)},
'份额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'金额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'费用': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'投入': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'成本': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'价格': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'收益': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
}
ACCOUNT_PRIORITIES = {
'长期投资': 0,
'长赢定投': 1,
'U定投': 2,
'投资实证': 3,
'稳健投资': 4,
'证券账户': 6,
'蛋卷基金': 7,
}
all_accounts = [deal.account for deal in Deal.select(Deal.account).distinct()]
all_accounts.sort(key=lambda name: ACCOUNT_PRIORITIES.get(name, 1000))
layout = html.Div(
[
dcc.Store(id='assets'),
dcc.Store(id='stats'),
dcc.Store(id='accounts_history'),
dcc.Store(id='index_history'),
dcc.Store(id='deals'),
dcc.Store(id='start-date'),
dcc.Store(id='end-date'),
html.H3('投资账户概览'),
dbc.Checklist(
id='show-money',
options=[{'label': '显示金额', 'value': 'show'}],
value=[],
switch=True,
),
html.Hr(),
dbc.InputGroup(
[
dbc.InputGroupAddon('选择账户', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='checklist',
options=[{'label': a, 'value': a} for a in all_accounts],
value=[all_accounts[0]],
inline=True,
className='my-auto'
),
],
className='my-2',
),
html.Div(id='account-summary'),
html.Br(),
dbc.Tabs([
dbc.Tab(
label='资产走势',
children=[
dcc.Graph(
id='asset-history-chart',
config={
'displayModeBar': False,
}
),
]
),
dbc.Tab(
label='累计收益走势',
children=[
dcc.Graph(
id="total-return-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='累计收益率走势',
children=[
dbc.InputGroup(
[
dbc.InputGroupAddon('比较基准', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='compare',
options=[
{'label': '中证全指', 'value': '000985.CSI'},
{'label': '上证指数', 'value': '000001.SH'},
{'label': '深证成指', 'value': '399001.SZ'},
{'label': '沪深300', 'value': '000300.SH'},
{'label': '中证500', 'value': '000905.SH'},
],
value=['000985.CSI'],
inline=True,
className='my-auto'
),
],
className='my-2',
),
dcc.Graph(
id="return-curve-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='日收益历史',
children=[
dcc.Graph(
id="day-return-chart",
config={
'displayModeBar': False
},
),
]
),
]),
html.Center(
[
dbc.RadioItems(
id="date-range",
className='btn-group',
labelClassName='btn btn-light border',
labelCheckedClassName='active',
options=[
{"label": "近一月", "value": "1m"},
{"label": "近三月", "value": "3m"},
{"label": "近半年", "value": "6m"},
{"label": "近一年", "value": "12m"},
{"label": "今年以来", "value": "thisyear"},
{"label": "本月", "value": "thismonth"},
{"label": "本周", "value": "thisweek"},
{"label": "所有", "value": "all"},
{"label": "自定义", "value": "customized"},
],
value="thisyear",
),
],
className='radio-group',
),
html.Div(
id='customized-date-range-container',
children=[
dcc.RangeSlider(
id='customized-date-range',
min=2018,
max=2022,
step=None,
marks={year: str(year) for year in range(2018, 2023)},
value=[2018, 2022],
)
],
className='my-auto ml-0 mr-0',
style={'max-width': '100%', 'display': 'none'}
),
html.Hr(),
dbc.Tabs([
dbc.Tab(
label='持仓明细',
children=[
html.Br(),
dbc.Checklist(
id='show-cleared',
options=[{'label': '显示清仓品种', 'value': 'show'}],
value=[],
switch=True,
),
html.Div(id='assets_cards'),
html.Center(
[
dbc.RadioItems(
id="assets-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
dbc.Tab(
label='交易记录',
children=[
html.Br(),
html.Div(id='deals_table'),
html.Center(
[
dbc.RadioItems(
id="deals-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
])
],
)
@APP.callback(
[
dash.dependencies.Output('assets', 'data'),
dash.dependencies.Output('stats', 'data'),
dash.dependencies.Output('accounts_history', 'data'),
dash.dependencies.Output('index_history', 'data'),
dash.dependencies.Output('deals', 'data'),
dash.dependencies.Output('deals-pagination', 'options'),
dash.dependencies.Output('assets-pagination', 'options'),
],
[
dash.dependencies.Input('checklist', 'value'),
dash.dependencies.Input('compare', 'value'),
],
)
def update_after_check(accounts, index_codes):
accounts = accounts or all_accounts
summary_data, assets_data = get_accounts_summary(accounts)
history = get_accounts_history(accounts).to_dict('records')
history.sort(key=itemgetter('account', 'date'))
index_history = []
for index_code in index_codes:
index = Asset.get(zs_code=index_code)
for record in index.history:
index_history.append({
'account': index.name,
'date': record.date,
'price': record.close_price
})
index_history.sort(key=itemgetter('account', 'date'))
deals = []
for record in Deal.get_deals(accounts):
deals.append({
'account': record.account,
'time': record.time,
'code': record.asset.zs_code,
'name': record.asset.name,
'action': record.action,
'amount': record.amount,
'price': record.price,
'money': record.money,
'fee': record.fee,
})
deals.sort(key=itemgetter('time'), reverse=True)
valid_deals_count = 0
for item in deals:
if item['action'] == 'fix_cash':
continue
if item['code'] == 'CASH' and item['action'] == 'reinvest':
continue
valid_deals_count += 1
pagination_options = [
{'label': idx + 1, 'value': idx}
for idx in range(ceil(valid_deals_count / 100))
]
assets_pagination_options = []
return (
assets_data,
summary_data,
history,
index_history,
deals,
pagination_options,
assets_pagination_options
)
@APP.callback(
dash.dependencies.Output('account-summary', 'children'),
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_summary(stats, show_money):
body_content = []
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '总资产',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['money'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '日收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['day_return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['day_return_rate'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '累计收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['return_rate'] if stats['amount'] > 0 else 'N/A(已清仓)',
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '年化收益率',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['annualized_return'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True,
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '现金',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['cash'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '仓位',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['position'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto',
color='primary',
)
return [card]
@APP.callback(
dash.dependencies.Output('assets_cards', 'children'),
[
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('show-cleared', 'value'),
]
)
def update_assets_table(assets_data, show_money, show_cleared):
cards = [html.Hr()]
for row in assets_data:
if not show_cleared and abs(row['amount']) <= 0.001:
continue
if row["code"] in ('CASH', 'WZZNCK'):
continue
cards.append(make_asset_card(row, show_money))
cards.append(html.Br())
return cards
def make_asset_card(asset_info, show_money=True):
def get_color(value):
if not isinstance(value, (float, int)):
return None
if value > 0:
return 'text-danger'
if value < 0:
return 'text-success'
return None
header = dbc.CardHeader([
html.H5(
html.A(
f'{asset_info["name"]}({asset_info["code"]})',
href=f'/asset/{asset_info["code"].replace(".", "").lower()}',
target='_blank'
),
className='mb-0'
),
html.P(f'更新日期 {asset_info["price_date"]}', className='mb-0'),
])
body_content = []
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '持有金额/份额'},
{'item_cls': html.H4, 'type': 'money', 'content': asset_info['money']},
{'item_cls': html.P, 'type': 'amount', 'content': asset_info['amount']}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '日收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['day_return'],
'color': get_color(asset_info['day_return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['day_return_rate'],
'color': get_color(asset_info['day_return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '现价/成本'},
{'item_cls': html.H4, 'type': 'price', 'content': asset_info['price']},
{'item_cls': html.P, 'type': 'price', 'content': asset_info['avg_cost'] or 'N/A'}
],
show_money=show_money,
)
)
asset = Asset.get(zs_code=asset_info['code'])
prices = []
for item in asset.history.order_by(AssetMarketHistory.date.desc()).limit(10):
if item.close_price is not None:
prices.append({
'date': item.date,
'price': item.close_price,
})
else:
prices.append({
'date': item.date,
'price': item.nav,
})
if len(prices) >= 10:
break
prices.sort(key=itemgetter('date'))
df = pd.DataFrame(prices)
df['date'] = pd.to_datetime(df['date'])
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['price'],
showlegend=False,
marker={'color': 'orange'},
mode='lines+markers',
)
)
fig.update_layout(
width=150,
height=100,
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
yaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
)
fig.update_xaxes(
rangebreaks=[
{'bounds': ["sat", "mon"]},
{
'values': get_holidays(df.date.min(), df.date.max(), False)
}
]
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '十日走势'},
{
'item_cls': None,
'type': 'figure',
'content': fig
}
],
show_money=show_money
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '累计收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['return'],
'color': get_color(asset_info['return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['return_rate'],
'color': get_color(asset_info['return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '占比'},
{'item_cls': html.H4, 'type': 'percent', 'content': asset_info['position']},
],
show_money=show_money,
)
)
card = dbc.Card(
[
header,
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('return-curve-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('index_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_return_chart(accounts_history, index_history, start_date, end_date):
df = pd.DataFrame(accounts_history)[['amount', 'account', 'date', 'nav']]
df['date'] = pd.to_datetime(df['date'])
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
df = df[df['account'] == '总计']
df['account'] = '我的'
fig = go.Figure()
if len(df) > 0:
start_nav = float(df[df['date'] == df['date'].min()].nav)
df.loc[:, 'nav'] = df['nav'] / start_nav - 1.0
df.rename(columns={'nav': 'return'}, inplace=True)
df = df.drop(df[df['amount'] <= 0].index)[['account', 'date', 'return']]
start_date = df.date.min()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['return'],
marker={'color': 'orange'},
name='我的',
mode='lines',
)
)
index_df = None
if index_history:
index_history = pd.DataFrame(index_history)
index_history['date'] = pd.to_datetime(index_history['date'])
if start_date is not None:
index_history = index_history[index_history['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
index_history = index_history[index_history['date'] < pd.to_datetime(end_date)]
index_names = set(index_history.account)
for name in index_names:
cur_df = index_history[index_history['account'] == name].copy()
cur_df.loc[:, 'price'] = cur_df['price'] / cur_df.iloc[0].price - 1.0
cur_df.rename(columns={'price': 'return'}, inplace=True)
if index_df is None:
index_df = cur_df
else:
index_df = pd.concat([index_df, cur_df], ignore_index=True)
fig.add_trace(
go.Scatter(x=cur_df['date'], y=cur_df['return'], name=name)
)
fig.update_layout(
legend_title_text='',
legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01, font_size=14),
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
yaxis_tickformat='%',
xaxis_tickformat="%m/%d\n%Y",
hovermode='x unified',
xaxis={'fixedrange': True},
yaxis={'fixedrange': True},
)
return fig
@APP.callback(
[
dash.dependencies.Output('profit_detail_graph', 'figure'),
dash.dependencies.Output('loss_detail_graph', 'figure'),
dash.dependencies.Output('quit_profits_table', 'columns'),
dash.dependencies.Output('quit_profits_table', 'data'),
dash.dependencies.Output('quit_loss_table', 'columns'),
dash.dependencies.Output('quit_loss_table', 'data'),
],
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_return_details(stats_data, assets_data, show_money):
stats = stats_data
total_return = stats['money'] - stats['amount']
assets = pd.DataFrame(assets_data)
profits, loss, total_profit = [], [], 0
for _, row in assets.iterrows():
if row['code'] == 'CASH':
continue
return_value = row['return']
if abs(return_value) < 0.001:
continue
if return_value > 0:
profits.append({
'code': row['code'],
'name': row['name'],
'branch': '盈利',
'return_value': return_value,
'category': '实盈' if row['amount'] <= 0 else '浮盈',
})
else:
loss.append({
'code': row['code'],
'name': row['name'],
'branch': '亏损',
'return_value': abs(return_value),
'category': '实亏' if row['amount'] <= 0 else '浮亏',
})
total_profit += return_value
if abs(total_return - total_profit) > 0.001:
profits.append({
'category': '实盈',
'code': 'CASH',
'name': '现金',
'branch': '盈利',
'return_value': round(total_return - total_profit, 2),
})
if not show_money:
profit_sum = sum([item['return_value'] for item in profits])
for item in profits:
item['return_value'] = round(10000 * item['return_value'] / profit_sum, 2)
loss_sum = sum([item['return_value'] for item in loss])
for item in loss:
item['return_value'] = round(10000 * item['return_value'] / loss_sum, 2)
profits = profits or [{
'code': '',
'name': '',
'branch': '盈利',
'category': '实盈',
'return_value': 0,
}]
profits = pd.DataFrame(profits)
if not show_money:
profits.loc[:, 'return_value'] = profits['return_value'] / 10000
profit_fig = px.treemap(
profits,
path=['branch', 'category', 'name'],
values='return_value',
branchvalues="total",
color='name',
)
profit_fig.update_layout(margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4})
loss = loss or [{
'code': '',
'name': '',
'branch': '亏损: 无',
'category': '实亏',
'return_value': 0,
}]
loss = pd.DataFrame(loss)
if not show_money:
loss.loc[:, 'return_value'] = loss['return_value'] / 10000
loss_fig = px.treemap(
loss,
path=['branch', 'category', 'name'],
values='return_value',
branchvalues="total",
color='name',
)
loss_fig.update_layout(margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4})
df = profits[['code', 'name', 'return_value']]
df = df.rename(columns={'return_value': '盈利', **COLUMN_MAPPINGS})
columns1, columns2 = [], []
for name in df.columns:
if name != '盈利':
columns1.append({'id': name, 'name': name})
columns2.append({'id': name, 'name': name})
continue
column = {'type': 'numeric'}
if not show_money:
column['format'] = Format(scheme='%', precision=2)
else:
column['format'] = Format(scheme=Scheme.fixed, precision=2)
columns1.append({'id': '盈利', 'name': '盈利', **column})
columns2.append({'id': '亏损', 'name': '亏损', **column})
data1 = df.to_dict('records')
data1.sort(key=itemgetter('盈利'), reverse=True)
df = loss[['code', 'name', 'return_value']]
df = df.rename(columns={'return_value': '亏损', **COLUMN_MAPPINGS})
data2 = [item for item in df.to_dict('records') if item['名称']]
data2.sort(key=itemgetter('亏损'), reverse=True)
return profit_fig, loss_fig, columns1, data1, columns2, data2
@APP.callback(
dash.dependencies.Output('deals_table', 'children'),
[
dash.dependencies.Input('deals', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('deals-pagination', 'value'),
]
)
def add_deal_record(deals, show_money, page_num):
cards = []
deals = [
item for item in deals
if item['action'] != 'fix_cash' and not (
item['code'] == 'CASH' and item['action'] == 'reinvest'
)
]
for row in deals[page_num * 100:(page_num + 1) * 100]:
cards.append(make_deal_card(row, show_money))
cards.append(html.Br())
return cards
def make_deal_card(deal_info, show_money=False):
action_mappings = {
'transfer_in': '转入',
'transfer_out': '转出',
'buy': '买入',
'sell': '卖出',
'reinvest': '红利再投资',
'bonus': '现金分红',
'spin_off': '拆分/合并'
}
body_content = []
if deal_info['code'] not in ('CASH', 'WZZNCK'):
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': f'{action_mappings[deal_info["action"]]}',
},
{
'item_cls': html.H5,
'type': 'text',
'content': html.A(
f'{deal_info["name"]}({deal_info["code"]})',
href=f'/asset/{deal_info["code"].replace(".", "").lower()}',
target='_blank'
),
},
{
'item_cls': html.P,
'type': 'text',
'content': pd.to_datetime(deal_info['time']).strftime('%Y-%m-%d %H:%M:%S'),
}
],
show_money=show_money
)
)
else:
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': f'{action_mappings[deal_info["action"]]}',
},
{
'item_cls': html.H5,
'type': 'text',
'content': deal_info['name'],
},
{
'item_cls': html.P,
'type': 'text',
'content': pd.to_datetime(deal_info['time']).strftime('%Y-%m-%d %H:%M:%S'),
}
],
show_money=show_money
)
)
body_content.extend([
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '份额/价格',
},
{
'item_cls': html.H5,
'type': 'amount',
'content': deal_info['amount'],
},
{
'item_cls': html.P,
'type': 'price',
'content': deal_info['price'],
}
],
show_money=show_money
),
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '金额/费用',
},
{
'item_cls': html.H5,
'type': 'money',
'content': deal_info['money'],
},
{
'item_cls': html.P,
'type': 'money',
'content': deal_info['fee'],
}
],
show_money=show_money
)
])
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[
dbc.Col([card_component], width=6 if idx == 0 else 3)
for idx, card_component in enumerate(body_content)
],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('customized-date-range-container', 'style'),
dash.dependencies.Input('date-range', 'value'),
)
def toggle_datepicker(date_range):
if date_range == 'customized':
return {'display': 'block'}
return {'display': 'none'}
@APP.callback(
[
dash.dependencies.Output('start-date', 'data'),
dash.dependencies.Output('end-date', 'data'),
],
[
dash.dependencies.Input('date-range', 'value'),
dash.dependencies.Input('customized-date-range', 'value'),
]
)
def update_return_range(date_range, customized_date_range):
start_date, end_date = None, None
if date_range == '1m':
start_date = (datetime.now() - timedelta(days=30)).date()
    elif date_range == '3m':
        start_date = (datetime.now() - timedelta(days=90)).date()
elif date_range == '6m':
start_date = (datetime.now() - timedelta(days=180)).date()
elif date_range == '12m':
start_date = (datetime.now() - timedelta(days=365)).date()
elif date_range == 'thisyear':
start_date = datetime.now().replace(month=1, day=1).date()
elif date_range == 'thismonth':
start_date = datetime.now().replace(day=1).date()
elif date_range == 'thisweek':
today = datetime.now().date()
start_date = today - timedelta(days=today.weekday())
elif date_range == 'customized' and customized_date_range:
start_year, end_year = customized_date_range
start_date = date(start_year, 1, 1)
end_date = date(end_year, 1, 1)
return start_date, end_date
@APP.callback(
dash.dependencies.Output('asset-history-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_asset_history(accounts_history, show_money, start_date, end_date):
accounts_history.sort(key=itemgetter('date'))
df = pd.DataFrame(accounts_history)
df = df[df['account'] == '总计']
df.date = pd.to_datetime(df.date)
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
if not show_money:
df.loc[:, "amount"] = df.amount / accounts_history[0]['amount']
df.loc[:, "money"] = df.money / accounts_history[0]['amount']
df["color"] = np.where(df.money > df.amount, 'red', 'green')
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df.date,
y=df.amount,
name='总投入',
marker={'color': 'green'},
mode='lines',
)
)
fig.add_trace(
go.Scatter(
x=df.date,
y=df.money,
name='总资产',
fill='tonexty',
marker={'color': 'red'},
mode='lines',
)
)
fig.update_layout(
xaxis_rangeslider_visible=False,
legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01, font_size=14),
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'fixedrange': True},
yaxis={'fixedrange': True},
hovermode='x unified',
)
fig.update_xaxes(tickformat="%m/%d\n%Y")
return fig
@APP.callback(
dash.dependencies.Output('portfolio-analysis', 'children'),
dash.dependencies.Input('assets', 'data'),
)
def update_porfolio_analysis(assets):
return html.P("hello")
@APP.callback(
dash.dependencies.Output('total-return-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def draw_total_return_chart(accounts_history, start_date, end_date, show_money):
df = pd.DataFrame(accounts_history)
df['date'] = pd.to_datetime(df['date'])
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
df = df[df['account'] == '总计']
df.loc[:, 'return'] -= df.iloc[0]['return']
df['account'] = '我的'
if not show_money:
max_return = df['return'].abs().max()
df.loc[:, 'return'] = df['return'] / max_return
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['return'],
marker={'color': 'orange'},
mode='lines',
)
)
max_idx = df['return'].argmax()
fig.add_annotation(
x=df.iloc[max_idx]['date'],
y=df.iloc[max_idx]['return'],
text=f'最大值: {df.iloc[max_idx]["return"]:0.2f}',
showarrow=True,
arrowhead=1
)
fig.update_layout(
legend_title_text='',
xaxis_tickformat='%m/%d\n%Y',
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'fixedrange': True},
yaxis={'fixedrange': True},
hovermode='x unified',
)
return fig
@APP.callback(
dash.dependencies.Output('day-return-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def draw_day_return_chart(accounts_history, start_date, end_date, show_money):
df = pd.DataFrame(accounts_history)
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
# import sompy / tfprop_sompy related packages
import math
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import urllib
import random
import matplotlib as mpl
from sompy.sompy import SOMFactory
from sompy.visualization.plot_tools import plot_hex_map
import logging
import pickle
import os
import sklearn
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
from sklearn import cluster
from sklearn.externals import joblib
import warnings
warnings.filterwarnings('ignore')
logging.getLogger('matplotlib.font_manager').disabled = True
def read_data(file):
    """
    input: csv file chosen from the directory
    """
    try:
        return pd.read_csv(file)
    except IOError:
        print("File not accessible")
        return pd.DataFrame()
def sm_training(self):
"""
Train the model with different parameters.
"""
self.data = self.open_csvfile()
# initialize the build
self.sm = SOMFactory().build(self.data, self.mapsize, self.normalization,
self.initialization, self.component_names, self.lattice)
# start training
self.sm.train(self.n_job, self.shared_memory, self.verbose, self.train_rough_len, self.train_rough_radiusin, self.train_rough_radiusfin,
self.train_finetune_len, self.train_finetune_radiusin, self.train_finetune_radiusfin, self.train_len_factor, self.maxtrainlen)
# errors calculation
self.topographic_error = self.sm.calculate_topographic_error()
self.quantitization_error = np.mean(self.sm._bmu[1])
# if multiple runs are required
#joblib.dump(sm, "model_{}.joblib".format(i))
pickle.dump(self.sm, open("Models/sm_model", "wb"))
# print errors on the cmd prompt
print("the topographic error is %s " % self.topographic_error)
print("the quantitization error is %s " % self.quantitization_error)
def select_model(file):
    """
    The file should be the trained sm model saved in the Models directory;
    this operation loads and returns the pickled model.
    """
    dir_name = "Models/"
    with open(os.path.join(dir_name, file), "rb") as model_f:
        sm = pickle.load(model_f)
    return sm
# generate vis and export to dir_name + filename
def vis(self):
"""
generate cluster map visualization
"""
    # the following are defaults; we can customize later
title = "Cluster"
dir_name = "Images/"
file_name = "cluster.png"
data = self.open_csvfile()
sm = self.open_modelfile()
    labels = list(data.index)
n_clusters = 5
cmap = plt.get_cmap("tab20")
n_palette = 20 # number of different colors in this color palette
color_list = [cmap((i % n_palette)/n_palette)
for i in range(n_clusters)]
msz = sm.codebook.mapsize
proj = sm.project_data(sm.data_raw)
coord = sm.bmu_ind_to_xy(proj)
fig, ax = plt.subplots(1, 1, figsize=(40, 40))
#cl_labels = som.cluster(n_clusters)
cl_labels = sklearn.cluster.KMeans(
n_clusters=n_clusters, random_state=555).fit_predict(sm.codebook.matrix)
# fill each rectangular unit area with cluster color
# and draw line segment to the border of cluster
norm = mpl.colors.Normalize(vmin=0, vmax=n_palette, clip=True)
# borders
ax.pcolormesh(cl_labels.reshape(msz[0], msz[1]).T % n_palette,
cmap=cmap, norm=norm, edgecolors='face',
lw=0.5, alpha=0.5)
ax.scatter(coord[:, 0]+0.5, coord[:, 1]+0.5, c='k', marker='o')
ax.axis('off')
for label, x, y in zip(labels, coord[:, 0], coord[:, 1]):
x += 0.2
y += 0.2
# "+ 0.1" means shift of label location to upperright direction
# randomize the location of the label not to be overwrapped with each other
# x_text += 0.1 * np.random.randn()
y += 0.3 * np.random.randn()
# wrap of label for chemical compound
# label = str_wrap(label)
# ax.text(x+0.3, y+0.3, label,
# horizontalalignment='left', verticalalignment='bottom',
# rotation=30, fontsize=15, weight='semibold')
plt.title(title)
# save as png file
plt.savefig(os.path.join(dir_name, file_name)+".png")
# cluster inspector
def cluster_inspector(self):
"""
Input: sm is the som model
data is the input data matrix
"""
data = self.open_csvfile()
sm = self.open_modelfile()
# This makes all the loggers stay quiet unless it's important
logging.getLogger().setLevel(logging.WARNING)
cl_labels = ci.kmeans_clust(sm, 5)
clusters_list = ci.sort_materials_by_cluster(sm, data, cl_labels)
# # This makes it so it will display the full lists
pd.set_option('display.max_rows', 2000)
pd.set_option('display.width', 1000)
| pd.set_option("display.max_columns", 50) | pandas.set_option |
#
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import warnings
import empyrical as ep
from pandas.tseries.offsets import BDay
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from . import utils
def factor_information_coefficient(factor_data,
group_adjust=False,
by_group=False):
"""
Computes the Spearman Rank Correlation based Information Coefficient (IC)
between factor values and N period forward returns for each period in
the factor index.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
group_adjust : bool
Demean forward returns by group before computing IC.
by_group : bool
If True, compute period wise IC separately for each group.
Returns
-------
ic : pd.DataFrame
Spearman Rank correlation between factor and
provided forward returns.
"""
def src_ic(group):
f = group['factor']
_ic = group[utils.get_forward_returns_columns(factor_data.columns)] \
.apply(lambda x: stats.spearmanr(x, f)[0])
return _ic
factor_data = factor_data.copy()
grouper = [factor_data.index.get_level_values('date')]
if group_adjust:
factor_data = utils.demean_forward_returns(factor_data,
grouper + ['group'])
if by_group:
grouper.append('group')
ic = factor_data.groupby(grouper).apply(src_ic)
return ic
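def _example_information_coefficient(factor_data):
    # Illustrative sketch only (not part of alphalens): `factor_data` is assumed to
    # be the MultiIndex frame built by utils.get_clean_factor_and_forward_returns.
    # Computes the daily IC per forward-return column and summarizes it by its mean.
    ic = factor_information_coefficient(factor_data)
    return ic.mean()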
def mean_information_coefficient(factor_data,
group_adjust=False,
by_group=False,
by_time=None):
"""
Get the mean information coefficient of specified groups.
Answers questions like:
What is the mean IC for each month?
    What is the mean IC for each group over our whole time range?
    What is the mean IC for each group, each week?
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
group_adjust : bool
Demean forward returns by group before computing IC.
by_group : bool
If True, take the mean IC for each group.
by_time : str (pd time_rule), optional
Time window to use when taking mean IC.
See http://pandas.pydata.org/pandas-docs/stable/timeseries.html
for available options.
Returns
-------
ic : pd.DataFrame
Mean Spearman Rank correlation between factor and provided
forward price movement windows.
"""
ic = factor_information_coefficient(factor_data, group_adjust, by_group)
grouper = []
if by_time is not None:
grouper.append(pd.Grouper(freq=by_time))
if by_group:
grouper.append('group')
if len(grouper) == 0:
ic = ic.mean()
else:
ic = (ic.reset_index().set_index('date').groupby(grouper).mean())
return ic
def factor_weights(factor_data,
demeaned=True,#long_short
group_adjust=False,#group_neutral
equal_weight=False):
"""
Computes asset weights by factor values and dividing by the sum of their
absolute value (achieving gross leverage of 1). Positive factor values will
    result in positive weights and negative values in negative weights.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
demeaned : bool
Should this computation happen on a long short portfolio? if True,
weights are computed by demeaning factor values and dividing by the sum
of their absolute value (achieving gross leverage of 1). The sum of
positive weights will be the same as the negative weights (absolute
value), suitable for a dollar neutral long-short portfolio
group_adjust : bool
Should this computation happen on a group neutral portfolio? If True,
compute group neutral weights: each group will weight the same and
if 'demeaned' is enabled the factor values demeaning will occur on the
group level.
equal_weight : bool, optional
if True the assets will be equal-weighted instead of factor-weighted
If demeaned is True then the factor universe will be split in two
equal sized groups, top assets with positive weights and bottom assets
with negative weights
Returns
-------
returns : pd.Series
Assets weighted by factor value.
"""
def to_weights(group, _demeaned, _equal_weight):
if _equal_weight:
group = group.copy()
if _demeaned:
# top assets positive weights, bottom ones negative
group = group - group.median()
negative_mask = group < 0
group[negative_mask] = -1.0
positive_mask = group > 0
group[positive_mask] = 1.0
if _demeaned:
# positive weights must equal negative weights
if negative_mask.any():
group[negative_mask] /= negative_mask.sum()
if positive_mask.any():
group[positive_mask] /= positive_mask.sum()
elif _demeaned:
group = group - group.mean()
return group / group.abs().sum()
grouper = [factor_data.index.get_level_values('date')]
if group_adjust:
grouper.append('group')
weights = factor_data.groupby(grouper)['factor'] \
.apply(to_weights, demeaned, equal_weight)
if group_adjust:
weights = weights.groupby(level='date').apply(to_weights, False, False)
return weights
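def _example_factor_weights(factor_data):
    # Illustrative sketch only: factor-weighted long-short weights; by construction
    # the absolute weights sum to ~1.0 (gross leverage of 1) on every date.
    weights = factor_weights(factor_data, demeaned=True)
    return weights.abs().groupby(level='date').sum()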
def factor_returns(factor_data,
demeaned=True,
group_adjust=False,
equal_weight=False,
by_asset=False):
"""
Computes period wise returns for portfolio weighted by factor
values.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
demeaned : bool
Control how to build factor weights
-- see performance.factor_weights for a full explanation
group_adjust : bool
Control how to build factor weights
-- see performance.factor_weights for a full explanation
equal_weight : bool, optional
Control how to build factor weights
-- see performance.factor_weights for a full explanation
by_asset: bool, optional
        If True, returns are reported separately for each asset.
Returns
-------
returns : pd.DataFrame
Period wise factor returns
"""
weights = \
factor_weights(factor_data, demeaned, group_adjust, equal_weight)
weighted_returns = \
factor_data[utils.get_forward_returns_columns(factor_data.columns)] \
.multiply(weights, axis=0)
if by_asset:
returns = weighted_returns
else:
returns = weighted_returns.groupby(level='date').sum()
return returns
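def _example_factor_returns(factor_data):
    # Illustrative sketch only: period-wise returns of the factor-weighted portfolio,
    # compounded into a cumulative curve for the first (shortest-horizon) column;
    # the column name depends on the input data, e.g. '1D'.
    returns = factor_returns(factor_data, demeaned=True)
    return (1 + returns.iloc[:, 0]).cumprod() - 1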
def compute_market_index(factor_data,returns):
universe_ret = factor_data.groupby(level='date')[
utils.get_forward_returns_columns(factor_data.columns)] \
.mean().loc[returns.index]
return universe_ret
def factor_alpha_beta(factor_data,
market_index=None,
index_name='market_mean',
returns=None,
demeaned=True,
group_adjust=False,
equal_weight=False,
):
"""
Compute the alpha (excess returns), alpha t-stat (alpha significance),
and beta (market exposure) of a factor. A regression is run with
the period wise factor universe mean return as the independent variable
and mean period wise return from a portfolio weighted by factor values
as the dependent variable.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
returns : pd.DataFrame, optional
Period wise factor returns. If this is None then it will be computed
with 'factor_returns' function and the passed flags: 'demeaned',
'group_adjust', 'equal_weight'
demeaned : bool
Control how to build factor returns used for alpha/beta computation
-- see performance.factor_return for a full explanation
group_adjust : bool
Control how to build factor returns used for alpha/beta computation
-- see performance.factor_return for a full explanation
equal_weight : bool, optional
Control how to build factor returns used for alpha/beta computation
-- see performance.factor_return for a full explanation
market_index: pd.DataFrame - Index
A DataFrame indexed by date. Periods used as columns.
Returns
-------
alpha_beta : pd.Series
A list containing the alpha, beta, a t-stat(alpha)
for the given factor and forward returns.
"""
if returns is None:
returns = \
factor_returns(factor_data, demeaned, group_adjust, equal_weight)
if index_name == 'market_mean':
universe_ret = factor_data.groupby(level='date')[
utils.get_forward_returns_columns(factor_data.columns)] \
.mean().loc[returns.index]
else:
universe_ret=market_index[utils.get_forward_returns_columns(market_index.columns)] \
.loc[returns.index]
if isinstance(returns, pd.Series):
returns.name = universe_ret.columns.values[0]
returns = pd.DataFrame(returns)
alpha_beta = | pd.DataFrame() | pandas.DataFrame |
"""
Extract metaphorical/non-metaphorical verbs, their arguments, and contexts
(sentences) from the VU Amsterdam corpus
"""
import json
import os
from tqdm import tqdm
tqdm.pandas()
from bs4 import BeautifulSoup
import pandas as pd
from gensim.utils import simple_preprocess
from pycorenlp import StanfordCoreNLP
nlp = None
GENRE_MAP = {
'ACPROSE': 'academic',
'NEWS': 'news',
'FICTION': 'fiction',
'CONVRSN': 'conversation'
}
def normalize_whitespace(x):
return ' '.join(x.strip().split())
def load_vuamc(filepath):
with open(filepath, 'r') as vuamc_f:
vuamc = BeautifulSoup(vuamc_f.read(), 'lxml')
return vuamc.find('text')
def load_lemmas(jsonlines_f):
verb_lemmas = []
ids = []
with open(jsonlines_f, 'r') as jf:
for line in jf:
lemma_json = json.loads(line)
verb_lemmas.append(lemma_json['x']['U-lemmaByPOS'])
ids.append(lemma_json['id'])
return | pd.DataFrame({'verb_lemma': verb_lemmas, 'id': ids}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 09:15:15 2019
PROGRAM PURPOSE:
    Creates the following graphics for the CTM:
-Table 7: Historical Households and Average Household Sizes for
XXXX County, 1980-2010
-Figure 4: Trend in Average Household Size for XXXX County, 1980-2010
@author: slq584
"""
import pandas as pd
import matplotlib.pyplot as plt
import paths as p
import API_call as api
import population_growth as pop
"""
get_excel_data() PURPOSE:
Get Number of Households and Average Household Size from Excel files
containing data that predates the 2000s
"""
def get_excel_data(data, county, decade):
#Dataframe to hold data
df = pd.DataFrame()
df = | pd.read_excel(data) | pandas.read_excel |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
from datetime import datetime as dt, timezone, timedelta, date
import time
import numpy as np
import pandas as pd
import pymongo
try:
import QUANTAXIS as QA
except:
    print('PLEASE run "pip install QUANTAXIS" before calling GolemQ.fetch.StockCN_realtime modules')
pass
try:
from utils.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST
)
except:
class AKA():
"""
        Constants for frequently used field names; defining them as constants avoids
        spelling mistakes from typing raw strings directly.
        """
        # candlestick (OHLCV) fields
CODE = 'code'
NAME = 'name'
OPEN = 'open'
HIGH = 'high'
LOW = 'low'
CLOSE = 'close'
VOLUME = 'volume'
VOL = 'vol'
DATETIME = 'datetime'
LAST_CLOSE = 'last_close'
PRE_CLOSE = 'pre_close'
def __setattr__(self, name, value):
raise Exception(u'Const Class can\'t allow to change property\' value.')
return super().__setattr__(name, value)
from QUANTAXIS.QAUtil import (
QASETTING,
)
client = QASETTING.client['QAREALTIME']
from utils.symbol import (
normalize_code
)
def GQ_fetch_stock_realtime_adv(code=None,
num=1,
collections=client.get_collection('realtime_{}'.format(date.today())),
verbose=True,
suffix=False,):
'''
    Return today's five-level bid/ask quotes. `code` may be a single stock or a list;
    `num` is the number of records fetched per stock.
    :param code:
    :param num:
    :param collections: realtime_XXXX-XX-XX, one realtime collection per trading day
    :param suffix: whether returned stock codes keep the SH/SZ exchange suffix
:return: DataFrame
'''
if code is not None:
        # code must be converted to a list for the query; the five-level collection stores stocks, indexes and funds together, so normalized codes are required
if isinstance(code, str):
code = [normalize_code(code)]
elif isinstance(code, list):
code = [normalize_code(symbol) for symbol in code]
pass
else:
print("QA Error GQ_fetch_stock_realtime_adv parameter code is not List type or String type")
#print(verbose, code)
items_from_collections = [
item for item in collections.find({'code': {
'$in': code
}},
limit=num * len(code),
sort=[('datetime',
pymongo.DESCENDING)])
]
if (items_from_collections is None) or \
(len(items_from_collections) == 0):
if verbose:
print("QA Error GQ_fetch_stock_realtime_adv find parameter code={} num={} collection={} return NOne"
.format(code,
num,
collections))
return
data = pd.DataFrame(items_from_collections)
if (suffix == False):
        # suffix == False: strip the exchange suffix and keep only the 6-digit code
data['code'] = data.apply(lambda x: x.at['code'][:6], axis=1)
data_set_index = data.set_index(['datetime',
'code'],
drop=False).drop(['_id'],
axis=1)
return data_set_index
else:
print("QA Error GQ_fetch_stock_realtime_adv parameter code is None")
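def _example_fetch_realtime(codes=None):
    # Illustrative sketch only: the codes below are placeholders and are normalized
    # internally; the default collection is today's realtime_YYYY-MM-DD, so this
    # returns None without a populated QAREALTIME database for the day.
    codes = codes or ['000001', '600000']
    return GQ_fetch_stock_realtime_adv(codes, num=1, verbose=False)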
def GQ_fetch_stock_day_realtime_adv(codelist,
data_day,
verbose=True):
"""
    Fetch realtime quotes to extend daily bars; supports querying multiple stocks.
"""
if codelist is not None:
        # codelist must be converted to a list for the DB query
if isinstance(codelist, str):
codelist = [codelist]
elif isinstance(codelist, list):
pass
else:
print("QA Error GQ_fetch_stock_day_realtime_adv parameter codelist is not List type or String type")
start_time = dt.strptime(str(dt.now().date()) + ' 09:15', '%Y-%m-%d %H:%M')
if ((dt.now() > start_time) and ((dt.now() - data_day.data.index.get_level_values(level=0)[-1].to_pydatetime()) > timedelta(hours=10))) or \
((dt.now() < start_time) and ((dt.now() - data_day.data.index.get_level_values(level=0)[-1].to_pydatetime()) > timedelta(hours=40))):
if (verbose == True):
print('时间戳差距超过:', dt.now() - data_day.data.index.get_level_values(level=0)[-1].to_pydatetime(),
'尝试查找实盘数据....', codelist)
#print(codelist, verbose)
try:
if (dt.now() > start_time):
collections = client.get_collection('realtime_{}'.format(date.today()))
else:
collections = client.get_collection('realtime_{}'.format(date.today() - timedelta(hours=24)))
data_realtime = GQ_fetch_stock_realtime_adv(codelist, num=8000, verbose=verbose, suffix=False, collections=collections)
except:
data_realtime = QA.QA_fetch_stock_realtime_adv(codelist, num=8000, verbose=verbose)
if (data_realtime is not None) and \
(len(data_realtime) > 0):
            # merge in the realtime quotes
data_realtime = data_realtime.drop_duplicates((["datetime",
'code'])).set_index(["datetime",
'code'],
drop=False)
data_realtime = data_realtime.reset_index(level=[1], drop=True)
data_realtime['date'] = pd.to_datetime(data_realtime['datetime']).dt.strftime('%Y-%m-%d')
data_realtime['datetime'] = | pd.to_datetime(data_realtime['datetime']) | pandas.to_datetime |
import pandas as pd
import numpy as np
from scipy.ndimage.interpolation import shift
from pandas import Series
def daily_returns(df, source_column = 'close', target_column = 'daily_return'):
df[target_column] = df[source_column] - df[source_column].shift(1)
def sma(df, periods = 7, source_column = 'close', target_column = 'sma'):
df['{}{}'.format(target_column, periods)] = df[source_column].rolling(window=periods).mean()
def triangular(n): return n * (n + 1) / 2
def wma(df, periods = 7, source_column = 'close', target_column = 'wma'):
x = np.linspace(1, periods, periods)
weights = (periods-x+1) / triangular(periods)
wma = np.convolve(weights, df[source_column], 'valid')
wma.resize(len(wma)+periods-1)
wma = Series(wma)
wma = wma.shift(periods-1)
df['{}{}'.format(target_column, periods)] = wma
def ema(df, periods = 7, source_column = 'close', target_column = 'ema'):
weights = np.exp(1-np.linspace(-1, 1, periods))
weights /= weights.sum()
ema = np.convolve(weights, df[source_column], 'valid')
ema.resize(len(ema)+periods-1)
ema = Series(ema)
ema = ema.shift(periods-1)
df['{}{}'.format(target_column, periods)] = ema
def std(df, periods = 20, source_column = 'close', target_column = 'std'):
df['{}{}'.format(target_column, periods)] = df[source_column].rolling(window=periods).std()
def bbands(df, periods = 20, source_column = 'close', target_column = 'bb'):
temp_df = pd.DataFrame(df[source_column])
std(temp_df, periods)
ema(temp_df, periods)
df['{}_mid'.format(target_column)] = temp_df['ema{}'.format(periods)]
df['{}_upper'.format(target_column)] = temp_df['ema{}'.format(periods)] + 2*temp_df['std{}'.format(periods)]
df['{}_lower'.format(target_column)] = temp_df['ema{}'.format(periods)] - 2*temp_df['std{}'.format(periods)]
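def _example_indicators(prices):
    # Illustrative sketch only: `prices` is assumed to be a DataFrame with a 'close'
    # column and at least ~20 rows; each helper adds its output columns in place.
    df = prices.copy()
    sma(df, periods=7)      # adds 'sma7'
    ema(df, periods=7)      # adds 'ema7'
    bbands(df, periods=20)  # adds 'bb_mid', 'bb_upper', 'bb_lower'
    return df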
def rsi(df, periods = 14, source_column='daily_return', target_column='rsi'):
temp_df = | pd.DataFrame(df[source_column]) | pandas.DataFrame |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
PeriodIndex,
Series,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float
class TestFillNA:
def test_fillna_datetime(self, datetime_frame):
tf = datetime_frame
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = datetime_frame.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all()
padded = datetime_frame.fillna(method="pad")
assert np.isnan(padded.loc[padded.index[:5], "A"]).all()
assert (
padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"]
).all()
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
datetime_frame.fillna()
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_frame.fillna(5, method="ffill")
def test_fillna_mixed_type(self, float_string_frame):
mf = float_string_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
# TODO: make stronger assertion here, GH 25640
mf.fillna(value=0)
mf.fillna(method="pad")
def test_fillna_mixed_float(self, mixed_float_frame):
# mixed numeric (but no float16)
mf = mixed_float_frame.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype={"C": None})
result = mf.fillna(method="pad")
_check_mixed_float(result, dtype={"C": None})
def test_fillna_empty(self):
# empty frame (GH#2778)
df = DataFrame(columns=["x"])
for m in ["pad", "backfill"]:
df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)
def test_fillna_different_dtype(self):
# with different dtype (GH#3386)
df = DataFrame(
[["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]]
)
result = df.fillna({2: "foo"})
expected = DataFrame(
[["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]]
)
tm.assert_frame_equal(result, expected)
return_value = df.fillna({2: "foo"}, inplace=True)
tm.assert_frame_equal(df, expected)
assert return_value is None
def test_fillna_limit_and_value(self):
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
tm.assert_frame_equal(result, expected)
def test_fillna_datelike(self):
# with datelike
# GH#6344
df = DataFrame(
{
"Date": [NaT, Timestamp("2014-1-1")],
"Date2": [Timestamp("2013-1-1"), NaT],
}
)
expected = df.copy()
expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"])
result = df.fillna(value={"Date": df["Date2"]})
tm.assert_frame_equal(result, expected)
def test_fillna_tzaware(self):
# with timezone
# GH#15855
df = DataFrame({"A": [Timestamp("2012-11-11 00:00:00+01:00"), NaT]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
tm.assert_frame_equal(df.fillna(method="pad"), exp)
df = DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
tm.assert_frame_equal(df.fillna(method="bfill"), exp)
def test_fillna_tzaware_different_column(self):
# with timezone in another column
# GH#15522
df = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1, 2, np.nan, np.nan],
}
)
result = df.fillna(method="pad")
expected = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1.0, 2.0, 2.0, 2.0],
}
)
tm.assert_frame_equal(result, expected)
def test_na_actions_categorical(self):
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = DataFrame({"cats": cat2, "vals": vals2})
cat3 = Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3})
cat4 = Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(ValueError, match=msg):
df.fillna(value={"cats": 4, "vals": "c"})
res = df.fillna(method="pad")
tm.assert_frame_equal(res, df_exp_fill)
# dropna
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes missing values into account
        c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
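        # The original test body is truncated here. A hedged sketch of a plausible
        # continuation; the exact assertions are assumptions, not from the source.
        df = DataFrame({"cats": c, "vals": [1, 2, 3]})
        # filling with an existing category succeeds and removes the missing values
        res = df.fillna("a")
        assert not res["cats"].isna().any()
        # filling with a value outside the categories raises, as asserted above
        with pytest.raises(ValueError, match=msg):
            df.fillna("c")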
import time
import importlib
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_table
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
    average_precision_score, precision_recall_curve, f1_score, auc,
    roc_curve, roc_auc_score
)
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from inspect import signature
class Dataset:
def __init__(self, df):
self.df = df
def detect_outlier_zscore(data, threshold):
outliers = pd.DataFrame([], columns=['ID', 'sqdist', 'cluster'])
mean = np.mean(data.sqdist)
std = np.std(data.sqdist)
for y in data.itertuples():
z_score = (y.sqdist - mean)/std
if np.abs(z_score) > threshold:
outliers = outliers.append(
{'ID': y.ID, 'sqdist': y.sqdist, 'cluster': y.cluster}, ignore_index=True)
return outliers
def detect_outlier_quantile(data, percent):
quantile = data.sqdist.quantile(percent)
    outliers = pd.DataFrame([], columns=['ID', 'sqdist', 'cluster'])
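    # The function body is truncated here in the source. A hedged sketch of a plausible
    # continuation that mirrors detect_outlier_zscore above; treating values above the
    # requested quantile as outliers is an assumption.
    for y in data.itertuples():
        if y.sqdist > quantile:
            outliers = outliers.append(
                {'ID': y.ID, 'sqdist': y.sqdist, 'cluster': y.cluster}, ignore_index=True)
    return outliers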
from osgeo import gdal
import os
import cv2
import sys
import glob
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import concurrent.futures
import psutil
import pathlib
import shutil
import time
import hipp
import hsfm
"""
Wrappers around other hsfm functions for batch processing.
Inputs are generally a folder containing multiple files or a csv listing
multiple urls.
"""
def prepare_ba_run(input_directory,
output_directory,
scale):
camera_solve_directory = os.path.join(output_directory, 'cam_solve')
bundle_adjust_directory = os.path.join(output_directory,'ba')
images_directory = os.path.join(output_directory,'images'+'_sub'+str(scale))
gcp_directory = os.path.join(input_directory,'gcp')
hsfm.io.batch_rename_files(
camera_solve_directory,
file_extension=str(scale)+'.match',
destination_file_path=bundle_adjust_directory)
overlap_list = hsfm.core.create_overlap_list_from_match_files(camera_solve_directory,
images_directory,
output_directory)
if not os.path.exists(os.path.join(bundle_adjust_directory,'overlaplist.txt')):
gcp_directory = os.path.join(input_directory,'gcp')
overlap_list = hsfm.core.create_overlap_list(gcp_directory,
images_directory,
output_directory=output_directory)
return os.path.join(bundle_adjust_directory,'overlaplist.txt')
def prepare_stereo_run(output_directory):
bundle_adjust_directory = os.path.join(output_directory, 'ba')
stereo_input_directory = os.path.join(output_directory, 'stereo/stereo_inputs')
stereo_output_directory = os.path.join(output_directory, 'stereo/stereo_run')
hsfm.io.batch_rename_files(
bundle_adjust_directory,
file_extension='tsai',
destination_file_path=stereo_input_directory)
hsfm.io.batch_rename_files(
bundle_adjust_directory,
file_extension='clean.match',
destination_file_path=stereo_input_directory)
def rescale_images(image_directory,
output_directory,
extension='.tif',
scale=8,
verbose=False):
output_directory = os.path.join(output_directory, 'images'+'_sub'+str(scale))
image_files = sorted(glob.glob(os.path.join(image_directory,'*'+ extension)))
# n = len(psutil.Process().cpu_affinity())
# pool = concurrent.futures.ThreadPoolExecutor(max_workers=n)
# parallel_data = {pool.submit(hsfm.utils.rescale_geotif,
# image_file,
# output_directory=output_directory,
# scale=scale): \
# image_file for image_file in image_files}
# for future in concurrent.futures.as_completed(parallel_data):
# r = future.result()
# if verbose:
# print(r)
for image_file in image_files:
hsfm.utils.rescale_geotif(image_file,
output_directory=output_directory,
scale=scale,
verbose=verbose)
return os.path.relpath(output_directory)
# return sorted(glob.glob(os.path.join(output_directory,'*'+ extension)))
def rescale_tsai_cameras(camera_directory,
output_directory,
extension='.tsai',
scale=8):
output_directory = os.path.join(output_directory, 'cameras'+'_sub'+str(scale))
hsfm.io.create_dir(output_directory)
pitch = "pitch = 1"
new_pitch = "pitch = "+str(scale)
camera_files = sorted(glob.glob(os.path.join(camera_directory,'*'+ extension)))
for camera_file in camera_files:
file_path, file_name, file_extension = hsfm.io.split_file(camera_file)
output_file = os.path.join(output_directory,
file_name +'_sub'+str(scale)+file_extension)
hsfm.io.replace_string_in_file(camera_file, output_file, pitch, new_pitch)
return os.path.relpath(output_directory)
# return sorted(glob.glob(os.path.join(output_directory,'*'+ extension)))
def batch_generate_cameras(image_directory,
camera_positions_file_name,
reference_dem_file_name,
focal_length_mm,
output_directory,
pixel_pitch_mm=0.02,
verbose=False,
subset=None,
manual_heading_selection=False,
reverse_order=False):
"""
Function to generate cameras in batch.
Note:
- Specifying subset as a tuple indicates selecting a range of values, while supplying
a list allows for single or multiple specific image selection.
"""
# TODO
# - Embed hsfm.utils.pick_headings() within calculate_heading_from_metadata() and launch for images where the heading could not be determined with high confidence (e.g. if image
# potentially part of another flight line, or at the end of current flight line with no
# subsequent image to determine flight line from.)
# - provide principal_point_px to hsfm.core.initialize_cameras on a per image basis
    # put gcp generation in a separate batch routine
image_list = sorted(glob.glob(os.path.join(image_directory, '*.tif')))
image_list = hsfm.core.subset_input_image_list(image_list, subset=subset)
if reverse_order:
image_list = image_list[::-1]
if manual_heading_selection == False:
df = hsfm.batch.calculate_heading_from_metadata(camera_positions_file_name,
output_directory=output_directory,
subset=subset,
reverse_order=reverse_order)
else:
df = hsfm.utils.pick_headings(image_directory, camera_positions_file_name, subset, delta=0.01)
if len(image_list) != len(df):
print('Mismatch between metadata entries in camera position file and available images.')
sys.exit(1)
for i,v in enumerate(image_list):
image_file_name = v
camera_lat_lon_center_coordinates = (df['Latitude'].iloc[i], df['Longitude'].iloc[i])
heading = df['heading'].iloc[i]
gcp_directory = hsfm.core.prep_and_generate_gcp(image_file_name,
camera_lat_lon_center_coordinates,
reference_dem_file_name,
focal_length_mm,
heading,
output_directory)
# principal_point_px is needed to initialize the cameras in the next step.
img_ds = gdal.Open(image_file_name)
image_width_px = img_ds.RasterXSize
image_height_px = img_ds.RasterYSize
principal_point_px = (image_width_px / 2, image_height_px /2 )
focal_length_px = focal_length_mm / pixel_pitch_mm
# should be using principal_point_px on a per image basis
intial_cameras_directory = hsfm.core.initialize_cameras(camera_positions_file_name,
reference_dem_file_name,
focal_length_px,
principal_point_px,
output_directory)
output_directory = hsfm.asp.generate_ba_cameras(image_directory,
gcp_directory,
intial_cameras_directory,
output_directory,
subset=subset)
return output_directory
def calculate_heading_from_metadata(df,
subset = None,
reverse_order = False,
output_directory = None,
for_metashape = False,
reference_dem = None,
flight_altitude_above_ground_m = 1500,
file_base_name_column = 'fileName',
longitude_column = 'Longitude',
latitude_column = 'Latitude'):
# TODO
    # - Add flightline separation function
# - Generalize beyond NAGAP keys
    if not isinstance(df, pd.DataFrame):
        df = pd.read_csv(df)
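    # The remainder of this function is truncated in the source. A hedged sketch of one
    # way headings could be computed from consecutive camera positions; the bearing
    # formula below is an assumption, not taken from the original implementation.
    if reverse_order:
        df = df.iloc[::-1].reset_index(drop=True)
    lat = np.radians(df[latitude_column].values.astype(float))
    lon = np.radians(df[longitude_column].values.astype(float))
    dlon = np.append(np.diff(lon), np.nan)
    lat_next = np.append(lat[1:], np.nan)
    # initial bearing from each camera position to the next one in the flight line
    x = np.sin(dlon) * np.cos(lat_next)
    y = np.cos(lat) * np.sin(lat_next) - np.sin(lat) * np.cos(lat_next) * np.cos(dlon)
    df['heading'] = (np.degrees(np.arctan2(x, y)) + 360) % 360
    # the last image has no successor, so reuse the previous heading
    df['heading'] = df['heading'].fillna(method='ffill')
    return df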
from oas_dev.util.eusaar_data import time_h
from oas_dev.constants import path_eusaar_data
import netCDF4
import numpy as np
import pandas as pd
# %%
def load_time():
p = path_eusaar_data + '/GEN/'
timef = 'timevec_DOY.dat'
ti = np.loadtxt(p+timef)
return ti
def test_time():
ti = load_time()
units = 'days since 2008-01-01'
time = netCDF4.num2date(ti, units, 'standard')
    # microsecond offsets may appear due to floating point precision
    time = pd.to_datetime(time)
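    # The remainder of this test is truncated in the source; a minimal (assumed) sanity check:
    assert len(time) == len(ti)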
import altair as alt
from collections import Counter
import logomaker
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
random.seed(100)
import seaborn as sns
import swifter
import Bio
from Bio import motifs
from Bio.Seq import Seq
import genbank_utils as gu
# +-----------------------------------+
# | Functions for Viewing motif logos |
# +-----------------------------------+
def load_promoter_seqs(filename):
'''
Load fasta file of promoters into ID, desc, and seq. It expects
    each fasta header to be delimited by "|" in the format:
LOCUS_TAG|GENE_SYMBOL|PRODUCT
'''
proms = []
with open(filename,'r') as f:
for line in f:
if line.startswith(">"):
full_header = line.strip()[1:].strip()
locus_tag = full_header.split('|')[0]
else:
seq = line.strip().upper()
proms.append((locus_tag,full_header,seq))
return proms
def view_motif(m1,m2):
'''
Given two Motif objects from the BioPython motifs module,
create a sequence logo from their PWMs
'''
df1 = pd.DataFrame(m1.pwm, columns=['A','C','G','T'])
df2 = pd.DataFrame(m2.pwm, columns=['A','C','G','T'])
# initialize 2 panel figure
fig,(ax1,ax2) = plt.subplots(1,2,sharey=True,figsize=[10,2])
# block 1
logo1 = logomaker.transform_matrix(df1,from_type='probability',to_type='information')
crp_logo1 = logomaker.Logo(logo1,ax=ax1)
# block 2
logo2 = logomaker.transform_matrix(df2,from_type='probability',to_type='information')
crp_logo2 = logomaker.Logo(logo2,ax=ax2,)
# labels
df1_title = f"Motif Block 1"
df2_title = f"Motif Block 2"
ax1.set_title(df1_title)
ax2.set_title(df2_title)
ax1.set_xticks([])
ax2.set_xticks([])
plt.ylim(0,2)
plt.show()
def build_2Bmotif_from_selection_file(filename, randomize=False,verbose=False):
'''
Given a SELECTION.fa, build a 2 block consensus motif from the
first 6 and last 6 bases of each input sequence
'''
proms = load_promoter_seqs(filename)
# collect first 6 and last 6 bases from each predicted promoter
block1_instances = [Seq(x[:6]) for (_,_,x) in proms]
block2_instances = [Seq(x[-6:]) for (_,_,x) in proms]
# if we are randomizing the sequences to make a random motif
if randomize:
        # establish a new random ordering of indices (some may be repeated or left out)
        new_order = [random.randint(0,5) for x in range(6)]
        # remap letters to different bases. This tries to keep a "strong signal" present
        # (independently shuffling all the letters to different positions would flatten
        # the information content). Instead, this only changes which letters carry the
        # stronger or weaker signal, so the signal strength of the original consensus
        # should be preserved.
remap = {
'A':'T',
'C':'A',
'G':'C',
'T':'G',
}
# collect randomized versions of the hexamers
rand_block1_instances = []
rand_block2_instances = []
for i in range(len(block1_instances)):
seq1 = str(block1_instances[i])
seq1 = ''.join([remap[x] for x in seq1])
seq1 = ''.join([seq1[x] for x in new_order])
rand_block1_instances.append(Seq(seq1))
seq2 = str(block2_instances[i])
seq2 = ''.join([remap[x] for x in seq2])
seq2 = ''.join([seq2[x] for x in new_order])
rand_block2_instances.append(Seq(seq2))
block1_instances = rand_block1_instances
block2_instances = rand_block2_instances
# create BioPython motif objects
m1 = motifs.create(block1_instances)
m2 = motifs.create(block2_instances)
# add pseudocount for proper pssm
m1.pseudocounts = 0.5
m2.pseudocounts = 0.5
if verbose:
# Display Motif matrix info and consensus
print("PWM")
print(m1.pwm)
print(m2.pwm)
print("\nPSSM")
print(m1.pssm)
print(m2.pssm)
print("\nConsensus")
print(m1.consensus)
print(m2.consensus)
# view the consensus from this file
view_motif(m1,m2)
return proms, m1, m2
# +-----------------------------------------------------+
# | Functions for scanning the genome for motif matches |
# +-----------------------------------------------------+
def build_feature_distance_index(feat_coords, genome_len):
'''
Given a list of feature coords on a particular strand, go through the genome and for
each position, record the distance to the next feature start position. If the position
is inside a feature, mark as -1.
    feat_coords should be a list of tuples: (feat_start_coord, feat_end_coord, locus_id)
'''
start_idx = 0
end_idx = 1
loc_idx = 2
feat_idx = 0 # current feature index
cur_feat = feat_coords[feat_idx]
# keep track of distance to next feat as well as
# which feat we're that distance from
dist_array = np.zeros(genome_len,dtype='U15')
nearest_feat_array = np.zeros(genome_len,dtype='U15')
# for each position in the genome
for genome_idx in range(genome_len):
# if we're before the next feature
if genome_idx < cur_feat[start_idx]:
# record distance from current loc to feature start
dist_array[genome_idx] = cur_feat[start_idx] - genome_idx
# if we're in the feature, mark as -1
elif genome_idx >= cur_feat[start_idx] and genome_idx <= cur_feat[end_idx]:
dist_array[genome_idx] = -1
# if we've passed through the current feature and are on to the next,
# update the current feature
elif genome_idx > cur_feat[end_idx]:
feat_idx += 1
# if we've run out of features at the end of the genome
if feat_idx >= len(feat_coords):
# build the final feat around the circle of the genome
cur_feat = (genome_len+feat_coords[0][start_idx], genome_len+feat_coords[0][end_idx], feat_coords[0][loc_idx])
else:
cur_feat = feat_coords[feat_idx]
dist_array[genome_idx] = cur_feat[start_idx] - genome_idx
# mark nearest gene
nearest_feat_array[genome_idx] = cur_feat[loc_idx] # id of feature
return dist_array, nearest_feat_array
def build_genome_position_category_df(pos_dist_arr, neg_dist_arr):
'''
Given an array of genome positions and their distance to the
next nearest feature, build a dictionary counting the number
of positions in each type of category:
* Inside a gene
* within 100bp of a gene start
* between 300bp and 100bp of a gene start
* intergenic, beyond 300 bp from gene start
This df is later used to normalize pssm match counts (by the total
number of positions in the genome that are in each category)
'''
def get_category(num):
'''
Given a distance, return it's category
'''
if num == -1:
return "in gene"
elif num <= 100:
return "<100 to ATG"
elif num <=300:
return "100:300 to ATG"
else:
return "intergenic"
# count number of positions in each genome category
pos_cat_dict = Counter([get_category(int(x)) for x in pos_dist_arr])
neg_cat_dict = Counter([get_category(int(x)) for x in neg_dist_arr])
# convert to df
    pos_cat_df = pd.DataFrame.from_dict(pos_cat_dict, orient='index')
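    # Truncated here in the source. A hedged sketch of a plausible continuation that
    # mirrors the positive-strand handling above; the column names are assumptions.
    neg_cat_df = pd.DataFrame.from_dict(neg_cat_dict, orient='index')
    cat_df = pd.concat([pos_cat_df, neg_cat_df], axis=1)
    cat_df.columns = ['pos_strand_positions', 'neg_strand_positions']
    cat_df['total_positions'] = cat_df.sum(axis=1)
    return cat_df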
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 18:10:43 2019
@author: galengao
"""
import os
import time
import argparse
import pandas as pd
from multiprocessing import Pool
import bootstrapper as bs
import genotyper as gt
import make_KIR_minibams as genKIR
def convert_to_fasta(bamfile):
'''Unpack reads from BAM file (xxx.bam) into FASTA file (xxx.fasta).
Returns name of FASTA file.'''
stem = bamfile[:-4]
os.system('samtools fasta ' + bamfile + ' > ' + stem + '.fasta')
return stem+'.fasta'
def run_blast(fastafile, kgene):
'''BLAST reads from inputted BAMfile against a database of all reference
alleles for the inputted KIR gene; outputted as a .csv. Returns CSV name.'''
kDB = kgene + '_db'
outfile = fastafile[:-6] + '.csv'
cmd = 'blastn -db ' +kDB+ ' -outfmt 10 -query ' +fastafile+ ' > ' +outfile
os.system(cmd)
return outfile
def run_bootstrapper(scorefile, kgene, tag, part=0.5, nboot=100, alpha=1e-05, maxIter=1000):
'''Run bootstrapped EM algorithm on each BLAST score file to generate KIR
allele probabilities. Return probability file.'''
if os.stat(scorefile).st_size != 0: # make sure BLAST file is not empty
P, alls = bs.bootstrap_BLAST_file(scorefile, kgene, pident=100, part=part, \
n_boot=nboot, alpha=alpha, maxIter=maxIter)
df_out = pd.DataFrame(P, index=alls, columns=[tag])
df_out.to_csv(scorefile[:-4]+'_calls.tsv', sep='\t')
return df_out
else:
return pd.DataFrame([])
def run_genotyper(df, thresh=0.25):
'''Run KIR genotyper on each BLAST score file.'''
if len(df) != 0: # make sure allele probability file is not empty
sol = gt.genotype_bootstrap(df.values.ravel(), df.index, thresh=thresh)
else:
sol = ('No Solution', 'No Solution')
return sol
def process_sample_multipleBAMs(bamfile):
'''Given inputted BAM file, run bootstrapper & genotyper. Individual
functions write output to disc.'''
# extract the KIR gene we're dealing with from the bamfile name
stem = bamfile[:-4]
kgene = stem.split('_')[-1]
tag = stem.split('_')[0]
print(' >>> ' + kgene + ' <<< ')
# run pipeline on the BAMfile
fastafile = convert_to_fasta(bamfile)
scorefile = run_blast(fastafile, kgene)
df_p = run_bootstrapper(scorefile, kgene, tag, part=part, nboot=nboot, \
alpha=alpha, maxIter=maxIter)
sol = run_genotyper(df_p, thresh=thresh)
return df_p, sol
def write_KIR_outputs(tag, df_outs, sols):
'''Write outputs to disc: [tag]_probabilities.txt and [tag]_genotypes.txt'''
# write aggregated allele probabilities to file
if len(df_outs) != 0:
df_out = pd.concat(df_outs)
else:
        df_out = pd.DataFrame([])
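    # Truncated here in the source. A hedged sketch of a plausible continuation that
    # follows the file names given in the docstring; the genotype column names are assumptions.
    df_out.to_csv(tag + '_probabilities.txt', sep='\t')
    df_sol = pd.DataFrame(sols, columns=['allele_1', 'allele_2'])
    df_sol.to_csv(tag + '_genotypes.txt', sep='\t', index=False)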
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert 'index=True not supported' in str(excinfo.value)
def test_run_persistence_interval_too_long(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('48h'), # too long
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_interval_not_midnight_to_midnight(session,
site_metadata,
obs_5min_begin):
# not midnight to midnight
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=22),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2200Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_incompatible_issue(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2330Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'incompatible' in str(excinfo.value).lower()
def test_run_persistence_fx_too_short(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1min'),
run_length=pd.Timedelta('3min'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'requires observation.interval_length' in str(excinfo.value)
def test_run_persistence_incompatible_instant_fx(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'instantaneous forecast' in str(excinfo.value).lower()
def test_run_persistence_incompatible_instant_interval(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
obs = obs_5min_begin.replace(interval_label='instantaneous',
interval_length=pd.Timedelta('10min'))
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs, forecast, run_time,
issue_time)
assert 'identical interval length' in str(excinfo.value)
def test_verify_nwp_forecasts_compatible(ac_power_forecast_metadata):
fx0 = ac_power_forecast_metadata
fx1 = replace(fx0, run_length=pd.Timedelta('10h'), interval_label='ending')
df = pd.DataFrame({'forecast': [fx0, fx1], 'model': ['a', 'b']})
errs = main._verify_nwp_forecasts_compatible(df)
assert set(errs) == {'model', 'run_length', 'interval_label'}
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', True),
('{"is_reference_persistence_forecast": true}', False),
('{"is_reference_forecast": "True"}', True),
('{"is_reference_forecast":"True"}', True),
('is_reference_forecast" : "True"}', True),
('{"is_reference_forecast" : true, "otherkey": badjson, 9}', True),
('reference_forecast": true', False),
('{"is_reference_forecast": false}', False),
("is_reference_forecast", False)
])
def test_is_reference_forecast(string, expected):
assert main._is_reference_forecast(string) == expected
def test_find_reference_nwp_forecasts_json_err(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
extra_params = '{"model": "themodel", "is_reference_forecast": true}'
fxs = [replace(ac_power_forecast_metadata, extra_parameters=extra_params),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "yes"}'),
replace(ac_power_forecast_metadata, extra_parameters='{"is_reference_forecast": true'), # NOQA
replace(ac_power_forecast_metadata, extra_parameters='')]
out = main.find_reference_nwp_forecasts(fxs)
assert logger.warning.called
assert len(out) == 1
def test_find_reference_nwp_forecasts_no_model(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
fxs = [replace(ac_power_forecast_metadata, extra_parameters='{}',
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 0
assert logger.debug.called
assert logger.error.called
def test_find_reference_nwp_forecasts_no_init(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 2
assert out.next_issue_time.unique() == [None]
assert out.piggyback_on.unique() == ['0']
def test_find_reference_nwp_forecasts(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(
fxs, pd.Timestamp('20190501T0000Z'))
assert len(out) == 2
assert out.next_issue_time.unique()[0] == pd.Timestamp('20190501T0500Z')
assert out.piggyback_on.unique() == ['0']
@pytest.fixture()
def forecast_list(ac_power_forecast_metadata):
model = 'nam_12km_cloud_cover_to_hourly_mean'
prob_dict = ac_power_forecast_metadata.to_dict()
prob_dict['constant_values'] = (0, 50, 100)
prob_dict['axis'] = 'y'
prob_dict['extra_parameters'] = '{"model": "gefs_half_deg_to_hourly_mean", "is_reference_forecast": true}' # NOQA
return [replace(ac_power_forecast_metadata,
extra_parameters=(
'{"model": "%s", "is_reference_forecast": true}'
% model),
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "gfs_quarter_deg_hourly_to_hourly_mean", "is_reference_forecast": true}', # NOQA
forecast_id='1'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='2',
variable='ghi'),
datamodel.ProbabilisticForecast.from_dict(prob_dict),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='3',
variable='dni',
provider='Organization 2'
),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "badmodel", "is_reference_forecast": true}', # NOQA
forecast_id='4'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "6", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='5',
variable='ghi'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": false}' % model, # NOQA
forecast_id='7',
variable='ghi'),
]
def test_process_nwp_forecast_groups(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 4
@pytest.mark.parametrize('run_time', [None, pd.Timestamp('20190501T0000Z')])
def test_process_nwp_forecast_groups_issue_time(mocker, forecast_list,
run_time):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert post_vals.call_count == 4
run_nwp.assert_called_with(mocker.ANY, mocker.ANY, mocker.ANY,
pd.Timestamp('20190501T0500Z'))
def test_process_nwp_forecast_groups_missing_var(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-3])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert logger.warning.called
assert post_vals.call_count == 4
def test_process_nwp_forecast_groups_bad_model(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[4:-1])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 0
def test_process_nwp_forecast_groups_missing_runfor(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[-2:])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert api.post_forecast_values.call_count == 0
@pytest.mark.parametrize('ind', [0, 1, 2])
def test__post_forecast_values_regular(mocker, forecast_list, ind):
api = mocker.MagicMock()
fx = forecast_list[ind]
main._post_forecast_values(api, fx, [0], 'whatever')
assert api.post_forecast_values.call_count == 1
def test__post_forecast_values_cdf(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
main._post_forecast_values(api, fx, vals, 'gefs')
assert api.post_probabilistic_forecast_constant_value_values.call_count == 3 # NOQA
def test__post_forecast_values_cdf_not_gefs(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(ValueError):
main._post_forecast_values(api, fx, vals, 'gfs')
def test__post_forecast_values_cdf_less_cols(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(10)})
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, vals, 'gefs')
def test__post_forecast_values_cdf_not_df(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, ser, 'gefs')
def test__post_forecast_values_cdf_no_cv_match(mocker, forecast_list):
api = mocker.MagicMock()
fx = replace(forecast_list[3], constant_values=(
replace(forecast_list[3].constant_values[0], constant_value=3.0
),))
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(KeyError):
main._post_forecast_values(api, fx, vals, 'gefs')
@pytest.mark.parametrize('issue_buffer,empty', [
(pd.Timedelta('10h'), False),
(pd.Timedelta('1h'), True),
(pd.Timedelta('5h'), False)
])
def test_make_latest_nwp_forecasts(forecast_list, mocker, issue_buffer, empty):
session = mocker.patch('solarforecastarbiter.io.api.APISession')
session.return_value.get_user_info.return_value = {'organization': ''}
session.return_value.list_forecasts.return_value = forecast_list[:-3]
session.return_value.list_probabilistic_forecasts.return_value = []
run_time = pd.Timestamp('20190501T0000Z')
# last fx has different org
fxdf = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
process = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.process_nwp_forecast_groups') # NOQA
main.make_latest_nwp_forecasts('', run_time, issue_buffer)
if empty:
process.assert_not_called()
else:
assert_frame_equal(process.call_args[0][-1], fxdf)
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', False),
('{"is_reference_persistence_forecast": true}', True),
('{"is_reference_persistence_forecast": "True"}', True),
('{"is_reference_persistence_forecast":"True"}', True),
('is_reference_persistence_forecast" : "True"}', True),
('{"is_reference_persistence_forecast" : true, "otherkey": badjson, 9}',
True),
('reference_persistence_forecast": true', False),
('{"is_reference_persistence_forecast": false}', False),
("is_reference_persistence_forecast", False)
])
def test_is_reference_persistence_forecast(string, expected):
assert main._is_reference_persistence_forecast(string) == expected
@pytest.fixture
def perst_fx_obs(mocker, ac_power_observation_metadata,
ac_power_forecast_metadata):
observations = [
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
)
]
def make_extra(obs):
extra = (
'{"is_reference_persistence_forecast": true,'
f'"observation_id": "{obs.observation_id}"'
'}'
)
return extra
forecasts = [
ac_power_forecast_metadata.replace(
name='FX0',
extra_parameters=make_extra(observations[0]),
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX no persist',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX bad js',
extra_parameters='is_reference_persistence_forecast": true other',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
)
]
return forecasts, observations
def test_generate_reference_persistence_forecast_parameters(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
# one hour ahead forecast, so 14Z was made at 13Z
# enough data to do 14Z and 15Z issue times but not 16Z
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
False
)
assert param_list[1] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_forecast_yet(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 1
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_data(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.NaT, pd.NaT)
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_diff_org(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': 'a new one'}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_not_reference_fx(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts = [fx.replace(extra_parameters='') for fx in forecasts]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_no_obs_id(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts[0] = forecasts[0].replace(
extra_parameters='{"is_reference_persistence_forecast": true}')
forecasts[1] = forecasts[1].replace(
extra_parameters='{"is_reference_persistence_forecast": true, "observation_id": "idnotinobs"}') # NOQA
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_ending_label(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts = [fx.replace(
        interval_label='ending', lead_time_to_start=pd.Timedelta('0h')
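        ) for fx in forecasts]
    # The rest of this test is truncated in the source. A hedged sketch of a plausible
    # continuation, mirroring the earlier parameter-generation tests above; the
    # timestamps and the final assertion are assumptions, not from the original.
    session = mocker.MagicMock()
    session.get_user_info.return_value = {'organization': ''}
    session.get_observation_time_range.return_value = (
        pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
    session.get_forecast_time_range.return_value = (
        pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
    param_gen = main.generate_reference_persistence_forecast_parameters(
        session, forecasts, observations, pd.Timestamp('2020-05-20T16:00Z')
    )
    param_list = list(param_gen)
    assert all(fx.interval_label == 'ending' for fx, _, _, _ in param_list)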
import os
from copy import deepcopy
from datetime import datetime
from dateutil.parser import parse as parse_to_datetime
import dateutil
import numpy as np
import pandas as pd
np.seterr(divide='ignore')
from sklearn.utils import class_weight
from sklearn.cluster import KMeans, MeanShift, DBSCAN
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer, StandardScaler
from sklearn.metrics import (
classification_report, confusion_matrix,
silhouette_score, homogeneity_score, completeness_score, v_measure_score,
auc, plot_roc_curve
)
from sklearn.model_selection import (
KFold, StratifiedKFold, cross_val_score,
GridSearchCV,
train_test_split
)
from xgboost import XGBClassifier
LUCKY_NUMBER = 6969
classifier = XGBClassifier(objective='multi:softprob', n_jobs=11)
parameters = {
'max_depth': range (2, 7, 1),
'n_estimators': range(60, 90, 10),
'learning_rate': [0.01, 0.05, 0.1]
}
HP_searcher = GridSearchCV(
estimator=classifier,
param_grid=parameters,
scoring='f1_macro',
cv=10,
verbose=True
)
def to_datetime(text: str):
try:
dt_format = str(parse_to_datetime(text))
dt_object = datetime.strptime(dt_format,'%Y-%m-%d %H:%M:%S')
except Exception:
return None
return dt_object
def month_to_quarter(month: int) -> int:
if 1 <= month <= 3:
return 1
elif 4 <= month <= 6:
return 2
elif 7 <= month <= 9:
return 3
elif 10 <= month <= 12:
return 4
else:
raise ValueError(f'input must be between 1 and 12')
def load_csv(filepath: str) -> pd.DataFrame:
if not os.path.isfile(filepath):
raise FileNotFoundError(f"Cannot find {filepath}")
    return pd.read_csv(filepath)
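# A hedged usage sketch of the grid search defined above. The file name and the
# 'label' column are illustrative assumptions, not from the original.
if __name__ == '__main__':
    df = load_csv('training_data.csv')   # hypothetical input file
    X = df.drop(columns=['label'])       # hypothetical feature columns
    y = df['label']                      # hypothetical target column
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=LUCKY_NUMBER, stratify=y)
    HP_searcher.fit(X_train, y_train)
    print(HP_searcher.best_params_)
    print(classification_report(y_test, HP_searcher.best_estimator_.predict(X_test)))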
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
['b', 'a', 'c'],
['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
cat = pd.Categorical(['b', 'b', 'a', 'c', None],
categories=categories,
ordered=ordered)
labels, uniques = pd.factorize(cat)
expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a', 'c'],
categories=categories,
ordered=ordered)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(['b', 'b', None, 'a'])
    labels, uniques = pd.factorize(cat, sort=True)
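    # The rest of this test is truncated in the source. A plausible continuation based on
    # factorize semantics with sort=True (uniques sorted, missing values coded as -1);
    # the expected values below are inferred, not copied from the original.
    expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)
    expected_uniques = pd.Categorical(['a', 'b'])
    tm.assert_numpy_array_equal(labels, expected_labels)
    tm.assert_categorical_equal(uniques, expected_uniques)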
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
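# NOTE: the PEYTON_FCST_LINEAR_95 and PEYTON_FCST_LINEAR_99 frames above share an
# identical "fcst" column and differ only in the width of their "fcst_lower" /
# "fcst_upper" bands (the _99 variant's band is wider). The sketch below is an
# illustrative, hypothetical helper -- it is NOT how the hard-coded bounds above
# were produced (those come from a fitted model) -- but it shows how a frame with
# the same four-column layout could be generated programmatically for ad-hoc tests.
# It relies on the module-level `pd` and `np` imports already used above.
def _make_linear_fcst_frame(start, periods, freq, intercept, slope, half_width):
    """Build a linear forecast frame with symmetric +/- half_width interval bounds."""
    times = pd.date_range(start=start, periods=periods, freq=freq)
    fcst = intercept + slope * np.arange(periods, dtype=float)
    return pd.DataFrame(
        {
            "time": times,
            "fcst": fcst,
            "fcst_lower": fcst - half_width,
            "fcst_upper": fcst + half_width,
        }
    )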
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
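# NOTE: PEYTON_FCST_LINEAR_INVALID_ZERO above fills its interval columns with
# -np.inf / np.inf, presumably to exercise a degenerate or invalid confidence
# setting. A hypothetical helper like this sketch could build an equivalent
# unbounded frame without spelling out every row; it is illustrative only and
# is not used by the fixtures in this module.
def _make_unbounded_linear_fcst_frame(start, periods, freq, intercept, slope):
    """Build a linear forecast frame whose interval bounds are -inf / +inf."""
    times = pd.date_range(start=start, periods=periods, freq=freq)
    fcst = intercept + slope * np.arange(periods, dtype=float)
    return pd.DataFrame(
        {
            "time": times,
            "fcst": fcst,
            "fcst_lower": np.full(periods, -np.inf),
            "fcst_upper": np.full(periods, np.inf),
        }
    )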
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
            281: pd.Timestamp("2013-02-07 00:00:00"),
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing new gets defined and the preset
# values should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), | u('nom') | pandas.compat.u |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.tests.extension.base import BaseOpsUtil
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
| is_float_dtype(other) | pandas.api.types.is_float_dtype |
import json
import pandas as pd
import numpy as np
from preprocessors import preprocessors
"""
Basic model creates predictions based on score assigned to each product.
Predictions are constant in time (they are the same list containing the
best products in the store for each user).
The score metric is based on the IMDb weighted-rating formula.
"""
class Recommender:
def __init__(self, recommendations: list):
"""
Constructs basic Recommender based on recommendation list.
Recommendations list should contain BEST products from the whole store.
The way it is calculated is left to the provider.
:param recommendations: list containing the best products.
"""
self.recommendations = recommendations
def recommend(self, user_id: int, category: str) -> list:
"""
Generates recommendation for the user.
:param user_id: this parameter is not used.
:param category: this parameter is not used.
:return: list of products recommended to the user.
"""
return self.recommendations
def dump(self, recommendations_fp: str):
"""
Saves basic model into file.
:param recommendations_fp: file path to store recommendations list in.
"""
with (open(recommendations_fp, 'w')) as file:
json.dump(self.recommendations, file)
@staticmethod
def name():
"""
Function returns the name of the recommender.
"""
return "Basic"
################################################################
# Code below is associated with building basic Recommender. #
################################################################
def _calculate_score(rating, popularity, min_popularity, avg_rating) -> pd.Series:
return ((popularity / (popularity + min_popularity)) * rating) + (
(min_popularity / (popularity + min_popularity)) * avg_rating)
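# Illustrative note: the expression above is the IMDb-style weighted rating
# WR = (v / (v + m)) * R + (m / (v + m)) * C, with R = item rating, v = item
# popularity, m = the minimum-popularity threshold and C = the mean rating.
# Worked example (hypothetical values): _calculate_score(8.0, 300, 100, 6.5)
# evaluates to (0.75 * 8.0) + (0.25 * 6.5) == 7.625.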
def _products_with_score(products_df: pd.DataFrame) -> pd.DataFrame:
avg_rating = products_df['user_rating'].mean()
min_popularity = np.percentile(products_df['popularity'], 80)
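# Note: the 80th-percentile popularity is used as the threshold, so the filter
# below only scores roughly the top 20% most-popular products.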
# Deep copy because products df is modified and returned as the result.
products = products_df[products_df['popularity'] >= min_popularity].copy(deep=True)
user_ratings = products['user_rating']
popularity = products['popularity']
products['score'] = _calculate_score(
user_ratings,
popularity,
min_popularity,
avg_rating
)
return products
def _assign_popularity_to_products(
sessions_df: pd.DataFrame,
products_df: pd.DataFrame
) -> pd.DataFrame:
popularity = sessions_df['product_id'].value_counts().rename_axis('product_id').reset_index(name='popularity')
return pd.merge(products_df, popularity, how='inner', on='product_id')
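# Note: value_counts() counts how many session rows reference each product, and
# the inner merge drops products that never appear in the sessions data.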
def _best_list_products(scored_products: pd.DataFrame, n: int = 10) -> list:
return scored_products.sort_values('score', ascending=False).head(n=n)["product_id"].to_list()
def build(sessions_df: pd.DataFrame, products_df: pd.DataFrame) -> Recommender:
"""
Builds basic model based on provides sessions and products DataFrames.
Sessions_df must contain a column named "product_id".
Products_df must contain columns named "user_rating" and "product_id".
:param products_df: pd.DataFrame containing products information.
:param sessions_df: pd.DataFrame containing sessions information.
:return: basic Recommender.
"""
products_with_popularity = _assign_popularity_to_products(sessions_df, products_df)
products_with_scores = _products_with_score(products_with_popularity)
recommendations = _best_list_products(products_with_scores)
return Recommender(
recommendations=recommendations
)
def from_file(recommendations_fp: str) -> Recommender:
"""
Function restores basic Recommender from file.
:param recommendations_fp: file containing recommendations for basic Recommender (created with Recommender.dump())
:return: basic Recommender constructed from data in file.
"""
with open(recommendations_fp, 'r') as file:
recommendations = json.load(file)
return Recommender(
recommendations=recommendations
)
if __name__ == "__main__":
productsDataPath = '../notebooks/data/v2/products.jsonl'
sessionsDataPath = '../notebooks/data/v2/sessions.jsonl'
sessionsDF = | pd.read_json(sessionsDataPath, lines=True) | pandas.read_json |
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
Base class template for forecaster scitype.
class name: BaseForecaster
Scitype defining methods:
fitting - fit(y, X=None, fh=None)
forecasting - predict(fh=None, X=None)
updating - update(y, X=None, update_params=True)
Convenience methods:
fit&forecast - fit_predict(y, X=None, fh=None)
update&forecast - update_predict(cv=None, X=None, update_params=True)
forecast residuals - predict_residuals(y, X=None, fh=None)
forecast scores - score(y, X=None, fh=None)
Optional, special capability methods (check capability tags if available):
forecast intervals - predict_interval(fh=None, X=None, coverage=0.90)
forecast quantiles - predict_quantiles(fh=None, X=None, alpha=[0.05, 0.95])
Inspection methods:
hyper-parameter inspection - get_params()
fitted parameter inspection - get_fitted_params()
current ForecastingHorizon - fh
State:
fitted model/strategy - by convention, any attributes ending in "_"
fitted state flag - is_fitted (property)
fitted state inspection - check_is_fitted()
"""
__author__ = ["mloning", "big-o", "fkiraly", "sveameyer13"]
__all__ = ["BaseForecaster"]
from contextlib import contextmanager
from warnings import warn
import numpy as np
import pandas as pd
from sktime.base import BaseEstimator
from sktime.datatypes import convert_to, mtype
from sktime.forecasting.base import ForecastingHorizon
from sktime.utils.datetime import _shift
from sktime.utils.validation.forecasting import check_alpha, check_cv, check_fh, check_X
from sktime.utils.validation.series import check_equal_time_index, check_series
DEFAULT_ALPHA = 0.05
class BaseForecaster(BaseEstimator):
"""Base forecaster template class.
The base forecaster specifies the methods and method
signatures that all forecasters have to implement.
Specific implementations of these methods is deferred to concrete
forecasters.
"""
# default tag values - these typically make the "safest" assumption
_tags = {
"scitype:y": "univariate", # which y are fine? univariate/multivariate/both
"ignores-exogeneous-X": True, # does estimator ignore the exogeneous X?
"capability:pred_int": False, # can the estimator produce prediction intervals?
"handles-missing-data": False, # can estimator handle missing data?
"y_inner_mtype": "pd.Series", # which types do _fit/_predict, support for y?
"X_inner_mtype": "pd.DataFrame", # which types do _fit/_predict, support for X?
"requires-fh-in-fit": True, # is forecasting horizon already required in fit?
"X-y-must-have-same-index": True, # can estimator handle different X/y index?
"enforce_index_type": None, # index type that needs to be enforced in X/y
}
def __init__(self):
self._is_fitted = False
self._y = None
self._X = None
# forecasting horizon
self._fh = None
self._cutoff = None # reference point for relative fh
self._converter_store_y = dict() # storage dictionary for in/output conversion
super(BaseForecaster, self).__init__()
def fit(self, y, X=None, fh=None):
"""Fit forecaster to training data.
State change:
Changes state to "fitted".
Writes to self:
Sets self._is_fitted flag to True.
Writes self._y and self._X with `y` and `X`, respectively.
Sets self.cutoff and self._cutoff to last index seen in `y`.
Sets fitted model attributes ending in "_".
Stores fh to self.fh if fh is passed.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Time series to which to fit the forecaster.
if self.get_tag("scitype:y")=="univariate":
must have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
must have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
if self.get_tag("requires-fh-in-fit"), must be passed, not optional
X : pd.DataFrame, or 2D np.array, optional (default=None)
Exogeneous time series to fit to
if self.get_tag("X-y-must-have-same-index"), X.index must contain y.index
Returns
-------
self : Reference to self.
"""
# if fit is called, fitted state is re-set
self._is_fitted = False
fh = self._check_fh(fh)
# check and convert X/y
X_inner, y_inner = self._check_X_y(X=X, y=y)
# set internal X/y to the new X/y
# this also updates cutoff from y
self._update_y_X(y_inner, X_inner)
# checks and conversions complete, pass to inner fit
#####################################################
self._fit(y=y_inner, X=X_inner, fh=fh)
# this should happen last
self._is_fitted = True
return self
def predict(
self,
fh=None,
X=None,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
keep_old_return_type=True,
):
"""Forecast time series at future horizon.
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
self.cutoff, self._is_fitted
Writes to self:
Stores fh to self.fh if fh is passed and has not been passed previously.
Parameters
----------
fh : int, list, np.ndarray or ForecastingHorizon
Forecasting horizon
X : pd.DataFrame, or 2D np.ndarray, optional (default=None)
Exogeneous time series to predict from
if self.get_tag("X-y-must-have-same-index"), X.index must contain fh.index
return_pred_int : bool, optional (default=False)
If True, returns prediction intervals for given alpha values.
alpha : float or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Point forecasts at fh, with same index as fh
y_pred has same type as y passed in fit (most recently)
y_pred_int : pd.DataFrame - only if return_pred_int=True
in this case, return is 2-tuple (otherwise a single y_pred)
Prediction intervals
"""
# handle inputs
self.check_is_fitted()
fh = self._check_fh(fh)
# todo deprecate NotImplementedError in v 10.0.1
if return_pred_int and not self.get_tag("capability:pred_int"):
raise NotImplementedError(
f"{self.__class__.__name__} does not have the capability to return "
"prediction intervals. Please set return_pred_int=False. If you "
"think this estimator should have the capability, please open "
"an issue on sktime."
)
# input check and conversion for X
X_inner = self._check_X(X=X)
# this is how it is supposed to be after the refactor is complete and effective
if not return_pred_int:
y_pred = self._predict(fh=fh, X=X_inner)
# convert to output mtype, identical with last y mtype seen
y_out = convert_to(
y_pred,
self._y_mtype_last_seen,
as_scitype="Series",
store=self._converter_store_y,
)
return y_out
# keep following code for downward compatibility,
# todo: can be deleted once refactor is completed and effective,
# todo: deprecate in v 10
else:
warn(
"return_pred_int in predict() will be deprecated;"
"please use predict_interval() instead to generate "
"prediction intervals.",
FutureWarning,
)
if not self._has_predict_quantiles_been_refactored():
# this means the method is not refactored
y_pred = self._predict(
self.fh,
X=X_inner,
return_pred_int=return_pred_int,
alpha=alpha,
)
# returns old return type anyways
pred_int = y_pred[1]
y_pred = y_pred[0]
else:
# it's already refactored
# opposite definition previously vs. now
if isinstance(alpha, list):
coverage = [1 - a for a in alpha]
else:
coverage = alpha
pred_int = self.predict_interval(fh=fh, X=X_inner, coverage=coverage)
if keep_old_return_type:
pred_int = self._convert_new_to_old_pred_int(pred_int, alpha)
y_pred = self._predict(
self.fh,
X=X_inner,
)
# convert to output mtype, identical with last y mtype seen
y_out = convert_to(
y_pred,
self._y_mtype_last_seen,
as_scitype="Series",
store=self._converter_store_y,
)
return (y_out, pred_int)
def fit_predict(
self, y, X=None, fh=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Fit and forecast time series at future horizon.
State change:
Changes state to "fitted".
Writes to self:
Sets is_fitted flag to True.
Writes self._y and self._X with `y` and `X`, respectively.
Sets self.cutoff and self._cutoff to last index seen in `y`.
Sets fitted model attributes ending in "_".
Stores fh to self.fh.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Time series to which to fit the forecaster.
if self.get_tag("scitype:y")=="univariate":
must have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
must have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
fh : int, list, np.array or ForecastingHorizon (not optional)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, or 2D np.array, optional (default=None)
Exogeneous time series to fit to and to predict from
if self.get_tag("X-y-must-have-same-index"),
X.index must contain y.index and fh.index
return_pred_int : bool, optional (default=False)
If True, returns prediction intervals for given alpha values.
alpha : float or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Point forecasts at fh, with same index as fh
y_pred has same type as y
y_pred_int : pd.DataFrame - only if return_pred_int=True
in this case, return is 2-tuple (otherwise a single y_pred)
Prediction intervals
"""
# if fit is called, fitted state is re-set
self._is_fitted = False
fh = self._check_fh(fh)
# check and convert X/y
X_inner, y_inner = self._check_X_y(X=X, y=y)
# set internal X/y to the new X/y
# this also updates cutoff from y
self._update_y_X(y_inner, X_inner)
# apply fit and then predict
self._fit(y=y_inner, X=X_inner, fh=fh)
self._is_fitted = True
# call the public predict to avoid duplicating output conversions
# input conversions are skipped since we are using X_inner
return self.predict(
fh=fh, X=X_inner, return_pred_int=return_pred_int, alpha=alpha
)
def predict_quantiles(self, fh=None, X=None, alpha=None):
"""Compute/return quantile forecasts.
If alpha is iterable, multiple quantiles will be calculated.
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
self.cutoff, self._is_fitted
Writes to self:
Stores fh to self.fh if fh is passed and has not been passed previously.
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon, default = y.index (in-sample forecast)
X : pd.DataFrame, optional (default=None)
Exogenous time series
alpha : float or list of float, optional (default=[0.05, 0.95])
A probability or list of, at which quantile forecasts are computed.
Returns
-------
quantiles : pd.DataFrame
Column has multi-index: first level is variable name from y in fit,
second level being the values of alpha passed to the function.
Row index is fh. Entries are quantile forecasts, for var in col index,
at quantile probability in second col index, for the row index.
"""
self.check_is_fitted()
# input checks
if alpha is None:
alpha = [0.05, 0.95]
fh = self._check_fh(fh)
alpha = check_alpha(alpha)
# input check and conversion for X
X_inner = self._check_X(X=X)
quantiles = self._predict_quantiles(fh=fh, X=X_inner, alpha=alpha)
return quantiles
def predict_interval(
self,
fh=None,
X=None,
coverage=0.90,
):
"""Compute/return prediction interval forecasts.
If coverage is iterable, multiple intervals will be calculated.
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
self.cutoff, self._is_fitted
Writes to self:
Stores fh to self.fh if fh is passed and has not been passed previously.
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon, default = y.index (in-sample forecast)
X : pd.DataFrame, optional (default=None)
Exogenous time series
coverage : float or list of float, optional (default=0.90)
Returns
-------
pred_int : pd.DataFrame
Column has multi-index: first level is variable name from y in fit,
second level being quantile fractions for interval low-high.
Quantile fractions are 0.5 - c/2, 0.5 + c/2 for c in coverage.
Row index is fh. Entries are quantile forecasts, for var in col index,
at quantile probability in second col index, for the row index.
"""
self.check_is_fitted()
# input checks
fh = self._check_fh(fh)
coverage = check_alpha(coverage)
# check and convert X
X_inner = self._check_X(X=X)
pred_int = self._predict_interval(fh=fh, X=X_inner, coverage=coverage)
return pred_int
def update(self, y, X=None, update_params=True):
"""Update cutoff value and, optionally, fitted parameters.
If no estimator-specific update method has been implemented,
default fall-back is as follows:
update_params=True: fitting to all observed data so far
update_params=False: updates cutoff and remembers data only
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
Pointers to seen data, self._y and self.X
self.cutoff, self._is_fitted
If update_params=True, model attributes ending in "_".
Writes to self:
Update self._y and self._X with `y` and `X`, by appending rows.
Updates self. cutoff and self._cutoff to last index seen in `y`.
If update_params=True,
updates fitted model attributes ending in "_".
Parameters
----------
y : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Time series to which to fit the forecaster.
if self.get_tag("scitype:y")=="univariate":
must have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
must have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
X : pd.DataFrame, or 2D np.ndarray optional (default=None)
Exogeneous time series to fit to
if self.get_tag("X-y-must-have-same-index"), X.index must contain y.index
update_params : bool, optional (default=True)
whether model parameters should be updated
Returns
-------
self : reference to self
"""
self.check_is_fitted()
# input checks and minor coercions on X, y
X_inner, y_inner = self._check_X_y(X=X, y=y)
# update internal X/y with the new X/y
# this also updates cutoff from y
self._update_y_X(y_inner, X_inner)
# checks and conversions complete, pass to inner fit
self._update(y=y_inner, X=X_inner, update_params=update_params)
return self
def update_predict(
self,
y,
cv=None,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Make predictions and update model iteratively over the test set.
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
Pointers to seen data, self._y and self.X
self.cutoff, self._is_fitted
If update_params=True, model attributes ending in "_".
Writes to self:
Update self._y and self._X with `y` and `X`, by appending rows.
Updates self.cutoff and self._cutoff to last index seen in `y`.
If update_params=True,
updates fitted model attributes ending in "_".
Parameters
----------
y : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Time series to which to fit the forecaster.
if self.get_tag("scitype:y")=="univariate":
must have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
must have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
cv : temporal cross-validation generator, optional (default=None)
X : pd.DataFrame, or 2D np.ndarray optional (default=None)
Exogeneous time series to fit to and predict from
if self.get_tag("X-y-must-have-same-index"),
X.index must contain y.index and fh.index
update_params : bool, optional (default=True)
return_pred_int : bool, optional (default=False)
alpha : int or list of ints, optional (default=None)
Returns
-------
y_pred : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Point forecasts at fh, with same index as fh
y_pred has same type as y
y_pred_int : pd.DataFrame - only if return_pred_int=True
in this case, return is 2-tuple (otherwise a single y_pred)
Prediction intervals
"""
self.check_is_fitted()
if return_pred_int and not self.get_tag("capability:pred_int"):
raise NotImplementedError(
f"{self.__class__.__name__} does not have the capability to return "
"prediction intervals. Please set return_pred_int=False. If you "
"think this estimator should have the capability, please open "
"an issue on sktime."
)
# input checks and minor coercions on X, y
X_inner, y_inner = self._check_X_y(X=X, y=y)
cv = check_cv(cv)
return self._predict_moving_cutoff(
y=y_inner,
cv=cv,
X=X_inner,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
def update_predict_single(
self,
y=None,
y_new=None,
fh=None,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Update model with new data and make forecasts.
This method is useful for updating and making forecasts in a single step.
If no estimator-specific update method has been implemented,
default fall-back is first update, then predict.
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
Pointers to seen data, self._y and self.X
self.cutoff, self._is_fitted
If update_params=True, model attributes ending in "_".
Writes to self:
Update self._y and self._X with `y` and `X`, by appending rows.
Updates self. cutoff and self._cutoff to last index seen in `y`.
If update_params=True,
updates fitted model attributes ending in "_".
Parameters
----------
y : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Target time series to which to fit the forecaster.
if self.get_tag("scitype:y")=="univariate":
must have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
must have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
y_new : alias for y for downwards compatibility, pass only one of y, y_new
to be deprecated in version 0.10.0
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, or 2D np.array, optional (default=None)
Exogeneous time series to fit to and to predict from
if self.get_tag("X-y-must-have-same-index"),
X.index must contain y.index and fh.index
update_params : bool, optional (default=True)
return_pred_int : bool, optional (default=False)
If True, prediction intervals are returned in addition to point
predictions.
alpha : float or list of floats
Returns
-------
y_pred : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Point forecasts at fh, with same index as fh
y_pred has same type as y
pred_ints : pd.DataFrame
Prediction intervals
"""
# todo deprecate return_pred_int in v 0.10.1
self.check_is_fitted()
fh = self._check_fh(fh)
# handle input alias, deprecate in v 0.10.1
if y is None:
y = y_new
if y is None:
raise ValueError("y must be of Series type and cannot be None")
self.check_is_fitted()
fh = self._check_fh(fh)
# input checks and minor coercions on X, y
X_inner, y_inner = self._check_X_y(X=X, y=y)
# update internal _X/_y with the new X/y
# this also updates cutoff from y
self._update_y_X(y_inner, X_inner)
return self._update_predict_single(
y=y_inner,
fh=fh,
X=X_inner,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
def predict_residuals(self, y=None, X=None):
"""Return residuals of time series forecasts.
Residuals will be computed for forecasts at y.index.
If fh must be passed in fit, must agree with y.index.
If y is an np.ndarray, and no fh has been passed in fit,
the residuals will be computed at a fh of range(y.shape[0])
State required:
Requires state to be "fitted".
If fh has been set, must correspond to index of y (pandas or integer)
Accesses in self:
Fitted model attributes ending in "_".
self.cutoff, self._is_fitted
Writes to self:
Stores y.index to self.fh if has not been passed previously.
Parameters
----------
y : pd.Series, pd.DataFrame, np.ndarray (1D or 2D), or None
Time series with ground truth observations, to compute residuals to.
Must have same type, dimension, and indices as expected return of predict.
if None, the y seen so far (self._y) are used, in particular:
if preceded by a single fit call, then in-sample residuals are produced
if fit requires fh, it must have pointed to index of y in fit
X : pd.DataFrame, or 2D np.ndarray, optional (default=None)
Exogeneous time series to predict from
if self.get_tag("X-y-must-have-same-index"), X.index must contain fh.index
Returns
-------
y_res : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Forecast residuals at fh, with same index as fh
y_pred has same type as y passed in fit (most recently)
"""
# if no y is passed, the so far observed y is used
if y is None:
y = self._y
# we want residuals, so fh must be the index of y
# if data frame: take directly from y
# to avoid issues with _set_fh, we convert to relative if self.fh is relative
if isinstance(y, (pd.DataFrame, pd.Series)):
fh = ForecastingHorizon(y.index, is_relative=False)
if self._fh is not None and self.fh.is_relative:
fh = fh.to_relative(self.cutoff)
fh = self._check_fh(fh)
# if np.ndarray, rows are not indexed
# so will be interpreted as range(len), or existing fh if it is stored
elif isinstance(y, np.ndarray):
if self._fh is None:
fh = range(y.shape[0])
else:
fh = self.fh
else:
raise TypeError("y must be a supported Series mtype")
y_pred = self.predict(fh=fh, X=X)
if not type(y_pred) == type(y):
raise TypeError(
"y must have same type, dims, index as expected predict return. "
f"expected type {type(y_pred)}, but found {type(y)}"
)
y_res = y - y_pred
return y_res
def score(self, y, X=None, fh=None):
"""Scores forecast against ground truth, using MAPE.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Time series to score
if self.get_tag("scitype:y")=="univariate":
must have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
must have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
fh : int, list, array-like or ForecastingHorizon, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, or 2D np.array, optional (default=None)
Exogeneous time series to score
if self.get_tag("X-y-must-have-same-index"), X.index must contain y.index
Returns
-------
score : float
sMAPE loss of self.predict(fh, X) with respect to y_test.
See Also
--------
:meth:`sktime.performance_metrics.forecasting.mean_absolute_percentage_error`
"""
# no input checks needed here, they will be performed
# in predict and loss function
# symmetric=True is default for mean_absolute_percentage_error
from sktime.performance_metrics.forecasting import (
mean_absolute_percentage_error,
)
return mean_absolute_percentage_error(y, self.predict(fh, X))
def get_fitted_params(self):
"""Get fitted parameters.
State required:
Requires state to be "fitted".
Returns
-------
fitted_params : dict
"""
raise NotImplementedError("abstract method")
def _check_X_y(self, X=None, y=None):
"""Check and coerce X/y for fit/predict/update functions.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.ndarray (1D or 2D), optional (default=None)
Time series to check.
X : pd.DataFrame, or 2D np.array, optional (default=None)
Exogeneous time series.
Returns
-------
y_inner : Series compatible with self.get_tag("y_inner_mtype") format
converted/coerced version of y, mtype determined by "y_inner_mtype" tag
None if y was None
X_inner : Series compatible with self.get_tag("X_inner_mtype") format
converted/coerced version of X, mtype determined by "X_inner_mtype" tag
None if X was None
Raises
------
TypeError if y or X is not one of the permissible Series mtypes
TypeError if y is not compatible with self.get_tag("scitype:y")
if tag value is "univariate", y must be univariate
if tag value is "multivariate", y must be bi- or higher-variate
if tag value is "both", y can be either
TypeError if self.get_tag("X-y-must-have-same-index") is True
and the index set of X is not a super-set of the index set of y
Writes to self
--------------
_y_mtype_last_seen : str, mtype of y
_converter_store_y : dict, metadata from conversion for back-conversion
"""
# input checks and minor coercions on X, y
###########################################
enforce_univariate = self.get_tag("scitype:y") == "univariate"
enforce_multivariate = self.get_tag("scitype:y") == "multivariate"
enforce_index_type = self.get_tag("enforce_index_type")
# checking y
if y is not None:
check_y_args = {
"enforce_univariate": enforce_univariate,
"enforce_multivariate": enforce_multivariate,
"enforce_index_type": enforce_index_type,
"allow_None": False,
"allow_empty": True,
}
y = check_series(y, **check_y_args, var_name="y")
self._y_mtype_last_seen = mtype(y, as_scitype="Series")
# end checking y
# checking X
if X is not None:
X = check_series(X, enforce_index_type=enforce_index_type, var_name="X")
if self.get_tag("X-y-must-have-same-index"):
check_equal_time_index(X, y, mode="contains")
# end checking X
# convert X & y to supported inner type, if necessary
#####################################################
# retrieve supported mtypes
# convert X and y to a supported internal mtype
# it X/y mtype is already supported, no conversion takes place
# if X/y is None, then no conversion takes place (returns None)
y_inner_mtype = self.get_tag("y_inner_mtype")
y_inner = convert_to(
y,
to_type=y_inner_mtype,
as_scitype="Series", # we are dealing with series
store=self._converter_store_y,
)
X_inner_mtype = self.get_tag("X_inner_mtype")
X_inner = convert_to(
X,
to_type=X_inner_mtype,
as_scitype="Series", # we are dealing with series
)
return X_inner, y_inner
def _check_X(self, X=None):
"""Shorthand for _check_X_y with one argument X, see _check_X_y."""
return self._check_X_y(X=X)[0]
def _update_X(self, X, enforce_index_type=None):
if X is not None:
X = check_X(X, enforce_index_type=enforce_index_type)
if len(X) > 0:
self._X = X.combine_first(self._X)
def _update_y_X(self, y, X=None, enforce_index_type=None):
"""Update internal memory of seen training data.
Accesses in self:
_y : only if exists, then assumed same type as y and same cols
_X : only if exists, then assumed same type as X and same cols
these assumptions should be guaranteed by calls
Writes to self:
_y : same type as y - new rows from y are added to current _y
if _y does not exist, stores y as _y
_X : same type as X - new rows from X are added to current _X
if _X does not exist, stores X as _X
this is only done if X is not None
cutoff : is set to latest index seen in y
Parameters
----------
y : pd.Series, pd.DataFrame, or nd.nparray (1D or 2D)
Endogenous time series
X : pd.DataFrame or 2D np.ndarray, optional (default=None)
Exogeneous time series
"""
# we only need to modify _y if y is not None
if y is not None:
# if _y does not exist yet, initialize it with y
if not hasattr(self, "_y") or self._y is None or not self.is_fitted:
self._y = y
# otherwise, update _y with the new rows in y
# if y is np.ndarray, we assume all rows are new
elif isinstance(y, np.ndarray):
self._y = np.concatenate((self._y, y))
# if y is pandas, we use combine_first to update
elif isinstance(y, (pd.Series, pd.DataFrame)) and len(y) > 0:
self._y = y.combine_first(self._y)
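# note: combine_first keeps the union of both indices; at overlapping
# indices the non-null values from the new y take precedence over _y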
# set cutoff to the end of the observation horizon
self._set_cutoff_from_y(y)
# we only need to modify _X if X is not None
if X is not None:
# if _X does not exist yet, initialize it with X
if not hasattr(self, "_X") or self._X is None or not self.is_fitted:
self._X = X
# otherwise, update _X with the new rows in X
# if X is np.ndarray, we assume all rows are new
elif isinstance(X, np.ndarray):
self._X = np.concatenate((self._X, X))
# if X is pandas, we use combine_first to update
elif isinstance(X, (pd.Series, pd.DataFrame)) and len(X) > 0:
self._X = X.combine_first(self._X)
def _get_y_pred(self, y_in_sample, y_out_sample):
"""Combine in- & out-sample prediction, slices given fh.
Parameters
----------
y_in_sample : pd.Series
In-sample prediction
y_out_sample : pd.Series
Out-sample prediction
Returns
-------
pd.Series
y_pred, sliced by fh
"""
y_pred = y_in_sample.append(y_out_sample, ignore_index=True).rename("y_pred")
y_pred = pd.DataFrame(y_pred)
# Workaround for slicing with negative index
y_pred["idx"] = [x for x in range(-len(y_in_sample), len(y_out_sample))]
y_pred = y_pred.loc[y_pred["idx"].isin(self.fh.to_indexer(self.cutoff).values)]
y_pred.index = self.fh.to_absolute(self.cutoff)
y_pred = y_pred["y_pred"].rename(None)
return y_pred
@property
def cutoff(self):
"""Cut-off = "present time" state of forecaster.
Returns
-------
cutoff : int
"""
return self._cutoff
def _set_cutoff(self, cutoff):
"""Set and update cutoff.
Parameters
----------
cutoff: pandas compatible index element
Notes
-----
Set self._cutoff is to `cutoff`.
"""
self._cutoff = cutoff
def _set_cutoff_from_y(self, y):
"""Set and update cutoff from series y.
Parameters
----------
y: pd.Series, pd.DataFrame, or np.array
Time series from which to infer the cutoff.
Notes
-----
Set self._cutoff to last index seen in `y`.
"""
y_mtype = mtype(y, as_scitype="Series")
if len(y) > 0:
if y_mtype in ["pd.Series", "pd.DataFrame"]:
self._cutoff = y.index[-1]
elif y_mtype == "np.ndarray":
self._cutoff = len(y)
else:
raise TypeError("y does not have a supported type")
@contextmanager
def _detached_cutoff(self):
"""Detached cutoff mode.
When in detached cutoff mode, the cutoff can be updated but will
be reset to the initial value after leaving the detached cutoff mode.
This is useful during rolling-cutoff forecasts when the cutoff needs
to be repeatedly reset, but afterwards should be restored to the
original value.
"""
cutoff = self.cutoff # keep initial cutoff
try:
yield
finally:
# re-set cutoff to initial value
self._set_cutoff(cutoff)
@property
def fh(self):
"""Forecasting horizon that was passed."""
# raise error if some method tries to access it before it has been set
if self._fh is None:
raise ValueError(
"No `fh` has been set yet, please specify `fh` " "in `fit` or `predict`"
)
return self._fh
def _check_fh(self, fh):
"""Check, set and update the forecasting horizon.
Called from all methods where fh can be passed:
fit, predict-like, update-like
Reads and writes to self._fh
Writes fh to self._fh if does not exist
Checks equality of fh with self._fh if exists, raises error if not equal
Parameters
----------
fh : None, int, list, np.ndarray or ForecastingHorizon
Returns
-------
self._fh : ForecastingHorizon or None
if ForecastingHorizon, last passed fh coerced to ForecastingHorizon
Raises
------
ValueError if self._fh exists and is inconsistent with fh
ValueError if fh is not passed (None) in a case where it must be:
- in fit, if self has the tag "requires-fh-in-fit" (value True)
- in predict, if it has not been passed in fit
"""
requires_fh = self.get_tag("requires-fh-in-fit")
msg = (
f"This is because fitting of the `"
f"{self.__class__.__name__}` "
f"depends on `fh`. "
)
# below loop treats four cases from three conditions:
# A. forecaster is fitted yes/no - self.is_fitted
# B. no fh is passed yes/no - fh is None
# C. fh is optional in fit yes/no - optfh
# B. no fh is passed
if fh is None:
# A. strategy fitted (call of predict or similar)
if self._is_fitted:
# in case C. fh is optional in fit:
# if there is none from before, there is none overall - raise error
if not requires_fh and self._fh is None:
raise ValueError(
"The forecasting horizon `fh` must be passed "
"either to `fit` or `predict`, "
"but was found in neither."
)
# in case C. fh is not optional in fit: this is fine
# any error would have already been caught in fit
# A. strategy not fitted (call of fit)
elif requires_fh:
# in case fh is not optional in fit:
# fh must be passed in fit
raise ValueError(
"The forecasting horizon `fh` must be passed to "
"`fit`, but none was found. " + msg
)
# in case C. fh is optional in fit:
# this is fine, nothing to check/raise
# B. fh is passed
else:
# If fh is passed, validate (no matter the situation)
fh = check_fh(fh)
# fh is written to self if one of the following is true
# - estimator has not been fitted yet (for safety from side effects)
# - fh has not been seen yet
# - fh has been seen, but was optional in fit,
# this means fh needs not be same and can be overwritten
if not requires_fh or not self._fh or not self._is_fitted:
self._fh = fh
# there is one error condition:
# - fh is mandatory in fit, i.e., fh in predict must be same if passed
# - fh already passed, and estimator is fitted
# - fh that was passed in fit is not the same as seen in predict
# note that elif means: optfh == False, and self._is_fitted == True
elif self._fh and not np.array_equal(fh, self._fh):
# raise error if existing fh and new one don't match
raise ValueError(
"A different forecasting horizon `fh` has been "
"provided from "
"the one seen in `fit`. If you want to change the "
"forecasting "
"horizon, please re-fit the forecaster. " + msg
)
# if existing one and new match, ignore new one
return self._fh
def _fit(self, y, X=None, fh=None):
"""Fit forecaster to training data.
core logic
Writes to self:
Sets fitted model attributes ending in "_".
Parameters
----------
y : guaranteed to be of a type in self.get_tag("y_inner_mtype")
Time series to which to fit the forecaster.
if self.get_tag("scitype:y")=="univariate":
guaranteed to have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
guaranteed to have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : optional (default=None)
guaranteed to be of a type in self.get_tag("X_inner_mtype")
Exogeneous time series to fit to.
Returns
-------
self : returns an instance of self.
"""
raise NotImplementedError("abstract method")
def _predict(self, fh, X=None, alpha=0.95):
"""Forecast time series at future horizon.
core logic
State required:
Requires state to be "fitted".
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon
X : optional (default=None)
guaranteed to be of a type in self.get_tag("X_inner_mtype")
Exogeneous time series to predict from.
return_pred_int : bool, optional (default=False)
If True, returns prediction intervals for given alpha values.
- Will be removed in v 0.10.0
alpha : float or list, optional (default=0.95)
Returns
-------
y_pred : series of a type in self.get_tag("y_inner_mtype")
Point forecasts at fh, with same index as fh
y_pred_int : pd.DataFrame - only if return_pred_int=True
Prediction intervals - deprecate in v 0.10.1
"""
raise NotImplementedError("abstract method")
def _update(self, y, X=None, update_params=True):
"""Update time series to incremental training data.
Writes to self:
If update_params=True,
updates fitted model attributes ending in "_".
Parameters
----------
y : guaranteed to be of a type in self.get_tag("y_inner_mtype")
Time series to which to fit the forecaster.
if self.get_tag("scitype:y")=="univariate":
guaranteed to have a single column/variable
if self.get_tag("scitype:y")=="multivariate":
guaranteed to have 2 or more columns
if self.get_tag("scitype:y")=="both": no restrictions apply
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon
X : optional (default=None)
guaranteed to be of a type in self.get_tag("X_inner_mtype")
Exogeneous time series to predict from.
return_pred_int : bool, optional (default=False)
If True, returns prediction intervals for given alpha values.
alpha : float or list, optional (default=0.95)
Returns
-------
y_pred : series of a type in self.get_tag("y_inner_mtype")
Point forecasts at fh, with same index as fh
y_pred_int : pd.DataFrame - only if return_pred_int=True
Prediction intervals
"""
if update_params:
# default to re-fitting if update is not implemented
warn(
f"NotImplementedWarning: {self.__class__.__name__} "
f"does not have a custom `update` method implemented. "
f"{self.__class__.__name__} will be refit each time "
f"`update` is called."
)
# refit with updated data, not only passed data
self.fit(self._y, self._X, self.fh)
# todo: should probably be self._fit, not self.fit
# but looping to self.fit for now to avoid interface break
return self
def _update_predict_single(
self,
y,
fh,
X=None,
update_params=True,
# todo: deprecate return_pred_int in v 10.0.1
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Update forecaster and then make forecasts.
Implements default behaviour of calling update and predict
sequentially, but can be overwritten by subclasses
to implement more efficient updating algorithms when available.
"""
self.update(y, X, update_params=update_params)
return self.predict(fh, X, return_pred_int=return_pred_int, alpha=alpha)
def _predict_interval(self, fh, X=None, coverage=0.95):
"""Compute/return prediction interval forecasts.
If coverage is iterable, multiple intervals will be calculated.
core logic
State required:
Requires state to be "fitted".
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon, default = y.index (in-sample forecast)
X : pd.DataFrame, optional (default=None)
Exogenous time series
alpha : float or list, optional (default=0.95)
Probability mass covered by interval or list of coverages.
Returns
-------
pred_int : pd.DataFrame
Column has multi-index: first level is variable name from y in fit,
second level being quantile fractions for interval low-high.
Quantile fractions are 0.5 - c/2, 0.5 + c/2 for c in coverage.
Row index is fh. Entries are quantile forecasts, for var in col index,
at quantile probability in second col index, for the row index.
"""
alphas = []
for c in coverage:
alphas.extend([(1 - c) / 2.0, 0.5 + (c / 2.0)])
alphas = sorted(alphas)
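# e.g. coverage=[0.9] yields alphas == [0.05, 0.95], i.e. the lower and upper
# quantiles of a symmetric 90% prediction interval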
pred_int = self._predict_quantiles(fh=fh, X=X, alpha=alphas)
pred_int = pred_int.rename(columns={"Quantiles": "Intervals"})
return pred_int
def _predict_quantiles(self, fh, X, alpha):
"""
Compute/return prediction quantiles for a forecast.
Must be run *after* the forecaster has been fitted.
If alpha is iterable, multiple quantiles will be calculated.
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon, default = y.index (in-sample forecast)
X : pd.DataFrame, optional (default=None)
Exogenous time series
alpha : float or list of float, optional (default=[0.05, 0.95])
A probability or list of, at which quantile forecasts are computed.
Returns
-------
quantiles : pd.DataFrame
Column has multi-index: first level is variable name from y in fit,
second level being the values of alpha passed to the function.
Row index is fh. Entries are quantile forecasts, for var in col index,
at quantile probability in second col index, for the row index.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not have the capability to return "
"prediction quantiles. If you "
"think this estimator should have the capability, please open "
"an issue on sktime."
)
def _predict_moving_cutoff(
self,
y,
cv,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Make single-step or multi-step moving cutoff predictions.
Parameters
----------
y : pd.Series
cv : temporal cross-validation generator
X : pd.DataFrame
update_params : bool
return_pred_int : bool
alpha : float or array-like
Returns
-------
y_pred = pd.Series
"""
if return_pred_int:
raise NotImplementedError()
fh = cv.get_fh()
y_preds = []
cutoffs = []
# enter into a detached cutoff mode
with self._detached_cutoff():
# set cutoff to time point before data
self._set_cutoff(_shift(y.index[0], by=-1))
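# e.g. (assuming a period-like index) for a monthly PeriodIndex starting at
# 2000-01 this moves the cutoff to 1999-12, one step before the first observation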
# iterate over data
for new_window, _ in cv.split(y):
y_new = y.iloc[new_window]
# we use `update_predict_single` here
# this updates the forecasting horizon
y_pred = self._update_predict_single(
y_new,
fh,
X,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
y_preds.append(y_pred)
cutoffs.append(self.cutoff)
return _format_moving_cutoff_predictions(y_preds, cutoffs)
# TODO: remove in v0.10.0
def _has_predict_quantiles_been_refactored(self):
"""Check if specific forecaster implements _predict_quantiles()."""
base_predict_quantiles = BaseForecaster._predict_quantiles
this_predict_quantiles = self.__class__._predict_quantiles
# true if self's _predict_quantiles is new implementation
return base_predict_quantiles != this_predict_quantiles
# TODO: remove in v0.10.0
def _convert_new_to_old_pred_int(self, pred_int_new, alpha):
name = pred_int_new.columns.get_level_values(0).unique()[0]
alpha = check_alpha(alpha)
alphas = [alpha] if isinstance(alpha, (float, int)) else alpha
pred_int_old_format = [
pd.DataFrame(
{
"lower": pred_int_new[(name, 0.5 - (float(a) / 2))],
"upper": pred_int_new[(name, 0.5 + (float(a) / 2))],
}
)
for a in alphas
]
# for a single alpha, return single pd.DataFrame
if len(alphas) == 1:
return pred_int_old_format[0]
# otherwise return list of pd.DataFrames
return pred_int_old_format
def _format_moving_cutoff_predictions(y_preds, cutoffs):
"""Format moving-cutoff predictions.
Parameters
----------
y_preds: list of pd.Series or pd.DataFrames, of length n
must have equal index and equal columns
cutoffs: iterable of cutoffs, of length n
Returns
-------
y_pred: pd.DataFrame, composed of entries of y_preds
if length of elements in y_preds is 2 or larger:
row-index = index common to the y_preds elements
col-index = (cutoff[i], y_pred.column)
entry is forecast at horizon given by row, from cutoff/variable at column
if length of elements in y_preds is 1:
row-index = forecasting horizon
col-index = y_pred.column
"""
# check that input format is correct
if not isinstance(y_preds, list):
raise ValueError(f"`y_preds` must be a list, but found: {type(y_preds)}")
if len(y_preds) == 0:
return | pd.DataFrame(columns=cutoffs) | pandas.DataFrame |
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_squared_log_error, mean_absolute_error, median_absolute_error
from scipy.integrate import odeint
from scipy.optimize import differential_evolution, minimize
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['font.size'] = 20
plt.rcParams['font.family'] = 'serif'
plt.rcParams['text.usetex'] = True
sns.set_palette(["#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2",
"#D55E00", "#CC79A7"])
ncolours = len(plt.rcParams['axes.prop_cycle'])
colours = [list(plt.rcParams['axes.prop_cycle'])[i]['color'] for i in range(ncolours)]
from tqdm.auto import tqdm
class PDEmodel:
def __init__(self, data, model, initfunc, bounds, param_names=None, nvars=1,
ndims=1, nreplicates=1, obsidx=None, outfunc=None):
'''Initialises the PDEmodel object.
Parameters
----------
data: DataFrame of data points with each entry in the form:
[timepoint, coordinates, fuction values]
model: the PDE model to fit to the data. Should accept the parameters to
estimate as its last inputs and return the time derivatives of the
functions.
initfunc: array of functions defining the initial conditions for the model.
bounds: the contraints for the parameter values to use in the estimation,
as a tuple of tuples or list of tuples.
param_names (optional, default: None): parameter names to be used in tables
and plots. If None, names appear as
"parameter 1", "parameter 2", etc.
nvars (optional, default: 1): the number of variables in the system.
ndims (optional, default: 1): the number of spatial dimensions in the system.
nreplicates (optional, default: 1): the number of measurements per time-coordinate
pair in the data.
obsidx (optional, default: None): the indices (starting from zero) of the measured
variables. If None, all outputs are used.
outfunc (optional, default: None): function to be applied to the output.
If None, raw outputs are used.
Returns
-------
The constructed object.
'''
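# Usage sketch (hypothetical names `rhs` and `obs_df`, illustrative only):
#   pde = PDEmodel(data=obs_df, model=rhs, initfunc=[lambda x: np.exp(-x**2)],
#                  bounds=((0.01, 1.0),), param_names=['D'], nvars=1, ndims=1)
#   pde.fit(error='rmse')
#   print(pde.best_params)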
self.model = model
self.initfunc = initfunc
self.data = data
self.bounds = bounds
self.nvars = nvars
self.spacedims = ndims
self.nreplicates = nreplicates
self.obsidx = obsidx
self.outfunc = outfunc
self.nparams = len(self.bounds)
if param_names is not None:
self.param_names = param_names
else:
self.param_names = ['parameter ' + str(i+1) for i in range(self.nparams)]
datacols = data.columns.values
alloutputs = data[datacols[1+ndims:]].values
allcoordinates = data[datacols[1:1+ndims]].values
self.timedata = np.sort(np.unique(data[datacols[0]]))
dt = self.timedata[1] - self.timedata[0]
self.time = np.concatenate((np.arange(0,self.timedata[0],dt), self.timedata))
self.timeidxs = np.array([np.argwhere(np.isclose(t, self.time))[0][0] for t in self.timedata])
if self.spacedims==1:
self.space = np.sort(np.unique(allcoordinates))
elif self.spacedims>1:
shapes = np.empty(self.spacedims).astype(int)
self.spacerange = []
grid = []
for i in range(self.spacedims):
sortedspace = np.sort(np.unique(allcoordinates[:,i]))
self.spacerange.append([np.min(sortedspace), np.max(sortedspace)])
grid.append(sortedspace)
shapes[i] = sortedspace.shape[0]
shapes = tuple(np.append(shapes, self.spacedims))
self.spacerange = np.array(self.spacerange)
self.space = np.array(np.meshgrid(*(v for v in grid))).T.reshape(shapes)
self.shapes = shapes
if self.spacedims == 0:
self.initial_condition = np.array([self.initfunc[i]() for i in range(self.nvars)])
elif self.spacedims == 1:
self.initial_condition = np.array([np.vectorize(self.initfunc[i])(self.space) for i in range(self.nvars)])
else:
self.initial_condition = np.array([np.apply_along_axis(self.initfunc[i], -1, self.space) for i in range(self.nvars)])
if self.nvars == 1:
self.initial_condition = self.initial_condition[0]
self.functiondata = alloutputs
return
def costfn(self, params, initial_condition, functiondata, bootstrap=False):
'''Integrates the model and computes the cost function
Parameters
----------
params: parameter values.
Returns
-------
error: float, the value of the chosen error (see 'fit()') for the given set of parameters.
'''
if self.spacedims == 0:
if self.nparams == 1:
ft = odeint(self.model, initial_condition, self.time, args=(params[0],))
else:
ft = odeint(self.model, initial_condition, self.time, args=tuple(params))
ft = ft[self.timeidxs]
if not bootstrap:
ft = np.repeat(ft, self.nreplicates, axis=0)
if self.outfunc is not None:
ft = np.apply_along_axis(self.outfunc, -1, ft)
elif self.obsidx is not None:
ft = ft[:, self.obsidx]
try:
error = self.error(ft, functiondata)
except Exception:
error = np.inf
if self.sqrt:
try:
error = np.sqrt(error)
except Exception:
error = np.inf
return error
else:
if self.spacedims > 1 or self.nvars > 1:
initial_condition = initial_condition.reshape(-1)
ft = odeint(self.model, initial_condition, self.time, args=(self.space, *params))
if self.nvars>1:
ft = ft.reshape(ft.shape[0], self.nvars, -1)
ft = np.array([np.transpose([ft[:,j,:][i] for j in range(self.nvars)]) for i in range(ft.shape[0])])
if self.spacedims > 1:
if self.nvars > 1:
ft = ft.reshape(ft.shape[0], *self.shapes[:-1], self.nvars)
else:
ft = ft.reshape(ft.shape[0], *self.shapes[:-1])
ft = ft[self.timeidxs]
if self.nvars > 1:
ft = ft.reshape(-1,self.nvars)
else:
ft = ft.reshape(-1)
if not bootstrap:
ft = np.repeat(ft, self.nreplicates, axis=0)
if self.outfunc is not None:
ft = np.apply_along_axis(self.outfunc, -1, ft)
elif self.obsidx is not None:
ft = ft[:, self.obsidx]
try:
error = self.error(ft, functiondata)
except Exception:
error = np.inf
if self.sqrt:
try:
error = np.sqrt(error)
except Exception:
error = np.inf
return error
def fit(self, error='mse'):
'''Finds the parameters that minimise the cost function using differential evolution.
Prints them and assigns them to the Estimator object.
Parameters
----------
error: the type of error to minimise.
- 'mse': mean squared error
- 'rmse': root mean squared error
- 'msle': mean squared logarithmic error
- 'rmsle': root mean squared logarithmic error
- 'mae': mean absolute error
- 'medae': median absolute error
Returns
-------
None
'''
if error == 'rmse' or error == 'rmsle':
self.sqrt = True
else:
self.sqrt = False
if error == 'mse' or error == 'rmse':
self.error = mean_squared_error
elif error == 'msle' or error == 'rmsle':
self.error = mean_squared_log_error
elif error == 'mae':
self.error = mean_absolute_error
elif error == 'medae':
self.error = median_absolute_error
optimisation = differential_evolution(self.costfn, bounds=self.bounds, args=(self.initial_condition, self.functiondata))
params = optimisation.x
best_params = {self.param_names[i]: [params[i]] for i in range(self.nparams)}
self.best_params = pd.DataFrame(best_params)
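# --- Illustrative usage sketch (not part of the original source) -------------
# A minimal, commented-out example of how this estimator might be driven for a
# non-spatial (ndims=0) logistic ODE. The enclosing class name `PDEmodel`, the
# column layout [timepoint, value] and the parameter bounds are assumptions made
# for illustration only; the block is kept commented out so nothing runs at import.
#
#   import numpy as np
#   import pandas as pd
#
#   def logistic(y, t, r, K):
#       return r * y * (1.0 - y / K)
#
#   t = np.linspace(0.0, 10.0, 21)
#   obs = 1.0 / (1.0 + 9.0 * np.exp(-0.8 * t))      # synthetic observations
#   data = pd.DataFrame({"time": t, "y": obs})
#
#   estimator = PDEmodel(data, logistic, initfunc=[lambda: 0.1],
#                        bounds=((0.0, 2.0), (0.5, 2.0)),
#                        param_names=["r", "K"], nvars=1, ndims=0)
#   estimator.fit(error="rmse")
#   print(estimator.best_params)
# ------------------------------------------------------------------------------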
import os
import random
import shutil
import numpy as np
import pandas as pd
import pytest
from PIL import Image
from keras_preprocessing.image import dataframe_iterator
from keras_preprocessing.image import image_data_generator
@pytest.fixture(scope='module')
def all_test_images():
img_w = img_h = 20
rgb_images = []
rgba_images = []
gray_images = []
for n in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGB')
rgb_images.append(im)
imarray = np.random.rand(img_w, img_h, 4) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGBA')
rgba_images.append(im)
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = Image.fromarray(
imarray.astype('uint8').squeeze()).convert('L')
gray_images.append(im)
return [rgb_images, rgba_images, gray_images]
def test_dataframe_iterator(all_test_images, tmpdir):
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
filepaths = []
filenames_without = []
for test_images in all_test_images:
for im in test_images:
filename = "image-{}.png".format(count)
filename_without = "image-{}".format(count)
filenames.append(filename)
filepaths.append(os.path.join(str(tmpdir), filename))
filenames_without.append(filename_without)
im.save(str(tmpdir / filename))
count += 1
df = pd.DataFrame({
"filename": filenames,
"class": [str(random.randint(0, 1)) for _ in filenames],
"filepaths": filepaths
})
# create iterator
iterator = dataframe_iterator.DataFrameIterator(df, str(tmpdir))
batch = next(iterator)
assert len(batch) == 2
assert isinstance(batch[0], np.ndarray)
assert isinstance(batch[1], np.ndarray)
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, x_col='filepaths')
df_iterator_dir = generator.flow_from_dataframe(df, str(tmpdir))
df_sparse_iterator = generator.flow_from_dataframe(df, str(tmpdir),
class_mode="sparse")
assert not np.isnan(df_sparse_iterator.classes).any()
# check number of classes and images
assert len(df_iterator.class_indices) == num_classes
assert len(df_iterator.classes) == count
assert set(df_iterator.filenames) == set(filepaths)
assert len(df_iterator_dir.class_indices) == num_classes
assert len(df_iterator_dir.classes) == count
assert set(df_iterator_dir.filenames) == set(filenames)
# test without shuffle
_, batch_y = next(generator.flow_from_dataframe(df, str(tmpdir),
shuffle=False,
class_mode="sparse"))
assert (batch_y == df['class'].astype('float')[:len(batch_y)]).all()
# Test invalid use cases
with pytest.raises(ValueError):
generator.flow_from_dataframe(df, str(tmpdir), color_mode='cmyk')
with pytest.raises(ValueError):
generator.flow_from_dataframe(df, str(tmpdir), class_mode='output')
with pytest.warns(DeprecationWarning):
generator.flow_from_dataframe(df, str(tmpdir), has_ext=True)
with pytest.warns(DeprecationWarning):
generator.flow_from_dataframe(df, str(tmpdir), has_ext=False)
def preprocessing_function(x):
"""This will fail if not provided by a Numpy array.
Note: This is made to enforce backward compatibility.
"""
assert x.shape == (26, 26, 3)
assert type(x) is np.ndarray
return np.zeros_like(x)
# Test usage as Sequence
generator = image_data_generator.ImageDataGenerator(
preprocessing_function=preprocessing_function)
dir_seq = generator.flow_from_dataframe(df, str(tmpdir),
target_size=(26, 26),
color_mode='rgb',
batch_size=3,
class_mode='categorical')
assert len(dir_seq) == np.ceil(count / 3)
x1, y1 = dir_seq[1]
assert x1.shape == (3, 26, 26, 3)
assert y1.shape == (3, num_classes)
x1, y1 = dir_seq[5]
assert (x1 == 0).all()
with pytest.raises(ValueError):
x1, y1 = dir_seq[9]
def test_dataframe_iterator_validate_filenames(all_test_images, tmpdir):
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames + ['test.jpp', 'test.jpg']})
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df,
str(tmpdir),
class_mode="input")
assert len(df_iterator.filenames) == len(df['filename']) - 2
df_iterator = generator.flow_from_dataframe(df,
str(tmpdir),
class_mode="input",
validate_filenames=False)
assert len(df_iterator.filenames) == len(df['filename'])
def test_dataframe_iterator_sample_weights(all_test_images, tmpdir):
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames})
df['weight'] = ([2, 5] * len(df))[:len(df)]
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, str(tmpdir),
x_col="filename",
y_col=None,
shuffle=False,
batch_size=5,
weight_col='weight',
class_mode="input")
batch = next(df_iterator)
assert len(batch) == 3 # (x, y, weights)
# check if input and output have the same shape and they're the same
assert(batch[0].all() == batch[1].all())
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
assert input_img[0][0][0] != output_img[0][0][0]
assert np.array_equal(np.array([2, 5, 2, 5, 2]), batch[2])
# fail
df['weight'] = (['2', '5'] * len(df))[:len(df)]
with pytest.raises(TypeError):
image_data_generator.ImageDataGenerator().flow_from_dataframe(
df,
weight_col='weight',
class_mode="input"
)
def test_dataframe_iterator_class_mode_input(all_test_images, tmpdir):
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames})
generator = image_data_generator.ImageDataGenerator()
df_autoencoder_iterator = generator.flow_from_dataframe(df, str(tmpdir),
x_col="filename",
y_col=None,
class_mode="input")
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
assert np.allclose(batch[0], batch[1])
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
assert(input_img[0][0][0] != output_img[0][0][0])
df_autoencoder_iterator = generator.flow_from_dataframe(df, str(tmpdir),
x_col="filename",
y_col="class",
class_mode="input")
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
assert(batch[0].all() == batch[1].all())
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
assert(input_img[0][0][0] != output_img[0][0][0])
def test_dataframe_iterator_class_mode_categorical_multi_label(all_test_images,
tmpdir):
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
label_opt = ['a', 'b', ['a'], ['b'], ['a', 'b'], ['b', 'a']]
df = pd.DataFrame({
"filename": filenames,
"class": [random.choice(label_opt) for _ in filenames[:-2]] + ['b', 'a']
})
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, str(tmpdir))
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, np.ndarray)
assert batch_y.shape == (len(batch_x), 2)
for labels in batch_y:
assert all(label in {0, 1} for label in labels)
# on first 3 batches
df = pd.DataFrame({
"filename": filenames,
"class": [['b', 'a']] + ['b'] + [['c']] + [random.choice(label_opt)
for _ in filenames[:-3]]
})
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, str(tmpdir), shuffle=False)
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, np.ndarray)
assert batch_y.shape == (len(batch_x), 3)
for labels in batch_y:
assert all(label in {0, 1} for label in labels)
assert (batch_y[0] == np.array([1, 1, 0])).all()
assert (batch_y[1] == np.array([0, 1, 0])).all()
assert (batch_y[2] == np.array([0, 0, 1])).all()
def test_dataframe_iterator_class_mode_multi_output(all_test_images, tmpdir):
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
# fit both outputs are a single number
df = pd.DataFrame({"filename": filenames})
import plotly.plotly as py
from plotly.graph_objs import *
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from os import path
import re
class StationNetwork:
"""This class is used to visualise the station network contained as a sequence of dates in the data
on class initialisation the class will take:
- option: 0 Represents constructing data but not the graph, 1 represents constructing the data and the supporting graph and adjacency matrix
- ide: {'vs', 'pc'}. 'vs' represents visual studio, 'pc' represents pycharm
"""
def __init__(self, ide_option, option=0):
""" On initialisation of the class, all of the data will be constructed. If option 1 has been selected then the graph will be created as well"""
if ide_option in ['vs','pc']:
self.train_station_timestamp, self.train_date, self.train_numeric, self.date_cols = self.which_ide(ide_option)
self.train_station_timestamp = self.adjust_timestamp_df()
if option == 1:
self.adj = self.define_adj(self.train_station_timestamp, self.date_cols)
self.d, self.labels, self.coords_x, self.coords_y= self.create_list_of_pairs(self.date_cols[1:len(self.date_cols)])
self.Gr = self.drawGraph(self.adj, self.d, self.labels)
else:
print('Ide option does not exist, see help(class_name) for more information on input args')
def create_file_reference(self, initial_dir, file_name):
s = path.join(initial_dir, file_name)
return s
def create_training_data(self, train_input_data_path, numeric_input_data_path):
data_init = pd.read_csv(train_input_data_path,
nrows=10, dtype=float, usecols=list(range(0, 1157)))
date_cols = data_init.count().reset_index().sort_values(by=0, ascending=False)
# Initial string of the cols: e.g. L1_S32_F2343 i.e. the vector will be 'split' into 3 with i = 0,1,2
date_cols['Station'] = date_cols['index'].apply(lambda x: x.split('_')[1] if x != 'Id' else x)
date_cols = date_cols.drop_duplicates('Station', keep='first')['index'].tolist()
train_date = pd.read_csv(train_input_data_path, usecols=date_cols, nrows=100000)
#!/usr/bin/env python
# coding=utf-8
"""
@version: 0.1
@author: li
@file: factor_revenue_quality.py
@time: 2019-01-28 11:33
"""
import gc, six
import sys
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
import numpy as np
import pandas as pd
import json
from pandas.io.json import json_normalize
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorRevenueQuality(object):
"""
Revenue quality.
"""
def __init__(self):
__str__ = 'factor_revenue_quality'
self.name = '财务指标'
self.factor_type1 = '财务指标'
self.factor_type2 = '收益质量'
self.description = '财务指标的二级指标, 收益质量'
@staticmethod
def NetNonOIToTP(tp_revenue_quanlity, revenue_quality, dependencies=['total_profit', 'non_operating_revenue', 'non_operating_expense']):
"""
:name: Net non-operating income and expenses / total profit
:desc: Net non-operating income and expenses / total profit
:unit:
:view_dimension: 0.01
"""
earning = tp_revenue_quanlity.loc[:, dependencies]
earning['NetNonOIToTP'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.non_operating_revenue.values +
earning.non_operating_expense.values)
/ earning.total_profit.values
)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def NetNonOIToTPTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['total_profit', 'non_operating_revenue', 'non_operating_expense']):
"""
:name: Net non-operating income and expenses (TTM) / total profit (TTM)
:desc: Net non-operating income and expenses (TTM) / total profit (TTM)
:unit:
:view_dimension: 0.01
"""
earning = ttm_revenue_quanlity.loc[:, dependencies]
earning['NetNonOIToTPTTM'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.non_operating_revenue.values +
earning.non_operating_expense.values)
/ earning.total_profit.values
)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def OperatingNIToTPTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['total_operating_revenue', 'total_operating_cost', 'total_profit']):
"""
:name: Net operating income / total profit (TTM)
:desc: Net operating income (TTM) / total profit (TTM) * 100% (Note: for non-financial companies, net operating income = total operating revenue - total operating cost; for financial companies, net operating income = operating revenue - fair value change gains/losses - investment income - exchange gains/losses - operating expenses. The non-financial formula is used here.)
:unit:
:view_dimension: 0.01
"""
earning = ttm_revenue_quanlity.loc[:, dependencies]
earning['OperatingNIToTPTTM'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.total_operating_revenue.values -
earning.total_operating_cost.values)
/ earning.total_profit.values)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def OperatingNIToTP(tp_revenue_quanlity, revenue_quality, dependencies=['total_operating_revenue', 'total_operating_cost', 'total_profit']):
"""
:name: Net operating income / total profit
:desc: (Note: for non-financial companies, net operating income = total operating revenue - total operating cost; for financial companies, net operating income = operating revenue - fair value change gains/losses - investment income - exchange gains/losses - operating expenses. The non-financial formula is used here.)
:unit:
:view_dimension: 0.01
"""
earning = tp_revenue_quanlity.loc[:, dependencies]
earning['OperatingNIToTP'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.total_operating_revenue.values -
earning.total_operating_cost.values)
/ earning.total_profit.values)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def OptCFToCurrLiabilityTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['net_operate_cash_flow_indirect', 'total_current_liability']):
"""
:name: Net cash flow from operating activities (TTM) / current liabilities (TTM)
:desc: Net cash flow from operating activities (TTM) / current liabilities (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_revenue_quanlity.loc[:, dependencies]
cash_flow['OptCFToCurrLiabilityTTM'] = np.where(
CalcTools.is_zero(cash_flow.total_current_liability.values), 0,
cash_flow.net_operate_cash_flow_indirect.values / cash_flow.total_current_liability.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, cash_flow, how='outer', on="security_code")
return revenue_quality
@staticmethod
def NetInToTPTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['fair_value_variable_income',
'total_profit']):
"""
:name: Net gain from changes in value / total profit (TTM)
:desc: Net gain from changes in value (TTM) / total profit (TTM) (computed from fair value change gains)
:unit:
:view_dimension: 0.01
"""
historical_value = ttm_revenue_quanlity.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] != 0 and x[1] is not None and x[0] is not None else None
historical_value['NetInToTPTTM'] = historical_value.apply(func, axis=1)
historical_value = historical_value.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, historical_value, how='outer', on='security_code')
return revenue_quality
@staticmethod
def OPToTPTTM(ttm_revenue_quanlity, revenue_quality,
dependencies=['operating_profit', 'total_profit']):
"""
:name: Operating profit / total profit (TTM)
:desc: Operating profit (TTM) / total profit (TTM)
:unit:
:view_dimension: 0.01
"""
historical_value = ttm_revenue_quanlity.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
historical_value['OPToTPTTM'] = historical_value.apply(func, axis=1)
historical_value = historical_value.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, historical_value, how='outer', on='security_code')
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
"""
Utility functions for Jupyter notebook to:
- format data
- transform pandas data structures
- compute common stats
These functions are used for both interactive data exploration and to implement
more complex pipelines. The output is reported through logging.
"""
import datetime
import logging
import math
from typing import (
Any,
Callable,
Collection,
Dict,
List,
Optional,
Tuple,
Union,
cast,
)
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import statsmodels
import statsmodels.api
import tqdm.autonotebook as tauton
import core.plotting as cplott
import helpers.dbg as dbg
import helpers.list as hlist
import helpers.printing as hprint
_LOG = logging.getLogger(__name__)
# #############################################################################
# Helpers.
# #############################################################################
# TODO(gp): Move this to helpers/pandas_helpers.py
def cast_to_df(obj: Union[pd.Series, pd.DataFrame]) -> pd.DataFrame:
"""
Convert a pandas object into a pd.DataFrame.
"""
if isinstance(obj, pd.Series):
df = pd.DataFrame(obj)
else:
df = obj
dbg.dassert_isinstance(df, pd.DataFrame)
return df
def cast_to_series(obj: Union[pd.Series, pd.DataFrame]) -> pd.Series:
"""
Convert a pandas object into a pd.Series.
"""
if isinstance(obj, pd.DataFrame):
dbg.dassert_eq(obj.shape[1], 1)
srs = obj.iloc[:, 0]
else:
srs = obj
dbg.dassert_isinstance(srs, pd.Series)
return srs
# TODO(gp): Need to be tested.
def adapt_to_series(f: Callable) -> Callable:
"""
Extend a function working on dataframes so that it can work on series.
"""
def wrapper(
obj: Union[pd.Series, pd.DataFrame], *args: Any, **kwargs: Any
) -> Any:
# Convert a pd.Series to a pd.DataFrame.
was_series = False
if isinstance(obj, pd.Series):
obj = pd.DataFrame(obj)
was_series = True
dbg.dassert_isinstance(obj, pd.DataFrame)
# Apply the function.
res = f(obj, *args, **kwargs)
# Transform the output, if needed.
if was_series:
if isinstance(res, tuple):
res_obj, res_tmp = res[0], res[1:]
res_obj_srs = cast_to_series(res_obj)
res_obj_srs = [res_obj_srs]
res_obj_srs.extend(res_tmp)
res = tuple(res_obj_srs)
else:
res = cast_to_series(res)
return res
return wrapper
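# Illustrative usage sketch (commented out; the wrapped function below is a
# hypothetical example, not part of this module):
#
#   @adapt_to_series
#   def demean(df: pd.DataFrame) -> pd.DataFrame:
#       return df - df.mean()
#
#   srs = pd.Series([1.0, 2.0, 3.0])
#   demean(srs)   # accepted as a Series and returned as a Series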
# #############################################################################
# Pandas helpers.
# #############################################################################
def drop_axis_with_all_nans(
df: pd.DataFrame,
drop_rows: bool = True,
drop_columns: bool = False,
drop_infs: bool = False,
report_stats: bool = False,
) -> pd.DataFrame:
"""
Remove columns and rows not containing information (e.g., with only nans).
The operation is not performed in place and the resulting df is returned.
Assume that the index is timestamps.
:param df: data frame to process
:param drop_rows: remove rows with only nans
:param drop_columns: remove columns with only nans
:param drop_infs: remove also +/- np.inf
:param report_stats: report the stats of the operations
"""
dbg.dassert_isinstance(df, pd.DataFrame)
if drop_infs:
df = df.replace([np.inf, -np.inf], np.nan)
if drop_columns:
# Remove columns with all nans, if any.
cols_before = df.columns[:]
df = df.dropna(axis=1, how="all")
if report_stats:
# Report results.
cols_after = df.columns[:]
removed_cols = set(cols_before).difference(set(cols_after))
pct_removed = hprint.perc(
len(cols_before) - len(cols_after), len(cols_after)
)
_LOG.info(
"removed cols with all nans: %s %s",
pct_removed,
hprint.list_to_str(removed_cols),
)
if drop_rows:
# Remove rows with all nans, if any.
rows_before = df.index[:]
df = df.dropna(axis=0, how="all")
if report_stats:
# Report results.
rows_after = df.index[:]
removed_rows = set(rows_before).difference(set(rows_after))
if len(rows_before) == len(rows_after):
# Nothing was removed.
min_ts = max_ts = None
else:
# TODO(gp): Report as intervals of dates.
min_ts = min(removed_rows)
max_ts = max(removed_rows)
pct_removed = hprint.perc(
len(rows_before) - len(rows_after), len(rows_after)
)
_LOG.info(
"removed rows with all nans: %s [%s, %s]",
pct_removed,
min_ts,
max_ts,
)
return df
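# Illustrative usage sketch (commented out):
#
#   df = pd.DataFrame({"a": [1.0, np.nan], "b": [np.nan, np.nan]},
#                     index=pd.date_range("2020-01-01", periods=2))
#   clean = drop_axis_with_all_nans(df, drop_rows=True, drop_columns=True,
#                                   report_stats=True)
#   # column "b" is dropped; rows that are left with only nans are dropped too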
def drop_na(
df: pd.DataFrame,
drop_infs: bool = False,
report_stats: bool = False,
*args: Any,
**kwargs: Any,
) -> pd.DataFrame:
"""
Wrapper around pd.dropna() reporting information about the removed rows.
"""
dbg.dassert_isinstance(df, pd.DataFrame)
num_rows_before = df.shape[0]
if drop_infs:
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna(*args, **kwargs)
if report_stats:
num_rows_after = df.shape[0]
pct_removed = hprint.perc(
num_rows_before - num_rows_after, num_rows_before
)
_LOG.info("removed rows with nans: %s", pct_removed)
return df
def report_zero_nan_inf_stats(
df: pd.DataFrame,
zero_threshold: float = 1e-9,
verbose: bool = False,
as_txt: bool = False,
) -> pd.DataFrame:
"""
Report count and percentage about zeros, nans, infs for a df.
"""
df = cast_to_df(df)
_LOG.info("index in [%s, %s]", df.index.min(), df.index.max())
#
num_rows = df.shape[0]
_LOG.info("num_rows=%s", hprint.thousand_separator(num_rows))
_LOG.info("data=")
display_df(df, max_lines=5, as_txt=as_txt)
#
num_days = len(set(df.index.date))
_LOG.info("num_days=%s", num_days)
#
num_weekdays = len(set(d for d in df.index.date if d.weekday() < 5))
_LOG.info("num_weekdays=%s", num_weekdays)
#
stats_df = pd.DataFrame(None, index=df.columns)
if False:
# Find the index of the first non-nan value.
df = df.applymap(lambda x: not np.isnan(x))
min_idx = df.idxmax(axis=0)
min_idx.name = "min_idx"
# Find the index of the last non-nan value.
max_idx = df.reindex(index=df.index[::-1]).idxmax(axis=0)
max_idx.name = "max_idx"
stats_df["num_rows"] = num_rows
#
num_zeros = (np.abs(df) < zero_threshold).sum(axis=0)
if verbose:
stats_df["num_zeros"] = num_zeros
stats_df["zeros [%]"] = (100.0 * num_zeros / num_rows).apply(
hprint.round_digits
)
#
num_nans = np.isnan(df).sum(axis=0)
if verbose:
stats_df["num_nans"] = num_nans
stats_df["nans [%]"] = (100.0 * num_nans / num_rows).apply(
hprint.round_digits
)
#
num_infs = np.isinf(df).sum(axis=0)
if verbose:
stats_df["num_infs"] = num_infs
stats_df["infs [%]"] = (100.0 * num_infs / num_rows).apply(
hprint.round_digits
)
#
num_valid = df.shape[0] - num_zeros - num_nans - num_infs
if verbose:
stats_df["num_valid"] = num_valid
stats_df["valid [%]"] = (100.0 * num_valid / num_rows).apply(
hprint.round_digits
)
#
display_df(stats_df, as_txt=as_txt)
return stats_df
def drop_duplicates(
df: pd.DataFrame, subset: Optional[List[str]] = None
) -> pd.DataFrame:
"""
Wrapper around pd.drop_duplicates() reporting information about the removed
rows.
:param df: df to drop duplicates from.
:param subset: columns subset.
:return: Df without duplicates inside given columns subset.
"""
if not subset:
subset = df.columns
num_rows_before = df.shape[0]
df_no_duplicates = df.drop_duplicates(subset=subset)
num_rows_after = df_no_duplicates.shape[0]
pct_removed = hprint.perc(num_rows_before - num_rows_after, num_rows_before)
_LOG.info("Removed duplicated rows: %s", pct_removed)
return df_no_duplicates
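# Illustrative usage sketch (commented out; assumes INFO logging is configured
# so the removal stats are visible):
#
#   df = pd.DataFrame({"a": [1, 1, 2], "b": ["x", "x", "y"]})
#   drop_duplicates(df)          # logs "Removed duplicated rows: ..."
#   drop_duplicates(df, ["a"])   # de-duplicate on a column subset only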
# #############################################################################
# Column variability.
# #############################################################################
def _get_unique_elements_in_column(df: pd.DataFrame, col_name: str) -> List[Any]:
try:
vals = df[col_name].unique()
except TypeError:
# TypeError: unhashable type: 'list'
_LOG.error("Column '%s' has unhashable types", col_name)
vals = list(set(map(str, df[col_name])))
cast(List[Any], vals)
return vals
def _get_variable_cols(
df: pd.DataFrame, threshold: int = 1
) -> Tuple[List[str], List[str]]:
"""
Return columns of a df that contain less than <threshold> unique values.
:return: (variable columns, constant columns)
"""
var_cols = []
const_cols = []
for col_name in df.columns:
unique_elems = _get_unique_elements_in_column(df, col_name)
num_unique_elems = len(unique_elems)
if num_unique_elems <= threshold:
const_cols.append(col_name)
else:
var_cols.append(col_name)
return var_cols, const_cols
def remove_columns_with_low_variability(
df: pd.DataFrame, threshold: int = 1, log_level: int = logging.DEBUG
) -> pd.DataFrame:
"""
Remove columns of a df that contain less than <threshold> unique values.
:return: df with only columns with sufficient variability
"""
var_cols, const_cols = _get_variable_cols(df, threshold=threshold)
_LOG.log(log_level, "# Constant cols")
for col_name in const_cols:
unique_elems = _get_unique_elements_in_column(df, col_name)
_LOG.log(
log_level,
" %s: %s",
col_name,
hprint.list_to_str(list(map(str, unique_elems))),
)
_LOG.log(log_level, "# Var cols")
_LOG.log(log_level, hprint.list_to_str(var_cols))
return df[var_cols]
def print_column_variability(
df: pd.DataFrame,
max_num_vals: int = 3,
num_digits: int = 2,
use_thousands_separator: bool = True,
) -> pd.DataFrame:
"""
Print statistics about the values in each column of a data frame.
This is useful to get a sense of which columns are interesting.
"""
print(("# df.columns=%s" % hprint.list_to_str(df.columns)))
res = []
for c in tauton.tqdm(df.columns, desc="Computing column variability"):
vals = _get_unique_elements_in_column(df, c)
try:
min_val = min(vals)
except TypeError as e:
_LOG.debug("Column='%s' reported %s", c, e)
min_val = "nan"
try:
max_val = max(vals)
except TypeError as e:
_LOG.debug("Column='%s' reported %s", c, e)
max_val = "nan"
if len(vals) <= max_num_vals:
txt = ", ".join(map(str, vals))
else:
txt = ", ".join(map(str, [min_val, "...", max_val]))
row = ["%20s" % c, len(vals), txt]
res.append(row)
res = pd.DataFrame(res, columns=["col_name", "num", "elems"])
res.sort_values("num", inplace=True)
# TODO(gp): Fix this.
# res = add_count_as_idx(res)
res = add_pct(
res,
"num",
df.shape[0],
"[diff %]",
num_digits=num_digits,
use_thousands_separator=use_thousands_separator,
)
res.reset_index(drop=True, inplace=True)
return res
def add_pct(
df: pd.DataFrame,
col_name: str,
total: int,
dst_col_name: str,
num_digits: int = 2,
use_thousands_separator: bool = True,
) -> pd.DataFrame:
"""
Add to df a column "dst_col_name" storing the percentage of values in
column "col_name" with respect to "total". The rest of the parameters are
the same as hprint.round_digits().
:return: updated df
"""
# Add column with percentage right after col_name.
pos_col_name = df.columns.tolist().index(col_name)
df.insert(pos_col_name + 1, dst_col_name, (100.0 * df[col_name]) / total)
# Format.
df[col_name] = [
hprint.round_digits(
v, num_digits=None, use_thousands_separator=use_thousands_separator
)
for v in df[col_name]
]
df[dst_col_name] = [
hprint.round_digits(
v, num_digits=num_digits, use_thousands_separator=False
)
for v in df[dst_col_name]
]
return df
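# Illustrative usage sketch (commented out): annotate a count column with its
# percentage of a total, as done by print_column_variability() above.
#
#   df = pd.DataFrame({"col_name": ["x", "y"], "num": [30, 70]})
#   add_pct(df, "num", total=100, dst_col_name="[pct]")
#   # inserts a "[pct]" column right after "num" holding 30% and 70%,
#   # both formatted as strings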
# #############################################################################
# Pandas data structure stats.
# #############################################################################
# TODO(gp): Explain what this is supposed to do.
def breakdown_table(
df: pd.DataFrame,
col_name: str,
num_digits: int = 2,
use_thousands_separator: bool = True,
verbosity: bool = False,
) -> pd.DataFrame:
if isinstance(col_name, list):
for c in col_name:
print(("\n" + hprint.frame(c).rstrip("\n")))
res = breakdown_table(df, c)
print(res)
return None
#
if verbosity:
print(("# col_name=%s" % col_name))
first_col_name = df.columns[0]
res = df.groupby(col_name)[first_col_name].count()
res = pd.DataFrame(res)
res.columns = ["count"]
res.sort_values(["count"], ascending=False, inplace=True)
res = res.append(
pd.DataFrame([df.shape[0]], index=["Total"], columns=["count"])
)
res["pct"] = (100.0 * res["count"]) / df.shape[0]
# Format.
res["count"] = [
hprint.round_digits(
v, num_digits=None, use_thousands_separator=use_thousands_separator
)
for v in res["count"]
]
res["pct"] = [
hprint.round_digits(
v, num_digits=num_digits, use_thousands_separator=False
)
for v in res["pct"]
]
if verbosity:
for k, df_tmp in df.groupby(col_name):
print((hprint.frame("%s=%s" % (col_name, k))))
cols = [col_name, "description"]
with pd.option_context(
"display.max_colwidth", 100000, "display.width", 130
):
print((df_tmp[cols]))
return res
def find_common_columns(
names: List[str], dfs: List[pd.DataFrame]
) -> pd.DataFrame:
df = []
for i, df1 in enumerate(dfs):
df1 = dfs[i].columns
for j in range(i + 1, len(dfs)):
df2 = dfs[j].columns
common_cols = [c for c in df1 if c in df2]
df.append(
(
names[i],
len(df1),
names[j],
len(df2),
len(common_cols),
", ".join(common_cols),
)
)
df = pd.DataFrame(
df,
columns=[
"table1",
"num_cols1",
"num_cols2",
"table2",
"num_comm_cols",
"common_cols",
],
)
return df
# #############################################################################
# Filter.
# #############################################################################
def remove_columns(
df: pd.DataFrame, cols: Collection[str], log_level: int = logging.DEBUG
) -> pd.DataFrame:
to_remove = set(cols).intersection(set(df.columns))
_LOG.log(log_level, "to_remove=%s", hprint.list_to_str(to_remove))
df.drop(to_remove, axis=1, inplace=True)
_LOG.debug("df=\n%s", df.head(3))
_LOG.log(log_level, hprint.list_to_str(df.columns))
return df
def filter_with_df(
df: pd.DataFrame, filter_df: pd.DataFrame, log_level: int = logging.DEBUG
) -> pd.Series:
"""
Compute a mask for DataFrame df using common columns and values in
"filter_df".
"""
mask = None
for c in filter_df:
dbg.dassert_in(c, df.columns)
vals = filter_df[c].unique()
if mask is None:
mask = df[c].isin(vals)
else:
mask &= df[c].isin(vals)
mask: pd.DataFrame
_LOG.log(log_level, "after filter=%s", hprint.perc(mask.sum(), len(mask)))
return mask
def filter_around_time(
df: pd.DataFrame,
col_name: str,
timestamp: Union[datetime.datetime, pd.Timestamp],
timedelta_before: pd.Timedelta,
timedelta_after: Optional[pd.Timedelta] = None,
log_level: int = logging.DEBUG,
) -> pd.DataFrame:
dbg.dassert_in(col_name, df)
dbg.dassert_lte(pd.Timedelta(0), timedelta_before)
if timedelta_after is None:
timedelta_after = timedelta_before
dbg.dassert_lte(pd.Timedelta(0), timedelta_after)
#
lower_bound = timestamp - timedelta_before
upper_bound = timestamp + timedelta_after
mask = (df[col_name] >= lower_bound) & (df[col_name] <= upper_bound)
#
_LOG.log(
log_level,
"Filtering in [%s, %s] selected rows=%s",
lower_bound,
upper_bound,
hprint.perc(mask.sum(), df.shape[0]),
)
return df[mask]
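# Illustrative usage sketch (commented out; the column name "timestamp" is an
# assumption):
#
#   filter_around_time(df, "timestamp",
#                      pd.Timestamp("2020-01-01 10:00:00"),
#                      timedelta_before=pd.Timedelta(minutes=5))
#   # keeps rows whose "timestamp" lies in [09:55, 10:05]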
def filter_by_val(
df: pd.DataFrame,
col_name: str,
min_val: float,
max_val: float,
use_thousands_separator: bool = True,
log_level: int = logging.DEBUG,
) -> pd.DataFrame:
"""
Filter out rows of df where df[col_name] is not in [min_val, max_val].
"""
# TODO(gp): If column is ordered, this can be done more efficiently with
# binary search.
num_rows = df.shape[0]
if min_val is not None and max_val is not None:
dbg.dassert_lte(min_val, max_val)
mask = None
if min_val is not None:
mask = min_val <= df[col_name]
if max_val is not None:
mask2 = df[col_name] <= max_val
if mask is None:
mask = mask2
else:
mask &= mask2
res = df[mask]
dbg.dassert_lt(0, res.shape[0])
_LOG.log(
log_level,
"Rows kept %s, removed %s rows",
hprint.perc(
res.shape[0],
num_rows,
use_thousands_separator=use_thousands_separator,
),
hprint.perc(
num_rows - res.shape[0],
num_rows,
use_thousands_separator=use_thousands_separator,
),
)
return res
# #############################################################################
# PCA
# #############################################################################
def _get_num_pcs_to_plot(num_pcs_to_plot: int, max_pcs: int) -> int:
"""
Get the number of principal components to plot.
"""
if num_pcs_to_plot == -1:
num_pcs_to_plot = max_pcs
dbg.dassert_lte(0, num_pcs_to_plot)
dbg.dassert_lte(num_pcs_to_plot, max_pcs)
return num_pcs_to_plot
# TODO(gp): Add some stats about how many nans where filled.
def handle_nans(df: pd.DataFrame, nan_mode: str) -> pd.DataFrame:
if nan_mode == "drop":
df = df.dropna(how="any")
elif nan_mode == "fill_with_zero":
df = df.fillna(0.0)
elif nan_mode == "abort":
num_nans = np.isnan(df).sum().sum()
if num_nans > 0:
raise ValueError("df has %d nans\n%s" % (num_nans, df))
else:
raise ValueError("Invalid nan_mode='%s'" % nan_mode)
return df
def sample_rolling_df(
rolling_df: pd.DataFrame, periods: int
) -> Tuple[pd.DataFrame, pd.DatetimeIndex]:
"""
Given a rolling metric stored as multiindex (e.g., correlation computed by
pd.ewm) sample `periods` equispaced samples.
:return: sampled df, array of timestamps selected
"""
timestamps = rolling_df.index.get_level_values(0)
ts = timestamps[:: math.ceil(len(timestamps) / periods)]
_LOG.debug("timestamps=%s", str(ts))
# rolling_df_out = rolling_df.unstack().reindex(ts).stack(dropna=False)
rolling_df_out = rolling_df.loc[ts]
return rolling_df_out, ts
# NOTE:
# - DRY: We have a rolling corr function elsewhere.
# - Functional style: This one seems to be able to modify `ret` through
# `nan_mode`.
def rolling_corr_over_time(
df: pd.DataFrame, com: float, nan_mode: str
) -> pd.DataFrame:
"""
Compute rolling correlation over time.
:return: corr_df is a multi-index df storing correlation matrices with
labels
"""
dbg.dassert_strictly_increasing_index(df)
df = handle_nans(df, nan_mode)
corr_df = df.ewm(com=com, min_periods=3 * com).corr()
return corr_df
def _get_eigvals_eigvecs(
df: pd.DataFrame, dt: datetime.date, sort_eigvals: bool
) -> Tuple[np.array, np.array]:
dbg.dassert_isinstance(dt, datetime.date)
df_tmp = df.loc[dt].copy()
# Compute rolling eigenvalues and eigenvectors.
# TODO(gp): Count and report inf and nans as warning.
df_tmp.replace([np.inf, -np.inf], np.nan, inplace=True)
df_tmp.fillna(0.0, inplace=True)
eigval, eigvec = np.linalg.eigh(df_tmp)
# Sort eigenvalues, if needed.
if not (sorted(eigval) == eigval).all():
_LOG.debug("eigvals not sorted: %s", eigval)
if sort_eigvals:
_LOG.debug(
"Before sorting:\neigval=\n%s\neigvec=\n%s", eigval, eigvec
)
_LOG.debug("eigvals: %s", eigval)
idx = eigval.argsort()[::-1]
eigval = eigval[idx]
eigvec = eigvec[:, idx]
_LOG.debug("After sorting:\neigval=\n%s\neigvec=\n%s", eigval, eigvec)
#
if (eigval == 0).all():
eigvec = np.nan * eigvec
return eigval, eigvec
def rolling_pca_over_time(
df: pd.DataFrame, com: float, nan_mode: str, sort_eigvals: bool = True
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Compute rolling PCAs over time.
:param sort_eigvals: sort the eigenvalues in descending orders
:return:
- eigval_df stores eigenvalues for the different components indexed by
timestamps
- eigvec_df stores eigenvectors as multiindex df
"""
# Compute rolling correlation.
corr_df = rolling_corr_over_time(df, com, nan_mode)
# Compute eigvalues and eigenvectors.
timestamps = corr_df.index.get_level_values(0).unique()
eigval = np.zeros((timestamps.shape[0], df.shape[1]))
eigvec = np.zeros((timestamps.shape[0], df.shape[1], df.shape[1]))
for i, dt in tauton.tqdm(
enumerate(timestamps),
total=timestamps.shape[0],
desc="Computing rolling PCA",
):
eigval[i], eigvec[i] = _get_eigvals_eigvecs(corr_df, dt, sort_eigvals)
# Package results.
eigval_df = pd.DataFrame(eigval, index=timestamps)
dbg.dassert_eq(eigval_df.shape[0], len(timestamps))
dbg.dassert_strictly_increasing_index(eigval_df)
# Normalize by sum.
# TODO(gp): Move this up.
eigval_df = eigval_df.multiply(1 / eigval_df.sum(axis=1), axis="index")
#
# pylint ref: github.com/PyCQA/pylint/issues/3139
eigvec = eigvec.reshape(
(-1, eigvec.shape[-1])
) # pylint: disable=unsubscriptable-object
idx = pd.MultiIndex.from_product(
[timestamps, df.columns], names=["datetime", None]
)
eigvec_df = pd.DataFrame(
eigvec, index=idx, columns=range(df.shape[1])
) # pylint: disable=unsubscriptable-object
dbg.dassert_eq(
len(eigvec_df.index.get_level_values(0).unique()), len(timestamps)
)
return corr_df, eigval_df, eigvec_df
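# Illustrative usage sketch (commented out): compute rolling PCA on a returns
# panel and plot it with plot_pca_over_time() below. The column names and the
# smoothing parameter are assumptions.
#
#   rets = pd.DataFrame(np.random.randn(500, 4),
#                       index=pd.date_range("2020-01-01", periods=500),
#                       columns=["A", "B", "C", "D"])
#   corr_df, eigval_df, eigvec_df = rolling_pca_over_time(
#       rets, com=20.0, nan_mode="fill_with_zero")
#   plot_pca_over_time(eigval_df, eigvec_df, num_pcs_to_plot=2)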
def plot_pca_over_time(
eigval_df: pd.DataFrame,
eigvec_df: pd.DataFrame,
num_pcs_to_plot: int = 0,
num_cols: int = 2,
) -> None:
"""
Similar to plot_pca_analysis() but over time.
"""
# Plot eigenvalues.
eigval_df.plot(title="Eigenvalues over time", ylim=(0, 1))
# Plot cumulative variance.
eigval_df.cumsum(axis=1).plot(
title="Fraction of variance explained by top PCs over time", ylim=(0, 1)
)
# Plot eigenvalues.
max_pcs = eigvec_df.shape[1]
num_pcs_to_plot = _get_num_pcs_to_plot(num_pcs_to_plot, max_pcs)
_LOG.info("num_pcs_to_plot=%s", num_pcs_to_plot)
if num_pcs_to_plot > 0:
_, axes = cplott.get_multiple_plots(
num_pcs_to_plot,
num_cols=num_cols,
y_scale=4,
sharex=True,
sharey=True,
)
for i in range(num_pcs_to_plot):
eigvec_df[i].unstack(1).plot(
ax=axes[i], ylim=(-1, 1), title="PC%s" % i
)
def plot_time_distributions(
dts: List[Union[datetime.datetime, pd.Timestamp]],
mode: str,
density: bool = True,
) -> mpl.axes.Axes:
"""
Compute distribution for an array of timestamps `dts`.
- mode: see below
"""
dbg.dassert_type_in(dts[0], (datetime.datetime, pd.Timestamp))
dbg.dassert_in(
mode,
(
"time_of_the_day",
"weekday",
"minute_of_the_hour",
"day_of_the_month",
"month_of_the_year",
"year",
),
)
if mode == "time_of_the_day":
# Convert in minutes from the beginning of the day.
data = [dt.time() for dt in dts]
data = [t.hour * 60 + t.minute for t in data]
# 1 hour bucket.
step = 60
bins = np.arange(0, 24 * 60 + step, step)
vals = pd.cut(
data,
bins=bins,
include_lowest=True,
right=False,
retbins=False,
labels=False,
)
# Count.
count = pd.Series(vals).value_counts(sort=False)
# Compute the labels.
yticks = ["%02d:%02d" % (bins[k] / 60, bins[k] % 60) for k in count.index]
elif mode == "weekday":
data = [dt.date().weekday() for dt in dts]
bins = np.arange(0, 7 + 1)
vals = pd.cut(
data,
bins=bins,
include_lowest=True,
right=False,
retbins=False,
labels=False,
)
# Count.
count = pd.Series(vals).value_counts(sort=False)
# Compute the labels.
yticks = "Mon Tue Wed Thu Fri Sat Sun".split()
elif mode == "minute_of_the_hour":
vals = [dt.time().minute for dt in dts]
# Count.
count = pd.Series(vals).value_counts(sort=False)
# Compute the labels.
yticks = list(map(str, list(range(1, 60 + 1))))
elif mode == "day_of_the_month":
vals = [dt.date().day for dt in dts]
# Count.
count = pd.Series(vals).value_counts(sort=False)
# Compute the labels.
yticks = list(map(str, list(range(1, 31 + 1))))
elif mode == "month_of_the_year":
vals = [dt.date().month for dt in dts]
# Count.
count = pd.Series(vals).value_counts(sort=False)
# Compute the labels.
yticks = "Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split()
elif mode == "year":
vals = [dt.date().year for dt in dts]
# Count.
count = pd.Series(vals).value_counts(sort=False)
# IPython log file
import pickle
with open('sim-results.pickle', 'rb') as fin:
s = pickle.load(fin)
len(s)
next(s.keys())
next(iter(s.keys()))
plt.plot(s[(160000, 0.2, 0)])
from matplotlib import pyplot as plt
plt.plot(s[(160000, 0.2, 0)])
plt.plot(np.cumsum(s[(160000, 0.2, 0)]))
import numpy as np
plt.plot(np.cumsum(s[(160000, 0.2, 0)]))
worst = (160000, 0.2)
worst_loads = [s[worst + (rep,)] for rep in range(5)]
worst_total = [np.cumsum(load)[-1] for load in worst_loads]
worst_total
np.mean(worst_total)
np.std(worst_total)
def multiplier(loads):
return 2 * np.sum(loads) / len(loads)
import pandas as pd
df_source = [[nrow, q, np.sum(load), np.sum(load) / nrow]
for (nrow, q, i), load in s.items()]
get_ipython().set_next_input('data = pd.DataFrame');get_ipython().magic('pinfo pd.DataFrame')
data = pd.DataFrame(df_source, columns=['nrows', 'q', 'total size', 'load factor'])
import pandas as pd
def format(mode, x, y):
if mode == 'df':
x = pd.DataFrame(x)
y = pd.DataFrame(y)
################################################################################
#
# Collect statistics from the dataset
#
# Author(s): <NAME>
################################################################################
import pathlib
from collections import defaultdict
import click
import webdataset as wds
import pandas as pd
from tqdm import tqdm
from src.data.modules.wds_util import _find_shard_paths
################################################################################
# main function
@click.command()
@click.option("--shard_folder", type=pathlib.Path, required=True)
def main(shard_folder: pathlib.Path):
shard_list = _find_shard_paths(shard_folder, "*.tar*")
print(shard_list)
ds = wds.WebDataset(urls=shard_list)
if not isinstance(ds, wds.Processor):
raise ValueError("init of webdataset failed")
ds = ds.decode(wds.torch_audio)
data = defaultdict(lambda: defaultdict(list))
audio_length_seconds_count = []
for x in tqdm(ds):
speaker_id = x["json"]["speaker_id"]
youtube_id = x["json"]["youtube_id"]
utterance_id = x["json"]["utterance_id"]
utterance_length_seconds = x["json"]["num_frames"] / x["json"]["sampling_rate"]
data[speaker_id][youtube_id].append(utterance_id)
audio_length_seconds_count.append(utterance_length_seconds)
print("num files")
print(len(audio_length_seconds_count))
print("num speakers")
print(len(data))
print("sessions")
sessions_per_speaker = [len(x) for x in data.values()]
print("total:", sum(sessions_per_speaker))
print(pd.Series(sessions_per_speaker).describe())
print("utterances")
sessions_collection = []
for sessions in data.values():
for session in sessions.values():
sessions_collection.append(session)
utterances_per_session = [len(s) for s in sessions_collection]
print("total:", sum(utterances_per_session))
print(pd.Series(utterances_per_session).describe())
print("audio length")
print(pd.Series(audio_length_seconds_count).describe())
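# Illustrative invocation sketch (the script/entry-point name is an assumption):
#
#   python collect_statistics.py --shard_folder /path/to/shards
#
# Prints the number of files, speakers, sessions per speaker, utterances per
# session and the audio-length distribution of the WebDataset shards.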
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 11:33:59 2017
Parse the TinySoft (天软) data format.
@author: ws
"""
import pandas as pd
_max_iter_stocks = 100
def _int2date(int_date):
if int_date < 10000000:
return pd.NaT
return pd.datetime(int_date//10000, int_date%10000//100, int_date%100)
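# Illustrative examples:
#   _int2date(20171106)  -> datetime.datetime(2017, 11, 6, 0, 0)
#   _int2date(0)         -> pd.NaT (values below 10000000 are treated as missing)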
def parseByStock(TSData, date_parse=None):
"""
按照股票为单位,逐个解析数据。
数据格式为两列的Array,第一列是股票代码,第二列是对应的子Array。
不同的股票的每个子Array中的列名是相同的,以此保证可以把所有股票的数据按行连接起来。
Return:
=======
DataFrame(index=IDs, columns=data)
"""
if TSData[0] != 0:
raise ValueError("天软数据提取失败!")
iter_stock = 0
table = pd.DataFrame()
temp_table = []
for idata in TSData[1]:
stockID = idata[b'IDs'].decode('utf8')[2:]
stockData = []
iter_stock += 1
for itable in idata[b'data']:
new_dict = {k.decode('gbk'): v for k, v in itable.items()}
new_data = pd.DataFrame(new_dict, index=pd.Index([stockID], name='IDs'))
stockData.append(new_data)
if stockData:
stockData = pd.concat(stockData)
else:
continue
temp_table.append(stockData)
if iter_stock >= _max_iter_stocks:
_ = pd.concat(temp_table)
import getpass
from PySide2.QtWidgets import QMainWindow, QMessageBox
from PySide2 import QtWidgets, QtGui
from components.mensagens import Mensagens
from dao.chamado_dao import ChamadoDao
from model.chamado import Chamado
from view.tela_fechar_chamado import TelaFecharChamado
from view.ui_tela_chamado import Ui_TelaChamado
from datetime import datetime
import pandas as pd
class TelaChamado(QMainWindow, Ui_TelaChamado):
"""Tela de Chamados
Tela responsavel pela interação do usuário para inclusão, alteração e exclusão de dados chamados.
A mesma também possui área para visualização de chamados para consulta.
"""
def __init__(self):
super(TelaChamado, self).__init__()
self.setupUi(self)
self.setWindowTitle("Controle de Chamados")
self.setFixedSize(1245, 658)
self.mensagem = Mensagens()
self.popula_campo_solucao()
"""Chama o método para popular a combobox de Solução."""
self.pegar_data_atual()
"""Popula o campo de data atual com a data do dia."""
self.btn_atualizar_data.clicked.connect(self.atualizar_data)
"""Atualiza o campo com a data do dia."""
self.listar_chamado_tabela()
"""Função para chamar o método de listar os chamados na tabela da tela de consulta de chamados."""
self.btn_cadastrar.clicked.connect(self.cadastrar_chamado)
"""Função que chama o método para cadastrar um chamado e salvar em banco de dados."""
self.btn_buscar_contrato.clicked.connect(self.buscar_contrato_cliente_no_formulario)
"""Função que chama o método de consulta a clientes e popula o formulário de Chamado com alguns dados."""
self.btn_carregar.clicked.connect(self.carregar_dados_tabela_para_formulario)
"""Função que chama o método de popular o formulário com os dados selecionados."""
self.btn_limpar_tela.clicked.connect(self.limpar_campos_formulario)
"""Função que chama o método de limpar os campos do formulário."""
self.btn_atualizar_data_abertura.clicked.connect(self.pegar_data_atual)
"""Função que chama o método para atualizar a data atual."""
self.btn_alterar.clicked.connect(self.alterar_chamado)
"""Função que chama o método para alterar os dados do formulário."""
self.btn_excluir.clicked.connect(self.excluir_chamado)
"""Função que chama o métodod e excluir chamado."""
self.btn_fechar_chamado.clicked.connect(self.fechar_chamado)
"""Função que chama o método de fechar chamado."""
self.btn_consultar_numero_chamado.clicked.connect(self.listar_chamado_tabela_por_numero_chamado)
"""Função que chama o método de listar chamados e retornar na tabela de chamados."""
self.btn_carregar_tabela.clicked.connect(self.listar_chamado_tabela)
"""Função que chama o método que recarrega a lista de chamados."""
self.btn_consulta_contrato.clicked.connect(self.listar_chamado_tabela_por_contrato)
"""Função que chama o método de listar chamados por número de contrato."""
self.btn_consultar_nome_cliente.clicked.connect(self.listar_chamado_por_nome_cliente)
"""Função que chamado o método de listar chamado por nome do cliente."""
self.btn_gerar_relatrio.clicked.connect(self.gerar_relatorio_chamados)
"""Função que chama o método para geração de Relatórios"""
self.btn_fechar_tela.clicked.connect(self.close)
"""Fecha e encerra a janela."""
self.btn_alterar.setEnabled(False)
self.btn_excluir.setEnabled(False)
self.btn_fechar_chamado.setEnabled(False)
def popula_campo_solucao(self):
"""Popular Campo de solução
Este método popula a comboBox com o nome das soluções cadastradas.
:return: Lista de soluções.
"""
try:
chamado_dao = ChamadoDao()
resultado = chamado_dao.popular_combo_solucao()
for i in resultado:
self.combo_solucao.addItem(str(i[0]))
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def listar_chamado_tabela(self):
"""Listar chamado
Este Método lista os chamados na tabela de consulta de chamados.
:return: Lista de chamados cadastrados.
"""
try:
chamado_dao = ChamadoDao()
resultado = chamado_dao.listar_chamado_tabela()
self.tabela_chamado.setRowCount(len(resultado))
self.tabela_chamado.setColumnCount(14)
for i in range(len(resultado)):
for j in range(0, 14):
self.tabela_chamado.setItem(i, j, QtWidgets.QTableWidgetItem(str(resultado[i][j])))
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def pegar_data_atual(self):
"""Pegar Data Atual.
Método para capturar a data atual.
"""
data = datetime.today().strftime('%d/%m/%Y')
self.txt_data_abertura.setText(data)
def atualizar_data(self):
"""Atualizar data.
Método para atualizar a data do chamado.
"""
data = datetime.today().strftime('%d/%m/%Y')
self.txt_data_atualizacao.setText(data)
def buscar_contrato_cliente_no_formulario(self):
"""Consultar contrato no Formulário
Efetua uma consulta por contrato na tabela de cliente e retorna os valores para popular o formulário
da tela de chamados.
:return: Retorna um cliente por número de contrato.
"""
if self.txt_numero_contrato.text() == "":
self.mensagem.mensagem_campo_vazio('NÚMERO CONTRATO')
elif self.txt_numero_contrato.text().isnumeric():
try:
chamado = Chamado()
chamado.numero_contrato = self.txt_numero_contrato.text()
chamado_dao = ChamadoDao()
resultado = chamado_dao.buscar_contrato_cliente_formulario_banco(chamado.numero_contrato)
self.txt_nome_cliente.setText(str(resultado[0][2]))
self.txt_endereco.setText(str(resultado[0][3]))
self.txt_contato.setText(str(resultado[0][4]))
self.txt_telefone.setText(str(resultado[0][5]))
self.txt_email.setText(str(resultado[0][6]))
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
except IndexError as i_error:
print(i_error)
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Consulta Contrato")
msg.setText('Contrato não encontrado!')
msg.exec_()
self.limpar_campos_formulario()
else:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Consulta Contrato")
msg.setText('No campo CONTRATO informe somente números!')
msg.exec_()
def carregar_dados_tabela_para_formulario(self):
"""Carregar dados para o formulário.
Carrega os dados do chamado no formulário com os dados consultados no banco de dados.
:return:
"""
try:
linha = self.tabela_chamado.currentItem().text()
if not linha.isdigit():
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Consultar Chamados")
msg.setText('Selecione somente a coluna Número do chamado')
msg.exec_()
else:
chamado_dao = ChamadoDao()
resultado = chamado_dao.consultar_numero_chamado(linha)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Consulta Contrato")
msg.setText('Selecione somente a coluna Número do chamado!')
msg.exec_()
else:
self.tab_chamado.setCurrentWidget(self.tab_cadastro_chamado)
self.txt_numero_chamado.setText(str(resultado[0][0]))
self.txt_numero_contrato.setText(str(resultado[0][1]))
self.txt_nome_cliente.setText(str(resultado[0][2]))
self.txt_endereco.setText(str(resultado[0][3]))
self.txt_contato.setText(str(resultado[0][4]))
self.txt_telefone.setText(str(resultado[0][5]))
self.txt_email.setText(str(resultado[0][6]))
self.txt_problema.setText(str(resultado[0][7]))
self.txt_observacao.setText(str(resultado[0][8]))
self.combo_status_chamado.setCurrentText(resultado[0][9])
self.combo_tipo_chamado.setCurrentText(str(resultado[0][10]))
self.combo_solucao.setCurrentText(str(resultado[0][11]))
self.txt_data_abertura.setText(str(resultado[0][12]))
self.txt_data_atualizacao.setText(str(resultado[0][13]))
self.btn_alterar.setEnabled(True)
self.btn_excluir.setEnabled(True)
self.btn_cadastrar.setEnabled(False)
self.txt_numero_contrato.setEnabled(False)
self.btn_fechar_chamado.setEnabled(True)
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
except AttributeError as at_erro:
print(at_erro)
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Consultar Chamado")
msg.setText('Selecione um item da coluna Número do chamado.')
msg.exec_()
def consultar_numero_contrato(self):
"""Consultar Número de contrato
Efetua uma consulta por número de contrato e retorna os possíveis resultados na tabela de chamados.
:return: Lista de chamados por número de contrato.
"""
if self.txt_consulta_contrato.text() == "":
self.mensagem.mensagem_campo_vazio('NÚMERO DO CONTRATO')
        elif self.txt_consulta_contrato.text().isnumeric():
try:
chamado = Chamado()
chamado.numero_contrato = self.txt_consulta_contrato.text()
chamado_dao = ChamadoDao()
resultado = chamado_dao.consultar_contrato_banco(chamado.numero_contrato)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Consulta Contrato")
                    msg.setText('Número do contrato não encontrado!')
msg.exec_()
self.txt_consulta_contrato.setText("")
self.listar_chamado_tabela()
else:
self.tabela_chamado.setRowCount(len(resultado))
self.tabela_chamado.setColumnCount(14)
for i in range(len(resultado)):
for j in range(0, 14):
self.tabela_chamado.setItem(i, j, QtWidgets.QTableWidgetItem(str(resultado[i][j])))
                    self.txt_consulta_contrato.setText("")
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def cadastrar_chamado(self):
"""Cadastrar Chamado.
Método que cadastra um chamado no banco de dados e exibe o resultado na tabela da tela de chamados.
:return: Cadastro de Chamado
"""
if self.txt_numero_chamado.text() == "":
self.mensagem.mensagem_campo_vazio("NÚMERO CHAMADO")
elif self.txt_numero_contrato.text() == "":
self.mensagem.mensagem_campo_vazio('NÚMERO CONTRATO')
elif self.txt_nome_cliente.text() == "":
self.mensagem.mensagem_campo_vazio('NOME CLIENTE')
elif self.txt_contato.text() == "":
self.mensagem.mensagem_campo_vazio('CONTATO')
elif self.txt_telefone.text() == "":
self.mensagem.mensagem_campo_vazio('TELEFONE')
elif self.txt_email.text() == "":
self.mensagem.mensagem_campo_vazio("E-MAIL")
elif self.txt_problema.toPlainText() == "":
self.mensagem.mensagem_campo_vazio("PROBLEMA")
elif self.combo_status_chamado.currentText() == "Selecione uma Opção":
self.mensagem.mensagem_combo('STATUS DE CHAMADO')
elif self.combo_tipo_chamado.currentText() == "Selecione uma Opção":
self.mensagem.mensagem_combo('TIPO CHAMADO')
elif self.combo_solucao.currentText() == "Selecione uma Opção":
self.mensagem.mensagem_combo('SOLUÇÃO')
elif self.txt_data_atualizacao.text() == "":
self.mensagem.mensagem_campo_vazio('DATA ATUALIZAÇÃO')
else:
chamado = Chamado()
chamado.numero_chamado = self.txt_numero_chamado.text()
chamado.numero_contrato = self.txt_numero_contrato.text()
chamado.nome_cliente = self.txt_nome_cliente.text()
chamado.endereco = self.txt_endereco.text()
chamado.contato = self.txt_contato.text()
chamado.telefone = self.txt_telefone.text()
chamado.email = self.txt_email.text()
chamado.problema = self.txt_problema.toPlainText()
chamado.observacao = self.txt_observacao.text()
chamado.status = self.combo_status_chamado.currentText()
chamado.tipo = self.combo_tipo_chamado.currentText()
chamado.solucao = self.combo_solucao.currentText()
data_abertura = self.txt_data_abertura.text()
data_atualizacao = self.txt_data_atualizacao.text()
chamado.data_abertura = datetime.strptime(data_abertura, '%d/%m/%Y').strftime('%Y-%m-%d')
chamado.data_atualizacao = datetime.strptime(data_atualizacao, '%d/%m/%Y').strftime('%Y-%m-%d')
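            # A quick illustration of the conversion above (hypothetical value):
            # '27/02/2021' parsed with '%d/%m/%Y' and re-formatted with '%Y-%m-%d' becomes '2021-02-27',
            # which is the date format expected by the database layer.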
try:
chamado_dao = ChamadoDao()
chamado_dao.cadastrar_chamado_banco(chamado.numero_chamado, chamado.numero_contrato,
chamado.nome_cliente, chamado.endereco, chamado.contato,
chamado.telefone, chamado.email, chamado.problema,
chamado.observacao, chamado.status, chamado.tipo,
chamado.solucao, chamado.data_abertura,
chamado.data_atualizacao)
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Cadastro de Chamado")
msg.setText(f'Chamado {chamado.numero_chamado} cadastrado com sucesso.')
msg.exec_()
self.listar_chamado_tabela()
self.limpar_campos_formulario()
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def alterar_chamado(self):
"""Alterar Chamado.
Método que altera os dados do chamado caso necessário.
:return: Alteração chamado
"""
if self.txt_numero_chamado.text() == "":
self.mensagem.mensagem_campo_vazio("NÚMERO CHAMADO")
elif self.txt_numero_contrato.text() == "":
self.mensagem.mensagem_campo_vazio('NÚMERO CONTRATO')
elif self.txt_nome_cliente.text() == "":
self.mensagem.mensagem_campo_vazio('NOME CLIENTE')
elif self.txt_contato.text() == "":
self.mensagem.mensagem_campo_vazio('CONTATO')
elif self.txt_telefone.text() == "":
self.mensagem.mensagem_campo_vazio('TELEFONE')
elif self.txt_email.text() == "":
self.mensagem.mensagem_campo_vazio("E-MAIL")
elif self.txt_problema.toPlainText() == "":
self.mensagem.mensagem_campo_vazio("PROBLEMA")
elif self.combo_status_chamado.currentText() == "Selecione uma Opção":
self.mensagem.mensagem_combo('STATUS DE CHAMADO')
elif self.combo_tipo_chamado.currentText() == "Selecione uma Opção":
self.mensagem.mensagem_combo('TIPO CHAMADO')
elif self.combo_solucao.currentText() == "Selecione uma Opção":
self.mensagem.mensagem_combo('SOLUÇÃO')
elif self.txt_data_atualizacao.text() == "":
self.mensagem.mensagem_campo_vazio('DATA ATUALIZAÇÃO')
else:
chamado = Chamado()
chamado.numero_chamado = self.txt_numero_chamado.text()
chamado.numero_contrato = self.txt_numero_contrato.text()
chamado.telefone = self.txt_telefone.text()
chamado.email = self.txt_email.text()
chamado.problema = self.txt_problema.toPlainText()
chamado.observacao = self.txt_observacao.text()
chamado.status = self.combo_status_chamado.currentText()
chamado.solucao = self.combo_solucao.currentText()
chamado.tipo = self.combo_tipo_chamado.currentText()
data_abertura = self.txt_data_abertura.text()
data_atualizacao = self.txt_data_atualizacao.text()
chamado.data_abertura = datetime.strptime(data_abertura, '%d/%m/%Y').strftime('%Y-%m-%d')
chamado.data_atualizacao = datetime.strptime(data_atualizacao, '%d/%m/%Y').strftime('%Y-%m-%d')
try:
chamado_dao = ChamadoDao()
chamado_dao.alterar_chamado_banco(chamado.numero_chamado, chamado.telefone, chamado.email,
chamado.problema, chamado.observacao, chamado.status, chamado.solucao,
chamado.tipo, chamado.data_abertura, chamado.data_atualizacao)
chamado_dao.alterar_cliente_telefone_email(chamado.numero_contrato, chamado.telefone, chamado.email)
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Alterar Chamado")
msg.setText(f'Chamado {chamado.numero_chamado} alterado com sucesso.')
msg.exec_()
self.limpar_campos_formulario()
self.listar_chamado_tabela()
self.txt_numero_contrato.setEnabled(True)
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def excluir_chamado(self):
"""Excluir Chamado
Método que exclui um chamado passando como referência o número do chamado.
:return: Exclusão de chamado.
"""
if self.txt_numero_chamado.text() == "":
self.mensagem.mensagem_campo_vazio('NÚMERO CHAMADO')
else:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setWindowTitle("Exclusão de Chamado!")
msg.setText("Tem certeza que deseja excluir o Chamado?")
msg.setStandardButtons(QMessageBox.Yes)
msg.addButton(QMessageBox.No)
msg.setDefaultButton(QMessageBox.No)
if msg.exec_() == QMessageBox.Yes:
chamado = Chamado()
chamado.numero_chamado = self.txt_numero_chamado.text()
try:
chamado_dao = ChamadoDao()
chamado_dao.excluir_chamado_banco(chamado.numero_chamado)
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Exclusão de Chamado")
                    msg.setText(f'Chamado {chamado.numero_chamado} excluído com sucesso.')
msg.exec_()
self.limpar_campos_formulario()
self.listar_chamado_tabela()
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def fechar_chamado(self):
"""Fechar Chamado
Método que abre a tela de fechamento de chamados passando alguns parâmetros para encerramento do mesmo.
:return: Tela fechar chamado
"""
if not self.txt_numero_chamado.text().isnumeric():
self.mensagem.mensagem_campo_numerico('NÚMERO CHAMADO')
elif not self.txt_numero_contrato.text().isnumeric():
self.mensagem.mensagem_campo_numerico('NÚMERO CONTRATO')
elif self.txt_nome_cliente.text() == "":
self.mensagem.mensagem_campo_vazio('NOME CLIENTE')
elif self.txt_contato.text() == "":
self.mensagem.mensagem_campo_vazio('CONTATO')
elif self.txt_telefone.text() == "":
self.mensagem.mensagem_campo_vazio('TELEFONE')
elif self.txt_problema.toPlainText() == "":
self.mensagem.mensagem_campo_vazio('PROBLEMA')
elif self.combo_tipo_chamado.currentText() == 'Selecione uma Opção':
self.mensagem.mensagem_combo('TIPO CHAMADO')
elif self.combo_solucao.currentText() == 'Selecione uma opção':
self.mensagem.mensagem_combo('SOLUÇÃO')
elif self.combo_status_chamado.currentText() == 'Selecione uma opção':
self.mensagem.mensagem_combo('STATUS DO CHAMADO')
else:
self.tela_fechar_chamado = TelaFecharChamado()
self.tela_fechar_chamado.txt_numero_chamado.setText(self.txt_numero_chamado.text())
self.tela_fechar_chamado.txt_contrato.setText(self.txt_numero_contrato.text())
self.tela_fechar_chamado.txt_nome_cliente.setText(self.txt_nome_cliente.text())
self.tela_fechar_chamado.txt_contato.setText(self.txt_contato.text())
self.tela_fechar_chamado.txt_telefone.setText(self.txt_telefone.text())
self.tela_fechar_chamado.txt_problema.setText(self.txt_problema.toPlainText())
self.tela_fechar_chamado.txt_tipo_chamado.setText(self.combo_tipo_chamado.currentText())
self.tela_fechar_chamado.show()
self.close()
def listar_chamado_tabela_por_numero_chamado(self):
"""Listar Chamado tabela
Método que efetua uma consulta conforme parâmetro informado pelo usuário e retorna o resultado na tabela.
:return: lista de chamados conforme parâmetro passado.
"""
if self.txt_consulta_numero_chamado.text() == "":
            self.mensagem.mensagem_campo_vazio('CONSULTA NÚMERO CHAMADO')
elif not self.txt_consulta_numero_chamado.text().isdigit():
self.mensagem.mensagem_campo_numerico("CONSULTA NÚMERO CHAMADO")
self.txt_consulta_numero_chamado.setText("")
else:
chamado = Chamado()
chamado.numero_chamado = self.txt_consulta_numero_chamado.text()
try:
chamado_dao = ChamadoDao()
resultado = chamado_dao.listar_numero_chamado_tabela(chamado.numero_chamado)
if len(resultado) == 0:
self.mensagem.mensagem_registro_não_encontrado(chamado.numero_chamado)
self.txt_consulta_numero_chamado.setText("")
self.listar_chamado_tabela()
else:
self.tabela_chamado.setRowCount(len(resultado))
self.tabela_chamado.setColumnCount(14)
for i in range(len(resultado)):
for j in range(0, 14):
self.tabela_chamado.setItem(i, j, QtWidgets.QTableWidgetItem(str(resultado[i][j])))
self.txt_consulta_numero_chamado.setText("")
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def listar_chamado_tabela_por_contrato(self):
"""Listar chamado por contrato
Método que lista os chamados conforme número do contrato passado como parâmetro.
:return: Lista de chamados conforme parâmetro.
"""
if self.txt_consulta_contrato.text() == "":
self.mensagem.mensagem_campo_vazio('NÚMERO DO CONTRATO')
elif not self.txt_consulta_contrato.text().isdigit():
self.mensagem.mensagem_campo_numerico('NÚMERO DO CONTRATO')
self.txt_consulta_contrato.setText("")
else:
chamado = Chamado()
chamado.numero_contrato = self.txt_consulta_contrato.text()
try:
chamado_dao = ChamadoDao()
resultado = chamado_dao.listar_chamado_por_contrato(chamado.numero_contrato)
if len(resultado) == 0:
self.mensagem.mensagem_registro_não_encontrado(chamado.numero_contrato)
self.txt_consulta_contrato.setText("")
else:
self.tabela_chamado.setRowCount(len(resultado))
self.tabela_chamado.setColumnCount(14)
for i in range(len(resultado)):
for j in range(0, 14):
self.tabela_chamado.setItem(i, j, QtWidgets.QTableWidgetItem(str(resultado[i][j])))
self.txt_consulta_contrato.setText("")
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def listar_chamado_por_nome_cliente(self):
"""Listar Chamado por nome do cliente.
Lista os chamados por nome do cliente passado como parâmetro.
:return: Listagem de camados conforme parâmetro.
"""
if self.txt_consulta_nome_cliente.text() == "":
self.mensagem.mensagem_campo_vazio('NOME DO CLIENTE')
else:
chamado = Chamado()
chamado.nome_cliente = self.txt_consulta_nome_cliente.text()
try:
chamado_dao = ChamadoDao()
resultado = chamado_dao.listar_chamado_por_nome_cliente(chamado.nome_cliente)
if len(resultado) == 0:
self.mensagem.mensagem_registro_não_encontrado(chamado.nome_cliente)
self.txt_consulta_nome_cliente.setText("")
else:
self.tabela_chamado.setRowCount(len(resultado))
self.tabela_chamado.setColumnCount(14)
for i in range(len(resultado)):
for j in range(0, 14):
self.tabela_chamado.setItem(i, j, QtWidgets.QTableWidgetItem(str(resultado[i][j])))
self.txt_consulta_nome_cliente.setText("")
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def gerar_relatorio_chamados(self):
"""Gerar Relatório
Método que gera um relatório em .xlsx e salva na pasta download do usuário.
:return: Geração de relatório.
"""
user_windows = getpass.getuser()
try:
chamado_dao = ChamadoDao()
resultado = chamado_dao.listar_chamado_tabela()
            dados = pd.DataFrame(resultado)
"""
Analytics Vidhya Jobathon
File Description: Utils + Constants
Date: 27/02/2021
Author: <EMAIL>
"""
#import required libraries
import pandas as pd
import numpy as np
import logging
import xgboost as xgb
from catboost import CatBoostClassifier, Pool
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.feature_extraction import FeatureHasher
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_auc_score
from sklearn import manifold
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
#global constants
PRIMARY_KEY = 'ID'
TARGET = 'Response'
#FEATURES = None
RAW_TRAIN_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\train_Df64byy.csv'
RAW_TEST_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\test_YCcRUnU.csv'
PROCESSED_TRAIN_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\train_processed.csv'
PROCESSED_TEST_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\test_processed.csv'
SUBMISSION_FILE_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance'
def load_data(path):
'''
Function to load data, in this case a csv file
'''
    return pd.read_csv(path)
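# Usage sketch (not part of the original pipeline; assumes the RAW_*_DATA_PATH files above exist on disk):
#   train_df = load_data(RAW_TRAIN_DATA_PATH)
#   test_df = load_data(RAW_TEST_DATA_PATH)
#   X, y = train_df.drop(columns=[TARGET, PRIMARY_KEY]), train_df[TARGET]   # column names follow the constants above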
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests  # used by catch_all() further below when mirroring the client in debug mode
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
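# Minimal sketch of what Convert does (hypothetical values, not used by the app itself):
#   Convert(['kernel', 'rbf', 'C', 1.0])  ->  {'kernel': 'rbf', 'C': 1.0}
# i.e. a flat [key, value, key, value, ...] list becomes a dict.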
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
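# Usage sketch (hypothetical column names): given a DataFrame with columns
# ['age', 'sex', 'chol'], column_index(df, ['chol', 'age']) returns [2, 0],
# i.e. the positional indices of the requested columns.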
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)  # keys are feature indices (M), not sample indices
)
return out
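# Usage sketch (hypothetical; assumes clf is an already-fitted tree ensemble such as
# ExtraTreesClassifier, so it exposes feature_importances_):
#   per_class = class_feature_importance(XData.values, np.array(yData), clf.feature_importances_)
#   # per_class[c][j] is the mean standardized value of feature j within class c,
#   # weighted by the global importance of feature j.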
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
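# Note on the two modes handled above: 'blend' evaluates every model on a single 80/20
# hold-out split (ShuffleSplit with n_splits=1 and test_size=0.20), while any other mode
# falls back to plain 5-fold cross-validation; the resulting crossValidation object is
# what GridSearchForModels() later passes to GridSearchCV as its cv argument.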
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
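# Because GridSearchForModels below is decorated with @memory.cache, a repeated call with
# identical arguments (same data, classifier, parameter grid, toggle and CV scheme) is
# served from ./cachedir instead of re-running the whole grid search.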
# calculating for all algorithms and models the performance and other results
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
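# Minimal sketch (hypothetical input): Remove([1.0, 1.0, np.nan, 'gini', 'gini'])
# returns [1.0, 'gini'] - duplicates and NaN placeholders are dropped while the
# original order of first occurrences is preserved.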
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
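# Sketch of what PreprocessingPred returns (illustrative numbers): each entry of
# `predictions` corresponds to one instance and holds its class-probability vector
# averaged over the currently selected models. With two classes and three selected
# models giving [0.9, 0.1], [0.7, 0.3] and [0.8, 0.2] for an instance, the averaged
# entry is [0.8, 0.2].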
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
    _, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
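# Note: procrustes() above aligns the MDS projection of the remaining models
# (PredictionSpaceSelComb) to the projection of all models (PredictionSpaceAllComb),
# so the updated prediction-space scatterplot keeps a comparable orientation and scale
# on the frontend; the function returns the aligned coordinates plus the IDs of the
# models that survived the removal.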
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
    dfMLP = pd.DataFrame.from_dict(dicMLP)
"""
ECCC national hourly data import routine
Get hourly data for weather stations in Canada.
The code is licensed under the MIT license.
"""
from datetime import datetime
from multiprocessing.pool import ThreadPool
import pandas as pd
from jasper import Jasper
from jasper.actions import persist
from jasper.convert import pres_to_msl
from jasper.helpers import get_stations
from jasper.schema import hourly_national
# General configuration
THREADS = 8 # Number of threads for parallel processing
# Base URL of ECCC interface
BASE_URL = (
"https://climate.weather.gc.ca/climate_data/bulk_data_e.html"
+ "?format=csv&time=UTC&timeframe=1&submit=Download+Data"
)
FIRST_YEAR = datetime.now().year - 1 # Start year
CURRENT_YEAR = datetime.now().year # Current year
STATIONS_PER_CYCLE = 1 # How many stations per cycle?
# Which parameters should be included?
PARAMETERS = {
"Temp (°C)": "temp",
"Rel Hum (%)": "rhum",
"Precip. Amount (mm)": "prcp",
"Wind Dir (10s deg)": "wdir",
"Wind Spd (km/h)": "wspd",
"Stn Press (kPa)": "pres",
}
# Create Jasper instance
jsp = Jasper("import.eccc.hourly.national")
def load(station: str, year: int, month: int) -> pd.DataFrame:
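    # NOTE: the body of load() is truncated in this extract. The two lines
    # below are only a hedged sketch of a possible implementation; the
    # stationID/Year/Month query parameters are assumptions about the ECCC
    # bulk-data interface and are not taken from this file.
    url = f"{BASE_URL}&stationID={station}&Year={year}&Month={month}"
    return pd.read_csv(url)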
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import LeaveOneOut
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import pandas as pd
import seaborn as sn
from sklearn.linear_model import LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
class TrainValTestModel:
# create object storing path of data
def __init__(self, X_train, X_val, X_test, y_train, y_val, y_test, model_name, cross_val=False):
# X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=.2, random_state=0)
self.scaler = StandardScaler()
self.scaler.fit(X_train)
self.X_train = self.scaler.transform(X_train)
self.X_val = self.scaler.transform(X_val)
self.X_test = self.scaler.transform(X_test)
self.y_train = y_train
self.y_val = y_val
self.y_test = y_test
self.model_name = model_name
if cross_val == True:
best_params = self.tuning_hyperparameter()
self.best_params = best_params
self.clf = RandomForestClassifier(bootstrap=best_params['bootstrap'],
max_depth=best_params['max_depth'],
max_features=best_params['max_features'],
min_samples_leaf=best_params['min_samples_leaf'],
min_samples_split=best_params['min_samples_split'],
n_estimators=best_params['n_estimators'])
self.clf.fit(self.X_train, self.y_train)
else:
self.clf = self.get_model(model_name, self.X_train, self.y_train)
self.fpr, self.tpr, self.thrshd_roc = self.get_fpr_tpr()
# bulid model
def get_model(self, model_name, X_train, y_train):
# logistic regression
if model_name == 'LR':
clf = LogisticRegression(solver='lbfgs')
# random forest
elif model_name == 'RF':
clf = RandomForestClassifier(max_depth=2, random_state=0)
# C-Support Vector Classification
elif model_name == 'GB':
            clf = GradientBoostingClassifier(random_state=0)
clf.fit(X_train, y_train)
return clf
def tuning_hyperparameter(self):
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
def my_scorer(clf, X, y_true):
y_pred_proba = clf.predict_proba(X)
y_pred = np.where(y_pred_proba > 0.5, 1, 0)
error = np.sum(np.logical_and(y_pred != y_true, y_pred == 1)) / np.count_nonzero(y_true == 0)
return error
def fp(y_true, y_pred): return confusion_matrix(y_true, y_pred)[0, 1]
score = make_scorer(fp)
rf = RandomForestClassifier()
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, scoring=score, n_iter=100, cv=4, verbose=2, random_state=42, n_jobs=-1)
# Fit the random search model
rf_random.fit(self.X_train, self.y_train)
return rf_random.best_params_
def get_fpr_tpr(self):
prob_on_val = self.clf.predict_proba(self.X_val)[:,1]
fpr, tpr, thrshd_roc = metrics.roc_curve(self.y_val, prob_on_val, pos_label=1)
return fpr, tpr, thrshd_roc
# see the metrics of model
def get_metrics(self, thresh=None):
if thresh == None:
p = 0.5
else:
p = thresh
pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:,1])
y_pred = pred_proba_df.applymap(lambda x: 1 if x>p else 0).to_numpy().reshape((pred_proba_df.shape[0]))
print("%s:\n%s\n" % (self.model_name,
metrics.classification_report(self.y_test, y_pred)))
# get the indices of important features
def get_important_feature(self):
# logistic regression
if self.model_name == 'LR':
importance = self.clf.coef_[0]
# random forest
elif self.model_name == 'RF':
importance = self.clf.feature_importances_
# gradient boosting
elif self.model_name == 'GB':
importance = self.clf.feature_importances_
return importance
# false-positive rate
def test_false_positive(self):
# choose threshold
pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:,1])
threshold_list = [0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,.7,.75,.8,.85,.9, .95,.99]
for i in threshold_list:
print ('\n******** For i = {} ******'.format(i))
y_test_pred = pred_proba_df.applymap(lambda x: 1 if x>i else 0).to_numpy().reshape( (pred_proba_df.shape[0]))
dataset = {'y_Actual': self.y_test,
'y_Predicted': y_test_pred
}
df = pd.DataFrame(dataset, columns=['y_Actual','y_Predicted'])
confusion_matrix = pd.crosstab(df['y_Actual'], df['y_Predicted'], rownames= ['Actual'], colnames=['Predicted'])
plt.show()
sn.heatmap(confusion_matrix, annot=True)
# get the index of false-positive image
def false_positive_index(self, clf, X_test, y_test, threshold):
pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:,1])
y_test_pred = pred_proba_df.applymap(lambda x: 1 if x>threshold else 0).to_numpy().reshape( (pred_proba_df.shape[0]))
false_positives = np.logical_and(y_test != y_test_pred, y_test_pred == 1)
return np.arange(len(y_test))[false_positives]
# get the index of false-negtive image
def false_negtive_index(self, clf, X_test, y_test, threshold):
pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:,1])
y_test_pred = pred_proba_df.applymap(lambda x: 1 if x>threshold else 0).to_numpy().reshape( (pred_proba_df.shape[0]))
false_negtives = np.logical_and(y_test != y_test_pred, y_test_pred == 0)
return np.arange(len(y_test))[false_negtives]
class LeaveOneOutModel:
# create object storing path of data
def __init__(self, X_train, X_test, y_train, y_test, model_name):
self.scaler = StandardScaler()
self.scaler.fit(X_train)
self.X_train = self.scaler.transform(X_train)
self.X_test = self.scaler.transform(X_test)
self.y_train = y_train
self.y_test = y_test
self.model_name = model_name
self.bst_thresh, self._y_prob, self.fpr, self.tpr, self.thrshd_roc = self.leave_one_out_cv_v1(self.X_train, self.y_train, self.model_name)
self.clf = self.get_model(model_name, self.X_train, self.y_train)
def leave_one_out_cv_v0(self, X, y, model_name):
# choose threshold
threshold_list = np.arange(0.01, 1, 0.01)
score = np.zeros(threshold_list.shape)
test_num = len(X)
TP = np.zeros(threshold_list.shape)
FN = np.zeros(threshold_list.shape)
FP = np.zeros(threshold_list.shape)
TN = np.zeros(threshold_list.shape)
for i in range(len(threshold_list)):
loo = LeaveOneOut()
# leave one out loop
for _train_index, _test_index in loo.split(X):
_X_train, _X_test = X[_train_index], X[_test_index]
_y_train, _y_test = y[_train_index], y[_test_index]
clf = self.get_model(model_name, _X_train, _y_train)
pred_proba_df = clf.predict_proba(_X_test)[:,1]
if _y_test == 0:
if pred_proba_df <= threshold_list[i]:
score[i] += 1 / test_num
TN[i] += 1
else:
                        FP[i] += 1  # actual 0 predicted positive: false positive
elif _y_test == 1:
if pred_proba_df > threshold_list[i]:
score[i] += 1 / test_num
TP[i] += 1
else:
                        FN[i] += 1  # actual 1 predicted negative: false negative
# compute ROC
# ######################
# have error when denominator == 0
TPR = TP / (TP + FN)
        FPR = FP / (FP + TN)
# get the threshold of best score
threshold = threshold_list[np.argmax(score)]
return threshold, TPR, FPR
def leave_one_out_cv_v1(self, X, y, model_name):
# choose threshold
threshold_list = np.arange(0.01, 1, 0.01)
score = np.zeros(threshold_list.shape)
test_num = len(X)
_y_prob = np.zeros(len(X))
loo = LeaveOneOut()
# leave one out loop
for _train_index, _test_index in loo.split(X):
_X_train, _X_test = X[_train_index], X[_test_index]
_y_train, _y_test = y[_train_index], y[_test_index]
clf = self.get_model(model_name, _X_train, _y_train)
pred_proba_df = clf.predict_proba(_X_test)[:,1]
_y_prob[_test_index] = pred_proba_df
for i in range(len(threshold_list)):
if _y_test == 0 and pred_proba_df <= threshold_list[i]:
score[i] += 1 / test_num
elif _y_test == 1 and pred_proba_df > threshold_list[i]:
score[i] += 1 / test_num
# get the threshold of best score
threshold = threshold_list[np.argmax(score)]
fpr, tpr, thrshd_roc = metrics.roc_curve(y, _y_prob, pos_label=1)
# fpr, tpr, thrshd_roc = None, None, None
return threshold, _y_prob, fpr, tpr, thrshd_roc
# bulid model
def get_model(self, model_name, X_train, y_train):
# logistic regression
if model_name == 'LR':
clf = LogisticRegression(solver='lbfgs')
# random forest
elif model_name == 'RF':
clf = RandomForestClassifier(max_depth=2, random_state=0)
# C-Support Vector Classification
elif model_name == 'GB':
            clf = GradientBoostingClassifier(random_state=0)
clf.fit(X_train, y_train)
return clf
# see the metrics of model
def get_metrics(self, thresh=None):
if thresh == None:
p = self.bst_thresh
else:
p = thresh
pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:,1])
Y_pred = pred_proba_df.applymap(lambda x: 1 if x>p else 0).to_numpy().reshape((pred_proba_df.shape[0]))
print("%s:\n%s\n" % (self.model_name,
metrics.classification_report(self.y_test, Y_pred)))
return 0
# get the indices of important features
def get_important_feature(self):
# logistic regression
if self.model_name == 'LR':
importance = self.clf.coef_[0]
# random forest
elif self.model_name == 'RF':
importance = self.clf.feature_importances_
return importance
# false-positive rate
def test_false_positive(self):
# choose threshold
pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:,1])
threshold_list = [0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,.7,.75,.8,.85,.9, .95,.99]
for i in threshold_list:
print ('\n******** For i = {} ******'.format(i))
y_test_pred = pred_proba_df.applymap(lambda x: 1 if x>i else 0).to_numpy().reshape( (pred_proba_df.shape[0]))
dataset = {'y_Actual': self.y_test,
'y_Predicted': y_test_pred
}
            df = pd.DataFrame(dataset, columns=['y_Actual','y_Predicted'])
import pandas as pd
import ConnectToGbase
import numpy as np
from sklearn.cluster import KMeans
from sklearn import metrics
OD_UPNUM_TABLENAME = "rmdssdb.ttrms_upnum_from_statistics_result"
HOLIDAY_FLAG_TABLENAME="rmdssdb.d_period_holiday"
#OD_CODE=[['KNH','AOH'],['AOH','KNH'],['LJP','VNP'],['VNP','LJP'],['NKH','AOH'],['AOH','NKH'],['UUH','NKH'],['NKH','UUH'],['JGK','VNP'],['VNP','NKH'],['AOH','VNP'],['VNP','AOH']]
OD_CODE=[['VNP','AOH']]
class UpNumClassify:
def __init__(self,lst_od_comb,str_start_date,str_end_date,split_n = 4,n_var = 1):
self.gbase = ConnectToGbase.ConnetcToGbase()
self.str_start_date = str_start_date
self.str_end_date = str_end_date
self.split_n = split_n
self.n_var = n_var
self.lst_od_comb = lst_od_comb
self.__df_classify_result = None
self.__col_names = ['start_depart_date','end_depart_date','from_station_telecode','to_station_telecode','train_no','start_time_int','tra_time','cluster_label']
self.__default_cluster_n = 5
def __grap_upnum_on_train_code(self,from_station_telecode,to_station_telecode,train_no):
sql = f"select A.depart_date,sum(up_num) from "+OD_UPNUM_TABLENAME+" A ,"+HOLIDAY_FLAG_TABLENAME+" B "+f" where B.solar_day=A.depart_date and length(peak_name)<1 and train_no=\'{train_no}\' and depart_date>=\'{self.str_start_date}\' and depart_date<=\'{self.str_end_date}\' and from_station_telecode=\'{from_station_telecode}\' and to_station_telecode=\'{to_station_telecode}\' group by A.depart_date order by A.depart_date"
rows = self.gbase.getManyRows(sql)
lst_up_num = [int(row[1]) for row in rows]
return lst_up_num
def __grap_distinct_train_code(self,from_station_telecode,to_station_telecode):
sql = f"select distinct train_no,cast(substring(start_time,1,2) as int)+round(cast(substring(start_time,3,2) as int)/60,1) as start_time_int,tra_time from "+OD_UPNUM_TABLENAME+" A ,"+HOLIDAY_FLAG_TABLENAME+" B "+f" where B.solar_day=A.depart_date and tra_time is not null and length(peak_name)<1 and depart_date>=\'{self.str_start_date}\' and depart_date<=\'{self.str_end_date}\' and from_station_telecode=\'{from_station_telecode}\' and to_station_telecode=\'{to_station_telecode}\' "
rows = self.gbase.getManyRows(sql)
lst_train_detail = [[str(row[0]).strip(),float(row[1]),float(row[2])] for row in rows]
print(lst_train_detail)
return lst_train_detail
def __split_upnum_range(self):
lst_data_sources = []
for i,val in enumerate(self.lst_od_comb):
lst_train_detail = self.__grap_distinct_train_code(val[0],val[-1])
lst_res = []
lst_data_sources_tmp = []
for train_detail in lst_train_detail:
lst_up_num = self.__grap_upnum_on_train_code(val[0],val[-1],train_detail[0])
if lst_up_num :
avg_val = np.average(lst_up_num)
var_val = np.var(lst_up_num)**0.5
lst_res.append([avg_val+self.n_var*var_val,avg_val-self.n_var*var_val])
lst_data_sources_tmp.append([self.str_start_date,self.str_end_date,val[0],val[-1],train_detail[0],train_detail[1],train_detail[2]])
if len(lst_res) > self.__default_cluster_n:
kmeans = KMeansAlg(lst_res,self.__default_cluster_n)
best_labels = kmeans.KMAlg()
for j,label_val in enumerate(best_labels):
lst_data_sources_tmp[j].append(label_val)
else:
continue
lst_data_sources = lst_data_sources_tmp + lst_data_sources
        self.__df_classify_result = pd.DataFrame(lst_data_sources, columns=self.__col_names)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import os
import six
import json
import shutil
import sqlite3
import pandas as pd
import gramex.cache
from io import BytesIO
from lxml import etree
from nose.tools import eq_, ok_
from gramex import conf
from gramex.http import BAD_REQUEST, FOUND
from gramex.config import variables, objectpath, merge
from orderedattrdict import AttrDict, DefaultAttrDict
from pandas.util.testing import assert_frame_equal as afe
from . import folder, TestGramex, dbutils, tempfiles
xlsx_mime_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
class TestFormHandler(TestGramex):
sales = gramex.cache.open(os.path.join(folder, 'sales.xlsx'), 'xlsx')
@classmethod
def setUpClass(cls):
dbutils.sqlite_create_db('formhandler.db', sales=cls.sales)
@classmethod
def tearDownClass(cls):
try:
dbutils.sqlite_drop_db('formhandler.db')
except OSError:
pass
def check_filter(self, url, df=None, na_position='last', key=None):
# Modelled on testlib.test_data.TestFilter.check_filter
def eq(args, expected):
result = self.get(url, params=args).json()
actual = pd.DataFrame(result[key] if key else result)
expected.index = actual.index
if len(expected) > 0:
afe(actual, expected, check_like=True)
sales = self.sales if df is None else df
eq({}, sales)
eq({'देश': ['भारत']},
sales[sales['देश'] == 'भारत'])
eq({'city': ['Hyderabad', 'Coimbatore']},
sales[sales['city'].isin(['Hyderabad', 'Coimbatore'])])
eq({'product!': ['Biscuit', 'Crème']},
sales[~sales['product'].isin(['Biscuit', 'Crème'])])
eq({'city>': ['Bangalore'], 'city<': ['Singapore']},
sales[(sales['city'] > 'Bangalore') & (sales['city'] < 'Singapore')])
eq({'city>~': ['Bangalore'], 'city<~': ['Singapore']},
sales[(sales['city'] >= 'Bangalore') & (sales['city'] <= 'Singapore')])
eq({'city~': ['ore']},
sales[sales['city'].str.contains('ore')])
eq({'product': ['Biscuit'], 'city': ['Bangalore'], 'देश': ['भारत']},
sales[(sales['product'] == 'Biscuit') & (sales['city'] == 'Bangalore') &
(sales['देश'] == 'भारत')])
eq({'city!~': ['ore']},
sales[~sales['city'].str.contains('ore')])
eq({'sales>': ['100'], 'sales<': ['1000']},
sales[(sales['sales'] > 100) & (sales['sales'] < 1000)])
eq({'growth<': [0.5]},
sales[sales['growth'] < 0.5])
eq({'sales>': ['100'], 'sales<': ['1000'], 'growth<': ['0.5']},
sales[(sales['sales'] > 100) & (sales['sales'] < 1000) & (sales['growth'] < 0.5)])
eq({'देश': ['भारत'], '_sort': ['sales']},
sales[sales['देश'] == 'भारत'].sort_values('sales', na_position=na_position))
eq({'product<~': ['Biscuit'], '_sort': ['-देश', '-growth']},
sales[sales['product'] == 'Biscuit'].sort_values(
['देश', 'growth'], ascending=[False, False], na_position=na_position))
eq({'देश': ['भारत'], '_offset': ['4'], '_limit': ['8']},
sales[sales['देश'] == 'भारत'].iloc[4:12])
cols = ['product', 'city', 'sales']
eq({'देश': ['भारत'], '_c': cols},
sales[sales['देश'] == 'भारत'][cols])
ignore_cols = ['product', 'city']
eq({'देश': ['भारत'], '_c': ['-' + c for c in ignore_cols]},
sales[sales['देश'] == 'भारत'][[c for c in sales.columns if c not in ignore_cols]])
# Non-existent column does not raise an error for any operation
for op in ['', '~', '!', '>', '<', '<~', '>', '>~']:
eq({'nonexistent' + op: ['']}, sales)
# Non-existent sorts do not raise an error
eq({'_sort': ['nonexistent', 'sales']},
sales.sort_values('sales', na_position=na_position))
# Non-existent _c does not raise an error
eq({'_c': ['nonexistent', 'sales']}, sales[['sales']])
# Invalid limit or offset raise an error
eq_(self.get(url, params={'_limit': ['abc']}).status_code, BAD_REQUEST)
eq_(self.get(url, params={'_offset': ['abc']}).status_code, BAD_REQUEST)
# Check if metadata is returned properly
def meta_headers(url, params):
r = self.get(url, params=params)
result = DefaultAttrDict(AttrDict)
for header_name, value in r.headers.items():
name = header_name.lower()
if name.startswith('fh-'):
parts = name.split('-')
dataset_name, key = '-'.join(parts[1:-1]), parts[-1]
result[dataset_name][key] = json.loads(value)
return result
header_key = 'data' if key is None else key
headers = meta_headers(url, {'_meta': 'y'})[header_key]
eq_(headers.offset, 0)
eq_(headers.limit, conf.handlers.FormHandler.default._limit)
# There may be some default items pass as ignored or sort or filter.
# Just check that this is a list
ok_(isinstance(headers.filters, list))
ok_(isinstance(headers.ignored, list))
ok_(isinstance(headers.sort, list))
if 'count' in headers:
eq_(headers.count, len(sales))
headers = meta_headers(url, {
'_meta': 'y',
'देश': 'USA',
'c': ['city', 'product', 'sales'],
'_sort': '-sales',
'_limit': 10,
'_offset': 3
})[header_key]
ok_(['देश', '', ['USA']] in headers.filters)
ok_(['c', ['city', 'product', 'sales']] in headers.ignored)
ok_(['sales', False] in headers.sort)
ok_(headers.offset, 3)
ok_(headers.limit, 10)
if 'count' in headers:
eq_(headers.count, (sales['देश'] == 'USA').sum())
def eq(self, url, expected):
out = self.get(url).content
actual = pd.read_csv(BytesIO(out), encoding='utf-8')
expected.index = range(len(expected))
afe(actual, expected, check_column_type=six.PY3)
def test_file(self):
self.check_filter('/formhandler/file', na_position='last')
self.check_filter('/formhandler/url', na_position='last')
self.check_filter('/formhandler/file-multi', na_position='last', key='big',
df=self.sales[self.sales['sales'] > 100])
self.check_filter('/formhandler/file-multi', na_position='last', key='by-growth',
df=self.sales.sort_values('growth'))
def test_sqlite(self):
self.check_filter('/formhandler/sqlite', na_position='first')
self.check_filter('/formhandler/sqlite-multi', na_position='last', key='big',
df=self.sales[self.sales['sales'] > 100])
self.check_filter('/formhandler/sqlite-multi', na_position='last', key='by-growth',
df=self.sales.sort_values('growth'))
self.check_filter('/formhandler/sqlite-multi', na_position='last', key='big-by-growth',
df=self.sales[self.sales['sales'] > 100].sort_values('growth'))
self.check_filter('/formhandler/sqlite-queryfunction', na_position='last')
self.check_filter('/formhandler/sqlite-queryfunction?ct=Hyderabad&ct=Coimbatore',
na_position='last',
df=self.sales[self.sales['city'].isin(['Hyderabad', 'Coimbatore'])])
def test_mysql(self):
dbutils.mysql_create_db(variables.MYSQL_SERVER, 'test_formhandler', sales=self.sales)
try:
self.check_filter('/formhandler/mysql', na_position='first')
finally:
dbutils.mysql_drop_db(variables.MYSQL_SERVER, 'test_formhandler')
def test_postgres(self):
dbutils.postgres_create_db(variables.POSTGRES_SERVER, 'test_formhandler', sales=self.sales)
try:
self.check_filter('/formhandler/postgres', na_position='last')
finally:
dbutils.postgres_drop_db(variables.POSTGRES_SERVER, 'test_formhandler')
def test_default(self):
cutoff, limit = 50, 2
self.eq('/formhandler/default', self.sales[self.sales['sales'] > cutoff].head(limit))
def test_function(self):
self.eq('/formhandler/file-function?col=sales&_format=csv', self.sales[['sales']])
self.eq('/formhandler/file-function?col=देश&col=product&_format=csv',
self.sales[['देश', 'product']])
def test_modify(self):
self.eq('/formhandler/modify', self.sales.sum(numeric_only=True).to_frame().T)
def test_modify_multi(self):
city = self.sales.groupby('city')['sales'].sum().reset_index()
city['rank'] = city['sales'].rank()
big = self.sales[self.sales['sales'] > 100]
self.eq('/formhandler/modify-multi', big.merge(city, on='city'))
def test_prepare(self):
self.eq('/formhandler/prepare', self.sales[self.sales['product'] == 'Biscuit'])
def test_download(self):
# Modelled on testlib.test_data.TestDownload
big = self.sales[self.sales['sales'] > 100]
by_growth = self.sales.sort_values('growth')
big.index = range(len(big))
by_growth.index = range(len(by_growth))
out = self.get('/formhandler/file?_format=html')
# Note: In Python 2, pd.read_html returns .columns.inferred_type=mixed
        # instead of unicode. So check column type only in PY3, not PY2
afe(pd.read_html(out.content, encoding='utf-8')[0], self.sales, check_column_type=six.PY3)
eq_(out.headers['Content-Type'], 'text/html;charset=UTF-8')
eq_(out.headers.get('Content-Disposition'), None)
out = self.get('/formhandler/file-multi?_format=html')
result = pd.read_html(BytesIO(out.content), encoding='utf-8')
afe(result[0], big, check_column_type=six.PY3)
afe(result[1], by_growth, check_column_type=six.PY3)
eq_(out.headers['Content-Type'], 'text/html;charset=UTF-8')
eq_(out.headers.get('Content-Disposition'), None)
out = self.get('/formhandler/file?_format=xlsx')
afe(pd.read_excel(BytesIO(out.content)), self.sales)
eq_(out.headers['Content-Type'], xlsx_mime_type)
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.xlsx')
out = self.get('/formhandler/file-multi?_format=xlsx')
result = pd.read_excel(BytesIO(out.content), sheet_name=None)
afe(result['big'], big)
afe(result['by-growth'], by_growth)
eq_(out.headers['Content-Type'], xlsx_mime_type)
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.xlsx')
out = self.get('/formhandler/file?_format=csv')
ok_(out.content.startswith(''.encode('utf-8-sig')))
afe(pd.read_csv(BytesIO(out.content), encoding='utf-8'), self.sales)
eq_(out.headers['Content-Type'], 'text/csv;charset=UTF-8')
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.csv')
out = self.get('/formhandler/file-multi?_format=csv')
lines = out.content.splitlines(True)
eq_(lines[0], 'big\n'.encode('utf-8-sig'))
actual = pd.read_csv(BytesIO(b''.join(lines[1:len(big) + 2])), encoding='utf-8')
afe(actual, big)
eq_(lines[len(big) + 3], 'by-growth\n'.encode('utf-8'))
actual = pd.read_csv(BytesIO(b''.join(lines[len(big) + 4:])), encoding='utf-8')
afe(actual, by_growth)
eq_(out.headers['Content-Type'], 'text/csv;charset=UTF-8')
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.csv')
for fmt in ['csv', 'html', 'json', 'xlsx']:
out = self.get('/formhandler/file?_format=%s&_download=test.%s' % (fmt, fmt))
eq_(out.headers['Content-Disposition'], 'attachment;filename=test.%s' % fmt)
out = self.get('/formhandler/file-multi?_format=%s&_download=test.%s' % (fmt, fmt))
eq_(out.headers['Content-Disposition'], 'attachment;filename=test.%s' % fmt)
@staticmethod
def copy_file(source, target):
target = os.path.join(folder, target)
source = os.path.join(folder, source)
shutil.copyfile(source, target)
tempfiles[target] = target
return target
def call(self, url, args, method, headers):
r = self.check('/formhandler/edits-' + url, data=args, method=method, headers=headers)
meta = r.json()
# meta has 'ignored' with list of ignored columns
ok_(['x', args.get('x', [1])] in objectpath(meta, 'data.ignored'))
# meta has 'filters' for PUT and DELETE. It is empty for post
if method.lower() == 'post':
eq_(objectpath(meta, 'data.filters'), [])
else:
ok_(isinstance(objectpath(meta, 'data.filters'), list))
return r
def check_edit(self, method, source, args, count):
# Edits the correct count of records, returns empty value and saves to file
target = self.copy_file('sales.xlsx', 'sales-edits.xlsx')
self.call('xlsx-' + source, args, method, {'Count-Data': str(count)})
result = gramex.cache.open(target)
# Check result. TODO: check that the values are correctly added
if method == 'delete':
eq_(len(result), len(self.sales) - count)
elif method == 'post':
eq_(len(result), len(self.sales) + count)
elif method == 'put':
eq_(len(result), len(self.sales))
target = os.path.join(folder, 'formhandler-edits.db')
dbutils.sqlite_create_db(target, sales=self.sales)
tempfiles[target] = target
self.call('sqlite-' + source, args, method, {'Count-Data': str(count)})
# Check result. TODO: check that the values are correctly added
con = sqlite3.connect(target)
result = pd.read_sql('SELECT * FROM sales', con)
# TODO: check that the values are correctly added
if method == 'delete':
eq_(len(result), len(self.sales) - count)
elif method == 'post':
eq_(len(result), len(self.sales) + count)
elif method == 'put':
eq_(len(result), len(self.sales))
def test_invalid_edit(self):
self.copy_file('sales.xlsx', 'sales-edits.xlsx')
for method in ['delete', 'put']:
# Editing with no ID columns defined raises an error
self.check('/formhandler/file?city=A&product=B', method=method, code=400)
# Edit record without ID columns in args raises an error
self.check('/formhandler/edits-xlsx-multikey', method=method, code=400)
self.check('/formhandler/edits-xlsx-singlekey', method=method, code=400)
def test_edit_singlekey(self):
# Operations with a single key works
self.check_edit('post', 'singlekey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Crème'],
'sales': ['100'],
'growth': ['0.32'],
}, count=1)
self.check_edit('put', 'singlekey', {
'sales': ['513.7'],
'city': [123],
'product': ['abc'],
}, count=1)
# Delete with single ID as primary key works
self.check_edit('delete', 'singlekey', {
'sales': ['513.7']
}, count=1)
def test_edit_multikey_single_value(self):
# POST single value
self.check_edit('post', 'multikey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Alpha'],
'sales': ['100'],
}, count=1)
self.check_edit('put', 'multikey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Eggs'],
'sales': ['100'],
'growth': ['0.32'],
}, count=1)
self.check_edit('delete', 'multikey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Crème'],
}, count=1)
def test_edit_multikey_multi_value(self):
self.check_edit('post', 'multikey', {
'देश': ['भारत', 'भारत', 'भारत'],
'city': ['Bangalore', 'Bangalore', 'Bangalore'],
'product': ['Alpha', 'Beta', 'Gamma'],
'sales': ['100', '', '300'],
'growth': ['0.32', '0.50', '0.12'],
# There is a default ?x=1. Override that temporarily
'x': ['', '', '']
}, count=3)
# NOTE: PUT behaviour for multi-value is undefined
self.check_edit('delete', 'multikey', {
'देश': ['भारत', 'भारत', 'भारत', 'invalid'],
'city': ['Bangalore', 'Bangalore', 'Bangalore', 'invalid'],
'product': ['芯片', 'Eggs', 'Biscuit', 'invalid'],
}, count=3)
def test_edit_redirect(self):
self.copy_file('sales.xlsx', 'sales-edits.xlsx')
# redirect: affects POST, PUT and DELETE
for method in ['post', 'put', 'delete']:
r = self.get('/formhandler/edits-xlsx-redirect', method=method, data={
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Eggs'],
'sales': ['100'],
}, allow_redirects=False)
eq_(r.status_code, FOUND)
ok_('Count-Data' in r.headers) # Any value is fine, we're not checking that
eq_(r.headers['Location'], '/redirected')
# GET is not redirected
r = self.get('/formhandler/edits-xlsx-redirect', allow_redirects=False)
ok_('Location' not in r.headers)
def test_edit_multidata(self):
csv_path = os.path.join(folder, 'sales-edits.csv')
self.sales.to_csv(csv_path, index=False, encoding='utf-8')
tempfiles[csv_path] = csv_path
dbutils.mysql_create_db(variables.MYSQL_SERVER, 'test_formhandler', sales=self.sales)
try:
row = {'देश': 'भारत', 'city': 'X', 'product': 'Q', 'growth': None}
self.check('/formhandler/edits-multidata', method='post', data={
'csv:देश': ['भारत'],
'csv:city': ['X'],
'csv:product': ['Q'],
'csv:sales': ['10'],
'sql:देश': ['भारत'],
'sql:city': ['X'],
'sql:product': ['Q'],
'sql:sales': ['20'],
}, headers={
'count-csv': '1',
'count-sql': '1',
})
data = self.check('/formhandler/edits-multidata').json()
eq_(data['csv'][-1], merge(row, {'sales': 10}))
eq_(data['sql'][-1], merge(row, {'sales': 20}))
eq_(len(data['csv']), len(self.sales) + 1)
eq_(len(data['sql']), len(self.sales) + 1)
self.check('/formhandler/edits-multidata', method='put', data={
'csv:city': ['X'],
'csv:product': ['Q'],
'csv:sales': ['30'],
'sql:city': ['X'],
'sql:product': ['Q'],
'sql:sales': ['40'],
}, headers={
'count-csv': '1',
'count-sql': '1',
})
data = self.check('/formhandler/edits-multidata').json()
eq_(data['csv'][-1], merge(row, {'sales': 30}))
eq_(data['sql'][-1], merge(row, {'sales': 40}))
eq_(len(data['csv']), len(self.sales) + 1)
eq_(len(data['sql']), len(self.sales) + 1)
self.check('/formhandler/edits-multidata', method='delete', data={
'csv:city': ['X'],
'csv:product': ['Q'],
'sql:city': ['X'],
'sql:product': ['Q'],
}, headers={
'count-csv': '1',
'count-sql': '1',
})
data = self.check('/formhandler/edits-multidata').json()
eq_(len(data['csv']), len(self.sales))
eq_(len(data['sql']), len(self.sales))
finally:
dbutils.mysql_drop_db(variables.MYSQL_SERVER, 'test_formhandler')
def test_edit_multidata_modify(self):
csv_path = os.path.join(folder, 'sales-edits.csv')
self.sales.to_csv(csv_path, index=False, encoding='utf-8')
tempfiles[csv_path] = csv_path
dbutils.mysql_create_db(variables.MYSQL_SERVER, 'test_formhandler', sales=self.sales)
try:
row = {'देश': 'भारत', 'city': 'X', 'product': 'Q', 'growth': None}
result = self.check('/formhandler/edits-multidata-modify', method='post', data={
'csv:देश': ['भारत'],
'csv:city': ['X'],
'csv:product': ['Q'],
'csv:sales': ['10'],
'sql:देश': ['भारत'],
'sql:city': ['X'],
'sql:product': ['Q'],
'sql:sales': ['20'],
}, headers={
'count-csv': '1',
'count-sql': '1',
}).json()
eq_(result['csv']['modify'], 8)
eq_(result['modify'], 8)
data = self.check('/formhandler/edits-multidata').json()
eq_(data['csv'][-1], merge(row, {'sales': 10}))
eq_(data['sql'][-1], merge(row, {'sales': 20}))
eq_(len(data['csv']), len(self.sales) + 1)
eq_(len(data['sql']), len(self.sales) + 1)
finally:
dbutils.mysql_drop_db(variables.MYSQL_SERVER, 'test_formhandler')
def test_edit_json(self):
target = self.copy_file('sales.xlsx', 'sales-edits.xlsx')
target = os.path.join(folder, 'formhandler-edits.db')
dbutils.sqlite_create_db(target, sales=self.sales)
tempfiles[target] = target
for fmt in ('xlsx', 'sqlite'):
kwargs = {
'url': '/formhandler/edits-%s-multikey' % fmt,
'request_headers': {'Content-Type': 'application/json'},
}
# POST 2 records. Check that 2 records where added
self.check(method='post', data=json.dumps({
'देश': ['भारत', 'USA'],
'city': ['HYD', 'NJ'],
'product': ['खुश', 'खुश'],
'sales': [100, 200],
}), headers={'Count-Data': '2'}, **kwargs)
eq_(self.get(kwargs['url'], params={'product': 'खुश'}).json(), [
{'देश': 'भारत', 'city': 'HYD', 'product': 'खुश', 'sales': 100.0, 'growth': None},
{'देश': 'USA', 'city': 'NJ', 'product': 'खुश', 'sales': 200.0, 'growth': None},
])
# PUT a record. Check that the record was changed
self.check(method='put', data=json.dumps({
'city': ['HYD'],
'product': ['खुश'],
'sales': [300],
'growth': [0.3],
}), headers={'Count-Data': '1'}, **kwargs)
eq_(self.get(kwargs['url'], params={'city': 'HYD', 'product': 'खुश'}).json(), [
{'देश': 'भारत', 'city': 'HYD', 'product': 'खुश', 'sales': 300.0, 'growth': 0.3},
])
# DELETE 2 records one by one. Check that 2 records were deleted
self.check(method='delete', data=json.dumps({
'city': ['HYD'],
'product': ['खुश'],
}), headers={'Count-Data': '1'}, **kwargs)
self.check(method='delete', data=json.dumps({
'city': ['NJ'],
'product': ['खुश'],
}), headers={'Count-Data': '1'}, **kwargs)
eq_(self.get(kwargs['url'], params={'product': 'खुश'}).json(), [])
def test_chart(self):
r = self.get('/formhandler/chart', data={
'_format': 'svg',
'chart': 'barplot',
'x': 'देश',
'y': 'sales',
'dpi': 72,
'width': 500,
'height': 300,
})
tree = etree.fromstring(r.text.encode('utf-8'))
eq_(tree.get('viewBox'), '0 0 500 300')
# TODO: expand on test cases
# Check spec, data for vega, vega-lite, vegam formats
base = '/formhandler/chart?_format={}'
data = pd.DataFrame(self.get(base.format('json')).json())
for fmt in {'vega', 'vega-lite', 'vegam'}:
r = self.get(base.format(fmt))
var = json.loads(re.findall(r'}\)\((.*?)}\)', r.text)[-1] + '}')
var = var['spec']
if 'fromjson' in var:
df = var['fromjson'][0]['data']
var['fromjson'][0]['data'] = '__DATA__'
else:
df = var.pop('data')
df = (df[0] if isinstance(df, list) else df)['values']
yaml_path = os.path.join(folder, '{}.yaml'.format(fmt))
spec = gramex.cache.open(yaml_path, 'yaml')
afe(pd.DataFrame(df), data)
self.assertDictEqual(var, spec)
def test_headers(self):
self.check('/formhandler/headers', headers={
'X-JSON': 'ok', 'X-Base': 'ok', 'X-Root': 'ok'
})
def test_args(self):
# url: and sheet_name: accepts query formatting for files
url = '/formhandler/arg-url?path=sales&sheet=sales&ext=excel'
afe(pd.DataFrame(self.get(url).json()), self.sales, check_like=True)
url = '/formhandler/arg-url?path=sales&sheet=census'
census = gramex.cache.open(os.path.join(folder, 'sales.xlsx'), sheet_name='census')
afe(pd.DataFrame(self.get(url).json()), census, check_like=True)
# url: and table: accept query formatting for SQLAlchemy
url = '/formhandler/arg-table?db=formhandler&table=sales'
afe(pd.DataFrame(self.get(url).json()), self.sales, check_like=True)
# url: and table: accept query formatting for SQLAlchemy
# TODO: In Python 2, unicode keys don't work well on Tornado. So use safe keys
key, val = ('product', '芯片') if six.PY2 else ('देश', 'भारत')
url = '/formhandler/arg-query?db=formhandler&col=%s&val=%s' % (key, val)
actual = pd.DataFrame(self.get(url).json())
expected = self.sales[self.sales[key] == val]
expected.index = actual.index
afe(actual, expected, check_like=True)
# Files with ../ etc should be skipped
self.check('/formhandler/arg-url?path=../sales',
code=500, text='KeyError')
# Test that the ?skip= parameter is used to find the table.
self.check('/formhandler/arg-table?db=formhandler&table=sales&skip=ab',
code=500, text='NoSuchTableError')
# Spaces are ignored in SQLAlchemy query. So ?skip= will be a missing key
self.check('/formhandler/arg-table?db=formhandler&table=sales&skip=a b',
code=500, text='KeyError')
def test_path_arg(self):
url = '/formhandler/%s/formhandler/sales?group=product&col=city&val=Bangalore'
for sub_url in ['path_arg', 'path_kwarg']:
actual = pd.DataFrame(self.get(url % sub_url).json())
expected = self.sales[self.sales['city'] == 'Bangalore'].groupby('product')
expected = expected['sales'].sum().reset_index()
afe(actual, expected, check_like=True)
def test_date_comparison(self):
data = gramex.cache.open(os.path.join(folder, 'sales.xlsx'), 'xlsx', sheet_name='dates')
for dt in ('2018-01-10', '2018-01-20T15:34Z'):
url = '/formhandler/dates?date>=%s' % dt
r = self.get(url, params={'_format': 'json', '_meta': 'y'})
# Check ISO output
pd.to_datetime(pd.DataFrame(r.json())['date'], format='%Y-%m-%dT%H:%M:%S.%fZ')
actual = pd.read_excel(BytesIO(self.get(url, params={'_format': 'xlsx'}).content))
            expected = data[data['date'] > pd.to_datetime(dt)]
# The analyser
import pandas as pd
import matplotlib.pyplot as plt
import dill
import os
import numpy as np
from funcs import store_namespace
from funcs import load_namespace
import datetime
from matplotlib.font_manager import FontProperties
from matplotlib import rc
community = 'ResidentialCommunity'
sim_ids = ['MinEne_0-2']
model_id = 'R2CW_HP'
bldg_list = load_namespace(os.path.join('path to models', 'teaser_bldgs_residential'))
#
bldg_list = [bldg_list[0], bldg_list[1]]
print(bldg_list)
folder = 'results'
step = 300
nodynprice=0
mon = 'jan'
constr_folder = 'decentr_enemin_constr_'+mon
#bldg_list = bldg_list[0:1]
if mon == 'jan':
start = '1/7/2017 16:30:00'
end = '1/7/2017 19:00:00'
controlseq_time = '01/07/2017 16:55:00'
elif mon == 'mar':
start = '3/1/2017 16:30:00'
end = '3/1/2017 19:00:00'
controlseq_time = '03/01/2017 16:55:00'
elif mon=='nov':
start = '11/20/2017 16:30:00'
end = '11/20/2017 19:00:00'
controlseq_time = '11/20/2017 16:55:00'
sim_range = pd.date_range(start, end, freq = str(step)+'S')
simu_path = "path to simulation folder"
other_input = {}
price = {}
flex_cost = {}
ref_profile = {}
controlseq = {}
opt_control = {}
emutemps = {}
mpctemps = {}
opt_stats = {}
flex_down = {}
flex_up = {}
power = {}
for bldg in bldg_list:
building = bldg+'_'+model_id
for sim_id in sim_ids:
opt_stats[sim_id] = {}
controlseq[sim_id] = {}
mpctemps[sim_id] = {}
emutemps[sim_id] = {}
power[sim_id] = {}
for time_idx in sim_range:
time_idx = time_idx.strftime('%m/%d/%Y %H:%M:%S')
t = time_idx.replace('/','-').replace(':','-').replace(' ','-')
opt_stats[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'opt_stats_'+sim_id+'_'+t))
emutemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'emutemps_'+sim_id+'_'+t))
mpctemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'mpctemps_'+sim_id+'_'+t))
controlseq[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'controlseq_'+sim_id)+'_'+t)
power[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'power_'+sim_id)+'_'+t)
#flex_down[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_down'+sim_id))
#flex_up[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_up'+sim_id))
i=0
for sim_id in sim_ids:
if i == 0:
emutemps_df = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df.index = pd.to_datetime(emutemps_df.index)
emutemps_df.index = emutemps_df.index.shift(1, freq=str(step)+'S')
power_df = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df.index = pd.to_datetime(power_df.index)
opt_stats_df = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df.index = pd.to_datetime(opt_stats_df.index)
power_df.index = power_df.index.shift(1, freq=str(step)+'S')
else:
emutemps_df1 = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df1.index = pd.to_datetime(emutemps_df1.index)
emutemps_df1.index = emutemps_df1.index.shift(1, freq=str(step) + 'S')
emutemps_df = pd.concat([emutemps_df, emutemps_df1])
power_df1 = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df1.index = pd.to_datetime(power_df1.index)
power_df1.index = power_df1.index.shift(1, freq=str(step)+'S')
power_df = pd.concat([power_df, power_df1])
opt_stats_df1 = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df1.index = pd.to_datetime(opt_stats_df1.index)
        opt_stats_df = pd.concat([opt_stats_df, opt_stats_df1])
import os
from pathlib import Path
from time import ctime, perf_counter
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, check_cv
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.utils import shuffle, check_random_state
_intervals = (
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
# Code taken from https://stackoverflow.com/a/24542445/4553309
def _display_time(seconds, granularity=4):
if seconds < 60:
return f"{seconds:.2f} seconds"
result = []
for name, count in _intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
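# For illustration (not part of the original file): _display_time(90061)
# returns '1 day, 1 hour, 1 minute, 1 second', while values under a minute
# take the early return, e.g. _display_time(42.5) -> '42.50 seconds'.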
ROOT_DATASET_DIR = Path(__file__).parent.parent / 'datasets'
# TODO: Add support for downloading dataset
def load_data(data_name=None, data_path=None, stats=False):
if data_name is not None:
path = os.path.join(ROOT_DATASET_DIR, f"{data_name}.csv")
else:
path = data_path
try:
df = pd.read_csv(path, header=None)
except FileNotFoundError as e:
raise e
df = df.astype('float64')
data = df.values
X, Y = data[:, :-1], data[:, -1]
Y = LabelEncoder().fit_transform(Y)
X = MinMaxScaler().fit_transform(X)
if stats:
labels, freq = np.unique(Y, return_counts=True)
print(f"{X.shape}, {len(labels)}, {freq.min() / freq.max():.3f}\n")
return shuffle(X, Y, random_state=42)
# TODO: Support resuming inside cross_val_score, use Dask?
def compare(models: dict, datasets: list, cv, df_path=None, n_jobs=-1,
scoring='accuracy', random_state=None, verbose=True,
fit_params=None, **kwargs):
"""
Compare different methods across several datasets, with support for \
parallelization, reproducibility and automatic resumption. Output is \
a csv file where each row represents a dataset and each column \
represents a method/ algorithm. It's basically a wrapper around \
`sklearn.model_selection.cross_val_score`- check this for more details.
Note that support for resumption is somewhat limited, it can only \
recover output of (dataset, method) pair for whom computation is fully \
complete. In other words, if a 10-fold cross-validation is stopped \
after 5-fold, the results of that 5-fold is lost.
Parameters
--------------
models : dict
Keys are model name, values are scikit-learn API compatible classifiers.
datasets : list
A list of either `string`, denoting dataset names to be loaded with \
`load_data`, or a nested tuple of (name, (X, y)), denoting dataset \
name, features and labels respectively.
cv : int, cross-validation generator or an iterable
if int, no of folds to use in stratified k-fold
df_path : string, default=None
Path to (csv) file to store results- will be overwritten if already \
present.
scoring : string, or a scorer callable object / function with signature \
``scorer(estimator, X, y)`` which should return only a single value.
    n_jobs : int, default=-1
        Number of parallel CPU cores to use (-1 uses all available cores)
random_state : int, default=None
Set this value for reproducibility. Note that this will overwrite \
existing random state of methods even if it's already present.
verbose : Controls the verbosity level
fit_params : dict, default=None
Parameters to send to fit() method of each classifiers. Keys should be
classifier names, same as in `models`, and values themselves are ``dict``
of parameter-name, value pairs.
kwargs : Other parameters for ``cross_val_score``.
"""
rns = check_random_state(random_state)
cv = check_cv(cv)
cv.random_state = rns.randint(100)
seeds = iter(rns.randint(10 ** 8, size=len(models) * len(datasets)))
try:
df = pd.read_csv(df_path, index_col=0)
if verbose:
print("Result file found, resuming...") # Word 'resuming' is used in test
except (FileNotFoundError, ValueError):
        df = pd.DataFrame()
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/14 21:21
Desc: China - Hong Kong - macroeconomic indicators
https://data.eastmoney.com/cjsj/foreign_8_0.html
"""
import demjson
import pandas as pd
import requests
def marco_china_hk_cpi() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - China Hong Kong - consumer price index (CPI)
    https://data.eastmoney.com/cjsj/foreign_8_0.html
    :return: consumer price index data
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "0",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
    temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期'])
#!/usr/bin/env python
######### WORKFLOW DOCUMENTATION of FUNCTIONS #############################################
# First *InputArrays* to output 2 arrays (ppt value and xy values)
# Second *Run_IDW* for interpolation of the ppt-values, note has daughter classes
# Third *classify* classification of precipitation
# Fourth *interpolate_map* create of array for plotting, plot and export jpg
# Import functions
import os
import sys
import math
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib import colors
from scipy.spatial import cKDTree
from datetime import datetime
import earthpy as et
import earthpy.plot as ep
############### Function 1 Create Arrays from HydroMet Network Data #######################
def InputArrays(ppt_locations, flagged_ppt):
""" Takes measured values, calculates cumulative sum, joins them to
    a list of sample locations, and produces arrays for interpolation.
    Parameters
    ------------
    flagged_ppt: string
        name of csv file as time-series of values measured at each location,
        used to derive the cumulative sum
    ppt_locations: string
        name of csv file as list of point name, x and y coordinates as UTM
Returns
------------
ar_ppt_wks: np array
array of 3-week cumulative precipitation in inches
ppt_xy_list: np array
as list of x y coordinates for each derived sum
date1: date
at beginning of range of sum
date2: date
at end of range of sum
"""
ppt_xy = pd.read_csv(ppt_locations)
ppt_xy_sort = ppt_xy.sort_values(by=['pasture'], ascending=True)
df_ppt = pd.read_csv(flagged_ppt, parse_dates=['date'], delimiter = ",",
usecols = (5,6,8,10,11,12))
df_ppt.rename(columns={'raw.value': 'raw_value', 'raw.measurement' :
'raw_measurement'}, inplace=True)
df_ppt['date'] = pd.to_datetime( df_ppt['date'], format='%Y-%m-%d')
# get last date in the dataset
date2 = (df_ppt['date'].max())
    date1 = date2 - pd.offsets.Day(7)
import argparse
import json
import os
import subprocess
import sys
import numpy
import pandas
# sys.path.insert(0, '.')
# sys.path.insert(0, '..')
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from helpers.text import devectorize
from helpers.training import load_checkpoint
from modules.data.collates import LMCollate
from modules.data.datasets import SequenceDataset
from modules.data.samplers import BucketTokensSampler
from modules.data.utils import fix_paths
from modules.models import Seq2SeqTransformer, Seq2SeqRNN, RNNLM, TransformerLM
def prior_model_from_checkpoint(cp):
model_type = cp["config"]["model"].get("type", "rnn")
if model_type == "rnn":
prior_model = RNNLM
elif model_type == "transformer":
prior_model = TransformerLM
else:
raise NotImplementedError
prior = prior_model(len(cp['vocab']), **cp["config"]["model"])
prior.load_state_dict(cp["model"])
# due to a bug in PyTorch we cannot backpropagate through a model in eval
# mode. Therefore, we have to manually turn off the regularizations.
for name, module in prior.named_modules():
if isinstance(module, nn.Dropout):
module.p = 0
elif isinstance(module, nn.LSTM):
module.dropout = 0
elif isinstance(module, nn.GRU):
module.dropout = 0
return prior
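# Illustrative usage (hedged; the checkpoint path is a placeholder, not taken
# from this project):
#   cp = load_checkpoint("checkpoints/prior_lm.pt")
#   prior = prior_model_from_checkpoint(cp)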
def seq2seq_translate_ids(model,
data_loader,
vocab,
**kwargs):
"""
Translate greedily the data in the data_loader and return the token ids
"""
output_ids = []
device = next(model.parameters()).device
sos_id = vocab.SOS_id
eos_id = vocab.EOS_id
pad_id = vocab.PAD_id
beam = kwargs.get("beam_size", 1)
with torch.no_grad():
for batch in tqdm(data_loader, total=len(data_loader),
desc=f"Translating (beam={beam})..."):
batch = list(map(lambda x: x.to(device), batch))
if beam == 1:
_, dec = model.translate(batch[0], batch[2], sos_id, **kwargs)
output_ids.extend(dec["logits"].max(2)[1].tolist())
dec.clear()
del batch[:]
del batch, dec
else:
batch_ids = model.beam(batch[0], batch[2],
sos_id, eos_id, pad_id, **kwargs)
output_ids.extend(batch_ids)
return numpy.array(output_ids)
def seq2seq_output_ids_to_file(output_ids, trg_vocab, out_file):
"""
Devectorize and Detokenize the translated token ids and write the
translations to a text file
"""
output_tokens = devectorize(output_ids.tolist(),
trg_vocab.id2tok,
trg_vocab.EOS_id,
strip_eos=True,
pp=True)
with open(out_file, "w") as fo:
for sent in output_tokens:
text = trg_vocab.detokenize(sent)
fo.write(text + "\n")
def eval_nmt_checkpoint(checkpoint, device, beams=None, lm=None, fusion_a=None,
results=None, results_low=None):
if beams is None:
beams = [1, 5, 10]
_base, _file = os.path.split(checkpoint)
cp = load_checkpoint(checkpoint)
def score(dataset, beam_size) -> (float, float):
hyp_file = os.path.join(_base, f"hyps_{dataset}_beam-{beam_size}.txt")
src_file = cp["config"]["data"]["src"][f"{dataset}_path"]
ref_file = cp["config"]["data"]["trg"][f"{dataset}_path"]
src_file = fix_paths(src_file, "datasets")
ref_file = fix_paths(ref_file, "datasets")
fusion = cp["config"]["model"]["decoding"].get("fusion")
batch_tokens = max(10000 // beam_size, 1000)
if fusion is None and lm is not None and fusion_a is not None:
fusion = "shallow"
seq2seq_translate(checkpoint=cp,
src_file=src_file,
out_file=hyp_file,
beam_size=beam_size,
length_penalty=1,
lm=lm,
fusion=fusion,
fusion_a=fusion_a,
batch_tokens=batch_tokens,
device=device)
_mixed = compute_bleu_score(hyp_file, ref_file)
_lower = compute_bleu_score(hyp_file, ref_file, True)
return _mixed, _lower
if results is None:
results = {d: {k: None for k in beams} for d in ["val", "test"]}
if results_low is None:
results_low = {d: {k: None for k in beams} for d in ["val", "test"]}
for d in ["val", "test"]:
for k in beams:
try:
mixed, lower = score(d, k)
results[d][k] = mixed
results_low[d][k] = lower
except Exception as e:
print(e)
results[d][k] = None
results_low[d][k] = None
    text = pandas.DataFrame.from_dict(results)
import numpy as np
import pandas as pd
from scipy import stats
import sys, os, time, json
from pathlib import Path
import pickle as pkl
sys.path.append('../PreProcessing/')
sys.path.append('../Lib/')
sys.path.append('../Analyses/')
import sklearn.linear_model as lm
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import balanced_accuracy_score as bac
from joblib import Parallel, delayed
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.text import Text
import seaborn as sns
import analyses_table as AT
import TreeMazeFunctions as TMF
sns.set(style="whitegrid",font_scale=1,rc={
'axes.spines.bottom': False,
'axes.spines.left': False,
'axes.spines.right': False,
'axes.spines.top': False,
'axes.edgecolor':'0.5'})
def main(sePaths, doPlots=False, overwrite = False):
try:
dat = AT.loadSessionData(sePaths)
nUnits = dat['fitTable2'].shape[0]
# univariate analyses.
fn = sePaths['CueDesc_SegUniRes']
if ( (not fn.exists()) or overwrite):
CueDescFR_Dat, all_dat_spl = CueDesc_SegUniAnalysis(dat)
CueDescFR_Dat.to_csv(sePaths['CueDesc_SegUniRes'])
if doPlots:
plotCueVDes(CueDescFR_Dat,sePaths)
plotUnitRvL(CueDescFR_Dat,all_dat_spl,sePaths)
else:
CueDescFR_Dat = pd.read_csv(fn)
# decododer analyses
fn = sePaths['CueDesc_SegDecRes']
if ((not fn.exists()) or overwrite):
singCellDec,singCellDecSummary, popDec = CueDesc_SegDecAnalysis(dat)
singCellDec['se'] = sePaths['session']
singCellDecSummary['se'] = sePaths['session']
popDec['se'] = sePaths['session']
singCellDec.to_csv(fn)
singCellDecSummary.to_csv(sePaths['CueDesc_SegDecSumRes'])
popDec.to_csv(sePaths['PopCueDesc_SegDecSumRes'])
if doPlots:
f,_ = plotMultipleDecoderResults(singCellDecSummary)
fn = sePaths['CueDescPlots'] / ('DecResByUnit.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
f,_ = plotMultipleDecoderResults(popDec)
fn = sePaths['CueDescPlots'] / ('PopDecRes.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
for unit in np.arange(nUnits):
f,_ = plotMultipleDecoderResults(singCellDec[(singCellDec['unit']==unit)])
                    fn = sePaths['CueDescPlots'] / ('DecRes_UnitID-{}.jpeg'.format(unit))
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
else:
singCellDec = pd.read_csv(fn)
singCellDecSummary = pd.read_csv(sePaths['CueDesc_SegDecSumRes'])
popDec = pd.read_csv(sePaths['PopCueDesc_SegDecSumRes'])
return CueDescFR_Dat, singCellDec,singCellDecSummary, popDec
except:
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
return [],[],[],[]
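# Illustrative sketch (never called here): how main() is typically driven across multiple sessions.
# The structure of the session-path dictionaries is an assumption; real paths come from the
# project's session table utilities consumed by AT.loadSessionData.
def _example_run_all_sessions(all_session_paths, overwrite=False):
    summaries = []
    for sePaths in all_session_paths:
        _, _, singCellDecSummary, _ = main(sePaths, doPlots=False, overwrite=overwrite)
        if len(singCellDecSummary) > 0:
            summaries.append(singCellDecSummary)
    return pd.concat(summaries) if summaries else pd.DataFrame()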
def CueDesc_SegUniAnalysis(dat):
trDat = dat['TrialLongMat']
trConds = dat['TrialConds']
nCells = len(dat['ids']['cells'])
nMua = len(dat['ids']['muas'])
nUnits = nCells+nMua
# fixed variables (don't change with cell)
locs = TMF.ZonesNames
Trials = trConds[trConds['Good']].index.values
nTrials = len(Trials)
FeatIDs = {'A':[1],'Stem':[0,1,2],'Arm': [3,4]}
Segs = FeatIDs.keys()
HA = ['Home','SegA']
Stem = ['Home','SegA','Center']
L_Arm = ['SegE', 'I2', 'SegF', 'G3', 'SegG', 'G4']
R_Arm = ['SegB', 'I1', 'SegC', 'G1', 'SegD', 'G2']
# variable to be stored
#uni_LvR_Analyses = {'Stats':{'Cue':{},'Desc':{},'Cue_Desc':{}},'Mean':{'Cue':{},'Desc':{},'Cue_Desc':{}},'SD':{'Cue':{},'Desc':{},'Cue_Desc':{}} }
uni_LvR_Analyses = {'Cue':{'Stats':{},'Mean':{},'SD':{}},'Desc':{'Stats':{},'Mean':{},'SD':{}},'Cue_Desc':{'Stats':{},'Mean':{},'SD':{}}}
Conds = ['Cue','Desc','Cue_Desc']
dat_meas = ['Stats','Mean','SD']
all_dat_spl = {} # only used for plotting as it has overlapping data points; not necessary to store it.
for unitNum in np.arange(nUnits):
# splits of data per cell
dat_splits = {}
for k in ['Cue','Desc']:
dat_splits[k] = {}
for kk in FeatIDs.keys():
dat_splits[k][kk] = {}
dat_splits['Cue_Desc'] = {'Co_Arm':{},'L_Arm':{},'R_Arm':{}}
if unitNum==0:
for k in Conds:
for ii in dat_meas:
if ii=='Stats':
for jj in ['T','P','S']:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=dat_splits[k].keys())
else:
for jj in ['L','R']:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=dat_splits[k].keys())
if unitNum<nCells:
tt = dat['ids']['cells'][str(unitNum)][0]
cl = dat['ids']['cells'][str(unitNum)][1]
fr = dat['TrialFRLongMat']['cell_'+str(unitNum)]
#tR2 = dat['TrialModelFits']['testR2'][unitNum]
#selMod = dat['TrialModelFits']['selMod'][unitNum]
tR2 = dat['fitTable2']['testR2'][unitNum]
selMod = dat['fitTable2']['selMod'][unitNum]
else:
muaID = unitNum-nCells
tt = dat['ids']['muas'][str(muaID)][0]
cl = dat['ids']['muas'][str(muaID)][1]
fr = dat['TrialFRLongMat']['mua_'+str(muaID)]
tR2 = dat['fitTable2']['testR2'][unitNum]
selMod = dat['fitTable2']['selMod'][unitNum]
# get mean fr per trial per partition
mPartFRDat = pd.DataFrame(np.zeros((nTrials,3)),columns=FeatIDs)
cue = trConds.loc[Trials,'Cues'].values
desc = trConds.loc[Trials,'Desc'].values
cnt =0
for tr in Trials:
subset = (trDat['trID']==tr) & (trDat['IO']=='Out')
for k,v in FeatIDs.items():
mPartFRDat.loc[cnt,k]=np.nanmean(fr[subset].values[v])
cnt+=1
        # univariate cue and decision tests by maze part
LvR = {}
l = {}
r = {}
# First & Second analyses: Cue/Desc
k = 'Cue'
l[k] = cue=='L'
r[k] = cue=='R'
k = 'Desc'
l[k]=desc=='L'
r[k]=desc=='R'
for k in ['Cue','Desc']:
LvR[k] = pd.DataFrame(np.zeros((3,3)),index=Segs,columns=['T','P','S'])
for kk in Segs:
lfr = mPartFRDat[kk][l[k]]
rfr = mPartFRDat[kk][r[k]]
temp = stats.ttest_ind(lfr,rfr)
LvR[k].loc[kk,'T'] = temp[0]
LvR[k].loc[kk,'P'] = temp[1]
dat_splits[k][kk]['l'] = lfr.values
dat_splits[k][kk]['r'] = rfr.values
LvR[k]['S'] = getSigLevel(LvR[k]['P'])
        # third analysis: Correct v Incorrect by L/R arm
k = 'Cue_Desc'
LvR[k] = pd.DataFrame(np.zeros((3,3)),index=['Co_Arm','L_Arm','R_Arm'],columns=['T','P','S'])
l = {}
r = {}
kk = 'Co_Arm'
l[kk] = mPartFRDat['Arm'][(cue=='L')&(desc=='L')]
r[kk] = mPartFRDat['Arm'][(cue=='R')&(desc=='R')]
kk = 'L_Arm'
l[kk]=mPartFRDat['Arm'][(desc=='L')&(cue=='L')]
r[kk]=mPartFRDat['Arm'][(desc=='L')&(cue=='R')]
kk = 'R_Arm'
l[kk]=mPartFRDat['Arm'][(desc=='R')&(cue=='L')]
r[kk]=mPartFRDat['Arm'][(desc=='R')&(cue=='R')]
for kk in ['Co_Arm','L_Arm','R_Arm']:
temp = stats.ttest_ind(l[kk],r[kk])
LvR[k].loc[kk,'T'] = temp[0]
LvR[k].loc[kk,'P'] = temp[1]
dat_splits[k][kk]['l'] = l[kk].values
dat_splits[k][kk]['r'] = r[kk].values
LvR[k]['S'] = getSigLevel(LvR[k]['P'])
        # aggregate results.
mlr = {}
slr = {}
for k,v in dat_splits.items():
mlr[k] = pd.DataFrame(np.zeros((3,2)),index=v.keys(),columns=['L','R'])
slr[k] = pd.DataFrame(np.zeros((3,2)),index=v.keys(),columns=['L','R'])
cnt = 0
for kk,vv in v.items():
l = vv['l']
r = vv['r']
mlr[k].loc[kk] = [np.mean(l),np.mean(r)]
slr[k].loc[kk] = [stats.sem(l),stats.sem(r)]
cnt+=1
for k in Conds: # keys : Cue, Desc, Cue_Desc
for ii in dat_meas:
if ii=='Stats':
for jj in ['T','P','S']:
if unitNum == 0:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=LvR[k].index.values)
uni_LvR_Analyses[k]['Stats'][jj].loc[unitNum] = LvR[k][jj]
else:
for jj in ['L','R']:
if unitNum == 0:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=LvR[k].index.values)
uni_LvR_Analyses[k]['Mean'][jj].loc[unitNum] = mlr[k][jj]
uni_LvR_Analyses[k]['SD'][jj].loc[unitNum] = slr[k][jj]
all_dat_spl[unitNum] = dat_splits
# reorg LvR to a pandas data frame with all the units
CueDescFR_Dat = pd.DataFrame()
for k in Conds:
cnt = 0
for kk in ['Mean','SD']:
for kkk in ['L','R']:
if kk=='Mean':
valName = 'MzFR_'+ kkk
elif kk == 'SD':
valName = 'SzFR_' + kkk
if cnt==0:
y = uni_LvR_Analyses[k][kk][kkk].copy()
y = y.reset_index()
y = y.melt(value_vars = uni_LvR_Analyses[k][kk][kkk].columns,id_vars='index',var_name='Seg',value_name= valName)
y['Cond'] = k
else:
z = uni_LvR_Analyses[k][kk][kkk].copy()
z = z.reset_index()
z = z.melt(value_vars = uni_LvR_Analyses[k][kk][kkk].columns,id_vars='index',value_name= valName)
y[valName] = z[valName].copy()
cnt+=1
for jj in ['T','P','S']:
z = uni_LvR_Analyses[k]['Stats'][jj].copy()
z = z.reset_index()
z = z.melt(value_vars = uni_LvR_Analyses[k]['Stats'][jj].columns ,id_vars='index', var_name = 'Seg', value_name = jj)
y[jj] = z[jj]
CueDescFR_Dat = pd.concat((CueDescFR_Dat,y))
CueDescFR_Dat['Sig'] = CueDescFR_Dat['P']<0.05
CueDescFR_Dat.rename(columns={'index':'unit'},inplace=True)
return CueDescFR_Dat, all_dat_spl
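# Illustrative sketch (never called here): summarizing the long-format table returned by
# CueDesc_SegUniAnalysis. Column names follow the frame built above ('Cond', 'Seg', 'Sig').
def _example_summarize_cue_desc(CueDescFR_Dat):
    # fraction of units with a significant L vs R difference, per condition and maze segment
    return (CueDescFR_Dat.groupby(['Cond', 'Seg'])['Sig']
            .mean()
            .rename('fraction_significant')
            .reset_index())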
def CueDesc_SegDecAnalysis(dat):
nPe = 100
nRepeats = 10
nSh = 50
njobs = 20
trConds = dat['TrialConds']
trDat = dat['TrialLongMat']
nUnits = dat['fitTable2'].shape[0]
gTrialsIDs = trConds['Good']
Trials = trConds[gTrialsIDs].index.values
nTrials = len(Trials)
allZoneFR,unitIDs = reformatFRDat(dat,Trials)
CoTrials = trConds[gTrialsIDs & (trConds['Co']=='Co')].index.values
InCoTrials = trConds[gTrialsIDs & (trConds['Co']=='InCo')].index.values
nInCo = len(InCoTrials)
TrSets = {}
TrSets['all'] = np.arange(nTrials)
_,idx,_=np.intersect1d(np.array(Trials),np.array(CoTrials),return_indices=True)
TrSets['co'] = idx
_,idx,_=np.intersect1d(np.array(Trials),np.array(InCoTrials),return_indices=True)
TrSets['inco'] = idx
cueVec = trConds.loc[gTrialsIDs]['Cues'].values
descVec = trConds.loc[gTrialsIDs]['Desc'].values
predVec = {'Cue':cueVec, 'Desc':descVec}
nFeatures = {'h':np.arange(1),'a':np.arange(2),'center':np.arange(3),'be':np.arange(4),'int':np.arange(5),'cdfg':np.arange(6),'goal':np.arange(7)}
def correctTrials_Decoder(train,test):
res = pd.DataFrame(np.zeros((3,4)),columns=['Test','BAc','P','Z'])
temp = mod.fit(X_train[train],y_train[train])
res.loc[0,'Test'] = 'Model'
y_hat = temp.predict(X_train[test])
res.loc[0,'BAc'] = bac(y_train[test],y_hat)*100
# shuffle for held out train set
mod_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
mod_sh[sh] = bac(y_train[test],y_perm_hat)*100
res.loc[0,'Z'] = getPerm_Z(mod_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(mod_sh, res.loc[0,'BAc'] )
# predictions on x test
y_hat = temp.predict(X_test)
res.loc[1,'Test'] = 'Cue'
res.loc[1,'BAc'] = bac(y_test_cue,y_hat)*100
res.loc[2,'Test'] = 'Desc'
res.loc[2,'BAc'] = bac(y_test_desc,y_hat)*100
# shuffles for ytest cue/desc
cue_sh = np.zeros(nSh)
desc_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
cue_sh[sh] = bac(y_test_cue,y_perm_hat)*100
desc_sh[sh] = bac(y_test_desc,y_perm_hat)*100
res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )
res.loc[2,'Z'] = getPerm_Z(desc_sh, res.loc[2,'BAc'] )
res.loc[2,'P'] = getPerm_Pval(desc_sh, res.loc[2,'BAc'] )
res['nSeUnits'] = nUnits
return res
def balancedCoIncoTrial_Decoder(pe,feats):
res = pd.DataFrame(np.zeros((2,4)),columns=['Test','BAc','P','Z'])
# sample correct trials to match the number of incorrect trials.
samp_co_trials = np.random.choice(TrSets['co'],nInCo,replace=False)
train = np.concatenate( (TrSets['inco'], samp_co_trials ))
test = np.setdiff1d(TrSets['co'], samp_co_trials)
X_train = allZoneFR.loc[train,feats].values
X_test = allZoneFR.loc[test,feats].values
Y_cue_train = predVec['Cue'][train]
Y_desc_train = predVec['Desc'][train]
        Y_test = predVec['Cue'][test] # cue and desc labels are identical on the held-out correct test trials
# model trained on the cue
res.loc[0,'Test'] = 'Cue'
cue_mod = mod.fit(X_train,Y_cue_train)
y_cue_hat = cue_mod.predict(X_test)
res.loc[0,'BAc'] = bac(Y_test,y_cue_hat)*100
cue_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm = np.random.permutation(Y_test)
cue_sh[sh] = bac(y_perm,y_cue_hat)*100
res.loc[0,'Z'] = getPerm_Z(cue_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(cue_sh, res.loc[0,'BAc'] )
# model trained on the desc
res.loc[1,'Test'] = 'Desc'
desc_mod = mod.fit(X_train,Y_desc_train)
y_desc_hat = desc_mod.predict(X_test)
res.loc[1,'BAc'] = bac(Y_test,y_desc_hat)*100
desc_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm = np.random.permutation(Y_test)
desc_sh[sh] = bac(y_perm,y_desc_hat)*100
        res.loc[1,'Z'] = getPerm_Z(desc_sh, res.loc[1,'BAc'] )
        res.loc[1,'P'] = getPerm_Pval(desc_sh, res.loc[1,'BAc'] )
return res
def IncoTrial_Decoder(train,test):
res = pd.DataFrame(np.zeros((3,4)),columns=['Test','BAc','P','Z'])
temp = mod.fit(X_train[train],y_train[train])
res.loc[0,'Test'] = 'Model'
y_hat = temp.predict(X_train[test])
res.loc[0,'BAc'] = bac(y_train[test],y_hat)*100
# shuffle for held out train set
mod_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
mod_sh[sh] = bac(y_train[test],y_perm_hat)*100
res.loc[0,'Z'] = getPerm_Z(mod_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(mod_sh, res.loc[0,'BAc'] )
# predictions on x test
y_hat = temp.predict(X_test)
res.loc[1,'Test'] = 'Cue'
res.loc[1,'BAc'] = bac(y_test_cue,y_hat)*100
res.loc[2,'Test'] = 'Desc'
res.loc[2,'BAc'] = 100-res.loc[1,'BAc']
# shuffles for ytest cue/desc
cue_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
cue_sh[sh] = bac(y_test_cue,y_perm_hat)*100
res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )
res.loc[2,'Z'] = getPerm_Z(100-cue_sh, res.loc[2,'BAc'] )
res.loc[2,'P'] = getPerm_Pval(100-cue_sh, res.loc[2,'BAc'] )
return res
with Parallel(n_jobs=njobs) as parallel:
# correct trials Model:
coModsDec = pd.DataFrame()
popCoModsDec = pd.DataFrame()
try:
nFolds = 10
y_train = predVec['Cue'][TrSets['co']]
y_test_cue = predVec['Cue'][TrSets['inco']]
y_test_desc = predVec['Desc'][TrSets['inco']]
rskf = RepeatedStratifiedKFold(n_splits=nFolds,n_repeats=nRepeats, random_state=0)
t0=time.time()
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['co'], feats ].values
X_test = allZoneFR.loc[TrSets['inco'], feats ].values
cnt=0
r = parallel(delayed(correctTrials_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
t1=time.time()
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
coModsDec = pd.concat((coModsDec,res))
print(end='.')
coModsDec['Decoder'] = 'Correct'
# -population
for p,nF in nFeatures.items():
feats=np.array([])
for f in nF:
feats=np.concatenate((feats,np.arange(f,nUnits*7,7)))
feats=feats.astype(int)
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['co'], feats ].values
X_test = allZoneFR.loc[TrSets['inco'], feats ].values
cnt=0
r = parallel(delayed(correctTrials_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
popCoModsDec = pd.concat((popCoModsDec,res))
print(end='.')
print('\nDecoding Correct Model Completed. Time = {0:.2f}s \n'.format(time.time()-t0))
popCoModsDec['Decoder'] = 'Correct'
except:
print('CorrectTrials Model Failed.')
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
# balanced correct/inco model:
baModsDec = pd.DataFrame()
popBaModsDec = pd.DataFrame()
try:
t0=time.time()
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
r = parallel(delayed(balancedCoIncoTrial_Decoder)(pe, feats) for pe in np.arange(nPe))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
baModsDec = pd.concat((baModsDec,res))
print(end='.')
baModsDec['Decoder'] = 'Balanced'
# -population
for p,nF in nFeatures.items():
feats=np.array([])
for f in nF:
feats=np.concatenate((feats,np.arange(f,nUnits*7,7)))
feats=feats.astype(int)
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
r = parallel(delayed(balancedCoIncoTrial_Decoder)(pe, feats) for pe in np.arange(nPe))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
popBaModsDec = pd.concat((popBaModsDec,res))
print(end='.')
print('\nDecoding Balanced Model Completed. Time = {0:.2f}s \n'.format(time.time()-t0))
popBaModsDec['Decoder'] = 'Balanced'
except:
print('Balanced Model Failed.')
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
# incorrect trials model:
InCoModsDec = pd.DataFrame()
popInCoModsDec = pd.DataFrame()
try:
t0=time.time()
nFolds = 5
y_train = predVec['Cue'][TrSets['inco']]
y_test_cue = predVec['Cue'][TrSets['co']]
y_test_desc = predVec['Desc'][TrSets['co']]
rskf = RepeatedStratifiedKFold(n_splits=nFolds,n_repeats=nRepeats, random_state=0)
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['inco'], feats ].values
X_test = allZoneFR.loc[TrSets['co'], feats ].values
cnt=0
r = parallel(delayed(IncoTrial_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
InCoModsDec = | pd.concat((InCoModsDec,res)) | pandas.concat |
"""Module for factory functions that create raw data objects."""
from typing import Tuple, Union, Optional
from faker import Faker
import numpy as np
import pandas as pd
from candystore import CandyStore
from tipping import settings
from tipping.helpers import pivot_team_matches_to_matches
FAKE = Faker()
MATCH_RESULTS_COLS = [
"date",
"year",
"round_number",
"home_score",
"home_team",
"away_score",
"away_team",
]
TEAM_TYPES = ("home", "away")
def _translate_team_names(team_type):
return lambda df: df[f"{team_type}_team"].map(
lambda team: settings.TEAM_TRANSLATIONS.get(team, team)
)
def fake_match_data(
match_results: Optional[pd.DataFrame] = None,
seasons: Union[Tuple[int, int], int] = 1,
) -> pd.DataFrame:
"""Return minimally-valid dummy match results data."""
match_results = (
CandyStore(seasons=seasons).match_results(to_dict=None)
if match_results is None
else match_results
)
return (
match_results.rename(
columns={
"season": "year",
"home_points": "home_score",
"away_points": "away_score",
}
)
# Recreates data cleaning performed in data_import
.assign(
date=lambda df: pd.to_datetime(df["date"], utc=True),
# Team name translations happen in augury's data pipeline
home_team=_translate_team_names("home"),
away_team=_translate_team_names("away"),
)
)
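# Illustrative sketch (never executed on import): generating dummy data for two seasons and
# reshaping it into one row per team per match via _build_team_matches (defined below).
# The season range is an arbitrary assumption.
def _example_fake_data_usage():
    match_df = fake_match_data(seasons=(2019, 2021))
    team_match_df = pd.concat(
        [_build_team_matches(match_df, team_type) for team_type in TEAM_TYPES]
    )
    return match_df, team_match_df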
def fake_fixture_data(
fixtures: Optional[pd.DataFrame] = None,
seasons: Union[Tuple[int, int], int] = 1,
) -> pd.DataFrame:
"""
Return minimally-valid data for fixture data.
These matches are usually unplayed, future matches, but it is also possible to get
data for past fixtures.
"""
fixtures = (
CandyStore(seasons=seasons).fixtures(to_dict=None)
if fixtures is None
else fixtures
)
return (
fixtures.rename(columns={"season": "year", "round": "round_number"}).drop(
"season_game", axis=1, errors="ignore"
)
# Recreates data cleaning performed in data_import
.assign(
date=lambda df: pd.to_datetime(df["date"], utc=True),
# Team name translations happen in augury's data pipeline
home_team=_translate_team_names("home"),
away_team=_translate_team_names("away"),
)
)
def fake_match_results_data(
match_results: Optional[pd.DataFrame] = None, round_number: Optional[int] = None
) -> pd.DataFrame:
"""
Generate dummy data that replicates match results data.
Params
------
match_results: Match data on which to base the match results data set.
round_number: Round number to use for match results data (because it's fetched
one round at a time).
Returns
-------
DataFrame of match results data
"""
filter_by_round = (
lambda df: df
if round_number is None
else df.query("round_number == @round_number")
)
match_results = fake_match_data() if match_results is None else match_results
assert (
len(match_results["year"].drop_duplicates()) == 1
), "Match results data is fetched one season at a time."
return (
match_results.pipe(filter_by_round)
.assign(
# Team name translations happen in augury's data pipeline
home_team=_translate_team_names("home"),
away_team=_translate_team_names("away"),
)
.loc[:, MATCH_RESULTS_COLS]
)
def _build_team_matches(match_data: pd.DataFrame, team_type: str) -> pd.DataFrame:
at_home = 1 if team_type == "home" else 0
oppo_team_type = "away" if at_home else "home"
team_match_data = {
"team": match_data[f"{team_type}_team"],
"at_home": at_home,
"oppo_team": match_data[f"{oppo_team_type}_team"],
"year": match_data["year"],
"round_number": match_data["round_number"],
}
return | pd.DataFrame(team_match_data) | pandas.DataFrame |
import numpy as np
import pandas as pd
import ml_metrics
import base64
import matplotlib.pyplot as plt
import seaborn as sns
from mlxtend.frequent_patterns import apriori
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import svds
from IPython.display import HTML
"""
redcarpet: Module for recommender systems using sets
"""
"""
HELPER METHODS
"""
def nonzero_index_set(arr):
"""
Returns a set of indices corresponding to non-zero
entries in a numpy array (or other list-like).
"""
res = set()
for i, val in enumerate(arr):
if val > 0:
res.add(i)
return res
def mat_to_sets(mat):
"""
Converts a numpy matrix into a list of sets of column
indices corresponding to non-zero row entries.
"""
return [nonzero_index_set(row) for row in mat]
def get_recs(user_recs, k=None):
"""
Extracts recommended item indices, leaving out their scores.
params:
user_recs: list of lists of tuples of recommendations where
each tuple has (item index, relevance score) with the
list of tuples sorted in order of decreasing relevance
k: maximumum number of recommendations to include for each
user, if None, include all recommendations
returns:
list of lists of recommendations where each
list has the column indices of recommended items
sorted in order they appeared in user_recs
"""
recs = [[item for item, score in recs][0:k] for recs in user_recs]
return recs
def write_kaggle_recs(recs_list, filename=None, headers=["Id", "Predicted"]):
"""
Writes recommendations to file in Kaggle submission format.
params:
recs_list: list of lists of recommendations where each
list has the column indices of recommended items
sorted in order of decreasing relevance
filename: path to file for writing output
headers: list of strings of output columns, defaults to
submission columns: ["Id", "Predicted"]
returns:
int: number of non-header lines, where each line represents
a user and the recommendations given to them
"""
if filename is None:
raise ValueError("Must provide a filename.")
lines = [",".join(headers)]
for i, recs in enumerate(recs_list):
lines.append("{},{}".format(i, " ".join([str(v) for v in recs])))
text = "\n".join(lines)
with open(filename, "w") as file:
file.write(text)
return len(lines) - 1
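# Illustrative sketch (never executed on import): turning scored recommendations into a Kaggle
# submission file. `user_rec_scores` is assumed to be a list with one entry per user of
# (item index, score) tuples sorted by decreasing score; the filename is a placeholder.
def _example_submission(user_rec_scores, k=10, filename="submission.csv"):
    top_k_recs = get_recs(user_rec_scores, k=k)
    return write_kaggle_recs(top_k_recs, filename=filename)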
def download_kaggle_recs(recs_list, filename=None, headers=["Id", "Predicted"]):
"""
Writes recommendations to file in Kaggle submission format.
params:
recs_list: list of lists of recommendations where each
list has the column indices of recommended items
sorted in order of decreasing relevance
filename: path to file for writing output
headers: list of strings of output columns, defaults to
submission columns: ["Id", "Predicted"]
returns:
html: HTML download link to display in a notebook, click
to download the submission file
"""
# Based on: https://www.kaggle.com/rtatman/download-a-csv-file-from-a-kernel
if filename is None:
raise ValueError("Must provide a filename.")
rec_df = pd.DataFrame(
[(i, " ".join([str(r) for r in recs])) for i, recs in enumerate(recs_list)],
columns=headers,
)
csv = rec_df.to_csv(index=False)
b64 = base64.b64encode(csv.encode())
payload = b64.decode()
html = """<a download="{filename}"
href="data:text/csv;base64,{payload}"
target="_blank">Download ({lines} lines): {filename}</a>"""
html = html.format(payload=payload, filename=filename, lines=len(rec_df))
return HTML(html)
def check_list_of_sets(s_data, var_name):
if not isinstance(s_data, list):
raise ValueError(
"{} must be a list of sets. Got: {}".format(var_name, type(s_data))
)
if len(s_data) > 0:
entry = s_data[0]
if not isinstance(entry, set):
raise ValueError(
"{} must be a list of sets. Got list of: {}".format(
var_name, type(entry)
)
)
"""
EVALUATION METRICS
"""
def mapk_score(s_hidden, recs_pred, k=10):
"""
Computes the mean average precision at k (MAP@K) of recommendations.
MAP@K = mean AP@K score over all users
AP@K = (1 / min(m, k)) * sum from 1 to k of (precision at i * relevance of ith item)
Where m is the number of items in a user's hidden set
Where k is the number of items recommended to each user
params:
s_hidden: list of sets of hidden items for each user
        recs_pred: list of lists of recommended items, with each list
            sorted in order of decreasing relevance
k: number of recommendations to use in top set
returns:
float, range [0, 1]
"""
check_list_of_sets(s_hidden, "s_hidden")
return ml_metrics.mapk(s_hidden, recs_pred, k)
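# Worked example (never executed on import): a user with hidden set {1, 2, 3} who is recommended
# [1, 4, 2] gets AP@3 = (1/1 + 2/3) / 3 = 5/9 ~= 0.556, which is also MAP@3 for this single user.
def _example_mapk():
    return mapk_score([{1, 2, 3}], [[1, 4, 2]], k=3)  # ~0.556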
def uhr_score(s_hidden, recs_pred, k=10):
"""
Computes the user hit rate (UHR) score of recommendations.
UHR = the fraction of users whose top list included at
least one item also in their hidden set.
params:
s_hidden: list of sets of hidden items for each user
        recs_pred: list of lists of recommended items, with each list
            sorted in order of decreasing relevance
k: number of recommendations to use in top set
returns:
float, range [0, 1]
"""
check_list_of_sets(s_hidden, "s_hidden")
if len(s_hidden) != len(recs_pred):
note = "Length of true list {} does not match length of recommended list {}."
raise ValueError(note.format(len(s_hidden), len(recs_pred)))
scores = []
for r_true, r_pred_orig in zip(s_hidden, recs_pred):
r_pred = list(r_pred_orig)[0:k]
intersect = set(r_true).intersection(set(r_pred))
scores.append(1 if len(intersect) > 0 else 0)
return np.mean(scores)
def get_apk_scores(s_hidden, recs_pred, k=10):
"""
Returns the average precision at k (AP@K) for each user.
AP@K = (1 / min(m, k)) * sum from 1 to k of (precision at i * relevance of ith item)
Where m is the number of items in a user's hidden set
Where k is the number of items recommended to each user
params:
s_hidden: list of sets of hidden items for each user
        recs_pred: list of lists of recommended items, with each list
            sorted in order of decreasing relevance
k: number of recommendations to use in top set
returns:
list of floats, each float in the range [0, 1]
"""
check_list_of_sets(s_hidden, "s_hidden")
apks = []
for r_true, r_pred in zip(s_hidden, recs_pred):
apk = mapk_score([r_true], [r_pred], k=k)
apks.append(apk)
return apks
def get_hit_counts(s_hidden, recs_pred, k=10):
"""
Returns the number of successful recommendations for each user.
params:
s_hidden: list of sets of hidden items for each user
        recs_pred: list of lists of recommended items, with each list
            sorted in order of decreasing relevance
k: number of recommendations to use in top set
returns:
list of integers, each integer in the range [0, k]
"""
check_list_of_sets(s_hidden, "s_hidden")
hits = []
for r_true, r_pred in zip(s_hidden, recs_pred):
ix = r_true.intersection(set(r_pred[0:k]))
hits.append(len(ix))
return hits
def get_all_scores(rec_scores, k=10):
"""
Get scores of all items in the list of lists of recommendations.
"""
all_scores = []
for recs in rec_scores:
for (item, score) in recs[0:k]:
all_scores.append(score)
return all_scores
"""
ANALYSIS TOOLS
"""
def show_apk_dist(s_hidden, models, k=10, bin_size=0.1):
"""
Plot a histogram of average precision scores for all users.
"""
bins = np.arange(0, 1 + bin_size, bin_size)
pal = sns.color_palette("hls", len(models))
for ((rec_scores, name), color) in zip(models, pal):
apks = get_apk_scores(s_hidden, get_recs(rec_scores), k=k)
sns.distplot(apks, kde=False, label=name, bins=bins, color=color)
plt.xticks(bins)
plt.xlabel("Average Precision in Top {}".format(k))
plt.ylabel("Number of Users")
plt.title("AP@K Score Distribution")
plt.gcf().set_size_inches((8, 5))
plt.grid()
plt.legend(
loc="upper left", bbox_to_anchor=(1.0, 1.0), title="Models", frameon=False
)
plt.show()
def show_hit_dist(s_hidden, models, k=10):
"""
Plot a histogram of hit counts for all users.
"""
bins = range(k + 1)
pal = sns.color_palette("hls", len(models))
for ((rec_scores, name), color) in zip(models, pal):
hits = get_hit_counts(s_hidden, get_recs(rec_scores), k=k)
sns.distplot(hits, kde=False, label=name, bins=bins, color=color)
plt.xticks(bins)
plt.xlabel("Number of Successful Recommendations in Top {}".format(k))
plt.ylabel("Number of Users")
plt.title("Hit Count Distribution")
plt.gcf().set_size_inches((8, 5))
plt.grid()
plt.legend(
loc="upper left", bbox_to_anchor=(1.0, 1.0), title="Models", frameon=False
)
plt.show()
def show_score_dist(models, k=10, bins=None):
"""
Plot a histogram of item recommendation scores for all users.
"""
pal = sns.color_palette("hls", len(models))
for ((rec_scores, name), color) in zip(models, pal):
scores = get_all_scores(rec_scores, k=k)
if bins is not None:
sns.distplot(scores, kde=False, label=name, color=color, bins=bins)
else:
sns.distplot(scores, kde=False, label=name, color=color)
if bins is not None:
plt.xticks(bins)
plt.xlabel("Score for Recommended Item in Top {}".format(k))
plt.ylabel("Number of Items")
plt.title("Item Score Distribution")
plt.gcf().set_size_inches((8, 5))
plt.grid()
plt.legend(
loc="upper left", bbox_to_anchor=(1.0, 1.0), title="Models", frameon=False
)
plt.show()
def show_user_detail(s_input, s_hidden, rec_scores, uid, name_fn=None, k=10):
"""
Show the detailed results of recommendations to a user.
"""
s_pred = get_recs(rec_scores)
print("User: {}".format(uid))
print("Given: {}".format(sorted(s_input[uid])))
print("Recommended: {}".format(sorted(s_pred[uid])))
print("Actual: {}".format(sorted(s_hidden[uid])))
set_intersect = set(s_pred[uid]).intersection(set(s_hidden[uid]))
n_intersect = len(set_intersect)
apk = mapk_score([s_hidden[uid]], [s_pred[uid]], k)
print()
print("Recommendation Hits = {}".format(n_intersect))
print("Average Precision = {0:.3f}".format(apk))
print()
print("All Recommendation Scores:")
for i, (item_id, score) in enumerate(rec_scores[uid]):
hit = "Y" if item_id in s_hidden[uid] else " "
item_name = "Item {}".format(item_id)
if name_fn is not None:
item_name = name_fn(item_id)
print(
"{0}. [{3}] ({2:.3f}) {1}".format(
str(i + 1).zfill(2), item_name, score, hit
)
)
def show_user_recs(s_hidden, rec_scores, k=10):
"""
Show a table of recommendation results by user.
"""
apks = get_apk_scores(s_hidden, get_recs(rec_scores), k=k)
hits = get_hit_counts(s_hidden, get_recs(rec_scores), k=k)
cols = ["User", "APK", "Hits"]
data = {"User": range(len(rec_scores)), "APK": apks, "Hits": hits}
return pd.DataFrame(data)[cols]
def show_item_recs(s_hidden, rec_scores, k=10):
"""
Show a table of recommendation results by item.
"""
item_res = {}
for (user, likes) in zip(rec_scores, s_hidden):
for (i, score) in user:
if i not in item_res:
item_res[i] = {"Item": i, "Results": [], "Scores": []}
item_res[i]["Results"].append(1 if i in likes else 0)
item_res[i]["Scores"].append(score)
res = []
for i in item_res:
record = item_res[i]
total = len(record["Results"])
hits = sum(record["Results"])
res.append(
{
"Item": i,
"Recommended": total,
"Hits": hits,
"Hit Rate": hits / total,
"Avg Score": np.mean(record["Scores"]),
}
)
cols = ["Item", "Recommended", "Hits", "Hit Rate", "Avg Score"]
return pd.DataFrame(res)[cols]
"""
SIMILARITY MEASURES
"""
def jaccard_sim(u, v):
"""
Computes the Jaccard similarity between sets u and v.
sim = intersection(u, v) / union(u, v)
params:
u, v: sets to compare
returns:
float between 0 and 1, where 1 represents perfect
similarity and 0 represents no similarity
"""
intersection = len(u.intersection(v))
union = len(u.union(v))
zero = 1e-10
# Add small value to denominator to avoid divide by zero
sim = intersection / (union + zero)
return sim
def cosine_sim(u, v):
"""
Computes the Cosine similarity between sets u and v.
sim = intersection(u, v) / sqrt(|u| * |v|)
Where |s| is the number of items in set s
params:
u, v: sets to compare
returns:
float between 0 and 1, where 1 represents perfect
similarity and 0 represents no similarity
"""
intersection = len(u.intersection(v))
mag_u = len(u)
mag_v = len(v)
zero = 1e-10
# Add small value to denominator to avoid divide by zero
sim = intersection / (np.sqrt(mag_u * mag_v) + zero)
return sim
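# Worked example (never executed on import): for u = {0, 1, 2} and v = {1, 2, 3} the intersection
# has 2 items and the union has 4, so Jaccard ~= 2/4 = 0.5 and cosine ~= 2/sqrt(3*3) ~= 0.667.
def _example_set_similarities():
    u, v = {0, 1, 2}, {1, 2, 3}
    return jaccard_sim(u, v), cosine_sim(u, v)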
def forbes_sim(u, v):
"""
Computes the Forbes similarity between sets u and v.
sim = a/((a+b)*(a+c))
Where a = # of items in intersection(u, v)
b = # of items only in u
c = # of items only in v
Note: n is omitted since it is constant for all vectors.
params:
u, v: sets to compare
returns:
float between 0 and 1, where 1 represents perfect
similarity and 0 represents no similarity
"""
a = len(u.intersection(v))
b = len(u) - a
c = len(v) - a
zero = 1e-10
sim = a / (((a + b) * (a + c)) + zero)
return sim
def mcconnoughy_sim(u, v):
"""
Computes the McConnoughy similarity between sets u and v.
sim = (a*a - b*c) / sqrt((a+b)*(a+c))
Where a = # of items in intersection(u, v)
b = # of items only in u
c = # of items only in v
params:
u, v: sets to compare
returns:
        float that can be negative when the unshared items (b*c) outweigh the
        shared items; larger values represent greater similarity
"""
a = len(u.intersection(v))
b = len(u) - a
c = len(v) - a
zero = 1e-10
sim = ((a * a) - (b * c)) / (np.sqrt((a + b) * (a + c)) + zero)
return sim
def simpson_sim(u, v):
"""
Computes the Simpson similarity coefficient between sets u and v.
sim = intersection(u, v) / min(|u|, |v|)
Where |s| is the number of items in set s
params:
u, v: sets to compare
returns:
float between 0 and 1, where 1 represents perfect
similarity and 0 represents no similarity
"""
ix = len(u.intersection(v))
zero = 1e-10
sim = ix / (min(len(u), len(v)) + zero)
return sim
def first_kulczynski_sim(u, v):
"""
Computes the first Kulczynski similarity between sets u and v.
sim = a / (b + c)
Where a = # of items in intersection(u, v)
b = # of items only in u
c = # of items only in v
Note: If (b + c) is zero, this measure is undefined. In this
implementation, a small value (1e-4) is added to the
denominator to avoid division by zero. Consequently, the
similarity between two sets with `a` matches will be equal
        to a / 1e-4, which is equivalent to a * 10000
params:
u, v: sets to compare
returns:
float from zero to infinity, where higher scores represents
greater similarity and zero represents no similarity
"""
a = len(u.intersection(v))
b = len(u) - a
c = len(v) - a
zero = 1e-4
sim = a / (b + c + zero)
return sim
def second_kulczynski_sim(u, v):
"""
Computes the second Kulczynski similarity between sets u and v.
sim = (1/2) * ((a / (a + b)) + (a / (a + c)) )
Where a = # of items in intersection(u, v)
b = # of items only in u
c = # of items only in v
params:
u, v: sets to compare
returns:
float between 0 and 1, where 1 represents perfect
similarity and 0 represents no similarity
"""
a = len(u.intersection(v))
b = len(u) - a
c = len(v) - a
zero = 1e-10
sim = ((a / (a + b + zero)) + (a / (a + c + zero))) / 2
return sim
def sorenson_dice_sim(u, v):
"""
Computes the Sørensen-Dice similarity coefficient between sets u and v.
sim = (2 * intersection(u, v)) / (|u| + |v|)
Where |s| is the number of items in set s
params:
u, v: sets to compare
returns:
float between 0 and 1, where 1 represents perfect
similarity and 0 represents no similarity
"""
ix = len(u.intersection(v))
zero = 1e-10
sim = (2 * ix) / (len(u) + len(v) + zero)
return sim
"""
ASSOCIATION RULE METRICS
Based on: https://github.com/resumesai/resumesai.github.io/blob/master/analysis/Rule%20Mining.ipynb
"""
def sets_to_contingency(a, b, N):
"""
Creates a contingency table from two sets.
params:
a, b: sets to compare
N: total number of possible items
returns:
(f11, f10, f01, f00) tuple of contingency table entries:
f11 = # of items both in a and b
f10 = # of items only in a
f01 = # of items only in b
f00 = # of items not in either a or b
"""
f11 = len(a.intersection(b))
f10 = len(a) - f11
f01 = len(b) - f11
f00 = N - (f11 + f10 + f01)
return (f11, f10, f01, f00)
def rule_support(f11, f10, f01, f00):
"""
Computes the support for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
float in range [0, 1] where 1 indicates maximum support and
0 indicates no support
"""
N = f11 + f10 + f01 + f00
zero = 1e-10
return f11 / (N + zero)
def rule_confidence(f11, f10, f01, f00):
"""
Computes the confidence for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
float in range [0, 1] where 1 indicates maximum confidence and
0 indicates no confidence
"""
zero = 1e-10
return f11 / (f11 + f10 + zero)
def rule_lift(f11, f10, f01, f00):
"""
Computes the lift for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
float ranging from zero to infinity where 1 implies independence, greater
than 1 implies positive association, less than 1 implies negative association
"""
N = f11 + f10 + f01 + f00
zero = 1e-10
supp_ab = f11 / N
    # support of the antecedent/consequent must include the co-occurrences (f11)
    supp_a = (f11 + f10) / N
    supp_b = (f11 + f01) / N
return supp_ab / ((supp_a * supp_b) + zero)
def rule_conviction(f11, f10, f01, f00):
"""
Computes the conviction for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
        float from zero to infinity, where 1 implies independence and larger values imply a stronger rule
"""
N = f11 + f10 + f01 + f00
zero = 1e-10
    supp_b = (f11 + f01) / N  # support of the consequent includes co-occurrences
conf = rule_confidence(f11, f10, f01, f00)
return (1 - supp_b) / ((1 - conf) + zero)
def rule_power_factor(f11, f10, f01, f00):
"""
Computes the rule power factor (RPF) for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
float in range [0, 1]
"""
N = f11 + f10 + f01 + f00
zero = 1e-10
supp_ab = f11 / N
    supp_a = (f11 + f10) / N  # support of the antecedent includes co-occurrences
return (supp_ab * supp_ab) / (supp_a + zero)
def rule_interest_factor(f11, f10, f01, f00):
"""
Computes the interest factor for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
        float from zero upward, where 1 implies independence, values above 1
        imply positive association, and values below 1 imply negative association
"""
N = f11 + f10 + f01 + f00
zero = 1e-10
f1p = f11 + f10
fp1 = f11 + f01
return (N * f11) / ((f1p * fp1) + zero)
def rule_phi_correlation(f11, f10, f01, f00):
"""
Computes the phi correlation for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
float in range [-1, 1] where 1 indicates perfect positive correlation and
-1 indicates perfect negative correlation.
"""
f1p = f11 + f10
f0p = f01 + f00
fp1 = f11 + f01
fp0 = f10 + f00
num = (f11 * f00) - (f01 * f10)
denom = np.sqrt(f1p * fp1 * f0p * fp0)
if denom == 0:
return 0.0
return num / denom
def rule_is_score(f11, f10, f01, f00):
"""
Computes the IS score for a rule `a -> b` based on the contingency table.
params:
f11 = count a and b appearing together
f10 = count of a appearing without b
f01 = count of b appearing without a
f00 = count of neither a nor b appearing
returns:
        float in range [0, 1] where 1 indicates maximum interest and
        0 indicates no interest
"""
intfac = rule_interest_factor(f11, f10, f01, f00)
supp = rule_support(f11, f10, f01, f00)
return np.sqrt(intfac * supp)
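# Worked example (never executed on import): with N = 10 possible items, a = {0, 1, 2} and
# b = {1, 2, 3} give the contingency table (f11, f10, f01, f00) = (2, 1, 1, 6), which can be fed
# straight into the rule metrics above.
def _example_rule_metrics():
    f11, f10, f01, f00 = sets_to_contingency({0, 1, 2}, {1, 2, 3}, N=10)
    return {
        "support": rule_support(f11, f10, f01, f00),        # 2/10 = 0.2
        "confidence": rule_confidence(f11, f10, f01, f00),  # 2/3
        "lift": rule_lift(f11, f10, f01, f00),
        "phi": rule_phi_correlation(f11, f10, f01, f00),
    }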
def mine_association_rules(m_train, min_support=0.5):
"""
Finds association rules using the Apriori algorithm. Produces rules of the form `a -> b`,
which suggests that if a user likes item `a`, then they may also like item `b`.
params:
m_train: matrix of train data, rows = users, columns = items, 1 = like, 0 otherwise
min_support: A float between 0 and 1 for minumum support of the itemsets returned.
The support is computed as the fraction:
transactions_where_item(s)_occur / total_transactions
returns:
rule_df: Pandas dataframe of association rules, with columns:
"a": antecedent (LHS) item index
"b": consequent (RHS) item index
"ct": tuple of contingency table entries for the rule
"support": support for the rule `a -> b` in m_train
"""
freq_is = apriori(pd.DataFrame(m_train), max_len=2, min_support=min_support)
freq_is["len"] = freq_is["itemsets"].apply(lambda s: len(s))
freq_is = freq_is.query("len == 2")
if len(freq_is) == 0:
return | pd.DataFrame([], columns=["a", "b", "ct", "support"]) | pandas.DataFrame |
'''
******************************************************************
SLEXIL—Software Linking Elan XML to Illuminated Language
Copyright (C) 2019 <NAME> and <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
The full version of the GNU General Public License is found at
<https://www.gnu.org/licenses/>.
Information about the software can be obtained by contacting
david.beck at ualberta.ca.
******************************************************************
'''
import pandas as pd
from xml.etree import ElementTree as etree
from morphemeGloss import *
from pprint import pprint
from yattag import *
import pdb
import formatting
from translationLine import *
# from errors import *
import logging
from LineDataFrame import DataFrame as ldf
import identifyLines
# ------------------------------------------------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------------------------------------------------
class IjalLine:
tierInfo = []
spokenTextID = ""
rootElement = None
rootID = None
tierElements = []
doc = None
lineNumber = None
soundFile = None
grammaticalTerms = None
def __init__(self, doc, lineNumber, tierGuide, grammaticalTerms=[]):
self.doc = doc
self.lineNumber = lineNumber
self.tierGuide = tierGuide
self.rootID = lineNumber + 1
self.grammaticalTerms = grammaticalTerms
self.speechTierList = identifyLines.getList(self.doc, self.tierGuide)
self.rootElement = self.speechTierList[lineNumber]
self.allElements = findChildren(self.doc, self.rootElement)
dataFrame = ldf(doc, self.allElements)
self.tblRaw = dataFrame.getTbl()
self.tierCount = self.tblRaw.shape[0]
def parse(self):
self.tbl = standardizeTable(self.tblRaw, self.tierGuide)
# print(self.tbl)
# print(self.lineNumber)
self.tbl.index = range(len(self.tbl.index))
self.categories = categories = self.tbl["category"].tolist()
# print(self.lineNumber,self.categories.index("speech"))
if 'speech' in self.categories:
self.speechRow = self.categories.index("speech")
else:
logging.warning("EAF error: Line %s has nothing in the transcription line." % (int(self.lineNumber) + 1))
self.speechRow = None
if 'translation' in self.categories:
self.translationRow = self.categories.index("translation")
else:
self.translationRow = None
tierCount = self.tbl.shape[0]
# pdb.set_trace()
self.morphemeRows = [i for i in range(tierCount) if self.categories[i] == "morpheme"]
self.morphemeGlossRows = [i for i in range(tierCount) if self.categories[i] == "morphemeGloss"]
# handle the case of a secondary translation
if 'translation2' in self.categories:
self.translation2Row = self.categories.index("translation2")
else:
self.translation2Row = None
# handle the case of a second transcription line
if 'transcription2' in self.categories:
self.transcription2Row = self.categories.index("transcription2")
else:
self.transcription2Row = None
self.morphemes = self.extractMorphemes()
self.morphemeGlosses = self.extractMorphemeGlosses()
self.calculateMorphemeSpacing()
def getTierCount(self):
return (self.getTable().shape[0])
def getTable(self):
return (self.tbl)
'''the next three methods handle a use case where there is a missing or
empty transcription (line) tier but assume that there is a valid time-
aligned translation tier (not sure we can save a file that has neither)'''
def getStartTime(self):
col = self.tbl.columns.values.tolist().index("START")
if self.speechRow != None:
return (self.tbl.iloc[self.speechRow][self.tbl.columns.values.tolist().index("START")])
else:
return (self.tbl.iloc[self.translationRow][self.tbl.columns.values.tolist().index("START")])
def getEndTime(self):
if self.speechRow != None:
return (self.tbl.iloc[self.speechRow][self.tbl.columns.values.tolist().index("END")])
else:
return (self.tbl.iloc[self.translationRow][self.tbl.columns.values.tolist().index("END")])
def getAnnotationID(self):
if self.speechRow != None:
return (self.tbl.iloc[self.speechRow][self.tbl.columns.values.tolist().index("ANNOTATION_ID")])
else:
return (self.tbl.iloc[self.translationRow][self.tbl.columns.values.tolist().index("ANNOTATION_ID")])
# ----------------------------------------------------------------------------------------------------
def show(self):
pprint(vars(self))
# ----------------------------------------------------------------------------------------------------
def getSpokenText(self):
# categories = self.tbl["category"].tolist()
# row = categories.index("speech")
if self.speechRow == None:
return '<div class="missing_annotation">⚠️ Missing transcription line ⚠️</div>'
else:
return (self.tbl.iloc[self.speechRow, self.tbl.columns.values.tolist().index("TEXT")])
# ----------------------------------------------------------------------------------------------------
def getTranslation(self):
# categories = self.tbl["category"].tolist()
# row = categories.index("translation")
# pdb.set_trace()
if self.translationRow == None:
logging.warning("missing translation at line %d" % (int(self.lineNumber) + 1))
return (None)
translation = self.tbl.iloc[self.translationRow, self.tbl.columns.values.tolist().index("TEXT")]
translationLine = TranslationLine(translation)
return (translationLine.getStandardized())
# ----------------------------------------------------------------------------------------------------
def getTranslation2(self):
if self.translation2Row != None:
translation2 = self.tbl.iloc[self.translation2Row, self.tbl.columns.values.tolist().index("TEXT")]
translationLine2 = TranslationLine(translation2)
return (translationLine2.getStandardized())
else:
return (None)
# ----------------------------------------------------------------------------------------------------
def getTranscription2(self):
if self.transcription2Row != None:
transcription2 = self.tbl.iloc[self.transcription2Row, self.tbl.columns.values.tolist().index("TEXT")]
return (transcription2)
else:
return (None)
# ----------------------------------------------------------------------------------------------------
def extractMorphemes(self):
if (self.morphemeRows == []):
return ([])
rawMorphemeList = self.tbl["TEXT"].iloc[self.morphemeRows].tolist()
rawMorphemes = ''.join(rawMorphemeList)
if "\t" in rawMorphemes:
rawMorphemeText = self.tbl["TEXT"].iloc[self.morphemeRows].tolist()[0]
rawMorphemeList = rawMorphemeText.split('\t')
morphemes = replaceHyphensWithNDashes(rawMorphemeList)
return (morphemes)
# ----------------------------------------------------------------------------------------------------
def extractMorphemeGlosses(self):
if (self.morphemeGlossRows == []):
return ([])
rawMorphemeGlossList = self.tbl["TEXT"].iloc[self.morphemeGlossRows].tolist()
rawMorphemeGlosses = ''.join(rawMorphemeGlossList)
if "\t" in rawMorphemeGlosses:
rawMorphemeGlossText = self.tbl["TEXT"].iloc[self.morphemeGlossRows].tolist()[0]
rawMorphemeGlossList = rawMorphemeGlossText.split('\t')
morphemeGlosses = replaceHyphensWithNDashes(rawMorphemeGlossList)
return (morphemeGlosses)
# ----------------------------------------------------------------------------------------------------
def getMorphemes(self):
return (self.morphemes)
# ----------------------------------------------------------------------------------------------------
def getGrammaticalTerms(self, terms):
try:
if terms[-1] == '':
terms = terms[:-1]
            return terms
except IndexError:
return
# ----------------------------------------------------------------------------------------------------
def getMorphemeGlosses(self):
return (self.morphemeGlosses)
# ----------------------------------------------------------------------------------------------------
def calculateMorphemeSpacing(self):
"""
the spacing is used to create a styleString, specifying grid cell widths which
accomodate the widest of each morpheme/gloss pair, so that they each member of
each pair is vertically aligned:
m1 m2 ----m3-----
g1 ---g2--- g3
"""
morphemes = self.getMorphemes()
glosses = self.getMorphemeGlosses()
if (len(morphemes) > len(glosses)):
logging.warning("EAF error: There are more morphs (%d) than glosses (%d) in line %s." % (
len(morphemes), len(glosses), int(self.lineNumber) + 1))
theDifference = len(morphemes) - len(glosses)
for i in range(0, theDifference):
glosses.append("⚠️")
elif (len(morphemes) < len(glosses)):
logging.warning("EAF error: There are more glosses (%d) than morphs (%d) in line %s." % (
len(glosses), len(morphemes), int(self.lineNumber) + 1))
theDifference = len(glosses) - len(morphemes)
for i in range(0, theDifference):
morphemes.append("⚠️")
self.morphemeSpacing = []
for i in range(len(morphemes)):
if "<su" in morphemes[i]:
newmorph = morphemes[i].replace("<sub>", "")
newmorph = newmorph.replace("</sub>", "")
newmorph = newmorph.replace("<sup>", "")
newmorph = newmorph.replace("</sup>", "")
morphemeSize = len(newmorph)
else:
morphemeSize = len(morphemes[i])
if "<su" in glosses[i]:
newGloss = glosses[i].replace("<sub>", "")
newGloss = newGloss.replace("</sub>", "")
newGloss = newGloss.replace("<sup>", "")
newGloss = newGloss.replace("</sup>", "")
glossSize = len(newGloss)
else:
glossSize = len(glosses[i])
self.morphemeSpacing.append(max(morphemeSize, glossSize) + 1)
# ----------------------------------------------------------------------------------------------------
def getMorphemeSpacing(self):
return (self.morphemeSpacing)
# ----------------------------------------------------------------------------------------------------
def htmlLeadIn(self, htmlDoc, audioDirectory, audioFileType):
text = "%d)" % (self.lineNumber + 1)
htmlDoc.text(text)
lineID = self.rootID
audioTag = '<audio id="%s"><source src="%s/%s.%s"/></audio>' % (
self.getAnnotationID(), audioDirectory, self.getAnnotationID(),audioFileType)
htmlDoc.asis(audioTag)
onError = "this.style.display=\'none\'"
buttonTag = '<button onclick="playSample(\'%s\')">🔈</button>' % self.getAnnotationID()
htmlDoc.asis(buttonTag)
# ----------------------------------------------------------------------------------------------------
def toHTML(self, htmlDoc):
with htmlDoc.tag("div", klass="line-content"):
with htmlDoc.tag("div", klass="line"):
styleString = "grid-template-columns: %s;" % ''.join(["%dch " % p for p in self.morphemeSpacing])
with htmlDoc.tag("div", klass="speech-tier"):
htmlDoc.asis(self.getSpokenText())
transcription2 = self.getTranscription2()
if transcription2 != None:
with htmlDoc.tag("div", klass="secondTranscription-tier"):
htmlDoc.asis(self.getTranscription2())
morphemes = self.getMorphemes()
if (len(morphemes) > 0):
with htmlDoc.tag("div", klass="morpheme-tier", style=styleString):
for morpheme in morphemes:
with htmlDoc.tag("div", klass="morpheme-cell"):
htmlDoc.asis(morpheme)
morphemeGlosses = self.getMorphemeGlosses()
if (len(morphemeGlosses) > 0):
with htmlDoc.tag("div", klass="morpheme-tier", style=styleString):
for morphemeGloss in self.getMorphemeGlosses():
with htmlDoc.tag("div", klass="morpheme-cell"):
mg = MorphemeGloss(morphemeGloss, self.grammaticalTerms)
mg.parse()
mg.toHTML(htmlDoc)
translation = self.getTranslation()
if translation:
with htmlDoc.tag("div", klass="freeTranslation-tier"):
htmlDoc.asis(self.getTranslation())
translation2 = self.getTranslation2()
if translation2 != None:
with htmlDoc.tag("div", klass="freeTranslation-tier"):
htmlDoc.text(translation2)
# add a div to hold annotations
with htmlDoc.tag("div", klass="annotationDiv"):
pass#;
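# ------------------------------------------------------------------------------------------------------------------------
# Illustrative sketch (never called here): how a caller typically drives IjalLine for one line of
# an EAF document. The ElementTree document, tier guide, and audio settings are assumed inputs.
def _example_render_line(doc, tierGuide, htmlDoc, lineNumber=0):
    line = IjalLine(doc, lineNumber, tierGuide)
    line.parse()
    line.htmlLeadIn(htmlDoc, audioDirectory="audio", audioFileType="wav")
    line.toHTML(htmlDoc)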
# ------------------------------------------------------------------------------------------------------------------------
def findChildren(doc, rootElement):
elementsToDo = [rootElement]
elementsCompleted = []
while (len(elementsToDo) > 0):
currentElement = elementsToDo[0]
parentRef = currentElement.attrib["ANNOTATION_ID"]
pattern = "TIER/ANNOTATION/REF_ANNOTATION[@ANNOTATION_REF='%s']" % parentRef
childElements = doc.findall(pattern)
elementsToDo.remove(currentElement)
elementsCompleted.append(currentElement)
if (len(childElements) > 0):
elementsToDo.extend(childElements)
return (elementsCompleted)
# ------------------------------------------------------------------------------------------------------------------------
def buildTable(doc, lineElements):
tbl_elements = pd.DataFrame(e.attrib for e in lineElements)
# print(tbl_elements)
startTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF1')]
pattern = "TIME_ORDER/TIME_SLOT[@TIME_SLOT_ID='%s']" % startTimeSlotID
startTime = int(doc.find(pattern).attrib["TIME_VALUE"])
startTimes = [startTime]
rowCount = tbl_elements.shape[0]
for i in range(1, rowCount):
startTimes.append(float('NaN'))
endTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF2')]
pattern = "TIME_ORDER/TIME_SLOT[@TIME_SLOT_ID='%s']" % endTimeSlotID
endTime = int(doc.find(pattern).attrib["TIME_VALUE"])
endTimes = [endTime]
for i in range(1, rowCount):
endTimes.append(float('NaN'))
tbl_times = pd.DataFrame({"START": startTimes, "END": endTimes})
# print(tbl_times)
ids = [e.attrib["ANNOTATION_ID"] for e in lineElements]
tierInfo = []
text = []
for id in ids:
parentPattern = "*/*/*/[@ANNOTATION_ID='%s']/../.." % id
tierAttributes = doc.find(parentPattern).attrib
tierInfo.append(tierAttributes)
childPattern = "*/*/*/[@ANNOTATION_ID='%s']/ANNOTATION_VALUE" % id
elementText = doc.find(childPattern).text
if (elementText is None):
elementText = ""
# print("elementText: %s" % elementText)
text.append(elementText.strip())
tbl_tierInfo = | pd.DataFrame(tierInfo) | pandas.DataFrame |
import numpy as np
import pandas as pd
df = pd.read_csv('Data/world_population_and_projection.csv')
df = df[df.Year>=2000]
for c in df.columns.to_list()[1:]:
df[c] = df[c].str.replace(' ','')
df[c] = df[c].str.replace(',','')
df[c] = | pd.to_numeric(df[c]) | pandas.to_numeric |
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels import regression
import matplotlib.pyplot as plt
import requests
import re
import bs4
import akshare as ak
import pickle
from datetime import datetime
from datetime import date
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def cal_rsrs(data2, data1, N=16):
"""
    Compute the RSRS slope and regression R-squared for the security/date row in data2.
"""
data11 = data1[data1['sec_code'].isin(data2['sec_code'])].reset_index()
loc = data11[data11['tradeday'].isin(data2['tradeday'])].index.tolist()
if loc[0] >= N:
x = sm.add_constant(data11['low_slice'][(loc[0] - N):loc[0]]).astype('float64')
y = data11['high_slice'][(loc[0] - N):loc[0]].astype('float64')
est = sm.OLS(y, x)
res = est.fit()
params = res.params.low_slice
r_squared = res.rsquared
else:
params = None
r_squared = None
data2['rsrs'] = params
data2['r_squared'] = r_squared
return data2
def rsrs(data, N=16):
"""
    Compute the RSRS slope and R-squared from daily bar data.
"""
data = data.dropna()
data['tradeday'] = pd.to_datetime(data['tradeday'], format='%Y/%m/%d %H:%M:%S')
data.sort_values(by=['sec_code', 'tradeday'], inplace=True)
data1 = data.copy()
data1 = data1.groupby(['sec_code', 'tradeday']).apply(lambda x, data1=data1, N=N: cal_rsrs(x, data1, N))
print("RSRS斜率指标和拟合优度已计算")
return data1
def cal_rsrs_std(data2, data1, M=300, N=16):
"""
    Compute the RSRS standardized score for the security/date row in data2.
"""
data11 = data1[data1['sec_code'].isin(data2['sec_code'])].reset_index()
loc = data11[data11['tradeday'].isin(data2['tradeday'])].index.tolist()
# print(loc[0])
rsrs = data2['rsrs']
if loc[0] < M + N:
rsrs_std = None
else:
rsrs_mean = np.mean(data11['rsrs'][(loc[0] - M):loc[0]])
rsrs_s = np.std(data11['rsrs'][(loc[0] - M):loc[0]])
rsrs_std = (rsrs - rsrs_mean) / rsrs_s
data2['rsrs_std'] = rsrs_std
return data2
def rsrs_std(data1, M=300, N=16):
"""
    Compute the RSRS standardized score from the RSRS slope.
"""
data2 = data1.groupby(['sec_code', 'tradeday']).apply(lambda x, data1=data1, M=M, N=N: cal_rsrs_std(x, data1, M, N))
print("RSRS标准分已计算")
return data2
def rsrs_std_cor(data2):
"""
    Compute the RSRS corrected standardized score (R-squared weighted) from the standardized score.
"""
data2['rsrs_std_cor'] = data2['r_squared'] * data2['rsrs_std']
    print('RSRS corrected standardized score computed')
return data2
def rsrs_std_cor_right(data2):
"""
    Compute the RSRS right-skew corrected standardized score from the corrected standardized score.
"""
data2['rsrs_std_cor_right'] = data2['rsrs_std_cor'] * data2['rsrs']
    print('RSRS right-skew corrected standardized score computed')
return data2
def rsrs_std_cor_right_mean(data2, ndays=5):
"""
    Compute the ndays moving average of the RSRS right-skew corrected standardized score.
"""
    # groupby(...).rolling(...) yields a MultiIndex (sec_code, row) result; drop the group level so it aligns back onto data2
    data2['rsrs_std_cor_right_mean'] = (data2.groupby('sec_code')['rsrs_std_cor_right']
                                        .rolling(window=ndays).mean()
                                        .reset_index(level=0, drop=True))
# data2['rsrs_std_cor_right_mean'] = data2.groupby('sec_code')['rsrs_std_cor_right'].apply(pd.rolling_mean, ndays)
    print('Moving average of RSRS right-skew corrected standardized score computed')
return data2
def get_rsrs(data, N=16, M=300, ndays=5):
"""
    Compute all RSRS-related indicators from daily bar data.
    data: DataFrame containing the security code sec_code, trade date tradeday,
          high price high_slice, low price low_slice and close price close_slice
    N: regression window, default 16; rows with fewer than N prior observations are set to None
    M: standardization window, default 300; rows with fewer than M prior observations are set to None
"""
data1 = rsrs(data, N)
data2 = rsrs_std(data1, M, N)
data2 = rsrs_std_cor(data2)
data2 = rsrs_std_cor_right(data2)
data2 = rsrs_std_cor_right_mean(data2, ndays)
return data2
def get_signal(data2, S):
"""
    Determine trade signals from the RSRS indicator and threshold S (simplest case); trade_dir = 0 means buy, 1 means sell, -1 means no signal.
"""
data3 = data2.copy()
data3.loc[:,'trade_dir']=-1
data3.loc[(data3['rsrs_std_cor_right'] > S) & (data3['trade_dir'] == -1), 'trade_dir'] = 0
data3.loc[(data3['rsrs_std_cor_right'] > S) & (data3['trade_dir'] == 1), 'trade_dir'] = -1
data3.loc[(data3['rsrs_std_cor_right'] < -S) & (data3['trade_dir'] == -1), 'trade_dir'] = 1
data3.loc[(data3['rsrs_std_cor_right'] < -S) & (data3['trade_dir'] == 0), 'trade_dir'] = -1
# data3.drop(['rsrs','r_squared','rsrs_std','rsrs_std_cor','rsrs_std_cor_right'], axis = 1, inplace = True)
return data3
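# Hedged illustration (added; not in the original script): how the threshold rule in
# get_signal maps the right-skew corrected score onto trade_dir for synthetic values.
def _demo_get_signal():
    demo = pd.DataFrame({'rsrs_std_cor_right': [0.9, 0.1, -0.8, 0.75, -0.71]})
    # trade_dir becomes 0 (buy) for 0.9 and 0.75, 1 (sell) for -0.8 and -0.71, else -1
    return get_signal(demo, S=0.7)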
def RSRS(data, N=16, M=300, S=0.7, ndays=5):
"""
    Compute RSRS indicators from daily data, derive trade signals, update trade_dir, and drop intermediate columns
"""
data2 = get_rsrs(data, N, M, ndays)
data3 = get_signal(data2, S)
return data3
def single_stock_tradeback(stock_code,etf_kline,money,trade_pay_rate,start_date,end_date):
'''
    :param stock_code: str, security code, e.g. 'sz159966'
    :param etf_kline: dict, k-line data for all ETF funds
    :param money: float, initial purchase cost, commission included
    :param trade_pay_rate: float, commission rate, in [0, 1]
    :param start_date: date, back-test start date
    :param end_date: date, back-test end date
    :return: pandas.DataFrame containing the net value and related columns
'''
etf_kline_stock=etf_kline[stock_code]
etf_hold = etf_kline_stock[start_date:end_date]
etf_close = etf_hold['close']
etf_close_shift = etf_close.shift(1)
etf_delta = (etf_close - etf_close_shift) / etf_close_shift
etf_delta = etf_delta.drop(start_date)
etf_hold['incresing_rate'] = etf_delta
etf_hold.loc[start_date, 'incresing_rate'] = (etf_hold.loc[start_date, 'close'] - etf_hold.loc[
start_date, 'open']) / etf_hold.loc[start_date, 'open']
etf_hold['日增长倍数'] = etf_hold['incresing_rate'] + 1
etf_hold['净值倍数'] = etf_hold['日增长倍数'].cumprod()
money_after_trade=money* (1 - trade_pay_rate)
etf_hold['金额'] = etf_hold['净值倍数'] * money_after_trade
etf_hold.loc[start_date, '手续费'] = money * trade_pay_rate
etf_hold.loc[end_date, '手续费'] = etf_hold.loc[end_date, '金额'] * trade_pay_rate
etf_hold.loc[end_date, '卖出金额_手续费后'] = etf_hold.loc[end_date, '金额'] * (1 - trade_pay_rate)
return etf_hold,etf_hold.loc[end_date, '金额'] * (1 - trade_pay_rate)
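# Possible invocation of the back-test helper above (a sketch; the ticker, dates and
# commission rate are illustrative, and etf_kline is assumed to be the dict saved by etf_get()):
def _demo_tradeback():
    etf_kline = load_obj('etf_all')
    nav_df, final_value = single_stock_tradeback('sz159966', etf_kline, money=1.0,
                                                 trade_pay_rate=0.00015,
                                                 start_date=date(2019, 12, 27),
                                                 end_date=date(2020, 1, 6))
    return nav_df, final_value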
def etf_get():
etf_list = ak.fund_etf_category_sina(symbol="ETF基金")
res={}
res1 = {}
res2 = {}
res3 = {}
res4 = {}
res5 = {}
n = 1
for i in etf_list['symbol']:
if i == 'sh513200' or i == 'sh513150':
continue
print(i)
print(n)
fund_etf_hist_sina_df = ak.fund_etf_hist_sina(symbol=i)
fund_etf_hist_sina_df.set_index(['date'], inplace=True)
# ma12 = fund_em_etf_fund_info_df['单位净值'].rolling(window=5).mean()
close = fund_etf_hist_sina_df['close']
open_etf = fund_etf_hist_sina_df['open']
high = fund_etf_hist_sina_df['high']
low = fund_etf_hist_sina_df['low']
volume = fund_etf_hist_sina_df['volume']
        fund_etf_hist_sina_df = fund_etf_hist_sina_df.apply(pd.to_numeric).sort_index()
close = pd.to_numeric(close).sort_index()
open_etf = pd.to_numeric(open_etf).sort_index()
high = pd.to_numeric(high).sort_index()
low = pd.to_numeric(low).sort_index()
volume = pd.to_numeric(volume).sort_index()
fund_etf_hist_sina_df=fund_etf_hist_sina_df.sort_index()
close = close.sort_index()
open_etf = open_etf.sort_index()
high = high.sort_index()
low = low.sort_index()
volume = volume.sort_index()
# jinzi_delta=jinzi.shift(20)
# mtm_20=(jinzi-jinzi_delta)/jinzi
res[i]=fund_etf_hist_sina_df
res1[i] = close
res2[i] = open_etf
res3[i] = high
res4[i] = low
res5[i] = volume
n = n + 1
save_obj(res, 'etf_all')
# save_obj(res1, 'etf_close')
# save_obj(res2, 'etf_open')
# save_obj(res3, 'etf_high')
# save_obj(res4, 'etf_low')
# save_obj(res5, 'etf_volume')
def date_mtm():
money=1
trade_pay_rate=0.1#0.00015
money_after_trade=money*(1-trade_pay_rate)
etf_kline=ak.fund_etf_hist_sina('sz159966')
etf_kline.set_index(etf_kline['date'],inplace=True)
start_date=date(2019,12,27)
end_date=date(2020,1,6)
etf_close = load_obj('etf_close')
    etf_all = pd.concat(etf_close, axis=1)
import pandas as pd
from datetime import datetime
import numpy as np
import scipy.stats as ss
from sklearn import preprocessing
data_root = '/media/jyhkylin/本地磁盘1/study/数据挖掘竞赛/SMPCUP2017/'
post_data = pd.read_table(data_root+'SMPCUP2017dataset/2_Post.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
browse_data = pd.read_table(data_root+'SMPCUP2017dataset/3_Browse.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
comment_data = pd.read_table(data_root+'SMPCUP2017dataset/4_Comment.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
voteup_data = pd.read_table(data_root+'SMPCUP2017dataset/5_Vote-up.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
votedown_data = pd.read_table(data_root+'SMPCUP2017dataset/6_Vote-down.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
favorite_data = pd.read_table(data_root+'SMPCUP2017dataset/7_Favorite.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
import pandas as pd
from powersimdata.input import const
class AbstractGrid:
"""Grid Builder."""
def __init__(self):
"""Constructor"""
self.data_loc = None
self.interconnect = None
self.zone2id = {}
self.id2zone = {}
        self.sub = pd.DataFrame()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class RedRio:
def __init__(self,codigo = None,**kwargs):
        self.info = pd.Series()
"""
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
import csv
from io import BytesIO, StringIO
import pytest
from pandas.errors import ParserError
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
def test_default_separator(python_parser_only):
# see gh-17333
#
# csv.Sniffer in Python treats "o" as separator.
data = "aob\n1o2\n3o4"
parser = python_parser_only
expected = DataFrame({"a": [1, 3], "b": [2, 4]})
result = parser.read_csv(StringIO(data), sep=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("skipfooter", ["foo", 1.5, True])
def test_invalid_skipfooter_non_int(python_parser_only, skipfooter):
# see gh-15925 (comment)
data = "a\n1\n2"
parser = python_parser_only
msg = "skipfooter must be an integer"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=skipfooter)
def test_invalid_skipfooter_negative(python_parser_only):
# see gh-15925 (comment)
data = "a\n1\n2"
parser = python_parser_only
msg = "skipfooter cannot be negative"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=-1)
@pytest.mark.parametrize("kwargs", [dict(sep=None), dict(delimiter="|")])
def test_sniff_delimiter(python_parser_only, kwargs):
data = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_sniff_delimiter_encoding(python_parser_only, encoding):
parser = python_parser_only
data = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
if encoding is not None:
from io import TextIOWrapper
data = data.encode(encoding)
data = BytesIO(data)
data = TextIOWrapper(data, encoding=encoding)
else:
data = StringIO(data)
result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_single_line(python_parser_only):
# see gh-6607: sniff separator
parser = python_parser_only
result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(skipfooter=2), dict(nrows=3)])
def test_skipfooter(python_parser_only, kwargs):
# see gh-6607
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), **kwargs)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
)
def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
# see gh-6607
parser = python_parser_only
with open(csv1, "rb") as f:
data = f.read()
data = data.replace(b",", b"::")
expected = parser.read_csv(csv1)
module = pytest.importorskip(compression)
klass = getattr(module, klass)
with tm.ensure_clean() as path:
tmp = klass(path, mode="wb")
tmp.write(data)
tmp.close()
result = parser.read_csv(path, sep="::", compression=compression)
tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index(python_parser_only):
# see gh-6607
data = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
parser = python_parser_only
expected = DataFrame(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],
],
columns=["A", "B", "C", "D", "E"],
index=MultiIndex.from_tuples(
[("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)],
names=["one", "two", "three", "four"],
),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index2(python_parser_only):
# see gh-6893
data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9"
parser = python_parser_only
expected = DataFrame.from_records(
[(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list("abcABC"),
index=list("abc"),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("add_footer", [True, False])
def test_skipfooter_with_decimal(python_parser_only, add_footer):
# see gh-6971
data = "1#2\n3#4"
parser = python_parser_only
expected = DataFrame({"a": [1.2, 3.4]})
if add_footer:
# The stray footer line should not mess with the
# casting of the first two lines if we skip it.
kwargs = dict(skipfooter=1)
data += "\nFooter"
else:
kwargs = dict()
result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"]
)
@pytest.mark.parametrize(
"encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"]
)
def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):
# see gh-3404
expected = DataFrame({"a": [1], "b": [2]})
parser = python_parser_only
data = "1" + sep + "2"
encoded_data = data.encode(encoding)
result = parser.read_csv(
BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding
)
    tm.assert_frame_equal(result, expected)
#########################################################
### DNA variant annotation tool
### Version 1.0.0
### By <NAME>
### <EMAIL>
#########################################################
import pandas as pd
import numpy as np
import allel
import argparse
import subprocess
import sys
import os.path
import pickle
import requests
import json
def extract_most_deleterious_anno(row, num_ann_max):
ann_order = pd.read_csv(anno_order_file, sep=' ')
alt = row[:num_ann_max]
anno = row[num_ann_max:]
alt.index = range(0, len(alt))
anno.index = range(0, len(anno))
ann_all_alt = pd.DataFrame()
alt_unique = alt.unique()
for unique_alt in alt_unique:
if unique_alt != '':
anno_all = anno[alt == unique_alt]
ann_order_all = pd.DataFrame()
for ann_any in anno_all:
if sum(ann_any == ann_order.Anno) > 0:
ann_any_order = ann_order[ann_order.Anno == ann_any]
else:
ann_any_order = ann_order.iloc[ann_order.shape[0]-1]
ann_order_all = ann_order_all.append(ann_any_order)
small_ann = ann_order_all.sort_index(ascending=True).Anno.iloc[0]
ann_unique_alt = [unique_alt, small_ann]
ann_all_alt = ann_all_alt.append(ann_unique_alt)
ann_all_alt.index = range(0, ann_all_alt.shape[0])
return ann_all_alt.T
def run_snpeff(temp_out_name):
snpeff_command = ['java', '-Xmx4g', '-jar', snpeff_path, \
'-ud', '0', \
# '-v', \
'-canon', '-noStats', \
ref_genome, vcf_file]
temp_output = open(temp_out_name, 'w')
subprocess.run(snpeff_command, stdout=temp_output)
temp_output.close()
def get_max_num_ann(temp_out_name):
num_ann_guess = 500
callset = allel.vcf_to_dataframe(temp_out_name, fields='ANN', numbers={'ANN': num_ann_guess})
num_ann = callset.apply(lambda x: sum(x != ''), axis=1)
num_ann_max = num_ann.max() # num_ann_max = 175
return num_ann_max
def get_ann_from_output_snpeff(temp_out_name):
callset = allel.read_vcf(temp_out_name, fields='ANN', transformers=allel.ANNTransformer(), \
numbers={'ANN': num_ann_max})
    df1 = pd.DataFrame(data=callset['variants/ANN_Allele'])
import os, sys, shutil, click, subprocess, time, Bio, cova, pandas
# start clock
start = time.time()
### commands ###
@click.group(chain=True)
@click.version_option()
@click.option('--indr', help='Full path to the working directory', default=os.getcwd(), type=click.Path())
@click.option('--ncpu', help='Number of CPUs to use', default=4, show_default=True, type=int)
@click.option('--ref', help='Reference sequence accession', default=cova.REF, show_default=True)
@click.option('--debug', help='See full traceback for errors.', is_flag=True)
@click.option('--addseq', help='Add new sequences and redo analysis.', is_flag=True)
@click.pass_context
def cli(ctx,indr,ncpu,ref,debug,addseq):
"""
Variant analysis using whole-genome Multiple Sequence Alignments.
By default, it works on the current directory. Directory can be specified using INDR option.
New sequences can be added to update an existing analysis using --addseq.
"""
ctx.ensure_object(dict)
ctx.obj['DR'] = indr
ctx.obj['REF'] = ref
ctx.obj['NCPU'] = str(ncpu)
ctx.obj['ADDSEQ'] = addseq
# control traceback
if debug:
ctx.obj['DEBUG'] = debug
click.echo("Debug mode is ON.\n")
else:
sys.excepthook = lambda exctype,exc,traceback : print("{}: {}".format(exctype.__name__,exc))
click.echo("CoVa will run in the directory: {}\n".format(indr))
@cli.command()
@click.pass_context
@click.option('--prog', default='mafft',
help='''full path to MAFFT program''',show_default=True)
@click.option('--infile', default='genomes.fna', show_default=True)
@click.option('--outfile', default='genome_aln.fna', show_default=True)
@click.option('--mode', type=click.Choice(['standard','fast','ultra'],case_sensitive=False),default='standard',show_default=True)
def msabuild(ctx,prog,infile,outfile,mode):
"""Build whole-genome MSA."""
fin = os.path.join(ctx.obj['DR'],infile)
fout = os.path.join(ctx.obj['DR'],outfile)
# throw error if input file missing
if not os.path.exists(fin):
raise FileNotFoundError("couldn't read the input file %s."%fin)
if cova.utils.outcheck(fout):
# get number of sequences in the input
nseq = len(Bio.SeqIO.index( fin, 'fasta'))
print("%s: Input has %i sequences."%( cova.utils.timer(start), nseq))
# set path variable to find mafft
my_env = os.environ
if 'COVA_BIN_PATH' in my_env.keys():
my_env['PATH'] = ':'.join([ my_env['COVA_BIN_PATH'], my_env['PATH']])
# create commands for different run modes ( FAST / ULTRA / STANDARD)
# FAST
if mode == 'fast':
cmd = [prog, '--quiet', '--retree', '2', '--thread', ctx.obj['NCPU'], fin]
# ULTRA
elif mode == 'ultra':
# with ULTRA mode, a reference genome file is first placed in the project dir
fref = os.path.join(ctx.obj['DR'],'ref.fasta')
Bio.SeqIO.write(cova.GENOME,fref,'fasta')
cmd = [prog, '--quiet', '--auto', '--thread', ctx.obj['NCPU'], '--keeplength', '--addfragments', fin, fref]
# STANDARD
else:
cmd = [prog, '--quiet', '--nomemsave', '--maxiterate', '5', '--thread', ctx.obj['NCPU'], fin]
# run the MAFFT command created above
print("%s: Building MSA from %s in %s mode\n Command: %s,\n Output will be saved to %s"%(\
cova.utils.timer(start),fin,mode,' '.join(cmd),fout))
with open( fout,'w') as flob:
s1 = subprocess.run( cmd, stdout=flob, env=my_env)
# clean-up for ultra mode
if mode == 'ultra':
# remove reference file
if os.path.exists(fref):
os.remove(fref)
# remove additional reference seq from MSA
msa = Bio.AlignIO.read(fout, 'fasta')
msa = msa[1:,:]
Bio.AlignIO.write(msa, fout, 'fasta')
print("%s:\tMSABUILD is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--prog', default='mafft',
help='''full path to MAFFT program''',show_default=True)
@click.option('--inmsa', default='genome_aln.fna', show_default=True)
@click.option('--newseq', default='new_seq.fna', show_default=True)
@click.option('--oldcopy', default='old_genome_aln.fna', show_default=True)
def msad(ctx,prog,inmsa,newseq,oldcopy):
"""Add new sequence(s) to a pre-existing whole-genome MSA."""
fin1 = os.path.join(ctx.obj['DR'],inmsa)
fin2 = os.path.join(ctx.obj['DR'],newseq)
fout = fin1
fcopy = os.path.join(ctx.obj['DR'],oldcopy)
if not os.path.exists(fin1):
raise FileNotFoundError("couldn't read the input file %s."%fin1)
if not os.path.exists(fin2):
raise FileNotFoundError("couldn't read the input file %s."%fin2)
# set path variable to find mafft
my_env = os.environ
if 'COVA_BIN_PATH' in my_env.keys():
my_env['PATH'] = ':'.join([ my_env['COVA_BIN_PATH'], my_env['PATH']])
# first, copy the original file
print("Generating backup for the original MSA")
shutil.copy(src=fin1, dst=fcopy)
# then run the addseq command: FFT-NS-2
cmd = [prog, '--quiet', '--auto', '--thread', ctx.obj['NCPU'], '--addfragments', fin2, fcopy]
print("%s: Adding sequences from %s to %s,\n Command: %s,\n Output will be saved to %s"%(\
cova.utils.timer(start),fin2, fcopy,' '.join(cmd),fout))
# rewrite MSA file
with open( fout,'w') as flob:
s1 = subprocess.run( cmd, stdout=flob, env=my_env)
print("%s:\tMSAD is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='genome_aln.fna',show_default=True)
@click.option('--outfile',default='genome_aln_ref.fna',show_default=True)
def msaref(ctx,infile,outfile):
"""Limit MSA to sites present in the reference."""
fin = os.path.join(ctx.obj['DR'], infile)
fout = os.path.join(ctx.obj['DR'],outfile)
# throw error if input missing
if not os.path.exists(fin):
raise FileNotFoundError("couldn't read the input file %s."%fin)
# if output doesn't exist
if cova.utils.outcheck(fout):
# cova MSA object
msa = cova.utils.MSA(fname=fin,ref=ctx.obj['REF'])
print("{}: Generating reference limited MSA, Output will be saved to {}\n".format(cova.utils.timer(start),fout))
# use its method to limit MSA to a reference sequence
out = msa.limref()
# write output
Bio.AlignIO.write(alignments=[out], handle=fout, format='fasta')
print("%s:\tMSAREF is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile', default='genome_aln_ref.fna', show_default=True)
@click.option('--outfile1',default='genome_aln_unq.fna', show_default=True)
@click.option('--outfile2',default='genome_dups.tsv', show_default=True)
def msaunq( ctx, infile, outfile1, outfile2):
"""Remove duplicate sequences from reference-limited MSA."""
fin = os.path.join(ctx.obj['DR'], infile)
fout1 = os.path.join(ctx.obj['DR'],outfile1)
fout2 = os.path.join(ctx.obj['DR'],outfile2)
# throw error if input missing
if not os.path.exists(fin):
raise FileNotFoundError("couldn't read the input file %s."%fin)
if cova.utils.outcheck(fout1):
# cova MSA object
msa = cova.utils.MSA(fname=fin,ref=ctx.obj['REF'])
print("{}: Removing duplicate sequences from {}\n".format(cova.utils.timer(start),fout1))
out1, out2 = msa.rmdup()
# write alignment to output path
Bio.AlignIO.write(alignments=[out1], handle=fout1, format='fasta')
# write dataframe of genomes retained and their excluded duplicates
out2 = pandas.DataFrame(out2)
out2.to_csv(fout2,sep='\t',index=False,header=False)
print("%s:\tMSAUNQ is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='genome_aln_unq.fna',show_default=True)
@click.option('--typefile',default=None,help='Full path to the file with sequence types definition [Optional]')
@click.option('--outfile',default='genome_types.csv',show_default=True)
def seqtype( ctx, infile, typefile, outfile):
"""Identify sequence types."""
fin = os.path.join(ctx.obj['DR'], infile)
fout = os.path.join(ctx.obj['DR'], outfile)
if cova.utils.outcheck(fout):
print("{}: Sequence Typing genomes from {}".format(cova.utils.timer(start),fin))
# biopython multiple sequence alignment
msa = Bio.AlignIO.read(fin,'fasta')
# genomes and their sequence types
out = [ [k,v] for k,v in cova.genome_seqtype(msa,fst=typefile).items()]
out = sorted(out, key=lambda x: x[1])
# covert to df
out = pandas.DataFrame(out)
# write table to file
out.to_csv(fout,header=False,index=False)
print("%s:\t SEQTYPE is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='genome_aln_unq.fna',show_default=True)
@click.option('--outfp',default='point_mutations.tsv',show_default=True)
@click.option('--outfd',default='deletions.tsv',show_default=True)
def vcalpd(ctx,infile,outfp,outfd):
"""Call point mutations / deletions from Reference-limited MSA."""
fin = os.path.join(ctx.obj['DR'], infile)
fout1 = os.path.join(ctx.obj['DR'],outfp)
fout2 = os.path.join(ctx.obj['DR'],outfd)
# throw error if input missing
if not os.path.exists(fin):
raise FileNotFoundError("couldn't read the input file %s."%fin)
msa = cova.utils.MSA(fname=fin,ref=ctx.obj['REF'])
# point mutations
if cova.utils.outcheck(fout1):
print("{}: Calling point mutations from {}\n".format(cova.utils.timer(start),fin))
tab1 = msa.pointmuts()
tab1.to_csv(fout1,sep='\t',index=False)
# deletions
if cova.utils.outcheck(fout2):
print("{}: Calling deletions from {}\n".format(cova.utils.timer(start),fin))
tab2 = msa.dels()
tab2.to_csv(fout2,sep='\t',index=False)
print("%s:\t VCALPD is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile1',default='point_mutations.tsv',show_default=True)
@click.option('--outfile1',default='point_mutations_ann.tsv',show_default=True)
@click.option('--infile2',default='deletions.tsv',show_default=True)
@click.option('--outfile2',default='deletions_ann.tsv',show_default=True)
def anvpd(ctx,infile1,outfile1,infile2,outfile2):
"""Annotate point mutations and deletions located within protein regions."""
fin1 = os.path.join(ctx.obj['DR'], infile1)
fout1 = os.path.join(ctx.obj['DR'],outfile1)
fin2 = os.path.join(ctx.obj['DR'], infile2)
fout2 = os.path.join(ctx.obj['DR'],outfile2)
# throw error if input missing
if not os.path.exists(fin1):
raise FileNotFoundError("couldn't read the input file %s."%fin1)
if not os.path.exists(fin2):
raise FileNotFoundError("couldn't read the input file %s."%fin2)
# annotate point mutations
if cova.utils.outcheck(fout1):
print("%s: Annotating point mutations located within protein regions"%cova.utils.timer(start))
cova._annotator.annotate_pm(fin1,fout1)
# annotate deletions
if cova.utils.outcheck(fout2):
print("%s: Annotating deletions located within protein regions"%cova.utils.timer(start))
cova._annotator.annotate_del(fin2,fout2)
print("%s:\t ANVPD is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='point_mutations_ann.tsv',show_default=True)
@click.option('--outfile',default='genome_vars.tsv',show_default=True)
def nsvar( ctx, infile, outfile):
"""Get shared and unique non-synonymous variants for genomes."""
fin = os.path.join(ctx.obj['DR'], infile)
fout = os.path.join(ctx.obj['DR'], outfile)
if cova.utils.outcheck(fout):
print("%s: Identifying shared and unique variants"%cova.utils.timer(start))
# dataframe of annotated variants
van = pandas.read_csv(fin,sep='\t')
# dataframe of shared and unique variants
out = cova.genome_var(van)
# write to output path
out.to_csv(fout,sep='\t',index_label='id')
print("%s:\t NSVAR is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='genome_vars.tsv',show_default=True)
@click.option('--outfile',default='genome_w_stopm.tsv',show_default=True)
@click.option('--inmsafile',default='genome_aln_unq.fna',show_default=True)
@click.option('--outmsafile',default='genome_aln_sf.fna',show_default=True)
def rmstop(ctx,infile,outfile,inmsafile,outmsafile):
"""Remove genomes with non-sense mutations."""
## paths
fin = os.path.join(ctx.obj['DR'], infile)
fout = os.path.join(ctx.obj['DR'], outfile)
finmsa = os.path.join(ctx.obj['DR'], inmsafile)
foutmsa = os.path.join(ctx.obj['DR'], outmsafile)
# shall we proceed in case output is present?
if cova.utils.outcheck(fout):
# table of variants
vtab = pandas.read_csv(fin,sep='\t',index_col=0)
print("%s: Identifying and removing genomes with non-sense mutations."%cova.utils.timer(start))
# dataframe of genomes with nonsense variants
out = cova.rm_genome_w_stopm(vtab)
# write dataframe to output path
out.to_csv(fout, sep='\t',header=False)
# input alignment
aln = Bio.AlignIO.read(finmsa,'fasta')
# list of sequence records with no nonsense mutation
alnls = [ i for i in aln if i.id not in out.index]
# write alignment to output path
Bio.SeqIO.write(alnls,foutmsa,'fasta')
print("%s:\t RMSTOP is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='genome_aln_sf.fna',show_default=True)
@click.option('--outdr',default='prots_nmsa',show_default=True)
def msap(ctx,infile,outdr):
"""Extract nucleotide MSA of proteins from Reference-limited MSA."""
fin = os.path.join(ctx.obj['DR'], infile)
dout = os.path.join(ctx.obj['DR'], outdr)
# throw error if input missing
if not os.path.exists(fin):
raise FileNotFoundError("couldn't read the input file %s."%fin)
# create output directory, if not already present
if not os.path.exists(dout):
os.mkdir(dout)
print("%s was not already present. Created now."%outdr)
print('''%s: Extracting nucleotide MSA of proteins from Reference-limited MSA,
Outputs will be saved to %s\n.'''%(cova.utils.timer(start),dout))
cova.extract_nmsa_prots(fmsa=fin, dr=dout)
print("%s:\t MSAP is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='genome_aln.fna',show_default=True)
@click.option('--outfile',default='insertions.tsv',show_default=True)
def vcali(ctx,infile,outfile):
"""Call insertions from MSA."""
fin = os.path.join(ctx.obj['DR'], infile)
fout = os.path.join(ctx.obj['DR'],outfile)
# throw error if input missing
if not os.path.exists(fin):
raise FileNotFoundError("couldn't read the input file %s."%fin)
if cova.utils.outcheck(fout):
msa = cova.utils.MSA(fname=fin,ref=ctx.obj['REF'])
print("%s: Calling insertions from MSA , Output will be saved to %s\n"%(cova.utils.timer(start),fout))
tab = msa.ins()
tab.to_csv(fout,sep='\t',index_label='pos')
print("%s:\t VCALI is done."%cova.utils.timer(start))
@cli.command()
@click.pass_context
@click.option('--infile',default='genome_aln_sf.fna',show_default=True)
@click.option('--indr',default='prots_nmsa',show_default=True)
@click.option('--window',default=300,show_default=True,type=int)
@click.option('--jump',default=20,show_default=True,type=int)
@click.option('--outfile1',default='divs.csv',show_default=True)
@click.option('--outfile2',default='slide_divs.csv',show_default=True)
@click.option('--slide',is_flag=True,help='Should we calculate sliding diversity?')
def div(ctx,infile,indr,window,jump,outfile1,outfile2,slide):
"""Compute nucleotide diversity from Reference-limited MSA."""
fin = os.path.join(ctx.obj['DR'], infile)
din = os.path.join(ctx.obj['DR'], indr)
fout1 = os.path.join(ctx.obj['DR'],outfile1)
fout2 = os.path.join(ctx.obj['DR'],outfile2)
ncpu = int(ctx.obj['NCPU'])
# throw error if input missing
if not os.path.exists(fin):
raise FileNotFoundError("couldn't read the input file %s."%fin)
# load alignment as cova MSA object
msa = cova.utils.MSA(fname=fin)
if cova.utils.outcheck(fout1):
print('''%s: Computing diversity for whole-genome and peptide-encoding regions.
Output will be saved to %s\n'''%(cova.utils.timer(start),fout1))
wndiv = msa.ndiv()
fpmsas = [ i for i in os.listdir(din) if i.endswith('.msa')]
pndivs = [ [ i.replace('.msa',''), cova.utils.MSA(os.path.join(din,i)).ndiv()] for i in fpmsas]
pndivs = [ i for i in pndivs if i[1] is not None]
pndivs = sorted( pndivs, key=lambda x: x[1], reverse=True)
out1 = [ ['genome', wndiv] ] + pndivs
# save dataframe to output file
		out1 = pandas.DataFrame(out1)
import numpy as np
import pandas as pd
from conversion import Conversion
obj = Conversion()
obj.convertFilesIntoCSV('ham')
obj.convertFilesIntoCSV('spam')
ham = pd.read_csv('ham.csv')
spam = pd.read_csv('spam.csv')
Class =[]
Content = []
for i in range(ham.shape[0]):
Class.append('ham')
Content.append(ham['Message'][i])
for i in range(spam.shape[0]):
Class.append('spam')
Content.append(spam['Message'][i])
data = {'Message': Content, 'Class': Class}
data = pd.DataFrame(data)
data = data.iloc[np.random.permutation(len(data))] #mixing spam and ham messages
#splitting into train and test data
from sklearn.model_selection import train_test_split
train, test = train_test_split(data,test_size= 0.2,random_state =42)
prediction_ans = test['Class']
prediction_ans = {'Class': prediction_ans}
prediction_ans = pd.DataFrame(prediction_ans)
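# One possible continuation (a sketch, not necessarily the original author's plan):
# turn the messages into bag-of-words counts and fit a simple Naive Bayes classifier.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
vectorizer = CountVectorizer(stop_words='english')
X_train = vectorizer.fit_transform(train['Message'].astype(str))
X_test = vectorizer.transform(test['Message'].astype(str))
clf = MultinomialNB()
clf.fit(X_train, train['Class'])
print('accuracy:', accuracy_score(prediction_ans['Class'], clf.predict(X_test)))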
"""
Run principal component analysis on baseball.txt
"""
import pandas as pd
from src.pca import pc_analyze
from src.read_data import read_file
if __name__ == "__main__":
    # Read the input file
res = read_file('output/baseball.txt', 'UTF-8')
    data = pd.DataFrame(res[1:], columns=res[0])
from pandas import DataFrame, Series
import numpy as np
def create_dataframe():
'''
Creates a pandas dataframe called 'olympic_medal_count_df' containing the data from
the table of 2014 Sochi winter olympics medal count.
'''
countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
'Netherlands', 'Germany', 'Switzerland', 'Belarus',
'Austria', 'France', 'Poland', 'China', 'Korea',
'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]
# initialize the dictionary
    dict = {'country_name': Series(countries),
            'gold': Series(gold),
            'silver': Series(silver),
            'bronze': Series(bronze)}
    olympic_medal_count_df = DataFrame(dict)
    return olympic_medal_count_df
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
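## A minimal sketch (illustrative only; not the module's actual implementation) of the
## Q('id', data) idea from the docstring above: walk a jsonable structure and yield
## every value stored under the given key, e.g. list(_q_sketch('id', ex1)) == ['hello', 'gbye'].
def _q_sketch(key, obj):
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                yield v
            for hit in _q_sketch(key, v):
                yield hit
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            for hit in _q_sketch(key, item):
                yield hit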
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------
# Penji OpDev Fall 2019
# GS Run Wrapper
# Author: <NAME>
# Updated:
# ------------------------
# General
import os
import argparse
import pandas as pd
# For Google Sheets
import pygsheets
# Local
import core.utils as utils
from core import logger
import core.gs_api_utils as gs_api_
from core.config import cfg
from core.gs_parent import GoogleSheetsParent
class GoogleSheetsPullFromArchive(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
# Ensure all files are created and saved properly
self._setup_all()
self.df_ct_prof = self._load('ct_prof')
self.df_ct_courses = self._load('ct_courses')
self.df_arch_prof = self._load('archive_prof')
self.df_arch_courses = self._load('archive_courses')
def run(self, *args, **kwargs):
# Pull data for prof file
self.df_ct_prof = self.df_ct_prof.apply(self.pull_arch_prof, axis=1)
# Pull data for courses file
#self.df_ct_courses = self.df_ct_courses.apply(self.pull_arch_courses, axis=1)
print(self.df_ct_prof)
if self.args.save:
self._save(self.df_ct_prof, 'ct_prof')
self._save(self.df_ct_courses, 'ct_courses')
def pull_arch_prof(self, row):
try:
for ir in self.df_arch_prof.itertuples():
if ir[1] == row['Full Name'] and '@' in str(ir[5]):
print(ir[1], ir[5])
row['Email'] = ir[5]
row['Previous Response'] = ir[6]
row['Term Last Sent'] = ir[7]
break
except:
logger.warn(f'Empty Archive Professor CSV')
return row
def pull_arch_courses(self, row):
try:
for ir in self.df_arch_courses.itertuples():
if ir[1] == row['Course Code'] and not pd.isna(ir[4]):
print(ir[1], ir[4])
row['Archive Demand In'] = ir[4]
break
except:
logger.warn(f'Empty Archive Course CSV')
return row
class GoogleSheetsPrep(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
self.df_ct = self._load('ct')
self.df_ct_prof = self._load('ct_prof')
self.df_ct_courses = self._load('ct_courses')
self.df_arch_courses = self._load('archive_courses')
def run(self, *args, **kwargs):
""" Sets up Professor df for google sheets upload
Needs demand and ranking in order to deduct desired course
Professor Row Reference #s
"""
# Process Current term CSV: Demand, Ranking, Professor Row #
self.df_ct = self.df_ct.apply(self.process_cur_term_csv, axis=1)
# Process Professor CSV: Demand, Ranking, Professor Row #
self.df_ct_prof = self.df_ct_prof.apply(self.process_prof_courses, axis=1)
# Clear out those temporary values
self.df_ct = self.df_ct.apply(self.clear_temp_values, axis=1)
if self.args.save:
self._save(self.df_ct, 'ct')
self._save(self.df_ct_prof, 'ct_prof')
else:
print(self.df_ct)
print(self.df_ct_prof)
def clear_temp_values(self, row):
row['Demand'], row['Ranking'] = None, None
return row
def process_cur_term_csv(self, row):
# Term Sheet: Demand Column
demand = 3 # Default
try:
for ir in self.df_ct_courses.itertuples():
if ir[1] == row['Course Code'] and not pd.isna(ir[6]):
print(ir[1], ir[6])
demand = ir[6]
break
except:
logger.warn(f'Empty Archive Course CSV')
ranking = demand + (row['# Students'] / 100)
# Term Sheet: Professor Row Reference #
row_references = []
if isinstance(row['Professor'], str):
prof_names_in = row['Professor'].split(', ')
for ir in self.df_ct_prof.itertuples():
[row_references.append(ir[0]+2) for name in prof_names_in if ir[1] == name]
assert len(prof_names_in) == len(row_references), \
f'ERROR: prof names {prof_names_in} != {row_references} row references'
row['Demand'], row['Ranking'], row['Professor Row #'] = demand, ranking, row_references
return row
def process_prof_courses(self, row):
# Professor Sheet: All Courses
# Don't select a class if no email available
all_courses = [] # (None, None, 0) # Course Code, Course Row #, Ranking
best_course = (None, None, 0)
if '@' in str(row['Email']):
prof_name = row['<NAME>']
for ir in self.df_ct.itertuples():
if ir[15] and str(row.name+2) in str(ir[15])[1:-1].split(', '):
all_courses.append((ir[1], ir[0]+2, ir[11]))
if all_courses:
# Find their course with the highest ranking
for course in all_courses:
if course[2] > best_course[2]:
best_course = course
else:
all_courses = None
row['Desired Course Code'] = best_course[0]
row['Desired Course Row #'] = int(best_course[1]) if best_course[1] else best_course[1]
row['All Courses'] = all_courses
return row
class GoogleSheetsUpload(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
self.status_arr = ['No', 'Sent for different course', 'Match Error', 'Awaiting Response', 'Yes']
def run(self):
# Create a new sheet in folder
sh = self._connect_google_sheet()
# Make sure the sheets are setup properly
gs_df_arr = self._load_all()
self.num_wks = len(gs_df_arr)
# TODO: Professor row Reference #s add 2
# Find number of rows and columns for each
shapes = []
setup_formulas = [self.setup_term_formulas, self.setup_professor_formulas, self.setup_course_formulas,
None, None, None, None, None, self.setup_arch_course_formulas]
for idx in range(len(gs_df_arr)):
# load csv as pd df and upload it
gs_df = gs_df_arr[idx]
shapes.append(gs_df.shape)
# Create new sheets
if self.reset_gs:
wks = sh.add_worksheet(self.files[self.file_keys[idx]][1], rows=shapes[idx][0]+10, cols=shapes[idx][1], index=idx)
if idx == 0:
sh.del_worksheet(sh.worksheet_by_title('Sheet1'))
else:
wks = sh[idx]
# Upload the data
if self.args.data:
wks.set_dataframe(gs_df, (1, 1))
wks.replace('NaN', '')
# Add The Formulas
if self.args.formulas and setup_formulas[idx]:
term = self.pterm if idx in (3,4,5) else self.cterm
setup_formulas[idx](wks, term)
if self.args.format:
self.format_sheet(sh, shapes)
def format_sheet(self, sh, shapes):
# Format Tutor Columns
gs_api_.format_tutor_col(sh=sh, wks=sh[0], shape=shapes[0], col_idx=10) # Current Term
gs_api_.format_tutor_col(sh=sh, wks=sh[2], shape=shapes[2], col_idx=7) # Current Courses
gs_api_.format_tutor_col(sh=sh, wks=sh[3], shape=shapes[3], col_idx=10) # Prev Term
gs_api_.format_tutor_col(sh=sh, wks=sh[5], shape=shapes[5], col_idx=7) # Prev Courses
gs_api_.format_tutor_col(sh=sh, wks=sh[8], shape=shapes[8], col_idx=6) # Archive Courses
# Freeze first row of each wks
[gs_api_.freeze_row(sh=sh, wks=sh[i]) for i in range(self.num_wks)]
# Headers of editable columns: Add blue background
editable_col_cells = [sh[1].cell('G1'), sh[1].cell('H1'), sh[1].cell('I1'),
sh[1].cell('J1'), sh[1].cell('K1'), sh[1].cell('L1'), sh[2].cell('E1'),
sh[4].cell('G1'), sh[4].cell('H1'), sh[4].cell('I1'),
sh[4].cell('J1'), sh[4].cell('K1'), sh[4].cell('L1'), sh[5].cell('E1')]
for cell in editable_col_cells:
cell.color = (207/255, 226/255, 243/255, 1.0)
tutors_range = sh[6].get_values('A1', 'O1', returnas='range')
for cell in tutors_range[0]:
cell.color = (207/255, 226/255, 243/255, 1.0)
# All Headers: Set Bold
# Current Term
[cell.set_text_format('bold', True) for cell in sh[0].get_values('A1', 'Q1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[1].get_values('A1', 'P1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[2].get_values('A1', 'G1', returnas='range')[0]]
# Previous Term
[cell.set_text_format('bold', True) for cell in sh[3].get_values('A1', 'Q1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[4].get_values('A1', 'P1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[5].get_values('A1', 'G1', returnas='range')[0]]
# Tutors & Archive
[cell.set_text_format('bold', True) for cell in sh[6].get_values('A1', 'O1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[7].get_values('A1', 'G1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[8].get_values('A1', 'F1', returnas='range')[0]]
# Format Status Column
gs_api_.format_status_col(sh=sh, wks=sh[0], shape=shapes[0], col_idx=17, stat_arr=self.status_arr)
gs_api_.format_status_col(sh=sh, wks=sh[3], shape=shapes[3], col_idx=17, stat_arr=self.status_arr)
def setup_term_formulas(self, wks, term):
# Demand
wks.cell('B1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"Demand", VLOOKUP(A1:A, ' + f"'Courses {term[2]}'" + '!$A:$D, 4, FALSE)))'
# Previous Response
wks.cell('H1').formula = 'ArrayFormula(IF(ROW(C:C)=1,"Previous Response",IF(ISBLANK(G1:G), "", ' \
'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + '!$A:$F, 6, False))))'
# # Tutors
wks.cell('J1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", COUNTIFS(' \
f'Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&C1:C&D1:D&"*", Tutors!I:I, "TRUE", Tutors!J:J,"YES"))))'
# Ranking
wks.cell('K1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"Ranking", IF(ISBLANK(A1:A), "", B1:B+(I1:I/100))))'
# Course Status: color coded professor info
self.status_arr = stat = ['No', 'Sent for different course', 'Match Error', 'Awaiting Response', 'Yes']
wks.cell('Q1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"Status", IF(ISBLANK(A1:A), "", ' \
f'IFERROR(IF((O1:O="[]") + (VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 14, False) = "") > 0, "{stat[2]}", ' \
f'IFERROR(IFS(VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 9, False)="No", "{stat[0]}",' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 9, False)="Yes", "{stat[4]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 8, False)="No", "{stat[0]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 8, False)="Yes", "{stat[4]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 6, False)="No", "{stat[0]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 6, False)="Yes", "{stat[4]}" ), ' \
f'IF(NE(A1:A, VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 14, False)), "{stat[1]}", ' \
f'IF(VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 12, False)="Fall 19","{stat[3]}",)))),"{stat[2]}" ))))'
def setup_professor_formulas(self, wks, term):
# Previous Response
# To Send
wks.cell('M1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"To Send",IF(ISBLANK(A:A),"", ' \
'IF(RegExMatch(E1:E,"@"), ' \
'IFERROR(' \
'IFS(L1:L="Fall 19", "No",F1:F="No", "No",H1:H="No", "No",I1:I="No", "No"),' \
' "Yes"), "No"))))'
def setup_course_formulas(self, wks, term):
# Demand out
wks.cell('D1').formula = 'ArrayFormula(IF(ROW(F:F)=1,"Demand Out", IFS(' \
'IF((F1:F), F1:F+E1:E, 3+E1:E)>5, 5, ' \
'IF((F1:F), F1:F+E1:E, 3+E1:E)<0, 0, ' \
'IF((F1:F), F1:F+E1:E, 3+E1:E)<5, IF((F1:F), F1:F+E1:E, 3+E1:E))))'
# Demand in
wks.cell('F1').formula = 'ArrayFormula(IF(ROW(E:E)=1,"Archive Demand In", ' \
'IFERROR(VLOOKUP(A1:A, '+"'Spring 19'"+'!$A:$B, 2, FALSE), )))'
# # Tutors
wks.cell('G1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", ' \
f'COUNTIFS(Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&SUBSTITUTE(A1:A," ","")&"*", ' \
f'Tutors!I:I, "TRUE", Tutors!J:J, "YES"))))'
def setup_arch_course_formulas(self, wks, term):
# # Tutors
wks.cell('F1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", ' \
f'COUNTIFS(Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&SUBSTITUTE(A1:A," ","")&"*", ' \
f'Tutors!I:I, "TRUE", Tutors!J:J, "YES"))))'
class GoogleSheetsUploadStudentOrgs(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
def run(self):
# Create a new sheet in folder
sh = self._connect_google_sheet(sheet_name_in=f'{self.school_config.NICE_NAME} Student Orgs')
gs_df = self._load(file_name_key='student_orgs')
shape = gs_df.shape
if self.reset_gs:
wks = sh.add_worksheet(self.files['student_orgs'][1], rows=shape[0] + 10, cols=shape[1], index=0)
sh.del_worksheet(sh.worksheet_by_title('Sheet1'))
else:
wks = sh[0]
# Upload the data
if self.args.data:
wks.set_dataframe(gs_df, (1, 1))
wks.replace('NaN', '')
if self.args.format:
#self.format_sheet(sh, shape)
[cell.set_text_format('bold', True) for cell in sh[0].get_values('A1', 'C1', returnas='range')[0]]
class GoogleSheetsDownload:
def __init__(self, args):
self.args = args
def run(self):
""" Pulls Data From GS Previous Term and saves to proper csv format
"""
config = cfg[self.args.school.upper()]
sheet_name = f'{config.NICE_NAME} Class List' # f'{config.NICE_NAME} Course List'
gc = pygsheets.authorize(service_file='core/credentials/penji_dev_key.json')
try:
sh = gc.open(sheet_name)
logger.info(f'Found {sheet_name} in google drive, downloading sheet')
except pygsheets.exceptions.SpreadsheetNotFound:
logger.error(f'Could not find {sheet_name} in google drive')
return
self.download_tutor_csv(sh)
df = self.download_prev_term_csv(sh)
df.rename(columns={'Class': 'Course Code'}, inplace=True)
print(df.head())
# prof_df = self.cu_prof_setup(df)
#
# course_df = self.cu_courses_setup(df)
#
# self.add_prev_term_to_archive()
#
# gs_utils.save(gs_df=prof_df, term=cfg.GENERAL.PREV_TERM, school=self.args.school,
# sheet_idx=2, test=self.args.test)
# gs_utils.save(gs_df=course_df, term=cfg.GENERAL.PREV_TERM, school=self.args.school,
# sheet_idx=3, test=self.args.test)
self.add_prev_term_to_archive(df)
def download_tutor_csv(self, sh):
wks = sh.worksheet_by_title("Tutors")
tutors_df = wks.get_as_df()
tutors_df = tutors_df[tutors_df['Date Added'] != '']
tutors_df = tutors_df.drop('', axis=1)
self._save(gs_df=tutors_df, file_name=f'data/{self.args.school}/archive/gs_archive_{self.args.school}_6.csv')
def download_prev_term_csv(self, sh):
wks = sh.worksheet_by_title("Spring '19")
df = wks.get_as_df()
df = df[df['Course'] != '']
df = df.drop('', axis=1)
gs_utils.save(gs_df=df, term=cfg.GENERAL.PREV_TERM, school=self.args.school,
sheet_idx=0, test=self.args.test)
return df
def add_prev_term_to_archive(self, term_df):
file_names = (f'data/{self.args.school}/archive/gs_archive_{self.args.school}_7.csv',
f'data/{self.args.school}/archive/gs_archive_{self.args.school}_8.csv')
arch_prof_df, arch_course_df = self.load_archive_files(file_names)
ca_data = {key: [] for key in cfg.GENERAL.WKS_COLUMNS['Course Archive']}
pr_data = {key: [] for key in cfg.GENERAL.WKS_COLUMNS['Professor Archive']}
for idx, col in term_df.iterrows():
if term_df['Course Code'][idx] not in ca_data['Course Code']:
ca_data['Course Code'].append(term_df['Course Code'][idx])
ca_data['Name'].append(term_df['Name'][idx])
ca_data['Title'].append(term_df['Title'][idx])
ca_data['Previous Demand'].append(term_df['Demand'][idx])
ca_data['Term Last Updated'].append('Spring 19')
if term_df['Professor'][idx] not in pr_data['Full Name']:
pr_data['Full Name'].append(term_df['Professor'][idx])
pr_data['First Name'].append(term_df['First Name'][idx])
pr_data['Last Name'].append(term_df['Last Name'][idx])
# Find Previous Response : Previous response, Pre-approval status, LHP
prev_response = (' ', term_df['Previous Response'][idx], term_df['Pre-approval status'][idx],
term_df['LHP Response (yes/no)'][idx])
cons_response = [resp for resp in prev_response if resp != '']
pr_data['Previous Response'].append(cons_response[-1])
pr_data['Term Last Sent'].append('Spring 19')
num_rows = len(ca_data['Course Code'])
gs_ca_data = {key: ([None] * num_rows if lst == [] else lst) for key, lst in ca_data.items()}
courses_df = pd.DataFrame(data=gs_ca_data, columns=cfg.GENERAL.WKS_COLUMNS['Course Archive'])
num_rows_2 = len(pr_data['Full Name'])
gs_pr_data = {key: ([None] * num_rows_2 if lst == [] else lst) for key, lst in pr_data.items()}
        prof_df = pd.DataFrame(data=gs_pr_data, columns=cfg.GENERAL.WKS_COLUMNS['Professor Archive'])
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert | Timedelta("1000s") | pandas.Timedelta |
"""
.. module:: momentum
:synopsis: Momentum Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin
class Heikin_Ashi(IndicatorMixin):
def __init__(self,close: pd.Series, high: pd.Series, low: pd.Series, open: pd.Series, n: int =2, fillna: bool = False):
print(type(close))
self.frame = pd.concat([close,open,high,low],axis=1)
self._n = n
self._fillna = fillna
self._run()
def _run(self):
self._ha_close = (self.frame.Close + self.frame.High + self.frame.Low + self.frame.Open)/4
self._ha_open = (self.frame.Open.shift(1) + self.frame.Close.shift(1)) / 2
self._ha_high = (self.frame[['Open','Close','High','Low']]).max(axis=1)
self._ha_low = (self.frame[['Open','Close','High','Low']]).min(axis=1)
def ha_close(self) -> pd.Series:
"""Heinki-Ashi Indicator Close
Returns:
pandas.Series: New feature generated.
"""
ha_close = self._check_fillna(self._ha_close, value=0)
return pd.Series(ha_close, name='ha_close')
def ha_open(self) -> pd.Series:
"""Heinki-Ashi Indicator Open
Returns:
pandas.Series: New feature generated.
"""
ha_open = self._check_fillna(self._ha_open, value=0)
return pd.Series(ha_open, name='ha_open')
def ha_high(self) -> pd.Series:
"""Heinki-Ashi Indicator High
Returns:
pandas.Series: New feature generated.
"""
ha_high = self._check_fillna(self._ha_high, value=0)
return pd.Series(ha_high, name='ha_high')
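# A minimal usage sketch for the Heikin_Ashi indicator above. The OHLC values
# below are made-up sample data; the input Series must be named 'Close',
# 'High', 'Low' and 'Open' because _run() accesses those attribute names on
# the concatenated frame, and the ta package must be installed to provide
# IndicatorMixin._check_fillna.
if __name__ == "__main__":
    sample = pd.DataFrame(
        {
            "Open": [10.0, 10.5, 10.2, 10.8],
            "High": [10.6, 10.9, 10.7, 11.0],
            "Low": [9.8, 10.1, 10.0, 10.5],
            "Close": [10.5, 10.2, 10.6, 10.9],
        }
    )
    ha = Heikin_Ashi(
        close=sample["Close"], high=sample["High"],
        low=sample["Low"], open=sample["Open"],
    )
    print(ha.ha_close())  # average of the four prices per bar
    print(ha.ha_open())   # first value is NaN because of the shift(1)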
from os import listdir
from os.path import join
import pandas as pd
from .base import get_data_dir
from .base import get_quantized_data_path
from ..preprocess import (
get_relevant_queries,
quantize_mfccs,
preprocess_tags,
preprocess_queries,
)
FREESOUND_DIR = 'freesound'
SOUNDS = 'sounds.json'
TAGS_FILE = 'preprocessed_tags.csv'
QUERIES = 'queries.csv'
PREPROCESSED_QUERIES = 'preprocessed_queries.csv'
FS_DL_PAGE = ('https://www.kaggle.com/dschwertfeger/freesound/')
def load_freesound(codebook_size, data_home=None, **kwargs):
r"""Loader for the Freesound dataset [4]_.
.. warning::
You need to `download the Freesound dataset from Kaggle
<https://www.kaggle.com/dschwertfeger/freesound/>`_ and unpack it into
your home directory or the directory specified as ``data_home`` for
this loader to work.
This dataset consists of 227,085 sounds. Each sound is at most 30 seconds
long and annotated with tags from a tag vocabulary of 3,466 tags. The
sounds' original tags were provided by the users who uploaded the sounds
to `Freesound <http://www.freesound.org>`_. The more than 50,000
original tags were preprocessed to form a tag vocabulary of 3,466 tags.
Parameters
----------
codebook_size : int, 512, 1024, 2048, 4096
The codebook size. The dataset is pre-encoded with codebook sizes of
512, 1024, 2048, and 4096. If you want to experiment with other
codebook-sizes, you need to download the original MFCCs, append the
first-order and second-order derivatives and quantize the resulting
frame-vectors specifying the desired ``codebook_size`` using
:func:`cbar.preprocess.quantize_mfccs`.
data_home : optional
Specify a home folder for the Freesound datasets. By default (``None``)
the files are expected to be in ``~/cbar_data/freesound/``, where
``cbar_data`` is the ``data_home`` directory.
Returns
-------
X : pd.DataFrame, shape = [227085, codebook_size]
Each row corresponds to a preprocessed sound, represented as a sparse
codebook vector.
Y : pd.DataFrame, shape = [227085,]
Tags associated with each sound provided as a list of strings. Use
:func:`sklearn.preprocessing.MultiLabelBinarizer` to transform tags
into binary indicator format.
References
----------
.. [4] <NAME>, <NAME>, and <NAME>, `Freesound technical demo
<http://mtg.upf.edu/node/2797>`_ 2013, pp. 411-412.
"""
data_dir = get_data_dir(data_home, FREESOUND_DIR)
data_path = get_quantized_data_path(data_dir, codebook_size)
try:
acoustic = pd.read_pickle(data_path)
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.study_manager.study_manager import StudyManager
from numpy import array
import pandas as pd
import itertools
import numpy as np
class Study(StudyManager):
def __init__(self, execution_engine=None):
super().__init__(__file__, execution_engine=execution_engine)
def setup_usecase(self):
ns = self.study_name
sc_name = 'DoE_Eval'
input_selection_ABC = {'selected_input': [True, True, True],
'full_name': ['stat_A', 'stat_B', 'stat_C']}
input_selection_ABC = pd.DataFrame(input_selection_ABC)
#
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
from typing import IO, Dict, List, Optional, Union
import numpy as np
import pandas
from dateutil.relativedelta import relativedelta
from monkq.assets.const import SIDE
from monkq.config.global_settings import KLINE_SIDE_CLOSED, KLINE_SIDE_LABEL
from monkq.const import TICK_DIRECTION
dtypes_trades = {
"timestamp": np.object,
"symbol": np.str,
"side": np.float64,
"size": np.float64,
"price": np.float64,
"tickDirection": np.float64,
"trdMatchID": np.str,
"grossValue": np.float64,
"homeNotional": np.float64,
"foreignNotional": np.float64
}
dtypes_quote = {
"timestamp": np.object,
"symbol": np.str,
"bidSize": np.float64,
"bidPrice": np.float64,
"askPrice": np.float64,
"askSize": np.float64
}
def _date_parse(one: str) -> pandas.Timestamp:
return pandas.to_datetime(one, format="%Y-%m-%dD%H:%M:%S.%f", utc=True)
def _side_converters(side: str) -> SIDE:
if side == 'Buy':
return np.float64(SIDE.BUY.value)
elif side == 'Sell':
return np.float64(SIDE.SELL.value)
else:
return np.float64(SIDE.UNKNOWN.value) # pragma: no cover
def _tick_direction(tick_direction: str) -> TICK_DIRECTION:
if tick_direction == 'MinusTick':
return np.float64(TICK_DIRECTION.MINUS_TICK.value)
elif tick_direction == 'PlusTick':
return np.float64(TICK_DIRECTION.PLUS_TICK.value)
elif tick_direction == 'ZeroMinusTick':
return np.float64(TICK_DIRECTION.ZERO_MINUS_TICK.value)
elif tick_direction == 'ZeroPlusTick':
return np.float64(TICK_DIRECTION.ZERO_PLUS_TICK.value)
else:
return np.float64(TICK_DIRECTION.UNKNOWN.value) # pragma: no cover
def read_trade_tar(path: Union[str, IO], with_detailed: bool = False, with_symbol: bool = True,
index: Optional[str] = None) -> pandas.DataFrame:
if with_detailed:
usecols = ["timestamp", "side", "size", "price",
"tickDirection", "trdMatchID", "grossValue",
"homeNotional", "foreignNotional"]
else:
usecols = ["timestamp", "side", "size", "price", "tickDirection",
"grossValue", "homeNotional", "foreignNotional"]
if with_symbol:
usecols.append("symbol")
use_dtypes = {}
for col in usecols:
if col in ('side', "tickDirection"):
continue
use_dtypes[col] = dtypes_trades[col]
t_frame = pandas.read_csv(path, compression='gzip',
parse_dates=[0],
infer_datetime_format=True,
usecols=usecols,
dtype=use_dtypes,
converters={'side': _side_converters,
'tickDirection': _tick_direction},
engine='c', low_memory=True, date_parser=_date_parse)
if index:
t_frame.set_index(index, inplace=True)
return t_frame
def read_quote_tar(path: Union[str, IO], with_symbol: bool = True, index: Optional[str] = None) -> pandas.DataFrame:
usecols = ["timestamp", "bidSize", "bidPrice", "askPrice", "askSize"]
if with_symbol:
usecols.append("symbol")
use_dtypes = {}
for col in usecols:
use_dtypes[col] = dtypes_quote[col]
t_frame = pandas.read_csv(path, compression='gzip',
parse_dates=[0],
infer_datetime_format=True,
usecols=usecols,
dtype=use_dtypes,
engine='c', low_memory=True, date_parser=_date_parse)
if index:
t_frame.set_index(index, inplace=True)
return t_frame
def trades_to_1m_kline(frame: pandas.DataFrame) -> pandas.DataFrame:
re_df = frame.resample('1Min', label=KLINE_SIDE_LABEL, closed=KLINE_SIDE_CLOSED)
kline = re_df['price'].ohlc()
kline['volume'] = re_df['homeNotional'].sum()
kline['turnover'] = re_df['foreignNotional'].sum()
kline.fillna(method='ffill', inplace=True)
return kline
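def _example_trades_to_1m_kline() -> pandas.DataFrame:
    # Minimal sketch: instead of reading a real BitMEX archive with
    # read_trade_tar, build a tiny trades frame by hand (the prices and sizes
    # below are made up) and resample it into 1-minute OHLC bars.
    index = pandas.to_datetime(
        ["2019-03-01 00:00:10", "2019-03-01 00:00:40", "2019-03-01 00:01:20"],
        utc=True,
    )
    trades = pandas.DataFrame(
        {
            "price": [3800.0, 3801.5, 3799.0],
            "homeNotional": [0.10, 0.20, 0.05],
            "foreignNotional": [380.0, 760.3, 190.0],
        },
        index=index,
    )
    return trades_to_1m_kline(trades)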
def kline_from_list_of_dict(obj: List[dict]) -> pandas.DataFrame:
"""dict format
{'timestamp': '2019-03-02T02:05:00.000Z',
'symbol': 'XBTUSD',
'open': 3822,
'high': 3822,
'low': 3822,
'close': 3822,
'trades': 0,
'volume': 0,
'vwap': None,
'lastSize': None,
'turnover': 0,
'homeNotional': 0,
'foreignNotional': 0}"""
df = pandas.DataFrame(obj, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'turnover'])
df['timestamp'] = pandas.to_datetime(df['timestamp'])
df.set_index('timestamp', inplace=True)
return df
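def _example_kline_from_list_of_dict() -> pandas.DataFrame:
    # Sketch of the documented input: a single bar in the dict layout shown in
    # the docstring above yields a one-row kline frame indexed by timestamp.
    bar = {
        'timestamp': '2019-03-02T02:05:00.000Z', 'symbol': 'XBTUSD',
        'open': 3822, 'high': 3822, 'low': 3822, 'close': 3822,
        'trades': 0, 'volume': 0, 'vwap': None, 'lastSize': None,
        'turnover': 0, 'homeNotional': 0, 'foreignNotional': 0,
    }
    return kline_from_list_of_dict([bar])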
def fullfill_1m_kline_with_start_end(frame: pandas.DataFrame, start: datetime.datetime,
end: datetime.datetime) -> pandas.DataFrame:
assert start.second == 0
assert start.microsecond == 0
assert end.second == 0
assert end.microsecond == 0
new = pandas.DataFrame([
(np.nan, np.nan, np.nan, np.nan, 0., 0.),
(np.nan, np.nan, np.nan, np.nan, 0., 0.)
], columns=["high", "low", "open", "close", "volume", "turnover"], index=pandas.DatetimeIndex((start, end)))
new_df = frame.append(new, sort=False)
resample = new_df.resample('1Min', label=KLINE_SIDE_LABEL, closed=KLINE_SIDE_CLOSED, convention="end")
outcome = resample['close'].last()
outcome = pandas.DataFrame(index=outcome.index)
outcome['close'] = resample['close'].last()
outcome['open'] = resample['open'].last()
outcome['high'] = resample['high'].last()
outcome['low'] = resample['low'].last()
outcome['volume'] = resample['volume'].sum()
outcome['turnover'] = resample['turnover'].sum()
outcome.fillna(method='ffill', inplace=True)
return outcome
def classify_df(df: pandas.DataFrame, column: str, delete_column: bool = True) -> Dict[str, pandas.DataFrame]:
out = {}
uniques = df[column].unique()
for one in uniques:
new = df[df[column] == one]
if delete_column:
del new[column]
out[one] = new
return out
def check_1m_data_integrity(df: pandas.DataFrame, start: datetime.datetime, end: datetime.datetime) -> bool:
assert start.second == 0
assert start.microsecond == 0
assert end.second == 0
assert end.microsecond == 0
start = start + relativedelta(minutes=1)
total_date = pandas.date_range(start, end, freq='min')
#%%
import numpy as np
from numpy import pi
import pandas as pd
import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path+'/config/')
from variables import train_len, dev_len,test_len
from ssa import SSA
station = 'Huaxian' # 'Huaxian', 'Xianyang' and 'Zhangjiashan'
save_path = {
'Huaxian':root_path + '\\Huaxian_ssa\\data\\',
'Xianyang':root_path + '\\Xianyang_ssa\\data\\',
'Zhangjiashan':root_path + '\\Zhangjiashan_ssa\\data\\',
}
data ={
'Huaxian':(pd.read_excel(root_path+'/time_series/HuaxianRunoff1951-2018(1953-2018).xlsx')['MonthlyRunoff'][24:]).reset_index(drop=True),
'Xianyang':(pd.read_excel(root_path+'/time_series/XianyangRunoff1951-2018(1953-2018).xlsx')['MonthlyRunoff'][24:]).reset_index(drop=True),
'Zhangjiashan':(pd.read_excel(root_path+'/time_series/ZhangJiaShanRunoff1953-2018(1953-2018).xlsx')['MonthlyRunoff'][0:]).reset_index(drop=True),
}
if not os.path.exists(save_path[station]+'ssa-test\\'):
os.makedirs(save_path[station]+'ssa-test\\')
# plot the monthly runoff of the selected station
data[station].plot()
plt.title("Monthly Runoff of "+station+" station")
plt.xlabel("Time(1953/01-2018/12)")
plt.ylabel(r"Runoff($m^3/s$)")
plt.tight_layout()
plt.show()
full = data[station] #(full)from 1953/01 to 2018/12 792 samples
train = full[:train_len] #(train)from 1953/01 to 1998/12, 552 samples
train_dev = full[:train_len+dev_len]
# decomposition parameter
window = 12
columns=[
'ORIG',#orig_TS
'Trend',#F0
'Periodic1',#F1
'Periodic2',#F2
'Periodic3',#F3
'Periodic4',#F4
'Periodic5',#F5
'Periodic6',#F6
'Periodic7',#F7
'Periodic8',#F8
'Periodic9',#F9
'Periodic10',#F10
'Noise',#F11
]
#%%
# Decompose the entire monthly runoff of huaxian
huaxian_ssa = SSA(full,window)
F0 = huaxian_ssa.reconstruct(0)
F1 = huaxian_ssa.reconstruct(1)
F2 = huaxian_ssa.reconstruct(2)
F3 = huaxian_ssa.reconstruct(3)
F4 = huaxian_ssa.reconstruct(4)
F5 = huaxian_ssa.reconstruct(5)
F6 = huaxian_ssa.reconstruct(6)
F7 = huaxian_ssa.reconstruct(7)
F8 = huaxian_ssa.reconstruct(8)
F9 = huaxian_ssa.reconstruct(9)
F10 = huaxian_ssa.reconstruct(10)
F11 = huaxian_ssa.reconstruct(11)
orig_TS = huaxian_ssa.orig_TS
df = pd.concat([orig_TS,F0,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11],axis=1)
df = pd.DataFrame(df.values,columns=columns)
df.to_csv(save_path[station]+'SSA_FULL.csv',index=None)
#%%
# Decompose the training monthly runoff of huaxian
huaxian_ssa = SSA(train,window)
F0 = huaxian_ssa.reconstruct(0)
F1 = huaxian_ssa.reconstruct(1)
F2 = huaxian_ssa.reconstruct(2)
F3 = huaxian_ssa.reconstruct(3)
F4 = huaxian_ssa.reconstruct(4)
F5 = huaxian_ssa.reconstruct(5)
F6 = huaxian_ssa.reconstruct(6)
F7 = huaxian_ssa.reconstruct(7)
F8 = huaxian_ssa.reconstruct(8)
F9 = huaxian_ssa.reconstruct(9)
F10 = huaxian_ssa.reconstruct(10)
F11 = huaxian_ssa.reconstruct(11)
orig_TS = huaxian_ssa.orig_TS
df = pd.concat([orig_TS,F0,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11],axis=1)
import os
import re
import zipfile
from io import BytesIO
import pandas as pd
class ParseMMSDMTables:
"""Used to extract and format data from AEMO MMSDM tables"""
def __init__(self, archive_dir):
"""Initialise object used to extract MMSDM data
Parameters
----------
archive_dir : str
Path to directory containing MMSDM data (zipped)
"""
# Path to folder containing MMSDM data
self.archive_dir = archive_dir
def mmsdm_table_to_dataframe(self, archive_name, table_name):
"""Read MMSDM table into pandas DataFrame
Parameters
----------
archive_name : str
Name of zip folder containing MMSDM information for a given year
table_name : str
Name of MMSDM table to be read into pandas DataFrame
Returns
-------
df : pandas DataFrame
DataFrame containing contents of given MMSDM table
"""
# Path to MMSDM archive for a given year and month
archive_path = os.path.join(self.archive_dir, archive_name)
# Open zipped archive
with zipfile.ZipFile(archive_path) as outer_zip:
# Filter files in archive by table name and file type
compressed_files = [f for f in outer_zip.filelist
if (table_name in f.filename)
and ('.zip' in f.filename)
and re.search(r'_{}_\d'.format(table_name), f.filename)]
# Check only 1 file in list
if len(compressed_files) != 1:
raise Exception('Encountered {} files, should only encounter 1'.format(len(compressed_files)))
else:
compressed_file = compressed_files[0]
# Construct name of compressed csv file
csv_name = compressed_file.filename.replace('.zip', '.CSV').split('/')[-1]
# Convert opened zip into bytes IO object to read inner zip file
zip_data = BytesIO(outer_zip.read(compressed_file))
# Open inner zip file
with zipfile.ZipFile(zip_data) as inner_zip:
# Read csv from inner zip file
with inner_zip.open(csv_name) as f:
# Read into Pandas DataFrame
df = pd.read_csv(f, skiprows=1)
# Remove last row of DataFrame (End File row)
df = df[:-1]
return df
def parse_biddayoffer_d(self, archive_name, column_index=None):
"""Read BIDDAYOFFER_D table into DataFrame and apply formatting
Parameters
----------
archive_name : str
Name of zip folder containing MMSDM information for a given year
column_index : list
List containing new column labels for outputted DataFrame
Returns
-------
df_o : pandas DataFrame
Formatted data from BIDDAYOFFER_D table
"""
# Read MMSDM table into DataFrame
df = self.mmsdm_table_to_dataframe(archive_name=archive_name, table_name='BIDDAYOFFER_D')
# Columns to keep
cols = ['SETTLEMENTDATE', 'DUID', 'PRICEBAND1', 'PRICEBAND2', 'PRICEBAND3', 'PRICEBAND4',
'PRICEBAND5', 'PRICEBAND6', 'PRICEBAND7', 'PRICEBAND8', 'PRICEBAND9', 'PRICEBAND10']
# Filter energy bids, remove duplicate (keeping last bid)
df_o = df.loc[df['BIDTYPE'] == 'ENERGY', cols].drop_duplicates(keep='last')
# Convert settlement date to datetime object
df_o['SETTLEMENTDATE'] = pd.to_datetime(df_o['SETTLEMENTDATE'])
# Shift settlement date forward by 4hrs and 5mins. Note that each trading
# day starts at 4.05am, but the settlement date starts at 12am. Price bands
# for a given settlementdate are applicable to trading intervals when the
# trading day actually starts, hence the need to shift the time forward
# by 4hrs and 5 mins.
df_o['SETTLEMENTDATE'] = df_o['SETTLEMENTDATE'] + pd.Timedelta(hours=4, minutes=5)
# List of columns corresponding to priceband values
value_columns = df_o.columns.drop(['SETTLEMENTDATE', 'DUID'])
# Pivot DataFrame and rename columns
df_o = df_o.pivot_table(index='SETTLEMENTDATE', columns='DUID', values=value_columns)
df_o.columns = df_o.columns.map('|'.join)
# If new column index exists, reindex columns accordingly
if column_index:
# Reindex columns
df_o = df_o.reindex(columns=column_index)
# Fill nan values with -999
df_o = df_o.fillna(-999)
return df_o
def parse_bidperoffer_d(self, archive_name, column_index=None):
"""Read BIDPEROFFER_D table into DataFrame and apply formatting
Parameters
----------
archive_name : str
Name of zip folder containing MMSDM information for a given year
column_index : list
List containing new column labels for outputted DataFrame
Returns
-------
df_o : pandas DataFrame
Formatted data from BIDPEROFFER_D table
"""
# Read MMSDM table into DataFrame
df = self.mmsdm_table_to_dataframe(archive_name=archive_name, table_name='BIDPEROFFER_D')
# Columns to keep
cols = ['INTERVAL_DATETIME', 'DUID', 'BANDAVAIL1', 'BANDAVAIL2', 'BANDAVAIL3', 'BANDAVAIL4',
'BANDAVAIL5', 'BANDAVAIL6', 'BANDAVAIL7', 'BANDAVAIL8', 'BANDAVAIL9', 'BANDAVAIL10',
'MAXAVAIL', 'ROCUP', 'ROCDOWN']
df_o = df.loc[df['BIDTYPE'] == 'ENERGY', cols].drop_duplicates(keep='last')
# Convert interval datetime to datetime object
df_o['INTERVAL_DATETIME'] = pd.to_datetime(df_o['INTERVAL_DATETIME'])
from __future__ import print_function,unicode_literals,with_statement,division
# kivy related
import matplotlib
import threading
matplotlib.use('module://kivy.garden.matplotlib.backend_kivy')
from matplotlib import pyplot as plt
from kivy.garden.graph import MeshLinePlot
#import matplotlib.animation as animation
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.clock import Clock
from kivy.properties import StringProperty,ListProperty
from kivy.uix.screenmanager import ScreenManager, Screen
import socket
import struct
import numpy as np
import pandas as pd
from scipy import signal
from functools import partial
from colorama import Fore, Back, Style, init
import logging
import time
# Kivy Material Design
from kivymd.theming import ThemeManager
from BCIEncode import sequence
__author__ = '<NAME>'
__email__ = '<EMAIL>'
class CountDown(BoxLayout):
pass
class SerialPortSelection(BoxLayout):
""" Select Serial Port on GUI """
unlimited = True
save_directory = 'data'
def __init__(self,**kwargs):
super(SerialPortSelection,self).__init__(**kwargs)
# The kv file is loaded after rendering, so widgets cannot be accessed by ids inside __init__ (nothing has been rendered yet). Clock.schedule_once(..., 0) schedules a call to a function that relies on ids once rendering is done.
Clock.schedule_once(self.scanPorts,0)
Clock.schedule_once(self.popupHintOnConnectionFailedConfig,0)
def popupHintOnConnectionFailedConfig(self,dt=0):
# Connection Failed popup hint configuration
self.popup = Popup(title='Connection Failed',id='popup')
App.get_running_app().root.current_screen.add_widget(self.popup)
App.get_running_app().root.current_screen.remove_widget(self.popup)
def clear_filename(self):
self.ids['filename'].text = ''
def changeState(self,state):
if state == 'down':
con = App.get_running_app().connect(self.ids['uart'].text)
if self.ids['duration'].text != 'Unlimited':
self.unlimited = False
self.countDown = CountDown()
self.parent.add_widget(self.countDown)
self.duration = int(self.ids['duration'].text[:-1])
self.remained = int(self.ids['duration'].text[:-1])
self.countDown.ids['remaingTime'].text = self.ids['duration'].text
self.countDown.ids['progress'].value = 0
App.get_running_app().save = True
Clock.schedule_interval(self.tick,1/10)
# When connection failed
if con is False:
self.ids['connect'].state = 'normal'
# Popup hint and rescan serial devices
self.popup.open()
Clock.schedule_once(self.popup.dismiss ,1)
self.scanPorts()
else:
data = App.get_running_app().disconnect()
if not self.unlimited:
Clock.unschedule(self.tick)
self.parent.remove_widget(self.countDown)
App.get_running_app().save = False
filename = self.ids['filename'].text
if len(filename) == 0:
filename = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
np.savetxt('./%s/BCI-%s.txt'%(self.save_directory, filename),App.get_running_app().toSave)
App.get_running_app().toSave = list()
def tick(self,dt):
self.remained -= dt
self.countDown.ids['remaingTime'].text = '%d s' % self.remained
self.countDown.ids['progress'].value = (1 - self.remained/self.duration) * 100
if self.remained <= 0:
self.ids['connect'].state = 'normal'
def scanPorts(self,dt=0):
pass
class FFT(BoxLayout):
""" Real Time frequency-domain Plotting """
length = 128 # FFT length
fftLen = 1024
plotScale = 4 # (integer) After the FFT there are as many as fftLen/2 points to plot; this factor thins them out before drawing.
autoScale = True
def __init__(self,**kwargs):
super(FFT,self).__init__(**kwargs)
self.fs = App.get_running_app().fs
self.ts = 1.0/self.fs # Sampling interval
self.xscale = self.fs/4
self.xcenter = self.fs/2
# Configure real time fft figure with matplotlib
self.fig, self.ax = plt.subplots()
#plt.ion() # Never turn this on, or enjoy an almost frozen application.
# Settings
self.ax.xaxis.set_label_coords(0.9, -0.01)
self.ax.yaxis.set_label_coords(-0.00, 1.05)
self.ax.set_xlabel('Freq(Hz)')
self.ax.set_ylabel('Amplitude(uV)',rotation=0)
# self.ax.set_title('PSD')
self.ax.set_title('FFT')
# x-axis, ts = 1/fs
self.f = np.fft.rfftfreq(self.fftLen,self.ts)
fPos = self.f#[self.f>=0]
fPlot = [fPos[i] for i in range(len(fPos)) if np.mod(i,self.plotScale) == 0 ]
self.plotLen = len(fPlot)
# Set X padding
padding = (np.max(fPlot) - np.min(fPlot)) * 0.01
self.ax.set_xlim([np.min(fPlot)-padding,np.max(fPlot)+padding])
self.FFTplot, = self.ax.plot(fPlot,np.zeros_like(fPlot))
# self.fig.canvas.mpl_connect('scroll_event', self.figure_scroll)
self.add_widget(self.fig.canvas)
# def start(self):
# Clock.unschedule(self.refresh)
# Clock.schedule_interval(self.refresh,1/self.fps)
#
# def stop(self):
# Clock.unschedule(self.refresh)
# self.FFTplot.set_ydata(np.zeros(self.fftLen))
# self.ax.set_ylim([-1,1])
# plt.draw()
def figure_scroll(self,event):
print('scroll event from mpl ', event.x, event.y, event.step, event.button)
def clear(self, dt=0):
y = self.FFTplot.get_ydata()
self.FFTplot.set_ydata(np.zeros(len(y)))
self.ax.set_ylim([0,1])
plt.draw_all()
def set_fft_length(self, FixedFFTLen=False):
self.length = int(self.ids['fftLen'].text)
if not FixedFFTLen:
self.fftLen = self.length
self.f = np.fft.rfftfreq(self.fftLen,self.ts)
# fPos = self.f[self.f>=0]
# fPlot = [fPos[i] for i in range(len(fPos)) if np.mod(i,self.plotScale) == 0 ]
fPos = self.f
fPlot = [fPos[i] for i in range(len(fPos)) if np.mod(i,self.plotScale) == 0 ]
self.plotLen = len(fPlot)
self.FFTplot.set_data(fPlot,np.zeros_like(fPlot))
def set_scale(self):
self.scale = self.ids['scale'].text
if self.ids['scale'].text == 'Auto':
self.autoScale = True
else:
self.autoScale = False
if self.scale == '10μV':
self.ax.set_ylim([0,10])
elif self.scale == '100μV':
self.ax.set_ylim([0,100])
elif self.scale == '1mV':
self.ax.set_ylim([0,1000])
elif self.scale == '10mV':
self.ax.set_ylim([0,10000])
elif self.scale == '100mV':
self.ax.set_ylim([0,1e5])
elif self.scale == '1000mV':
self.ax.set_ylim([0,1e6])
def set_horizontal_width(self):
self.xcenter = self.ids['horizon'].value
self.ax.set_xlim([self.xcenter - self.xscale , self.xcenter + self.xscale])
def set_xscale(self):
self.xscale = self.fs/4 / self.ids['xscale'].value
xmin = self.xscale
xmax = self.fs/2 - self.xscale
self.ids['horizon'].range = (xmin,xmax)
if self.xcenter - self.xscale < 0:
self.ids['horizon'].value = xmin
elif self.xcenter + self.xscale > self.fs/2:
self.ids['horizon'].value = xmax
self.ax.set_xlim([self.xcenter - self.xscale , self.xcenter + self.xscale])
def refresh(self):
data = App.get_running_app().filteredData
if len(data) < self.length:
return False
# logging.info("Refreshing. Length of data:%d"%(len(data)))
# Clear
#self.ax.cla()
# Get data
y = data[-self.length:] # * signal.blackman(self.length, sym=0)
# PSD
# x,YPlot = signal.periodogram(y,fs=self.fs,nfft=None,window='hamming')
# YPlot = 10 * np.log(YPlot)
# x = x[1:]
# YPlot = YPlot[1:]
# self.FFTplot.set_data(x,YPlot)
# FFT
Y = np.fft.rfft(y,self.fftLen)
YampPos = np.abs(Y/self.fs)
# YampPos[1:-1] = YampPos[1:-1] * 2
YPlot = [YampPos[i] for i in range(len(YampPos)) if np.mod(i,self.plotScale)==0 ]
# YPlot = YampPos
self.FFTplot.set_ydata(YPlot)
if self.autoScale:
# Set y padding
padding = (np.max(YPlot) - np.min(YPlot)) * 0.1
# TODO To improve figure ylimits stability
if padding > 0.1:
self.ax.set_ylim([np.min(YPlot)-padding,np.max(YPlot)+padding])
plt.draw_all()
#self.ax.plot(fPlot,YPlot)
class RealTimePlotting(BoxLayout):
scale = 'Auto'
plotScale = 2
# band = np.array([49,51])
"""Real Time time-domain Plotting """
def __init__(self,**kwargs):
super(RealTimePlotting ,self).__init__(**kwargs)
self.fs = App.get_running_app().fs
self.length = self.fs * 4
# Configure real time fft figure with matplotlib
self.fig, self.ax = plt.subplots()
#plt.ion() # Never turn this on, or enjoy an almost frozen application.
# Settings
self.ax.set_xlabel('Time(seconds)')
self.ax.xaxis.set_label_coords(0.8, -0.01)
self.ax.yaxis.set_label_coords(-0.00, 1.05)
self.ax.set_ylabel('Amplitude(uV)',rotation=0)
self.ax.get_xaxis().set_visible(True)
#self.ax.set_title('Real Time Plotting')
# Plot x data once. Then we only need to update y data
x = np.arange(0,self.length)/self.fs
x= [x[i] for i in range(len(x)) if np.mod(i,self.plotScale)==0 ]
self.ax.set_xlim([np.min(x),np.max(x)])
self.RealTimePlot, = self.ax.plot([],[])
self.RealTimePlot.set_xdata(x)
self.RealTimePlot.set_ydata(np.zeros_like(x).tolist())
self.add_widget(self.fig.canvas)
# def start(self):
# Clock.unschedule(self.refresh)
# Clock.schedule_interval(self.refresh,1/self.fps)
# def stop(self):
# Clock.unschedule(self.refresh)
# self.RealTimePlot.set_ydata(np.zeros(self.length))
# self.ax.set_ylim([-1,1])
# plt.draw()
def clear(self,dt=0):
self.RealTimePlot.set_ydata(np.zeros(int(self.length/self.plotScale)).tolist())
self.ax.set_ylim([-1,1])
plt.draw_all()
def refresh(self):
# TODO Now real time plotting and FFT cannot be showed on the same time
# y_raw = App.get_running_app().data[-self.length:]
y_raw = App.get_running_app().filteredData[-self.length:]
y= [y_raw[i] for i in range(len(y_raw)) if np.mod(i,self.plotScale)==0 ]
# Frequency Domain filter
# b,a = signal.butter(4,[5 /(self.fs/2),45 /(self.fs/2)],'band')
# y = signal.filtfilt(b,a,y_raw)
self.RealTimePlot.set_ydata(y)
if self.scale == 'Auto':
ymin,ymax = self.ax.get_ylim()
if ymax - ymin !=0:
padding = ( np.max(y) - np.min(y) )*0.1
if np.min(y) < ymin or np.max(y) > ymax or padding < (ymax - ymin) *0.1 and (ymax-ymin)>10:
padding = (np.max(y) - np.min(y)) * 0.1
# TODO To improve figure ylimits stability
self.ax.set_ylim([np.min(y)-padding, np.max(y)+padding])
plt.draw_all()
def set_filter(self):
if self.ids['filters'].text == 'None':
App.get_running_app().refresh_filter(0,0,'None')
elif self.ids['filters'].text == 'Highpass:4Hz':
fs = App.get_running_app().fs
App.get_running_app().refresh_filter(4,fs/2,ftype='highpass')
elif self.ids['filters'].text == '4Hz-60Hz':
App.get_running_app().refresh_filter(4,60)
elif self.ids['filters'].text == '4Hz-45Hz':
App.get_running_app().refresh_filter(4,45)
def set_notch(self):
if self.ids['notch'].text == 'None':
App.get_running_app().refresh_notch_filter(50,False)
elif self.ids['notch'].text == '50Hz':
App.get_running_app().refresh_notch_filter(50,True)
elif self.ids['notch'].text == '60Hz':
App.get_running_app().refresh_notch_filter(60,True)
def set_length(self):
if self.ids['length'].text == '0.5s':
self.length = int(self.fs * 0.5)
elif self.ids['length'].text == '1s':
self.length = self.fs * 1
elif self.ids['length'].text == '2s':
self.length = self.fs * 2
elif self.ids['length'].text == '3s':
self.length = self.fs * 3
elif self.ids['length'].text == '4s':
self.length = self.fs * 4
x_raw = np.arange(0,self.length)/self.fs
x= [x_raw[i] for i in range(len(x_raw)) if np.mod(i,self.plotScale)==0 ]
y_raw = App.get_running_app().data[-self.length:]
y= [y_raw[i] for i in range(len(y_raw)) if np.mod(i,self.plotScale)==0 ]
self.ax.set_xlim([np.min(x),np.max(x)])
self.RealTimePlot.set_data(x,y)
plt.draw_all()
class Test(Screen):
"""Test Layout"""
# Settings
theme_cls = ThemeManager()
def __init__(self, **kwargs):
""" Initializing serial and plot
"""
super(Test,self).__init__(**kwargs)
''' BLINKING
'''
# for i in range(12):
# Clock.schedule_interval(partial(self.blinking,i),1/(0+12))
def blinking(self,idx,dt):
widgetID = 'button%d' % idx
if self.ids[widgetID].state == 'normal':
self.ids[widgetID].state = 'down'
self.ids[widgetID].trigger_action(0.01)
if self.ids[widgetID].state == 'down':
self.ids[widgetID].state = 'normal'
self.ids[widgetID].trigger_action(0.01)
class Blink(Screen):
# Settings
theme_cls = ThemeManager()
def __init__(self, **kwargs):
""" Initializing serial and plot
:returns: TODO
"""
super(Blink,self).__init__(**kwargs)
# ''' BLINKING
# '''
# for i in range(12):
# hz = 6 # i + 4
# Clock.schedule_interval(partial(self.blinking,i),1/(2*hz))
# def blinking(self,idx,dt):
# widgetID = 'button%d' % idx
# if self.ids[widgetID].state == 'normal':
# self.ids[widgetID].state = 'down'
# elif self.ids[widgetID].state == 'down':
# self.ids[widgetID].state = 'normal'
def set_freq(self):
"""
set screen blinking frequency
"""
freq = self.ids['freq'].value
self.ids['freqLabel'].text = "%dHz" % self.ids['freq'].value
for i in range(12):
Clock.unschedule(partial(self.blinking,i))
Clock.schedule_interval(partial(self.blinking,i),1/(freq*2))
def toggleBlink(self):
pass
class BlinkApp(App):
kv_directory = 'ui_template'
def __init__(self,**kwargs):
""" Initializing serial
:returns: TODO
"""
super(BlinkApp,self).__init__(**kwargs)
def build(self):
root = ScreenManager()
root.add_widget(Blink(name='bci'))
return root
class BCIApp(App):
# Settings
kv_directory = 'ui_template'
fps = 5
fs = 500
storedLen = 4096
data = list()
# Buffer
rawRemained = b'' # raw Data from serial, this should contain the data unused last time
save = False
toSave = list()
filteredDataNotch = list()
filteredData = list()
port = 23333
# Classification
lastF = {'f':False, 'count':0}
fBuffer = dict()
laststate = 0
ratio = 0.4
window = fs
tolerance = 0.5
interval = 0.2
decodeBuffer = [False, False, False]
def __init__(self,**kwargs):
""" Initializing serial
:returns: TODO
"""
init(autoreset=True)
self.data = np.zeros(self.storedLen).tolist()
self.filteredData = np.zeros(self.storedLen).tolist()
self.filteredDataNotch = np.zeros(self.storedLen).tolist()
self.refresh_notch_filter(50,True)
self.refresh_filter(4,45)
# self.b,self.a = signal.butter(4,[4 /(self.fs/2),30 /(self.fs/2)],'band')
super(BCIApp,self).__init__(**kwargs)
self.tcpSerSock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.tcpSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind the service port
self.tcpSerSock.bind(("", self.port))
# Start listening
self.tcpSerSock.listen(5)
self.tcp = False
def on_stop(self):
if self.tcp:
self.disconnect()
def build(self):
root = ScreenManager()
root.add_widget(Test(name='bci'))
return root
def classify(self, dt):
def classifyNaive():
"""
Naive Classification
"""
with open('blinkState','r') as f:
state = int(f.read())
f.close()
if state:
y = self.filteredData[-self.window:]
Y = np.fft.rfft(y)
fs = App.get_running_app().fs
freq = np.fft.rfftfreq(len(y),1/fs)
powerSpectrum = np.abs(Y/fs)
data = | pd.Series(powerSpectrum,index=freq) | pandas.Series |
# -*- coding: utf-8 -*-
"""Functions for downloading and analysing data on MPs."""
# Imports ---------------------------------------------------------------------
import numpy as np
import pandas as pd
from . import combine
from . import constants
from . import core
from . import elections
from . import filter
from . import members
from . import utils
# Raw MPs queries -------------------------------------------------------------
def fetch_mps_raw():
"""Fetch key details for all MPs."""
return members.fetch_members_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_commons_memberships_raw():
"""Fetch Commons memberships for all MPs."""
commons_memberships_query = """
PREFIX : <https://id.parliament.uk/schema/>
PREFIX d: <https://id.parliament.uk/>
SELECT DISTINCT
?person_id
?mnis_id
?given_name
?family_name
?display_name
?constituency_id
?constituency_name
?constituency_ons_id
?seat_incumbency_id
?seat_incumbency_start_date
?seat_incumbency_end_date
WHERE {{
# House constraint for the House of Commons
BIND(d:{0} AS ?house)
?person_id :memberMnisId ?mnis_id;
:personGivenName ?given_name ;
:personFamilyName ?family_name ;
<http://example.com/F31CBD81AD8343898B49DC65743F0BDF> ?display_name ;
:memberHasParliamentaryIncumbency ?seat_incumbency_id .
?seat_incumbency_id a :SeatIncumbency ;
:seatIncumbencyHasHouseSeat ?seat ;
:parliamentaryIncumbencyStartDate ?seat_incumbency_start_date .
OPTIONAL {{ ?seat_incumbency_id :parliamentaryIncumbencyEndDate ?seat_incumbency_end_date . }}
?seat :houseSeatHasHouse ?house ;
:houseSeatHasConstituencyGroup ?constituency_id .
?constituency_id :constituencyGroupName ?constituency_name ;
:constituencyGroupStartDate ?constituencyStartDate .
OPTIONAL {{ ?constituency_id :constituencyGroupOnsCode ?constituency_ons_id . }}
}}
""".format(constants.PDP_ID_HOUSE_OF_COMMONS)
return core.sparql_select(commons_memberships_query)
def fetch_mps_party_memberships_raw():
"""Fetch party memberships for all MPs."""
return members.fetch_party_memberships_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_mps_government_roles_raw():
"""Fetch government roles for all MPs."""
return members.fetch_government_roles_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_mps_opposition_roles_raw():
"""Fetch opposition roles for all MPs."""
return members.fetch_opposition_roles_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_mps_committee_memberships_raw():
"""Fetch committee memberships for all MPs."""
return members.fetch_committee_memberships_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
# Main MPs API ----------------------------------------------------------------
def fetch_mps(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN):
"""Fetch key details for all MPs.
fetch_mps fetches data from the data platform showing key details about
each MP, with one row per MP.
The from_date and to_date arguments can be used to filter the MPs returned
based on the dates of their Commons memberships. The on_date argument is a
convenience that sets the from_date and to_date to the same given date. The
on_date has priority: if the on_date is set, the from_date and to_date are
ignored.
The filtering is inclusive: an MP is returned if any part of one of their
Commons memberships falls within the period specified with the from and to
dates.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
Returns
-------
out : DataFrame
A pandas dataframe of key details for each MP, with one row per MP.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch key details
mps = fetch_mps_raw()
# Filter based on membership dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
commons_memberships = fetch_commons_memberships()
matching_memberships = filter.filter_dates(
commons_memberships,
start_col='seat_incumbency_start_date',
end_col='seat_incumbency_end_date',
from_date=from_date,
to_date=to_date)
mps = mps[mps['person_id'].isin(matching_memberships['person_id'])]
# Tidy up and return
mps.sort_values(
by=['family_name'],
inplace=True)
mps.reset_index(drop=True, inplace=True)
return mps
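def _example_fetch_mps():
    """Minimal usage sketch for fetch_mps.

    The date below is an arbitrary example, and running this requires access
    to the Parliament data platform queried by fetch_mps_raw.
    """
    # MPs serving on a single day, using on_date as the convenience argument
    mps_on_date = fetch_mps(on_date='2019-06-01')
    print(len(mps_on_date))
    print(mps_on_date.head())
    return mps_on_date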
def fetch_commons_memberships(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN):
"""Fetch Commons memberships for all MPs.
fetch_commons_memberships fetches data from the data platform showing
Commons memberships for each MP. The memberships are processed to impose
consistent rules on the start and end dates for memberships.
The from_date and to_date arguments can be used to filter the memberships
returned. The on_date argument is a convenience that sets the from_date and
to_date to the same given date. The on_date has priority: if the on_date is
set, the from_date and to_date are ignored.
The filtering is inclusive: a membership is returned if any part
of it falls within the period specified with the from and to dates.
Note that a membership with a NaN end date is still open.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
Returns
-------
out : DataFrame
A pandas dataframe of Commons memberships for each MP, with one row
per Commons membership.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch the Commons memberships
commons_memberships = fetch_commons_memberships_raw()
# Get elections and fix the end dates of memberships
end_dates = commons_memberships['seat_incumbency_end_date'].values
general_elections = elections.get_general_elections().values
general_elections_count = len(general_elections)
# If the end date for a membership falls after dissolution adjust it
for i in range(len(end_dates)):
date = end_dates[i]
if pd.isna(date): continue
for j in range(general_elections_count):
dissolution = general_elections[j, 1]
election = general_elections[j, 2]
if date > dissolution and date <= election:
end_dates[i] = dissolution
continue
commons_memberships['seat_incumbency_end_date'] = end_dates
# Filter on dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
commons_memberships = filter.filter_dates(
commons_memberships,
start_col='seat_incumbency_start_date',
end_col='seat_incumbency_end_date',
from_date=from_date,
to_date=to_date)
# Tidy up and return
commons_memberships.sort_values(
by=['family_name',
'seat_incumbency_start_date'],
inplace=True)
commons_memberships.reset_index(drop=True, inplace=True)
return commons_memberships
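def _example_fetch_commons_memberships():
    """Minimal usage sketch for fetch_commons_memberships.

    The window below is an arbitrary example period; memberships that are
    still open keep a NaN end date, as noted in the docstring above.
    """
    memberships = fetch_commons_memberships(
        from_date='2017-06-08', to_date='2019-11-06')
    print(memberships[['display_name', 'constituency_name',
                       'seat_incumbency_start_date']].head())
    return memberships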
def fetch_mps_party_memberships(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN,
while_mp=True,
collapse=False):
"""Fetch party memberships for all MPs.
fetch_mps_party_memberships fetches data from the data platform showing
party memberships for each MP.
The from_date and to_date arguments can be used to filter the memberships
returned. The on_date argument is a convenience that sets the from_date and
to_date to the same given date. The on_date has priority: if the on_date is
set, the from_date and to_date are ignored.
The while_mp argument can be used to filter the memberships to include only
those that occurred during the period when each individual was an MP.
The filtering is inclusive: a membership is returned if any part
of it falls within the period specified with the from and to dates.
The collapse argument controls whether memberships are combined so that
there is only one row for each period of continuous membership within the
same party. Combining the memberships in this way means that party
membership ids from the data platform are not included in the dataframe
returned.
Note that a membership with a NaN end date is still open.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
while_mp : bool, optional
A boolean indicating whether to filter the party memberships to include
only those memberships that were held while each individual was serving
as an MP. The default value is True.
collapse: bool, optional
Determines whether to collapse consecutive memberships within the same
party into a single period of continuous party membership. Setting this
to True means that party membership ids are not returned in the
dataframe. The default value is False.
Returns
-------
out : DataFrame
A pandas dataframe of party memberships for each MP, with one row per
party membership. The memberships are processed and merged so that
there is only one party membership for a period of continuous
membership within the same party. A membership with a NaN end date is
still open.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch the party memberships
party_memberships = fetch_mps_party_memberships_raw()
# Filter on dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
import numpy as np
import seaborn as sns
from sklearn.metrics import precision_score, recall_score, f1_score, average_precision_score, confusion_matrix, auc
from tqdm import tqdm
from nltk.stem.porter import PorterStemmer
from textblob import Word
from nltk.corpus import stopwords
import re
import nltk
import pandas as pd
import requests
#!/usr/bin/env python
import config
import matplotlib.pyplot as plt
def group_list(lst, size=100):
"""
Generate batches of ids, 100 per batch by default.
Returns a list of strings, each containing comma-separated ids.
"""
new_list =[]
idx = 0
while idx < len(lst):
new_list.append(
','.join([str(item) for item in lst[idx:idx+size]])
)
idx += size
return new_list
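def _example_group_list():
    """Minimal sketch: 250 fake tweet ids become three comma-separated
    batches (100 + 100 + 50), ready for the batched endpoint used below."""
    batches = group_list(list(range(250)), size=100)
    print(len(batches))     # 3
    print(batches[2][:20])  # start of the final, shorter batch
    return batches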
def tweets_request(tweets_ids):
"""
Make requests to the Twitter API
"""
df_lst = []
for batch in tqdm(tweets_ids):
url = "https://api.twitter.com/2/tweets?ids={}&tweet.fields=created_at&expansions=author_id&user.fields=created_at".format(batch)
payload={}
headers = {'Authorization': 'Bearer ' + config.keys['bearer_token'],
'Cookie': 'personalization_id="v1_hzpv7qXpjB6CteyAHDWYQQ=="; guest_id=v1%3A161498381400435837'}
r = requests.request("GET", url, headers=headers, data=payload)
data = r.json()
if 'data' in data.keys():
df_lst.append(pd.DataFrame(data['data']))
import pandas as pd
import numpy as np
import warnings
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from datetime import datetime, timedelta
import scipy.stats as st
import statsmodels.api as sm
import math
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.stats import norm
from scipy import poly1d
warnings.simplefilter(action='ignore', category=Warning)
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as stats
from pandas.tseries.offsets import BDay
from plotly.subplots import make_subplots
matplotlib.rcParams['figure.figsize'] = (25.0, 15.0)
matplotlib.style.use('ggplot')
pd.set_option('display.float_format', lambda x: '%.4f' % x)
import plotly.io as pio
from numpy import median, mean
pio.templates.default = "plotly_white"
class SampleStrategy():
def shortEntry(self, prices_df):
short_entry_filter_1 = prices_df['MA NEAR'][-1] < prices_df['MA FAR'][-1]
short_entry_filter_2 = prices_df['MA NEAR'][-2] > prices_df['MA FAR'][-2]
enter_trade = short_entry_filter_1 and short_entry_filter_2
if enter_trade:
return True
else:
return False
def longEntry(self, prices_df):
long_entry_filter_1 = prices_df['MA NEAR'][-1] > prices_df['MA FAR'][-1]
long_entry_filter_2 = prices_df['MA NEAR'][-2] < prices_df['MA FAR'][-2]
enter_trade = long_entry_filter_1 and long_entry_filter_2
if enter_trade:
return True
else:
return False
def longExit(self, prices_df):
long_exit_filter_1 = prices_df['MA NEAR'][-1] < prices_df['MA FAR'][-1]
long_exit_filter_2 = prices_df['MA NEAR'][-2] > prices_df['MA FAR'][-2]
exit_trade = long_exit_filter_1 and long_exit_filter_2
if exit_trade:
return True
else:
return False
def shortExit(self, prices_df):
short_exit_filter_1 = prices_df['MA NEAR'][-1] > prices_df['MA FAR'][-1]
short_exit_filter_2 = prices_df['MA NEAR'][-2] < prices_df['MA FAR'][-2]
exit_trade = short_exit_filter_1 and short_exit_filter_2
if exit_trade:
return True
else:
return False
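def _example_sample_strategy():
    # Minimal sketch of the crossover checks: the made-up numbers below put the
    # fast average above the slow one on the last bar only, so longEntry fires
    # and shortEntry does not. A DatetimeIndex is used because the strategy
    # reads the last rows with positional [-1]/[-2] access.
    prices = pd.DataFrame(
        {
            'Close':   [100.0, 101.0, 102.0],
            'MA NEAR': [99.0, 99.5, 101.0],
            'MA FAR':  [100.0, 100.0, 100.5],
        },
        index=pd.date_range('2021-01-01', periods=3, freq='min'),
    )
    strategy = SampleStrategy()
    print(strategy.longEntry(prices))   # True
    print(strategy.shortEntry(prices))  # False
    return strategy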
from functools import reduce
class Broker():
def __init__(self,
price_data=None,
MA_period_slow=200,
MA_period_fast=50):
assert price_data is not None
self.data = price_data
self.pass_history = 20
self.strategy_obj = SampleStrategy()
self.entry_price = None
self.exit_price = None
self.position = 0
self.pnl = 0
self.MA_period_slow = MA_period_slow
self.MA_period_fast = MA_period_fast
self.trade_id = -1
self.trade_type = None
self.entry_time = None
self.exit_time = None
self.exit_type = None
self.data['MA NEAR'] = self.data['Close'].rolling(self.MA_period_fast).mean()
self.data['MA FAR'] = self.data['Close'].rolling(self.MA_period_slow).mean()
self.tradeLog = pd.DataFrame(columns=['Trade ID',
'Trade Type',
'Entry Time',
'Entry Price',
'Exit Time',
'Exit Price',
'PNL',
])
def tradeExit(self):
self.tradeLog.loc[self.trade_id, 'Trade ID'] = self.trade_id
self.tradeLog.loc[self.trade_id, 'Trade Type'] = self.trade_type
self.tradeLog.loc[self.trade_id, 'Entry Time'] = pd.to_datetime(self.entry_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Entry Price'] = self.entry_price
self.tradeLog.loc[self.trade_id, 'Exit Time'] = pd.to_datetime(self.exit_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Exit Price'] = self.exit_price
self.tradeLog.loc[self.trade_id, 'PNL'] = self.pnl*1000
def testerAlgo(self):
def takeEntry():
assert self.pass_history%1==0
enterShortSignal = self.strategy_obj.shortEntry(self.data.iloc[i-self.pass_history:i+1])
enterLongSignal = self.strategy_obj.longEntry(self.data.iloc[i-self.pass_history:i+1])
if enterShortSignal == True:
self.position = -1
self.trade_id = self.trade_id + 1
self.trade_type = 'Short'
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
elif enterLongSignal == True:
self.position = 1
self.trade_id = self.trade_id + 1
self.trade_type = 'Long'
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
for i in tqdm(range(self.pass_history, len(self.data)-1)):
if self.position in [1, -1]:
if self.position == -1:
assert self.pass_history%1==0
exitShortSignal = self.strategy_obj.shortExit(self.data.iloc[i-self.pass_history:i+1])
if exitShortSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.entry_price - self.exit_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
if self.position == 1:
exitLongSignal = self.strategy_obj.longExit(self.data.iloc[i-self.pass_history:i+1])
if exitLongSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.exit_price - self.entry_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
elif self.position == 0:
takeEntry()
class TestBroker():
def __init__(self,
MA_period_slow=200,
MA_period_fast=50):
url='https://drive.google.com/file/d/1pdzeR8bYD7G_pj7XvWhcJrxnFyzmmqps/view?usp=sharing'
url2='https://drive.google.com/uc?id=' + url.split('/')[-2]
self.data = pd.read_csv(url2 ,
parse_dates=['Timestamp'],
infer_datetime_format=True,
memory_map=True,
index_col='Timestamp',
low_memory=False)
self.pass_history = 20
self.strategy_obj = SampleStrategy()
self.entry_price = None
self.exit_price = None
self.position = 0
self.pnl = 0
self.MA_period_slow = MA_period_slow
self.MA_period_fast = MA_period_fast
self.trade_id = -1
self.trade_type = None
self.entry_time = None
self.exit_time = None
self.exit_type = None
self.data['MA NEAR'] = self.data['Close'].rolling(self.MA_period_fast).mean()
self.data['MA FAR'] = self.data['Close'].rolling(self.MA_period_slow).mean()
self.tradeLog = pd.DataFrame(columns=['Trade ID',
'Trade Type',
'Entry Time',
'Entry Price',
'Exit Time',
'Exit Price',
'PNL',
])
def tradeExit(self):
self.tradeLog.loc[self.trade_id, 'Trade ID'] = self.trade_id
self.tradeLog.loc[self.trade_id, 'Trade Type'] = self.trade_type
self.tradeLog.loc[self.trade_id, 'Entry Time'] = pd.to_datetime(self.entry_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Entry Price'] = self.entry_price
self.tradeLog.loc[self.trade_id, 'Exit Time'] = pd.to_datetime(self.exit_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Exit Price'] = self.exit_price
self.tradeLog.loc[self.trade_id, 'PNL'] = self.pnl*1000
def testerAlgo(self):
def takeEntry():
assert self.pass_history%1==0
enterShortSignal = self.strategy_obj.shortEntry(self.data.iloc[i-self.pass_history:i+1])
enterLongSignal = self.strategy_obj.longEntry(self.data.iloc[i-self.pass_history:i+1])
if enterShortSignal == True:
self.position = -1
self.trade_id = self.trade_id + 1
self.trade_type = -1
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
elif enterLongSignal == True:
self.position = 1
self.trade_id = self.trade_id + 1
self.trade_type = 1
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
for i in tqdm(range(self.pass_history, len(self.data)-1)):
if self.position in [1, -1]:
if self.position == -1:
assert self.pass_history%1==0
exitShortSignal = self.strategy_obj.shortExit(self.data.iloc[i-self.pass_history:i+1])
if exitShortSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.entry_price - self.exit_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
if self.position == 1:
exitLongSignal = self.strategy_obj.longExit(self.data.iloc[i-self.pass_history:i+1])
if exitLongSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.exit_price - self.entry_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
elif self.position == 0:
takeEntry()
class Metrics():
def __init__(self,
trade_logs):
self.trade_logs = trade_logs
self.trade_logs['Entry Time'] = pd.to_datetime(self.trade_logs['Entry Time'], infer_datetime_format= True)
self.trade_logs['Exit Time'] = pd.to_datetime(self.trade_logs['Exit Time'], infer_datetime_format= True)
self.performance_metrics = pd.DataFrame(index=[
'Total Trades',
'Winning Trades',
'Losing Trades',
'Net P/L',
'Gross Profit',
'Gross Loss',
'P/L Per Trade',
'Max Drawdown',
'Win Percentage',
'Profit Factor'])
self.monthly_performance = pd.DataFrame()
self.yearly_performance = pd.DataFrame()
def overall_calc(self):
def total_trades_calc(self):
return len(self.trade_logs)
self.performance_metrics.loc['Total Trades', 'Overall'] = total_trades_calc(self)
################################################
def winning_trades_calc(self):
mask = self.trade_logs['PNL']>0
return len(self.trade_logs.loc[mask])
self.performance_metrics.loc['Winning Trades', 'Overall'] = winning_trades_calc(self)
################################################
def losing_trades_calc(self):
mask = self.trade_logs['PNL']<0
return len(self.trade_logs.loc[mask])
self.performance_metrics.loc['Losing Trades', 'Overall'] = losing_trades_calc(self)
################################################
def gross_profit_calc(self):
mask = self.trade_logs['PNL']>0
if len(self.trade_logs.loc[mask])>0:
return round(sum(self.trade_logs['PNL'].loc[mask]),2)
else:
return 0
self.performance_metrics.loc['Gross Profit', 'Overall'] = gross_profit_calc(self)
################################################
def gross_loss_calc(self):
mask = self.trade_logs['PNL']<0
if len(self.trade_logs.loc[mask])>0:
return round(sum(self.trade_logs['PNL'].loc[mask]),2)
else:
return 0
self.performance_metrics.loc['Gross Loss', 'Overall'] = gross_loss_calc(self)
################################################
def net_pnl_calc(self):
return round(sum(self.trade_logs['PNL']),2)
self.performance_metrics.loc['Net P/L', 'Overall'] = net_pnl_calc(self)
###############################################
def pnl_per_trade_calc(self):
return round(sum(self.trade_logs['PNL'])/len(self.trade_logs), 3)
self.performance_metrics.loc['P/L Per Trade', 'Overall'] = pnl_per_trade_calc(self)
################################################
def win_percentage_calc(self):
return round((self.performance_metrics.loc['Winning Trades', 'Overall']/self.performance_metrics.loc['Total Trades', ('Overall')])*100,2)
self.performance_metrics.loc['Win Percentage', 'Overall'] = win_percentage_calc(self)
################################################
def profit_factor_calc(self):
return round(abs(self.performance_metrics.loc['Gross Profit', 'Overall']/self.performance_metrics.loc['Gross Loss', ('Overall')]), 2)
self.performance_metrics.loc['Profit Factor', 'Overall'] = profit_factor_calc(self)
################################################
def pnl_per_win_calc(self):
return round((self.performance_metrics.loc['Gross Profit', 'Overall']/self.performance_metrics.loc['Winning Trades', ('Overall')]),2)
self.performance_metrics.loc['Profit Per Winning Trade', 'Overall'] = pnl_per_win_calc(self)
################################################
def pnl_per_loss_calc(self):
return round((self.performance_metrics.loc['Gross Loss', 'Overall']/self.performance_metrics.loc['Losing Trades', ('Overall')]),2)
self.performance_metrics.loc['Loss Per Losing Trade', 'Overall'] = pnl_per_loss_calc(self)
################################################
def max_drawdown_calc(self):
xs = self.trade_logs['PNL'].cumsum()
i = np.argmax(np.maximum.accumulate(xs) - xs)
j = np.argmax(xs[:i])
return round(abs(xs[i]-xs[j]),2)
self.performance_metrics.loc['Max Drawdown', 'Overall'] = max_drawdown_calc(self)
################################################
def monthly_perf_calc(self):
return self.trade_logs.groupby(self.trade_logs['Entry Time'].dt.month)['PNL'].sum()
self.monthly_performance['Overall'] = monthly_perf_calc(self)
###############################################
def yearly_perf_calc(self):
return self.trade_logs.groupby(self.trade_logs['Entry Time'].dt.year)['PNL'].sum()
self.yearly_performance['Overall'] = yearly_perf_calc(self)
###############################################
def plot_monthly_performance(self):
fig = px.bar( y=self.monthly_performance['Overall'], x=self.monthly_performance.index, title='Monthly Performance')
fig.show()
def plot_yearly_performance(self, ):
fig = px.bar( y=self.yearly_performance['Overall'], x=self.yearly_performance.index, title='Yearly Performance')
fig.show()
def plot_cumulative_returns(self):
fig = px.line( y= self.trade_logs['PNL'].cumsum(), x=self.trade_logs['Entry Time'], title='Cumulative Returns')
fig.show()
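# --- Hedged usage sketch (not part of the original notebook) -----------------------
# Shows how TestBroker and Metrics above are intended to be wired together.
# Assumes SampleStrategy, tqdm and plotly.express (px) are imported earlier in this
# file; the MA periods below are illustrative, not tuned values.
def _example_backtest_with_metrics():
    bt = TestBroker(MA_period_slow=200, MA_period_fast=50)
    bt.testerAlgo()                            # fills bt.tradeLog, one row per closed trade
    metrics = Metrics(trade_logs=bt.tradeLog)
    metrics.overall_calc()                     # populates metrics.performance_metrics
    print(metrics.performance_metrics)
    metrics.plot_cumulative_returns()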
class GenerateSubmission():
def __init__(self,
MA_period_slow=None,
MA_period_fast=None):
assert MA_period_fast is not None
assert MA_period_slow is not None
self.MA_period_slow = MA_period_slow
self.MA_period_fast = MA_period_fast
self.test_bt = TestBroker(MA_period_slow=self.MA_period_slow, MA_period_fast=self.MA_period_fast)
self.test_bt.testerAlgo()
self.combined_tradelog = self.test_bt.tradeLog
self.combined_tradelog['Entry Time'] = pd.to_datetime(self.combined_tradelog['Entry Time'], infer_datetime_format= True)
self.year_array = np.unique(self.combined_tradelog['Entry Time'].dt.year)
        self.submission_metrics = pd.DataFrame()
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
        assert isnull(float('nan'))
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
        mask = isnull(pidx[1:])
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import time
import numpy as np
import pandas as pd
import pymongo
try:
import QUANTAXIS as QA
from QUANTAXIS.QAUtil import (
QASETTING,
DATABASE,
QA_util_date_stamp,
QA_util_date_valid,
QA_util_log_info,
QA_util_to_json_from_pandas,
QA_util_dict_remove_key,
QA_util_code_tolist,
)
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
from QUANTAXIS.QAData.QADataStruct import (
QA_DataStruct_Index_min,
QA_DataStruct_Index_day,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min
)
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
except:
print('PLEASE run "pip install QUANTAXIS" before call GolemQ.GQFetch.portfolio modules')
pass
from GolemQ.GQUtil.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST,
FEATURES as FTR,)
def GQSignal_fetch_position_singal_day(start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
getting_trigger=True,
format='numpy',
ui_log=None,
ui_progress=None):
"""
    Fetch daily indicator data for stocks that fired a specific buy signal.
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
    # Data is written to both wide and long tables at the same time, to keep queries simple
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
if QA_util_date_valid(end):
if (getting_trigger):
            # Query by buy "signal" (trigger)
cursor = coll_indices.find({
ST.TRIGGER_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
else:
            # Query by "holding" (open position) state
cursor = coll_indices.find({
ST.POSITION_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
#print(len(res), start, end)
try:
res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates((['date',
'code'])).set_index(['date',
'code'],
drop=False)
except:
res = None
if (res is not None):
try:
codelist = QA.QA_fetch_stock_name(res[AKA.CODE].tolist())
res['name'] = res.apply(lambda x:codelist.at[x.get(AKA.CODE), 'name'], axis=1)
except:
res['name'] = res['code']
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
            # Other output formats
elif format in ['n', 'N', 'numpy']:
                return np.asarray(res)
            elif format in ['list', 'l', 'L']:
                return np.asarray(res).tolist()
else:
print("QA Error GQSignal_fetch_position_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info('QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start,
end))
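# Hedged usage sketch (not part of the original module): pull the last 30 days of R5
# trigger signals from the default 'myportfolio' collection as a pandas DataFrame.
# Assumes the QUANTAXIS + MongoDB environment referenced above is configured.
def _example_fetch_recent_triggers():
    end = datetime.date.today()
    start = end - datetime.timedelta(days=30)
    return GQSignal_fetch_position_singal_day(start, end,
                                              market_type=QA.MARKET_TYPE.STOCK_CN,
                                              portfolio='myportfolio',
                                              getting_trigger=True,
                                              format='pd')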
def GQSignal_fetch_mainfest_singal_day(start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
getting_trigger=True,
format='numpy',
ui_log=None,
ui_progress=None):
"""
    Fetch daily indicator data for stocks with a main-rally buy signal.
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
    # Data is written to both wide and long tables at the same time, to keep queries simple
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
if QA_util_date_valid(end):
if (getting_trigger):
            # Query by main-rally buy "signal"
cursor = coll_indices.find({
'$and': [{ '$or': [{
FLD.BOOTSTRAP_COMBO_TIMING_LAG:{
'$gt':0
}
},
{
FLD.BOOTSTRAP_GROUND_ZERO_MINOR_TIMING_LAG:{
'$gt':0
}
}]
},
#{ FLD.BOOTSTRAP_COMBO_RETURNS:{
# '$gt':0.00618
# }
# },
{ '$or': [{ FLD.BOOTSTRAP_COMBO_RETURNS:{
'$gt':-0.0927
}
}, { FLD.BOOTSTRAP_COMBO_MINOR_RETURNS:{
'$gt':-0.0927
}
}]},
{ '$or': [{ ST.TRIGGER_R5:{'$gt':0}}, { ST.TRIGGER_RPS:{'$gt':0}}]},
{ "date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}},]
},
{"_id": 0},
batch_size=10000)
else:
            # Query by "holding" (open position) state
cursor = coll_indices.find({
ST.POSITION_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
#print(len(res), start, end)
try:
res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates((['date',
'code'])).set_index(['date',
'code'],
drop=False)
except:
res = None
if (res is not None):
try:
codelist = QA.QA_fetch_stock_name(res[AKA.CODE].tolist())
res['name'] = res.apply(lambda x:codelist.at[x.get(AKA.CODE), 'name'], axis=1)
except:
res['name'] = res['code']
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
            # Other output formats
elif format in ['n', 'N', 'numpy']:
                return np.asarray(res)
            elif format in ['list', 'l', 'L']:
                return np.asarray(res).tolist()
else:
print("QA Error GQSignal_fetch_position_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info('QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start,
end))
def GQSignal_fetch_bootstrap_singal_day(start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
getting_trigger=True,
format='numpy',
ui_log=None,
ui_progress=None):
"""
    Fetch daily indicator data for stocks with a main-rally buy signal.
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
    # Data is written to both wide and long tables at the same time, to keep queries simple
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
if QA_util_date_valid(end):
if (getting_trigger):
            # Query by main-rally buy "signal"
cursor = coll_indices.find({
'$and': [
{ '$or' :[
{ ST.CLUSTER_GROUP_TOWARDS:{'$lt':0} },
{ ST.CLUSTER_GROUP_TOWARDS_MINOR:{'$lt':0} },
]},
{ '$or' :[
{ FTR.UPRISING_RAIL_TIMING_LAG:{'$gt':0} },
{ FLD.BOOTSTRAP_GROUND_ZERO_TIMING_LAG:{'$gt':0} },
]},
{ ST.BOOTSTRAP_GROUND_ZERO:{'$gt':0} },
{ "date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}},]
},
{"_id": 0},
batch_size=10000)
else:
            # Query by "holding" (open position) state
cursor = coll_indices.find({
ST.POSITION_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
#print(len(res), start, end)
try:
res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates((['date',
'code'])).set_index(['date',
'code'],
drop=False)
except:
res = None
if (res is not None):
try:
codelist = QA.QA_fetch_stock_name(res[AKA.CODE].tolist())
res['name'] = res.apply(lambda x:codelist.at[x.get(AKA.CODE), 'name'], axis=1)
except:
res['name'] = res['code']
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
            # Other output formats
elif format in ['n', 'N', 'numpy']:
                return np.asarray(res)
            elif format in ['list', 'l', 'L']:
                return np.asarray(res).tolist()
else:
print("QA Error GQSignal_fetch_position_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info('QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start,
end))
def GQSignal_fetch_code_singal_day(code,
start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
format='numpy',
ui_log=None,
ui_progress=None):
"""
    Fetch daily indicator / strategy-signal data for the specified stock codes.
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
    # Data is written to both wide and long tables at the same time, to keep queries simple
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
# code checking
print(code)
code = QA_util_code_tolist(code)
print(code)
if QA_util_date_valid(end):
cursor = coll_indices.find({
'code': {
'$in': code
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
try:
            res = res.assign(date=pd.to_datetime(res.date))
import pandas as pd
import numpy as np
class DataParser:
@staticmethod
def _parse_companies(cmp_list):
"""
        Builds a companies DataFrame from the list of dicts returned by the request
:param cmp_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=['ID', 'TITLE', 'CMP_TYPE_CUSTOMER', 'CMP_TYPE_PARTNER'])
if cmp_list:
cmp_df = pd.DataFrame(cmp_list)
cmp_df['CMP_TYPE_CUSTOMER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'CUSTOMER') else 0)
cmp_df['CMP_TYPE_PARTNER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'PARTNER') else 0)
cmp_df = cmp_df.drop(columns=['COMPANY_TYPE'], axis=1)
ret_df = pd.concat([ret_df, cmp_df])
return ret_df
@staticmethod
def _parse_deals(deal_list):
"""
        Builds a deals DataFrame from the list of dicts returned by the request
:param deal_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'OPPORTUNITY_DEAL_Q01', 'PROBABILITY_DEAL_Q01', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q01',
'OPPORTUNITY_DEAL_Q09', 'PROBABILITY_DEAL_Q09', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q09',
'OPPORTUNITY_DEAL_MEAN', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEAN', 'CLOSED',
'OPPORTUNITY_DEAL_MEDIAN', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEDIAN', 'DEAL_BY_YEAR'])
ret_df.index.name = 'COMPANY_ID'
if deal_list:
deal_df = pd.DataFrame(deal_list)
deal_df['CLOSED'] = deal_df['CLOSED'].apply(lambda x: 1 if (x == 'Y') else 0)
deal_df['OPPORTUNITY'] = pd.to_numeric(deal_df['OPPORTUNITY'])
deal_df['PROBABILITY'] = pd.to_numeric(deal_df['PROBABILITY'])
deal_df['BEGINDATE'] = pd.to_datetime(deal_df['BEGINDATE'])
deal_df['CLOSEDATE'] = pd.to_datetime(deal_df['CLOSEDATE'])
deal_df['TIME_DIFF_BEGIN_CLOSE'] = (deal_df['CLOSEDATE'] - deal_df['BEGINDATE']).astype(
'timedelta64[h]') / 24
deal_group = deal_df.groupby(by='COMPANY_ID')
deal_count = pd.DataFrame(deal_group['CLOSED'].count())
deal_date_max = deal_group['CLOSEDATE'].max()
deal_date_min = deal_group['BEGINDATE'].min()
d = {'YEAR': (deal_date_max - deal_date_min).astype('timedelta64[h]') / (24 * 365)}
deal_date_max_min_diff = pd.DataFrame(data=d)
deal_by_year = pd.DataFrame()
deal_by_year['DEAL_BY_YEAR'] = (deal_count['CLOSED'] / deal_date_max_min_diff['YEAR']).astype(np.float32)
            deal_quantile01 = deal_group[['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE']].quantile(0.1)
            deal_quantile09 = deal_group[['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE']].quantile(0.9)
            deal_mean = deal_group[['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE', 'CLOSED']].mean()
            deal_median = deal_group[['OPPORTUNITY', 'TIME_DIFF_BEGIN_CLOSE']].median()
deal_result = pd.merge(deal_quantile01, deal_quantile09, on='COMPANY_ID',
suffixes=['_DEAL_Q01', '_DEAL_Q09'])
deal_result1 = pd.merge(deal_mean, deal_median, on='COMPANY_ID', suffixes=['_DEAL_MEAN', '_DEAL_MEDIAN'])
deal_result = pd.merge(deal_result, deal_result1, on='COMPANY_ID')
deal_result = pd.merge(deal_result, deal_by_year, on='COMPANY_ID')
deal_result = deal_result.mask(np.isinf(deal_result))
ret_df = pd.concat([ret_df, deal_result])
return ret_df
@staticmethod
def _parse_invoices(inv_list):
"""
        Builds an invoices DataFrame from the list of dicts returned by the request
:param inv_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'PRICE_INV_Q01', 'TIME_DIFF_PAYED_BILL_INV_Q01', 'TIME_DIFF_PAYBEF_PAYED_INV_Q01',
'PRICE_INV_Q09', 'TIME_DIFF_PAYED_BILL_INV_Q09', 'TIME_DIFF_PAYBEF_PAYED_INV_Q09', 'PRICE_INV_MEAN',
'TIME_DIFF_PAYED_BILL_INV_MEAN', 'TIME_DIFF_PAYBEF_PAYED_INV_MEAN', 'PAYED', 'STATUS_ID_P',
'STATUS_ID_D', 'STATUS_ID_N', 'STATUS_ID_T', 'PRICE_INV_MEDIAN', 'TIME_DIFF_PAYED_BILL_INV_MEDIAN',
'TIME_DIFF_PAYBEF_PAYED_INV_MEDIAN', 'MONTH_TOGETHER_INV', 'DEAL_BY_YEAR'])
ret_df.index.name = 'UF_COMPANY_ID'
if inv_list:
inv_df = pd.DataFrame(inv_list)
inv_df['PRICE'] = pd.to_numeric(inv_df['PRICE'])
inv_df['DATE_BILL'] = pd.to_datetime(inv_df['DATE_BILL'])
inv_df['DATE_PAYED'] = pd.to_datetime(inv_df['DATE_PAYED'])
            inv_df['DATE_PAY_BEFORE'] = pd.to_datetime(inv_df['DATE_PAY_BEFORE'])
import pandas as pd
import numpy as np
#import sys
#sys.path.append("F:\3RDSEM\DM\Assignment_1\DM-Project\Assignment-1\Code")
from Utility import getDataFrame
fileNames = ["./../DataFolder/CGMSeriesLunchPat1.csv", "./../DataFolder/CGMSeriesLunchPat2.csv",
"./../DataFolder/CGMSeriesLunchPat3.csv", "./../DataFolder/CGMSeriesLunchPat4.csv",
"./../DataFolder/CGMSeriesLunchPat5.csv"]
def Feature_Extraction(df):
feature_1_df = df.groupby(np.arange(len(df.columns))//6, axis=1).mean()
feature_1_df.columns=['mean_'+str(i+1) for i, column in enumerate(feature_1_df.columns)]
for i, columns in enumerate(feature_1_df.columns):
feature_1_df['shifted_mean' + str(i+1)] = feature_1_df['mean_'+str(i+1)].shift(1)
#==================================================================
local_maxima = []
for i in range(0,len(df.index)):
indices = []
for j in range(0, len(df.columns)-1):
if((df.iloc[i][df.columns[j]] >= df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] > df.iloc[i][df.columns[j+1]]) or
(df.iloc[i][df.columns[j]] > df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] >= df.iloc[i][df.columns[j+1]])
):
indices.append(j)
local_maxima.append(indices)
local_minima = []
for i in range(0,len(df.index)):
indices = []
for j in range(0, len(df.columns)-1):
if((df.iloc[i][df.columns[j]] <= df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] < df.iloc[i][df.columns[j+1]]) or
(df.iloc[i][df.columns[j]] < df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] <= df.iloc[i][df.columns[j+1]])):
indices.append(j)
local_minima.append(indices)
#==================================================================
feature_2 = []
for i,maxima in enumerate(local_maxima):
global_maxima = 0
temp_list = []
for val in maxima:
temp_list.extend(df.iloc[i][:].tolist())
global_maxima = max(df.iloc[i][val], global_maxima)
feature_2.append([global_maxima, (temp_list.index(global_maxima)) // 6 + 1 if temp_list != [] else -1])
feature_2_df = pd.DataFrame(feature_2)
feature_2_df.columns = ['Global_Maximum', 'Global_Maximum_Interval']
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns)//6 + 1)]
feature_3 = []
for i, (maxima, minima) in enumerate(zip(local_maxima, local_minima)):
count_local_maxima_interval = [0] * (len(df.columns)//6)
count_local_minima_interval = [0] * (len(df.columns)//6)
for val in maxima:
for seg in range(1, len(segments)):
if(val > segments[seg-1] and val <= segments[seg]):
count_local_maxima_interval[seg-1] += 1
for val in minima:
for seg in range(1, len(segments)):
if(val > segments[seg-1] and val <= segments[seg]):
count_local_minima_interval[seg-1] += 1
feature_3.append(count_local_maxima_interval + count_local_minima_interval)
feature_3_df = pd.DataFrame(feature_3)
feature_3_df.columns = ["Count_Local_Max_" + str(i) for i in range(1, len(segments))] + \
["Count_Local_Min_" + str(i) for i in range(1, len(segments))]
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns)//6 + 1)]
feature_4 = []
interval = -1
for row, (maxima) in enumerate(local_maxima):
diff_interval = [0] * (len(df.columns)//6)
for val in maxima:
for seg in range(1, len(segments)):
if(val > segments[seg-1] and val <= segments[seg]):
interval = seg-1
break
local_maxima_interval = df.iloc[row][val]
prev = val - 1
prev_local_minimum = 1000
while(prev > segments[interval]):
prev_local_minimum = min(df.iloc[row][prev], prev_local_minimum)
prev -= 1
prev_local_minimum = min(df.iloc[row][prev], prev_local_minimum)
prev_local_minimum %= 1000
diff = local_maxima_interval - prev_local_minimum
diff_interval[interval] = diff
feature_4.append(diff_interval)
feature_4_df = pd.DataFrame(feature_4)
feature_4_df.columns = ["Diff_Local_Max_Min_Interval_" + str(i) for i in range(1, len(segments))]
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns) // 6 + 1)]
feature_5 = {}
for i in range(len(segments) - 1):
df1 = df.iloc[:, segments[i]:segments[i + 1]]
diff1 = df1[df1.columns[::-1]].diff(axis=1)
if 'cgmSeries_30' in diff1.columns:
diff1['cgmSeries_30'].fillna(0, inplace=True)
sum1 = diff1.sum(axis=1)
feature_5[i] = sum1
feature_5_df = pd.DataFrame.from_dict(feature_5)
feature_5_df.columns = ['CGM_Displacement_Interval_' + str(i) for i in range(1, len(segments))]
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns) // 6 + 1)]
feature_6 = {}
for i in range(len(segments) - 1):
df1 = df.iloc[:, segments[i]:segments[i + 1]]
diff1 = df1[df1.columns[::-1]].diff(axis=1)
if 'cgmSeries_30' in diff1.columns:
diff1['cgmSeries_30'].fillna(0, inplace=True)
mean1 = diff1.mean(axis=1)
feature_6[i] = mean1
feature_6_df = pd.DataFrame.from_dict(feature_6)
feature_6_df.columns = ['CGM_Velocity_Interval_' + str(i) for i in range(1, len(segments))]
#==================================================================
final_df = pd.concat([df, feature_1_df, feature_2_df, feature_3_df, feature_4_df, feature_5_df, feature_6_df], axis=1)
return final_df
final_df = pd.DataFrame()
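# Hedged sketch (the module-level code that fills final_df is truncated in this dump):
# build the combined feature table by running Feature_Extraction over every patient CSV.
# Assumes getDataFrame(fileName) returns the cleaned CGM DataFrame expected above.
def build_feature_table(file_list=fileNames):
    frames = [Feature_Extraction(getDataFrame(f)) for f in file_list]
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()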
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import html
import itertools
import os
import re
import feedparser
import pandas as pd
import requests
import zenhan
BRANCHS = ['jp', 'en', 'ru', 'ko', 'es', 'cn', 'cs',
'fr', 'pl', 'th', 'de', 'it', 'ua', 'pt', 'uo']
currentpath = os.path.dirname(os.path.abspath(__file__))
def get_country_from_code(brt):
if brt.isalpha():
brt = brt.upper()
try:
dictionary = pd.read_csv(
currentpath + "/data/ISO3166-1.CSV"
)
except FileNotFoundError as e:
print(e)
country = dictionary.query('二字 == @brt')
if country.empty:
return "該当する国コードは存在しません"
else:
country = country.values.tolist()
country = itertools.chain(*country)
country = list(country)
return country[0] + "支部はまだ存在しませんよ?"
else:
return "国コードが正しくありません."
def scp_number(msg):
msg = zenhan.z2h(msg.casefold()).replace("-", "").replace("scp", "")
number = re.sub("\\D", "", msg)
    if number == "":
return None
brt = msg.replace(number, "")
if brt == "":
brt = "en"
    if brt not in BRANCHS:  # TODO: improve this branch-code check
reply = get_country_from_code(brt)
return reply
try:
dictionary = pd.read_csv(currentpath + "/data/scps.csv", index_col=0)
except FileNotFoundError as e:
print(e)
result = dictionary.query('branches in @brt')
result = result.query('url.str.contains(@number)', engine='python')
result = result[0:1].values.tolist()
result = itertools.chain(*result)
result = list(result)
    if len(result) == 0 or number != re.sub("\\D", "", result[0]):
if len(number) > 4:
return None
if "en" in brt:
return("scp-" + str(number) + "はまだ存在しません")
else:
return("scp-" + str(number) + "-" + str(brt) + "はまだ存在しません")
return(result)
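# Hedged usage sketch (not part of the original bot): resolve a few example queries.
# Assumes data/scps.csv and data/ISO3166-1.CSV have been generated beforehand.
def _example_scp_lookup():
    for query in ("SCP-173", "scp-173-jp", "scp-40000"):
        print(query, "->", scp_number(query))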
def src_tale(msg):
result = pd.DataFrame(columns=['url', 'title', 'author', 'branches'])
try:
dictionary = pd.read_csv(
currentpath +
f"/data/tale.csv",
index_col=0)
except FileNotFoundError as e:
print(e)
'''if brt is not "*":
dictionary = dictionary.query('branches in @brt')'''
dictionary_url = dictionary.query(
'url.str.contains(@msg)', engine='python')
dictionary_title = dictionary.query(
'title.str.contains(@msg)', engine='python')
dictionary_author = dictionary.query(
'author.str.contains(@msg)', engine='python')
    result = pd.concat([dictionary_url, dictionary_title, dictionary_author])