from glob import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from shutil import copyfile
from utils import create_new_folder_if_it_doesnt_exist
import seaborn as sns
import rasterio as rio
sizes = "32"
class ImageMetrics:
def __init__(self, imagePath, maskPath, savePath):
self.imagePath = imagePath
self.maskPath = maskPath
self.savePath = savePath
def calculate_TP_FP_FN(self, area):
image = rio.open(self.imagePath).read()
meta = rio.open(self.imagePath).meta
mask = rio.open(self.maskPath).read()
TP = image + mask
TP = TP == 256
FP = TP - image
FP = FP == 255
FN = mask - TP
FN = FN == 255
result = {"TP": TP, "FP": FP, "FN": FN}
for key, value in result.items():
create_new_folder_if_it_doesnt_exist(f"{self.savePath}/area_{area}")
with rio.open(f"{self.savePath}/area_{area}/{key}_area_{area}.tif", 'w', **meta) as dst:
dst.write(value.astype("uint8"))
data = self.calculate_pixels(area)
self.plot_hist(data,area)
def calculate_pixels(self,area):
result = {}
for i in ["TP", "FP", "FN"]:
image = rio.open(f"{self.savePath}/area_{area}/{i}_area_{area}.tif").read()
(unique, counts) = np.unique(image, return_counts=True)
result[i] = counts[1]
return result
def plot_hist(self, data,area):
print(list(data.values()))
colors = ["#04d253", "#ed0e0e", "#f7ff0f"]
        sns.barplot(x=list(data.keys()), y=list(data.values()), palette=colors)
plt.savefig(f"{self.savePath}/area_{area}/hist_area_{area}.png",dpi=90)
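# A minimal usage sketch for ImageMetrics (hypothetical paths; assumes the prediction and mask
# rasters are single-band binary .tif files produced by the same pipeline):
#
#   metrics = ImageMetrics(imagePath="predictions/area_1.tif",
#                          maskPath="masks/area_1.tif",
#                          savePath="results")
#   metrics.calculate_TP_FP_FN(area=1)  # writes TP/FP/FN rasters and a bar chart under results/area_1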
class BestOverallResult:
def __init__(self, area1ResultPath, area2ResultPath, area3ResultPath, sizes=[32,64,128,256]):
self.sizes = sizes
self.model1Result = {}
self.better_results = {"size":[],"path":[], "f1":[], "area":[]}
for size in sizes:
self.model1Result[size] = self._get_data_path(path=area1ResultPath, _size=size)
self.model1Result[size].extend(self._get_data_path(path=area1ResultPath, _size=size, network="unet"))
self.model1Result[size].extend(self._get_data_path(path=area2ResultPath, _size=size))
self.model1Result[size].extend(self._get_data_path(path=area2ResultPath, _size=size, network="unet"))
self.model1Result[size].extend(self._get_data_path(path=area3ResultPath, _size=size))
self.model1Result[size].extend(self._get_data_path(path=area3ResultPath, _size=size, network="unet"))
def get_best_overall_result_of_each_test_area(self):
bestTestPath = {32:{},64:{}, 128:{},256:{}}
# loop over keys
for size in self.sizes:
bestResults = {"test1": 0, "test2": 0, "test3": 0}
for i in range(len(self.model1Result[size])):
path = self.model1Result[size][i]
# open tables of model1
df1 = pd.read_csv(path)
area1 = [df1["f1_score"][0]]
area2 = [df1["f1_score"][1]]
area3 = [df1["f1_score"][2]]
if max(area1) > bestResults["test1"]:
bestTestPath[size]["test1"] = path
bestResults["test1"] = max(area1)
self.better_results["size"].append(size)
self.better_results["path"].append(path)
self.better_results["f1"].append(max(area1))
self.better_results["area"].append("test1")
if max(area2) > bestResults["test2"]:
bestTestPath[size]["test2"] = path
bestResults["test2"] = max(area2)
self.better_results["size"].append(size)
self.better_results["path"].append(path)
self.better_results["f1"].append(max(area2))
self.better_results["area"].append("test2")
if max(area3) > bestResults["test3"]:
bestTestPath[size]["test3"] = path
bestResults["test3"] = max(area3)
self.better_results["size"].append(size)
self.better_results["path"].append(path)
self.better_results["f1"].append(max(area3))
self.better_results["area"].append("test3")
        df = pd.DataFrame(self.better_results)
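# A hedged usage sketch for BestOverallResult (hypothetical result folders; assumes each result
# CSV has an "f1_score" column with one row per test area, as read above):
#
#   best = BestOverallResult("results/area1", "results/area2", "results/area3", sizes=[32, 64, 128, 256])
#   best.get_best_overall_result_of_each_test_area()
#   # the winning paths and f1 scores per patch size accumulate in best.better_results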
import os
from dotenv import load_dotenv
import pandas as pd
# Scrape data from the NRCan program page to find out about investments
def read_dataset(url):
# The table with the data is the first table on the page
df = pd.read_html(url)[0]
df.columns = df.columns.str.replace(' ', '_')
df = (df.pipe(numeric_funding)
.pipe(extract_phase)
.pipe(clean_status_values)
.pipe(extract_project_type)
.assign(Province=lambda x: pd.Categorical(x['Province']),
Status=lambda x: pd.Categorical(x['Status']),
NRCan_Funding_Program=lambda x: pd.Categorical(x['NRCan_Funding_Program'])))
return df
def numeric_funding(df):
df = df.copy()
df['NRCan_Funding'] = (pd.to_numeric(df['NRCan_Funding']
.str.replace('$', '', regex=False)
.str.replace(',', '', regex=False)))
return df
def extract_phase(df):
df = df.copy()
    phase = df['NRCan_Funding_Program'].str.extract(r'(\d)', expand=False)
df['Phase'] = pd.to_numeric(phase)
return df
def clean_status_values(df):
df = df.copy()
df['Status'] = df['Status'].str.casefold()
return df
def extract_project_type(df):
df = df.copy()
    types = df['Project'].str.extract(r"\d\s(\w+)")
df['Project_Type'] = types
return df
if __name__ == '__main__':
# Prime the environment
load_dotenv()
phase1 = read_dataset(os.environ['PHASE1_URL'])
phase2 = read_dataset(os.environ['PHASE2_URL'])
    investments = pd.concat([phase1, phase2])
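    # A hedged follow-up sketch: with the phases concatenated, the columns created above
    # (NRCan_Funding, Phase, Province, Status) can be summarized, e.g.:
    #
    #   print(investments.groupby('Phase')['NRCan_Funding'].sum())
    #   print(investments['Status'].value_counts())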
import os, sys
newsyspath = os.path.realpath(__file__).split('\\')[:-2]
if len(newsyspath) == 0:
newsyspath = os.path.realpath(__file__).split('/')[:-2]
sys.path.append('/'.join(newsyspath))
else:
sys.path.append('\\'.join(newsyspath))
from database.database_schemas import Schemas
from database.dsstox.compounds import Compounds
from database.dsstox.generic_substances import GenericSubstances
from database.session import SQLSession
from database.dsstox.generic_substance_compounds import GenericSubstanceCompounds
import sys
import click
import pandas as pd
from io import StringIO
@click.command()
@click.argument('tsv_input', required=False)
@click.option('-o', default='',
help='output file path in .tsv format')
@click.option('-noerror', is_flag=True, default=True,
help='remove the default error message')
def cli(tsv_input,o,noerror):
### HELP DOCUMENTATION ###
"""
    SIDtoCID takes in a .tsv datatable with a dsstox_substance_id column (must be the index or one of the first two columns).
    The dsstox_substance_id column is converted to dsstox_compound_id.
    A .tsv file can also be read from stdin. Default output is stdout as .tsv.
\n\n
    Warning!: column names are needed in the input .tsv! Otherwise the first row will be skipped.
-- EXAMPLE I/O TABLES --
INPUT: .tsv file
|DTXSID COLUMN | ENDPOINT COLUMN |\n
----------------------------------\n
| DTXSID123456 | 0 |\n
----------------------------------\n
| DTXSID234567 | 1 |\n
----------------------------------\n
| DTXSID345678 | 0 |\n
----------------------------------\n
EXPORT: .tsv file
|DTXCID COLUMN | ENDPOINT COLUMN |\n
----------------------------------\n
| DTXCID891011 | 0 |\n
----------------------------------\n
| DTXCID910111 | 1 |\n
----------------------------------\n
| DTXCID101112 | 0 |\n
----------------------------------\n
"""
# creates table of .tsv file
# takes stdin if argument is not directly given
if not tsv_input:
tsv_input = sys.stdin.read()
mytable = pd.read_csv(StringIO(tsv_input), sep="\t")
elif tsv_input:
mytable = pd.read_csv(tsv_input, sep="\t")
#checks the index, and first two columns for DTXSIDs
#input table should be in the correct format already
try:
if mytable.iloc[0,0][0:6] == 'DTXSID':
idrow = mytable.iloc[:,0]
colname = mytable.columns.values[0]
except:
pass
try:
if mytable.iloc[0,1][0:6] == 'DTXSID':
idrow = mytable.iloc[:,1]
            colname = mytable.columns.values[1]
except:
pass
try:
if mytable.index.values[0][0:6] == 'DTXSID':
idrow = mytable.index.values
mytable.index.name = 'DTXSID'
colname = mytable.index.name
except:
pass
# drop empty columns
mytable = mytable.dropna(axis='columns', how='all')
# click.echo(mytable.columns.values)
#make an SQL query table for relevant SIDs & CIDs
mysession = SQLSession(Schemas.dsstox_schema).get_session()
query = mysession.query(GenericSubstances.dsstox_substance_id, Compounds.dsstox_compound_id).join(GenericSubstanceCompounds) \
.join(Compounds)
df = pd.DataFrame(list(query))
idrow = pd.DataFrame(idrow)
idrow.columns = ['dsstox_substance_id']
df = pd.merge(idrow, df, on='dsstox_substance_id', how='inner')
#if no DTXCIDs returned
if df.empty and noerror:
click.secho("Error: No valid DTXSIDs or no associated DTXCIDs\n{}".format(list(idrow)), fg='red', bold=True)
sys.exit(1)
elif df.empty:
sys.exit(1)
#creates new CID table
mytable = mytable.rename(columns={colname : "dsstox_substance_id"})
    mytable = pd.merge(df, mytable, on='dsstox_substance_id')
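    # Hedged usage sketch (the module's file name is not shown here; "sid_to_cid.py" is only a
    # placeholder). Per the help text above, the input .tsv can be passed as an argument or piped
    # via stdin, and output goes to stdout or to the -o path:
    #
    #   python sid_to_cid.py input_table.tsv -o output_table.tsv
    #   cat input_table.tsv | python sid_to_cid.py > output_table.tsv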
from pytwanalysis.py_twitter_db import TwitterDB
from pytwanalysis.py_twitter_graphs import TwitterGraphs
from pytwanalysis.py_twitter_topics import TwitterTopics
#from pyTwitterGraphAnalysis import tw_graph
#from pyTwitterDB import tw_database
#from pyTwitterTopics import tw_topics
from pymongo import MongoClient
import networkx as nx
import numpy as np
import os
import datetime
import csv
import pandas as pd
import matplotlib.pyplot as plt
import time
import warnings
warnings.filterwarnings("ignore")
MIN_NO_OF_NODES_TO_REDUCE_GRAPH = 100
class TwitterAnalysis(TwitterGraphs, TwitterDB, TwitterTopics):
"""
Main class - It inherits TwitterGraphs, TwitterDB, and TwitterTopics classes.
"""
def __init__(
self,
base_folder_path,
mongoDB_database):
TwitterGraphs.__init__(self, base_folder_path)
TwitterDB.__init__(self, mongoDB_database)
TwitterTopics.__init__(self, base_folder_path, mongoDB_database)
self.type_of_graph = 'user_conn_all'
self.is_bot_Filter = None
self.period_arr = None
self.create_nodes_edges_files_flag = 'Y'
self.create_graphs_files_flag ='Y'
self.create_topic_model_files_flag = 'Y'
self.create_ht_frequency_files_flag = 'Y'
self.create_words_frequency_files_flag = 'Y'
self.create_timeseries_files_flag = 'Y'
self.create_top_nodes_files_flag = 'Y'
self.create_community_files_flag = 'N'
self.create_ht_conn_files_flag = 'Y'
self.num_of_topics = 4
self.top_no_word_filter = None
self.top_ht_to_ignore = None
self.graph_plot_cutoff_no_nodes = 500
self.graph_plot_cutoff_no_edges = 2000
self.create_graph_without_node_scale_flag = 'N'
self.create_graph_with_node_scale_flag = 'Y'
self.create_reduced_graph_flag = 'Y'
self.reduced_graph_comty_contract_per = 90
self.reduced_graph_remove_edge_weight = None
self.reduced_graph_remove_edges = 'Y'
self.top_degree_start = 1
self.top_degree_end = 10
self.period_top_degree_start = 1
self.period_top_degree_end = 5
self.commty_edge_size_cutoff = 200
self.user_conn_filter = None
self.edge_prefix_str = 'UserConnections_'
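    # A minimal construction sketch (hypothetical base folder and database; the second argument
    # is whatever MongoDB database handle the TwitterDB base class expects):
    #
    #   from pymongo import MongoClient
    #   db = MongoClient()['my_twitter_db']               # hypothetical database name
    #   ta = TwitterAnalysis('D:\\Data\\MyAnalysis', db)
    #   ta.setConfigs(type_of_graph='user_conn_all', period_arr=None)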
#####################################
# Method: setConfigs
    # Description: Configure the object's settings
def setConfigs(
self,
type_of_graph='user_conn_all',
is_bot_Filter=None,
period_arr=None,
create_nodes_edges_files_flag='Y',
create_graphs_files_flag='Y',
create_topic_model_files_flag='Y',
create_ht_frequency_files_flag='Y',
create_words_frequency_files_flag='Y',
create_timeseries_files_flag='Y',
create_top_nodes_files_flag = 'Y',
create_community_files_flag = 'N',
create_ht_conn_files_flag='Y',
num_of_topics=4,
top_no_word_filter=None,
top_ht_to_ignore=None,
graph_plot_cutoff_no_nodes=500,
graph_plot_cutoff_no_edges=2000,
create_graph_without_node_scale_flag='N',
create_graph_with_node_scale_flag='Y',
create_reduced_graph_flag='Y',
reduced_graph_comty_contract_per=90,
reduced_graph_remove_edge_weight=None,
reduced_graph_remove_edges='Y',
top_degree_start=1,
top_degree_end=10,
period_top_degree_start=1,
period_top_degree_end=5,
commty_edge_size_cutoff=200):
"""
Configure the current object settings to drive the automation of the analysis files
Parameters
----------
type_of_graph : (Optional)
This setting defines the type of graph to analyze. Six different options are available: user_conn_all, user_conn_retweet, user_conn_quote, user_conn_reply, user_conn_mention, and ht_conn.
(Default='user_conn_all')
        is_bot_Filter : (Optional)
            Filters the data by bot status. Use None to include all data, '0' to exclude data associated with bots, or '1' to include only data associated with bots. (Default=None)
period_arr : (Optional)
An array of start and end dates can be set so that the pipeline creates a separate analysis folder for each of the periods in the array. (Default=None)
create_nodes_edges_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will create two files for each graph and sub-graph. One file with the edge list, and one with the node list and their respective degree.(Default='Y')
create_graphs_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will plot the graph showing all the connections.
(Default='Y')
create_topic_model_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will create topic discovery related files for each folder. It will create a text file with all the tweets that are part of that folder, it will also train a LDA model based on the tweets texts and plot a graph with the results.
(Default='Y')
create_ht_frequency_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will create hashtag frequency files for each folder. It will create a text file with the full list of hashtags and their frequency, a wordcloud showing the most frequently used hashtags, and barcharts showing the top 30 hashtags.
            (Default='Y')
create_words_frequency_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will create word frequency files for each folder. It will create a text file with a list of words and their frequency, a wordcloud showing the most frequently used words, and barcharts showing the top 30 words.
(Default='Y')
create_timeseries_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will create timeseries graphs for each folder representing the tweet count by day, and the top hashtags frequency count by day.
(Default='Y')
create_top_nodes_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will create separate analysis folders for all the top degree nodes.
(Default='Y')
create_community_files_flag : (Optional)
If this setting is set to 'Y', the pipeline will use the louvain method to assign each node to a community. A separate folder for each of the communities will be created with all the analysis files.
(Default='N')
create_ht_conn_files_flag : (Optional)
            If this setting is set to 'Y', the pipeline will plot hashtag connection graphs. This can be used when user connections are being analyzed, but it could still be interesting to see the hashtag connections made by that group of users.
(Default='Y')
num_of_topics : (Optional)
If the setting *CREATE_TOPIC_MODEL_FILES_FLAG* was set to 'Y', then this number will be used to send as input to the LDA model. If no number is given, the pipeline will use 4 as the default value.
(Default=4)
top_no_word_filter : (Optional)
If the setting *CREATE_WORDS_FREQUENCY_FILES_FLAG* was set to 'Y', then this number will be used to decide how many words will be saved in the word frequency list text file. If no number is given, the pipeline will use 5000 as the default value.
(Default=None)
top_ht_to_ignore : (Optional)
If the setting *CREATE_HT_CONN_FILES_FLAG* was set to 'Y', then this number will be used to choose how many top hashtags can be ignored. Sometimes ignoring the main hashtag can be helpful in visualizations to discovery other interesting structures within the graph.
(Default=None)
graph_plot_cutoff_no_nodes : (Optional)
            Used with the graph_plot_cutoff_no_edges parameter. For each graph created, these numbers will be used as cutoff values to decide whether a graph is too large to plot. Choosing a large number can make plotting the graph take a long time. Choosing a small number can result in graphs that are too reduced to have much value, or even in graphs that cannot be plotted at all because they cannot be reduced further.
(Default=500)
graph_plot_cutoff_no_edges : (Optional)
            Used with the graph_plot_cutoff_no_nodes parameter. For each graph created, these numbers will be used as cutoff values to decide whether a graph is too large to plot. Choosing a large number can make plotting the graph take a long time. Choosing a small number can result in graphs that are too reduced to have much value, or even in graphs that cannot be plotted at all because they cannot be reduced further.
(Default=2000)
create_graph_without_node_scale_flag : (Optional)
For each graph created, if this setting is set to 'Y', the pipeline will try to plot the full graph with no reduction and without any logic for scaling the node size.
(Default='N')
create_graph_with_node_scale_flag : (Optional)
For each graph created, if this setting is set to 'Y', the pipeline will try to plot the full graph with no reduction, but with additional logic for scaling the node size.
(Default='Y')
create_reduced_graph_flag : (Optional)
For each graph created, if this setting is set to 'Y', the pipeline will try to plot the reduced form of the graph.
(Default='Y')
reduced_graph_comty_contract_per : (Optional)
If the setting *CREATE_REDUCED_GRAPH_FLAG* was set to 'Y', then this number will be used to reduce the graphs by removing a percentage of each community found in that particular graph. The logic can be run multiple times with different percentages. For each time, a new graph file will be saved with a different name according to the parameter given.
(Default=90)
reduced_graph_remove_edge_weight : (Optional)
If the setting *CREATE_REDUCED_GRAPH_FLAG* was set to 'Y', then this number will be used to reduce the graphs by removing edges that have weights smaller then this number. The logic can be run multiple times with different percentages. For each time, a new graph file will be saved with a different name according to the parameter given.
(Default=None)
reduced_graph_remove_edges : (Optional)
            If this setting is set to 'Y', and the setting *CREATE_REDUCED_GRAPH_FLAG* was set to 'Y', then the pipeline will continuously reduce the graph by removing edges of low-degree nodes, increasing the degree cutoff until the graph sizes fall below the values set in the GRAPH_PLOT_CUTOFF parameters.
(Default='Y')
top_degree_start : (Optional)
If the setting *CREATE_TOP_NODES_FILES_FLAG* was set to 'Y', then these numbers will define how many top degree node sub-folders to create.
(Default=1)
top_degree_end : (Optional)
If the setting *CREATE_TOP_NODES_FILES_FLAG* was set to 'Y', then these numbers will define how many top degree node sub-folders to create.
(Default=10)
period_top_degree_start : (Optional)
If the setting *CREATE_TOP_NODES_FILES_FLAG* was set to 'Y', then these numbers will define how many top degree node sub-folders for each period to create.
(Default=1)
period_top_degree_end : (Optional)
If the setting *CREATE_TOP_NODES_FILES_FLAG* was set to 'Y', then these numbers will define how many top degree node sub-folders for each period to create.
(Default=5)
commty_edge_size_cutoff : (Optional)
            If the setting *CREATE_COMMUNITY_FILES_FLAG* was set to 'Y', then this number will be used as the community size cutoff. Any community with fewer edges than this number will be ignored. If no number is given, the pipeline will use 200 as the default value.
(Default=200)
Examples
--------
>>> setConfigs(type_of_graph=TYPE_OF_GRAPH,
>>> is_bot_Filter=IS_BOT_FILTER,
>>> period_arr=PERIOD_ARR,
>>> create_nodes_edges_files_flag=CREATE_NODES_EDGES_FILES_FLAG,
>>> create_graphs_files_flag=CREATE_GRAPHS_FILES_FLAG,
>>> create_topic_model_files_flag=CREATE_TOPIC_MODEL_FILES_FLAG,
>>> create_ht_frequency_files_flag=CREATE_HT_FREQUENCY_FILES_FLAG,
>>> create_words_frequency_files_flag=CREATE_WORDS_FREQUENCY_FILES_FLAG,
>>> create_timeseries_files_flag=CREATE_TIMESERIES_FILES_FLAG,
>>> create_top_nodes_files_flag=CREATE_TOP_NODES_FILES_FLAG,
>>> create_community_files_flag=CREATE_COMMUNITY_FILES_FLAG,
>>> create_ht_conn_files_flag=CREATE_HT_CONN_FILES_FLAG,
>>> num_of_topics=NUM_OF_TOPICS,
>>> top_no_word_filter=TOP_NO_WORD_FILTER,
>>> top_ht_to_ignore=TOP_HT_TO_IGNORE,
>>> graph_plot_cutoff_no_nodes=GRAPH_PLOT_CUTOFF_NO_NODES,
>>> graph_plot_cutoff_no_edges=GRAPH_PLOT_CUTOFF_NO_EDGES,
>>> create_graph_without_node_scale_flag=CREATE_GRAPH_WITHOUT_NODE_SCALE_FLAG,
>>> create_graph_with_node_scale_flag=CREATE_GRAPH_WITH_NODE_SCALE_FLAG,
>>> create_reduced_graph_flag=CREATE_REDUCED_GRAPH_FLAG,
>>> reduced_graph_comty_contract_per=REDUCED_GRAPH_COMTY_PER,
>>> reduced_graph_remove_edge_weight=REDUCED_GRAPH_REMOVE_EDGE_WEIGHT,
>>> reduced_graph_remove_edges=REDUCED_GRAPH_REMOVE_EDGES_UNTIL_CUTOFF_FLAG,
>>> top_degree_start=TOP_DEGREE_START,
>>> top_degree_end=TOP_DEGREE_END,
>>> period_top_degree_start=PERIOD_TOP_DEGREE_START,
>>> period_top_degree_end=PERIOD_TOP_DEGREE_END,
>>> commty_edge_size_cutoff=COMMTY_EDGE_SIZE_CUTOFF
>>> )
"""
self.type_of_graph = type_of_graph
self.is_bot_Filter = is_bot_Filter
self.period_arr = period_arr
self.create_nodes_edges_files_flag = create_nodes_edges_files_flag
self.create_graphs_files_flag = create_graphs_files_flag
self.create_topic_model_files_flag = create_topic_model_files_flag
self.create_ht_frequency_files_flag = create_ht_frequency_files_flag
self.create_words_frequency_files_flag = create_words_frequency_files_flag
self.create_timeseries_files_flag = create_timeseries_files_flag
self.create_top_nodes_files_flag = create_top_nodes_files_flag
self.create_community_files_flag = create_community_files_flag
self.create_ht_conn_files_flag = create_ht_conn_files_flag
self.num_of_topics = num_of_topics
self.top_no_word_filter = top_no_word_filter
self.top_ht_to_ignore = top_ht_to_ignore
self.graph_plot_cutoff_no_nodes = graph_plot_cutoff_no_nodes
self.graph_plot_cutoff_no_edges = graph_plot_cutoff_no_edges
self.create_graph_without_node_scale_flag = create_graph_without_node_scale_flag
self.create_graph_with_node_scale_flag = create_graph_with_node_scale_flag
self.create_reduced_graph_flag = create_reduced_graph_flag
self.reduced_graph_comty_contract_per = reduced_graph_comty_contract_per
self.reduced_graph_remove_edge_weight = reduced_graph_remove_edge_weight
self.reduced_graph_remove_edges = reduced_graph_remove_edges
self.top_degree_start = top_degree_start
self.top_degree_end = top_degree_end
self.period_top_degree_start = period_top_degree_start
self.period_top_degree_end = period_top_degree_end
self.commty_edge_size_cutoff = commty_edge_size_cutoff
if self.type_of_graph == 'user_conn_all':
self.edge_prefix_str = 'UserConnections_'
elif self.type_of_graph == 'user_conn_mention':
self.edge_prefix_str = 'MentionUserConnections_'
self.user_conn_filter = 'mention'
elif self.type_of_graph == 'user_conn_retweet':
self.edge_prefix_str = 'RetweetUserConnections_'
self.user_conn_filter = 'retweet'
elif self.type_of_graph == 'user_conn_reply':
self.edge_prefix_str = 'ReplyUserConnections_'
self.user_conn_filter = 'reply'
elif self.type_of_graph == 'user_conn_quote':
self.edge_prefix_str = 'QuoteUserConnections_'
self.user_conn_filter = 'quote'
elif self.type_of_graph == 'ht_conn':
self.edge_prefix_str = 'HTConnection_'
self.export_type = 'ht_edges'
#####################################
# Method: create_path
# Description: creates a path to add the files for this node
def create_path(self, path):
if not os.path.exists(path):
os.makedirs(path)
#####################################
# Method: get_now_dt
    # Description: returns the formatted current timestamp for printing
def get_now_dt(self):
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
#####################################
# Method: concat_edges
# Description: aux function to concatenate edges to help filter in mongoDB
def concat_edges(self, G):
"""
Aux function to concatenate edges to help filter in mongoDB
Parameters
----------
G :
undirected networkx graph created from the Twitter data
Returns
-------
arr_edges
            the array with the concatenated edges
Examples
--------
Create an array of concatenated edges from a networkx graph:
>>> arr_edges = concat_edges(G)
"""
arr_edges = []
for u,v,a in G.edges(data=True):
arr_edges.append(u.lower() + '-' +v.lower())
arr_edges.append(v.lower() + '-' +u.lower())
return arr_edges
#####################################
# Method: build_db_collections
# Description: Call methods to create all collections in mongoDB
def build_db_collections(self, inc=100000, bots_ids_list_file=None):
"""
This method is in charge of extracting, cleaning, and loading the data
into all the collections in MongoDB.
Parameters
----------
inc : (Optional)
used to determine how many tweets will be processed at a time - (Default=100000).
A large number may cause out of memory errors, and a low number may take a long time to run,
so the decision of what number to use should be made based on the hardware specification.
bots_ids_list_file : (Optional)
a file that contains a list of user ids that are bots.
            It creates flags in the MongoDB collection to identify
            which tweets and users are in the bots list. - (Default=None)
Examples
--------
Load all data into all collections in MongoDB:
>>> inc = 50000
>>> build_db_collections(inc)
"""
### Loading Focused Data into MongoDB
self.loadFocusedData(inc)
### Loading user information to collection
# Loading user information for the actual tweet document
self.loadUsersData(inc, 'tweet')
# Loading user information for the original tweet in case of retweets
self.loadUsersData(inc, 'retweet')
# Loading user information for the quoted tweet
self.loadUsersData(inc, 'quote')
# Loading user information for replies -
        # (in this case we don't have full information about the user. Only screen_name and user_id)
self.loadUsersData(inc, 'reply')
# Loading user information for mention -
        # (in this case we don't have full information about the user. Only screen_name and sometimes user_id)
self.loadUsersData(inc, 'mention')
### Breaking tweets into Words
self.loadWordsData(inc)
### Loading tweet connections -
# These are the edges formed between users by replies, retweets, quotes and mentions
self.loadTweetConnections(inc)
### Loading tweet hashtag connections -
# These are the edges formed between hash tags being used together in the same tweet
self.loadTweetHTConnections(inc)
#####
### loading aggregate collections
self.loadAggregations('tweetCountByFile')
self.loadAggregations('tweetCountByLanguageAgg')
self.loadAggregations('tweetCountByMonthAgg')
# Loading bots list from file - (List of user ids that are bots)
# SKIP this step if you don't have a bots list
if bots_ids_list_file is not None:
bots_list_id_str = []
with open(bots_ids_list_file,'r') as f:
for line in f:
line = line.rstrip("\n")
bots_list_id_str.append(line)
self.set_bot_flag_based_on_arr(bots_list_id_str, 10000)
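    # A hedged end-to-end sketch of the intended pipeline, using only methods defined in this
    # class (hypothetical paths; analysis settings left at their setConfigs defaults):
    #
    #   ta.build_db_collections(inc=50000)
    #   ta.setConfigs(type_of_graph='user_conn_all', period_arr=None)
    #   ta.export_mult_types_edges_for_input(period_arr=None, type_of_graph='user_conn_all')
    #   ta.edge_files_analysis('D:\\Data\\MyAnalysis\\output')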
#####################################
# Method: plot_graph_contracted_nodes
    # Description: aux function to plot a graph.
    # This step repeats in different parts of the code, so it was made a function to avoid repetition
def plot_graph_contracted_nodes(self, G, file):
"""
Method to compress and plot graph based on the graph reduction
settings that can be updated using the *setConfigs* method.
Parameters
----------
G :
undirected networkx graph created from the Twitter data
file :
the path and name of the graph you want to save
Example
--------
>>> plot_graph_contracted_nodes(G, 'c:\\Data\\MyGraph.png')
"""
G2 = G.copy()
if len(G2.nodes()) > MIN_NO_OF_NODES_TO_REDUCE_GRAPH:
contraction_name = ''
print("Graph to plot before changes: nodes=" + str(len(G2.nodes)) + " edges=" + str(len(G2.edges)))
#in case we want to reduce the graph with edges of weight less than a cutoff number
if self.reduced_graph_remove_edge_weight is not None:
#find list of edges that have that weigh cutoff
edges_to_remove = [edge for edge in list(G2.edges(data=True)) if edge[2]['weight'] <= self.reduced_graph_remove_edge_weight]
#remove edges for the list
G2.remove_edges_from(edges_to_remove)
#get the largest connected component
G2 = self.largest_component_no_self_loops(G2)
contraction_name = contraction_name + "[RemEdgeWeightLessThan" + str(self.reduced_graph_remove_edge_weight) + "]"
#reduce graph based on a percentage of the nodes for each community
if self.reduced_graph_comty_contract_per is not None and len(G2.nodes()) > MIN_NO_OF_NODES_TO_REDUCE_GRAPH:
att = 'community_louvain'
G2 = self.contract_nodes_commty_per(G2, self.reduced_graph_comty_contract_per, att)
G2 = self.largest_component_no_self_loops(G2)
contraction_name = contraction_name + "[RemPercOfComty=" + str(self.reduced_graph_comty_contract_per) + "]"
#In case we want to continue to remove until we get to a cutoff number, another level of contraction
if self.reduced_graph_remove_edges == 'Y' and len(G2.nodes()) > MIN_NO_OF_NODES_TO_REDUCE_GRAPH:
if len(G2.edges()) > 100000:
cutoff_no = 3
G2 = self.remove_edges_eithernode(G2, cutoff_no)
contraction_name = contraction_name + '[RemEdgeEitherNodeDegCutoff=' + str(cutoff_no) + ']'
cutoff_no = 5
if (len(G2.nodes()) > self.graph_plot_cutoff_no_nodes) or (len(G2.edges()) > self.graph_plot_cutoff_no_edges):
while (len(G2.nodes()) > self.graph_plot_cutoff_no_nodes) or (len(G2.edges()) > self.graph_plot_cutoff_no_edges):
G2 = self.remove_edges(G2, cutoff_no)
if len(G2.nodes()) > 0:
G2 = self.largest_component_no_self_loops(G2)
if cutoff_no < 150:
cutoff_no += 10
elif cutoff_no < 1000:
cutoff_no += 100
elif cutoff_no < 10000:
cutoff_no += 500
else:
cutoff_no += 1000
contraction_name = contraction_name + '[RemEdgeBothNodesDegLessThan=' + str(cutoff_no) + ']'
#set up final file name with reduction parameters
file = file.replace('.', contraction_name + '.')
#get largest connected component after all removals
if len(G2.edges()) > 0:
G2 = self.largest_component_no_self_loops(G2)
#find best settings for the graphs depending on size. You can change these to get better graphs
if len(G2.edges()) < 450:
v_scale = 0.01; v_k =0.7; v_iterations=50; v_node_size=2
elif len(G2.edges()) < 5000:
v_scale = 2; v_k = 0.6; v_iterations=200; v_node_size=0.8
elif len(G2.edges()) < 10000:
v_scale = 1; v_k = 0.1; v_iterations=200; v_node_size=0.6
elif len(G2.edges()) >= 10000:
v_scale = 1; v_k = 0.05; v_iterations=500; v_node_size=0.6
print("Graph to plot after changes: nodes=" + str(len(G2.nodes)) + " edges=" + str(len(G2.edges)))
if (len(G2.nodes()) < self.graph_plot_cutoff_no_nodes and len(G2.edges()) < self.graph_plot_cutoff_no_edges) and len(G2.edges()) != 0:
if not os.path.exists(file):
G_to_plot, labels2, k = self.calculate_louvain_clustering(G2)
self.plotSpringLayoutGraph(G_to_plot,
file,
v_scale,
v_k,
v_iterations,
cluster_fl='Y',
v_labels=list(list(labels2)),
replace_existing_file=False)
#####################################
# Method: export_mult_types_edges_for_input
# Description: export edges that will be used to create graphs
# User can choose only one type of graph to export the edges, or export them all
def export_mult_types_edges_for_input(self, period_arr=None, bot_filter_fl='N', type_of_graph='all'):
"""
This method will export edges from mongoDB data that can be used to create graphs.
The user can choose only one type of graph to export the edges, or export them all
Parameters
----------
period_arr : (Optional)
            An array listing the different periods to be analyzed separately in the data.
(Default = None)
bot_filter_fl : (Optional)
A flag to identify if you want to create extra edge files separating tweets by bots or not.
This option is only available when the bot flag was updated in mongoDB using method set_bot_flag_based_on_arr.
(Default='N')
type_of_graph : (Optional)
the type of graph to export the edges for.
Available options: user_conn_all, user_conn_mention,
user_conn_retweet, user_conn_reply, user_conn_quote, ht_conn, or all
(Default='all')
Example
--------
>>> # Set up the periods you want to analyse
>>> # Set period_arr to None if you don't want to analyze separate periods
>>> # Format: Period Name, Period Start Date, Period End Date
>>> period_arr = [['P1', '10/08/2017 00:00:00', '10/15/2017 00:00:00'],
>>> ['P2', '01/21/2018 00:00:00', '02/04/2018 00:00:00'],
>>> ['P3', '02/04/2018 00:00:00', '02/18/2018 00:00:00'],
>>> ['P4', '02/18/2018 00:00:00', '03/04/2018 00:00:00']]
>>>
>>>
>>> ## TYPE OF GRAPH EDGES
>>> ########################################################
>>> # You can export edges for one type, or for all
>>> # Options: user_conn_all, --All user connections
>>> # user_conn_mention, --Only Mentions user connections
>>> # user_conn_retweet, --Only Retweets user connections
>>> # user_conn_reply, --Only Replies user connections
>>> # user_conn_quote, --Only Quotes user connections
        >>> # ht_conn --Hashtag connections - (Hashtags that were used together)
>>> # all --It will export all of the above options
>>>
>>> TYPE_OF_GRAPH = 'all'
>>>
>>> export_mult_types_edges_for_input(period_arr=period_arr, type_of_graph=TYPE_OF_GRAPH)
"""
if type_of_graph == 'all' or type_of_graph == 'user_conn_all':
self.export_all_edges_for_input(period_arr, bot_filter_fl, type_of_graph='user_conn_all')
if type_of_graph == 'all' or type_of_graph == 'user_conn_mention':
self.export_all_edges_for_input(period_arr, bot_filter_fl, type_of_graph='user_conn_mention')
if type_of_graph == 'all' or type_of_graph == 'user_conn_retweet':
self.export_all_edges_for_input(period_arr, bot_filter_fl, type_of_graph='user_conn_retweet')
if type_of_graph == 'all' or type_of_graph == 'user_conn_reply':
self.export_all_edges_for_input(period_arr, bot_filter_fl, type_of_graph='user_conn_reply')
if type_of_graph == 'all' or type_of_graph == 'user_conn_quote':
self.export_all_edges_for_input(period_arr, bot_filter_fl, type_of_graph='user_conn_quote')
if type_of_graph == 'all' or type_of_graph == 'ht_conn':
self.export_all_edges_for_input(period_arr, bot_filter_fl, type_of_graph='ht_conn')
#####################################
# Method: export_all_edges_for_input
# Description: export edges that will be used to create graphs
def export_all_edges_for_input(self, period_arr=None, bot_filter_fl='N', type_of_graph='user_conn_all'):
# Creates path to add the edge files to be used as input
input_files_path = self.folder_path + '\\data_input_files'
self.create_path(input_files_path)
#
edge_prefix_str = ''
user_conn_filter = None
export_type = 'edges'
if type_of_graph == 'user_conn_all':
edge_prefix_str = 'UserConnections_'
elif type_of_graph == 'user_conn_mention':
edge_prefix_str = 'MentionUserConnections_'
user_conn_filter = 'mention'
elif type_of_graph == 'user_conn_retweet':
edge_prefix_str = 'RetweetUserConnections_'
user_conn_filter = 'retweet'
elif type_of_graph == 'user_conn_reply':
edge_prefix_str = 'ReplyUserConnections_'
user_conn_filter = 'reply'
elif type_of_graph == 'user_conn_quote':
edge_prefix_str = 'QuoteUserConnections_'
user_conn_filter = 'quote'
elif type_of_graph == 'ht_conn':
edge_prefix_str = 'HTConnection_'
export_type = 'ht_edges'
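        # Note (inferred from the exportData calls below and the file names expected by
        # edge_files_analysis): edge files land under <folder_path>\data_input_files and follow
        # the pattern <edge_prefix><AllPeriods|PeriodName>_[ExcludingBots_|BotsOnly_](ht_)edges.txt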
print("** exporting edges - Graph type=" + type_of_graph )
# Export ALL edges for ALL periods
print("** exporting edges for AllPeriods " + self.get_now_dt())
self.exportData(export_type,
input_files_path + '\\' + edge_prefix_str + 'AllPeriods_',
0,
user_conn_filter=user_conn_filter,
replace_existing_file=False)
if bot_filter_fl == 'Y':
# Export edges for ALL periods, excluding edges associated with bots
print("** exporting edges for AllPeriods_ExcludingBots - " + self.get_now_dt())
self.exportData(export_type,
input_files_path + '\\' + edge_prefix_str + 'AllPeriods_ExcludingBots_',
0,
is_bot_Filter = '0',
user_conn_filter=user_conn_filter,
replace_existing_file=False)
# Export edges for ALL periods, only edges associated with bots
print("** exporting edges for AllPeriods_BotsOnly - " + self.get_now_dt())
self.exportData(export_type,
input_files_path + '\\' + edge_prefix_str + 'AllPeriods_BotsOnly_',
0,
is_bot_Filter = '1',
user_conn_filter=user_conn_filter,
replace_existing_file=False)
# Export edges by period using the dates set on array period_arr
if period_arr is not None:
for idx, period in enumerate(period_arr):
# Export ALL edges for this period
print("** exporting edges for " + period[0] + " - " + self.get_now_dt())
edges = self.exportData(export_type,
input_files_path + '\\' + edge_prefix_str + '' + period[0] + '_', 0,
startDate_filter=period[1],
endDate_filter=period[2],
is_bot_Filter=None,
user_conn_filter=user_conn_filter,
replace_existing_file=False)
if bot_filter_fl == 'Y':
# Export edges for this period, excluding edges associated with bots
print("** exporting edges for " + period[0] + "_ExcludingBots - " + self.get_now_dt())
edges = self.exportData(export_type,
input_files_path + '\\' + edge_prefix_str + '' + period[0] + '_ExcludingBots_', 0,
startDate_filter=period[1],
endDate_filter=period[2],
is_bot_Filter='0',
user_conn_filter=user_conn_filter,
replace_existing_file=False)
# Export edges for this period, only edges associated with bots
print("** exporting edges for " + period[0] + "_BotsOnly - " + self.get_now_dt())
edges = self.exportData(export_type,
input_files_path + '\\' + edge_prefix_str + '' + period[0] + '_BotsOnly_',
0,
startDate_filter=period[1],
endDate_filter=period[2],
is_bot_Filter='1',
user_conn_filter=user_conn_filter,
replace_existing_file=False)
print("** exporting edges - END *** - " + self.get_now_dt())
#####################################
# Method: nodes_edges_analysis_files
# Description: creates nodes and edges files
def nodes_edges_analysis_files(self, G, path):
"""
        Given a graph G, it exports nodes with their degree, edges with their weight,
and word clouds representing the nodes scaled by their degree
Parameters
----------
G :
undirected networkx graph created from the Twitter data
path :
the path where the files should be saved
Examples
--------
Saved node and edges files into path:
>>> nodes_edges_analysis_files(G, 'C:\\Data\\MyFilePath')
"""
print("****** Exporting nodes and edges to file - " + self.get_now_dt())
self.export_nodes_edges_to_file(G, path + "\\G_NodesWithDegree.txt", path + "\\G_Edges.txt")
print("****** Ploting Nodes Wordcloud - " + self.get_now_dt())
node_file_name = path + '\\G_NodesWithDegree.txt'
df = self.read_freq_list_file(node_file_name,' ')
self.plot_word_cloud(df, file=path +'\\G_Nodes_WordCloud.png')
print("\n")
#####################################
# Method: lda_analysis_files
# Description: creates topic model files
# tweet texts, lda model visualization
def lda_analysis_files(self, path, startDate_filter=None, endDate_filter=None, arr_edges=None, arr_ht_edges=None):
"""
        Creates topic model files. Exports a file with tweet texts and an LDA model visualization.
The data comes from the mongoDB database and is filtered based on the parameters.
Parameters
----------
path :
the path where the files should be saved
startDate_filter : (Optional)
filter by a certain start date
endDate_filter : (Optional)
filter by a certain end date
        arr_edges : (Optional)
            an array of concatenated edges that will be used to filter certain connections only.
            The method concat_edges can be used to create that array.
        arr_ht_edges : (Optional)
            an array of concatenated hashtag edges that will be used to filter certain hashtag connections only.
            The method concat_edges can be used to create that array.
Examples
--------
Save lda analysis files into path:
>>> lda_analysis_files(
>>> 'D:\\Data\\MyFiles',
>>> startDate_filter='09/20/2020 19:00:00',
>>> endDate_filter='03/04/2021 00:00:00')
"""
#export text for topic analysis
print("****** Exporting text for topic analysis - " + self.get_now_dt())
self.exportData('text_for_topics',
path + "\\" , 0,
startDate_filter,
endDate_filter,
self.is_bot_Filter,
arr_edges,
arr_ht_edges=arr_ht_edges,
replace_existing_file=False)
# Train LDA models and print topics
print("****** Topic discovery analysis (lda model) ****** - " + self.get_now_dt())
model_name = "Topics"
topics_file_name = path + '\\T_tweetTextsForTopics.txt'
if not os.path.exists(path + '\\Topics-(LDA model).png'):
self.train_model_from_file(topics_file_name, self.num_of_topics, model_name, model_type='lda')
self.plot_topics(path + '\\Topics-(LDA model).png', self.num_of_topics, 'lda', replace_existing_file=False)
#####################################
# Method: ht_analysis_files
# Description: creates hashtag frequency files
# frequency file text, wordcloud, and barcharts
def ht_analysis_files(self, path, startDate_filter=None, endDate_filter=None, arr_edges=None, arr_ht_edges=None):
"""
Creates hashtag frequency files. Frequency text file, wordcloud, and barcharts.
The data comes from the mongoDB database and is filtered based on the parameters.
Parameters
----------
path :
the path where the files should be saved
startDate_filter : (Optional)
filter by a certain start date
endDate_filter : (Optional)
filter by a certain end date
        arr_edges : (Optional)
            an array of concatenated edges that will be used to filter certain connections only.
            The method concat_edges can be used to create that array.
        arr_ht_edges : (Optional)
            an array of concatenated hashtag edges that will be used to filter certain hashtag connections only.
            The method concat_edges can be used to create that array.
Examples
--------
Save hashtag frequency files into path:
>>> ht_analysis_files(
>>> 'D:\\Data\\MyFiles',
>>> startDate_filter='09/20/2020 19:00:00',
>>> endDate_filter='03/04/2021 00:00:00')
"""
#export ht frequency list
print("\n****** Exporting ht frequency list - " + self.get_now_dt())
self.exportData('ht_frequency_list',
path + "\\" , 0,
startDate_filter,
endDate_filter,
self.is_bot_Filter,
arr_edges,
arr_ht_edges=arr_ht_edges,
replace_existing_file=False)
print("****** Ploting HashTags Barchart and Wordcloud - " + self.get_now_dt())
ht_file_name = path + '\\T_HT_FrequencyList.txt'
if os.stat(ht_file_name).st_size != 0:
df = self.read_freq_list_file(ht_file_name)
self.plot_top_freq_list(df, 30, 'HashTag', exclude_top_no=0, file=path + '\\T_HT_Top30_BarChart.png', replace_existing_file=False)
self.plot_top_freq_list(df, 30, 'HashTag', exclude_top_no=1, file=path + '\\T_HT_Top30_BarChart-(Excluding Top1).png', replace_existing_file=False)
self.plot_top_freq_list(df, 30, 'HashTag', exclude_top_no=2, file=path + '\\T_HT_Top30_BarChart-(Excluding Top2).png', replace_existing_file=False)
self.plot_word_cloud(df, file=path + '\\T_HT_WordCloud.png', replace_existing_file=False)
#####################################
# Method: words_analysis_files
# Description: creates words frequency files
# frequency file text, wordcloud, and barcharts
def words_analysis_files(self, path, startDate_filter=None, endDate_filter=None, arr_edges=None, arr_ht_edges=None):
"""
Creates words frequency files. Frequency text file, wordcloud, and barcharts.
The data comes from the mongoDB database and is filtered based on the parameters.
Parameters
----------
path :
the path where the files should be saved
startDate_filter : (Optional)
filter by a certain start date
endDate_filter : (Optional)
filter by a certain end date
        arr_edges : (Optional)
            an array of concatenated edges that will be used to filter certain connections only.
            The method concat_edges can be used to create that array.
        arr_ht_edges : (Optional)
            an array of concatenated hashtag edges that will be used to filter certain hashtag connections only.
            The method concat_edges can be used to create that array.
Examples
--------
Save words frequency files into path:
>>> words_analysis_files(
>>> 'D:\\Data\\MyFiles',
>>> startDate_filter='09/20/2020 19:00:00',
>>> endDate_filter='03/04/2021 00:00:00')
"""
#export words frequency list
print("\n****** Exporting words frequency list - " + self.get_now_dt())
self.exportData('word_frequency_list',
path + "\\" , 0,
startDate_filter,
endDate_filter,
self.is_bot_Filter,
arr_edges,
arr_ht_edges,
self.top_no_word_filter,
replace_existing_file=False)
print("****** Ploting Word Barchart and Wordcloud - " + self.get_now_dt())
word_file_name = path + '\\T_Words_FrequencyList.txt'
if os.stat(word_file_name).st_size != 0:
df = self.read_freq_list_file(word_file_name)
self.plot_top_freq_list(df, 30, 'Word', exclude_top_no=0, file=path+'\\T_Words_Top30_BarChart.png', replace_existing_file=False)
self.plot_word_cloud(df, file=path+'\\T_Words_WordCloud.png', replace_existing_file=False)
#####################################
# Method: time_series_files
# Description: creates time frequency files
def time_series_files(self, path, startDate_filter=None, endDate_filter=None, arr_edges=None, arr_ht_edges=None):
"""
        Creates timeseries frequency files: tweet count by day and hashtag count by day.
The data comes from the mongoDB database and is filtered based on the parameters.
Parameters
----------
path :
the path where the files should be saved
startDate_filter : (Optional)
filter by a certain start date
endDate_filter : (Optional)
filter by a certain end date
        arr_edges : (Optional)
            an array of concatenated edges that will be used to filter certain connections only.
            The method concat_edges can be used to create that array.
        arr_ht_edges : (Optional)
            an array of concatenated hashtag edges that will be used to filter certain hashtag connections only.
            The method concat_edges can be used to create that array.
Examples
--------
Save timeseries frequency files into path:
>>> time_series_files(
>>> 'D:\\Data\\MyFiles',
>>> startDate_filter='09/20/2020 19:00:00',
>>> endDate_filter='03/04/2021 00:00:00')
"""
print("****** Exporting time series files - " + self.get_now_dt())
tweet_df = self.get_time_series_df(startDate_filter=startDate_filter, endDate_filter=endDate_filter, arr_edges=arr_edges, arr_ht_edges=arr_ht_edges)
#plot time series for all tweets
if not os.path.exists(path + '\\TS_TweetCount.png'):
self.plot_timeseries(tweet_df, ['tweet', 'tweet_created_at'], path + '\\TS_TweetCount.png')
#plot time series for top hashtags [1-5]
if not os.path.exists(path + '\\TS_TweetCountByHT[1-5].png'):
self.plot_top_ht_timeseries(top_no_start=1, top_no_end=5, file = path + '\\TS_TweetCountByHT[1-5].png',
startDate_filter=startDate_filter, endDate_filter=endDate_filter, arr_edges=arr_edges, arr_ht_edges=arr_ht_edges)
#plot time series for top hashtags [3-10]
if not os.path.exists(path + '\\TS_TweetCountByHT[3-10].png'):
self.plot_top_ht_timeseries(top_no_start=3, top_no_end=10, file = path + '\\TS_TweetCountByHT[3-10].png',
startDate_filter=startDate_filter, endDate_filter=endDate_filter, arr_edges=arr_edges, arr_ht_edges=arr_ht_edges)
#####################################
# Method: ht_connection_files
# Description: creates hashags graph connections files
def ht_connection_files(self, path, startDate_filter=None, endDate_filter=None, arr_edges=None):
print("****** Exporting ht connection files - " + self.get_now_dt())
#create file with ht edges
self.exportData('ht_edges', path + "\\" , 0, startDate_filter, endDate_filter, self.is_bot_Filter, arr_edges)
edge_file_path = path + "\\ht_edges.txt"
G = self.loadGraphFromFile(edge_file_path)
if len(G.edges) > 0:
if len(G.edges) > 1000:
G = self.largest_component_no_self_loops(G)
else:
G.remove_edges_from(nx.selfloop_edges(G))
for node in list(nx.isolates(G)):
G.remove_node(node)
print("HT graph # of Nodes " + str(len(G.nodes())))
print("HT graph # of Edges " + str(len(G.edges())))
self.graph_analysis_files(G, path, gr_prefix_nm = 'HTG_')
#remove top hashtags if we want to ignore the top hashtags
if self.top_ht_to_ignore is not None:
G2 = G.copy()
remove_name = '[WITHOUT('
arr_nodes = sorted(G2.degree(), key=lambda x: x[1], reverse=True)
for ht, degree in arr_nodes[0:self.top_ht_to_ignore]:
remove_name = remove_name + '-' + ht
G2.remove_node(ht)
remove_name = remove_name + ')]'
if len(G2.edges) > 0:
if len(G2.edges) > 1000:
G2 = self.largest_component_no_self_loops(G2)
else:
G2.remove_edges_from(nx.selfloop_edges(G2))
for node in list(nx.isolates(G2)):
G2.remove_node(node)
print("HT graph # of Nodes " + str(len(G2.nodes())))
print("HT graph # of Edges " + str(len(G2.edges())))
self.graph_analysis_files(G2, path, gr_prefix_nm = 'HTG_' + remove_name + '_')
#####################################
# Method: graph_analysis_files
# Description: creates graphs files
def graph_analysis_files(self, G, path, gr_prefix_nm=''):
"""
Plot graph analysis files for a given graph G.
It uses the configuration set on the setConfigs method.
Parameters
----------
G :
undirected networkx graph created from the Twitter data
path :
the path where the files should be saved
gr_prefix_nm: (Optional)
a prefix to add to the graph name. (Default='')
Examples
--------
Create graph visualization files
>>> graph_analysis_files(G, 'C:\\Data\\MyAnalysis\\', 'MyNameTest')
"""
if len(G.nodes()) > 0 and len(G.edges()) > 0:
#plot graph
print("\n****** Ploting graphs... *********** - " + self.get_now_dt())
# if not os.path.exists(path + '\\' + gr_prefix_nm + 'G_Graph.png')
# and not os.path.exists(path + '\\' + gr_prefix_nm + 'G_Graph(WithoutScale).png'):
if ((len(G.nodes()) <= self.graph_plot_cutoff_no_nodes \
or len(G.edges()) <= self.graph_plot_cutoff_no_edges) \
and len(G.edges()) != 0) \
or len(G.nodes()) <= MIN_NO_OF_NODES_TO_REDUCE_GRAPH:
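                # Spring-layout heuristics (as in plot_graph_contracted_nodes): larger graphs
                # get a smaller k, more iterations, and smaller node sizes; adjust as needed.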
if len(G.edges()) < 450:
v_scale = 0.01; v_k =0.7; v_iterations=100; v_node_size=2
elif len(G.edges()) < 5000:
v_scale = 2; v_k = 0.6; v_iterations=200; v_node_size=0.8
elif len(G.edges()) < 10000:
v_scale = 1; v_k = 0.1; v_iterations=200; v_node_size=0.6
elif len(G.edges()) >= 10000:
v_scale = 1; v_k = 0.05; v_iterations=500; v_node_size=0.6
if self.create_graph_with_node_scale_flag == 'Y':
G_to_plot, labels2, k = self.calculate_louvain_clustering(G)
self.plotSpringLayoutGraph(G_to_plot,
path + '\\' + gr_prefix_nm + 'G_Graph.png',
v_scale,
v_k,
v_iterations,
cluster_fl='Y',
v_labels=list(list(labels2)),
replace_existing_file=False)
if self.create_graph_without_node_scale_flag == 'Y':
self.plotSpringLayoutGraph(G,
path + '\\' + gr_prefix_nm + 'G_Graph(WithoutScale).png',
v_scale,
v_k,
v_iterations,
cluster_fl='N',
v_alpha=1,
scale_node_size_fl='N',
replace_existing_file=False)
#plot reduced graph
if self.create_reduced_graph_flag == 'Y':
self.plot_graph_contracted_nodes(G, path + '\\' + gr_prefix_nm + 'G_Graph-(ReducedGraph).png')
print("\n")
#####################################
# Method: edge_files_analysis
# Description: load graph from edge files and call methods to create all analysis
# files for the main graph and for the graph of each period
def edge_files_analysis(self, output_path):
"""
Automated way to generate all analysis files.
It creates all folders, edge files, and any other files based on given settings.
The setting of what files are interesting or not, should be set using the setConfigs method.
Parameters
----------
output_path :
the path where the files should be saved
Examples
--------
Create all analysis files and folder based on the configurations set on setConfigs:
>>> edge_files_analysis('D:\\Data\\MyFiles')
"""
case_ht_str = ''
if self.type_of_graph == 'ht_conn':
case_ht_str = 'ht_'
#Get the right edges file to import
if self.is_bot_Filter is None:
parent_path = output_path + '\\' + self.edge_prefix_str + 'All'
edge_file_path = self.folder_path + '\\data_input_files\\' + self.edge_prefix_str + 'AllPeriods_' + case_ht_str + 'edges.txt'
if not os.path.exists(edge_file_path): self.export_all_edges_for_input(period_arr=self.period_arr, type_of_graph=self.type_of_graph)
elif self.is_bot_Filter == '0':
parent_path = output_path + '\\' + self.edge_prefix_str + 'ExcludingBots'
edge_file_path = self.folder_path + '\\data_input_files\\' + self.edge_prefix_str +'AllPeriods_ExcludingBots_' + case_ht_str + 'edges.txt'
if not os.path.exists(edge_file_path): self.export_all_edges_for_input(period_arr=self.period_arr, bot_filter_fl='Y', type_of_graph=self.type_of_graph)
elif self.is_bot_Filter == '1':
parent_path = output_path + '\\' + self.edge_prefix_str + 'Bots_Edges_Only'
edge_file_path = self.folder_path + '\\data_input_files\\' + self.edge_prefix_str + 'AllPeriods_BotsOnly_' + case_ht_str + 'edges.txt'
if not os.path.exists(edge_file_path): self.export_all_edges_for_input(period_arr=self.period_arr, bot_filter_fl='Y', type_of_graph=self.type_of_graph)
print(edge_file_path)
self.create_path(output_path)
# Load graph from edge file
G = self.loadGraphFromFile(edge_file_path)
# Call method to print all analysis files
self.all_analysis_file(G, parent_path, startDate_filter=None, endDate_filter=None)
# Run analysis by period using the dates set on array period_arr
if self.period_arr is not None:
# Creates a text file with the period information.
            # This is just so that whoever is looking at these folders can know what dates we used for each period
myFile = open(output_path + '\\PeriodsInfo.txt', 'w', encoding="utf-8")
with myFile:
writer = csv.writer(myFile, delimiter='\t', lineterminator='\n')
writer.writerows(self.period_arr)
for idx, period in enumerate(self.period_arr):
# Set the period information variables
period_name = period[0]
period_start_date = period[1]
period_end_date = period[2]
print("\n**********************************************************")
print("************************** " + period_name + " ****************************\n" )
# Edge file path
if self.is_bot_Filter is None:
parent_path = output_path + "\\" + self.edge_prefix_str + "All_By_Period\\" + period_name
edge_file_path = output_path + "\\data_input_files\\" + self.edge_prefix_str + period_name +"_" + case_ht_str + "edges.txt"
if not os.path.exists(edge_file_path): self.export_all_edges_for_input(period_arr=self.period_arr, type_of_graph=self.type_of_graph)
elif self.is_bot_Filter == '0':
parent_path = output_path + "\\" + self.edge_prefix_str + "Excluding_Bots_By_Period\\" + period_name
edge_file_path = output_path + "\\data_input_files\\" + self.edge_prefix_str + period_name + "_ExcludingBots_" + case_ht_str + "edges.txt"
if not os.path.exists(edge_file_path): self.export_all_edges_for_input(period_arr=self.period_arr, bot_filter_fl='Y', type_of_graph=self.type_of_graph)
elif self.is_bot_Filter == '1':
parent_path = output_path + "\\" + self.edge_prefix_str + "Bots_Edges_Only_By_Period\\" + period_name
edge_file_path = output_path + "\\data_input_files\\" + self.edge_prefix_str + period_name +"_BotsOnly_" + case_ht_str + "edges.txt"
if not os.path.exists(edge_file_path): self.export_all_edges_for_input(period_arr=self.period_arr, bot_filter_fl='Y', type_of_graph=self.type_of_graph)
# Create new path if it doesn't exist
self.create_path(parent_path)
#load graph from edge file
G = self.loadGraphFromFile(edge_file_path)
                #call function to generate all files for this graph
self.all_analysis_file(G, parent_path, startDate_filter=period_start_date, endDate_filter=period_end_date)
#####################################
# Method: all_analysis_file
# Description: Calls method to create all files for full dataset, for top degree nodes, and for community graphs
def all_analysis_file(self, G, output_path, startDate_filter=None, endDate_filter=None):
#files for the main graph
self.create_analysis_file(G, output_path, startDate_filter=startDate_filter, endDate_filter=endDate_filter)
#files for the top nodes
if self.create_top_nodes_files_flag == 'Y':
self.top_nodes_analysis(G, output_path, startDate_filter=startDate_filter, endDate_filter=endDate_filter)
#files for community nodes
if self.create_community_files_flag == 'Y':
self.commty_analysis_files(G, output_path, startDate_filter=startDate_filter, endDate_filter=endDate_filter)
#####################################
# Method: create_analysis_file
# Description: calls individual methods to create files on the settings
def create_analysis_file(
self,
G,
output_path,
startDate_filter=None,
endDate_filter=None,
arr_edges=None):
#export file with measures
print("****** Graph Measures - " + self.get_now_dt())
self.print_Measures(G, fileName_to_print = output_path + "\\G_Measures-(All).txt")
print("\n")
arr_ht_edges = None
if self.type_of_graph == 'ht_conn':
arr_ht_edges = arr_edges
arr_edges = None
if len(G.edges()) != 0:
#get largest connected component and export file with measures
G = self.largest_component_no_self_loops(G)
print("****** Largest Component Graph Measures - " + self.get_now_dt())
self.print_Measures(G, fileName_to_print = output_path + "\\G_Measures-(LargestCC).txt")
print("\n")
#export files with edges and degrees
if self.create_nodes_edges_files_flag == 'Y':
self.nodes_edges_analysis_files(G, output_path)
#LDA Model
if self.create_topic_model_files_flag == 'Y':
self.lda_analysis_files(output_path,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
arr_edges=arr_edges,
arr_ht_edges=arr_ht_edges)
#export ht frequency list
if self.create_ht_frequency_files_flag == 'Y':
self.ht_analysis_files(output_path,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
arr_edges=arr_edges,
arr_ht_edges=arr_ht_edges)
#export words frequency list
if self.create_words_frequency_files_flag == 'Y':
self.words_analysis_files(output_path,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
arr_edges=arr_edges,
arr_ht_edges=arr_ht_edges)
#plot graph
if self.create_graphs_files_flag == 'Y':
self.graph_analysis_files(G, output_path)
#time series
if self.create_timeseries_files_flag == 'Y':
self.time_series_files(output_path,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
arr_edges=arr_edges,
arr_ht_edges=arr_ht_edges)
#hashtag connections
if self.create_ht_conn_files_flag == 'Y' and self.type_of_graph != 'ht_conn':
self.ht_connection_files(output_path,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
arr_edges=arr_edges)
#####################################
# Method: top_nodes_analysis
# Description: calls methods to create files for each of the top degree nodes
def top_nodes_analysis(self, G, output_path, startDate_filter=None, endDate_filter=None):
# Choose which graph you want to run this for
Graph_to_analyze = G.copy()
top_degree_nodes = self.get_top_degree_nodes(Graph_to_analyze, self.top_degree_start, self.top_degree_end)
#creates a folder to save the files for this analysis
path = "Top_" + str(self.top_degree_start) + '-' + str(self.top_degree_end)
self.create_path(output_path + '\\' + path)
i = self.top_degree_end
# loops through the top degree nodes, creates a subgraph for them and saves the results in a folder
for x in np.flip(top_degree_nodes, 0):
node = x[0]
#creates a subgraph for this node
G_subgraph = self.create_node_subgraph(Graph_to_analyze, node)
G_subgraph_largestComponent = G_subgraph.copy()
G_subgraph_largestComponent = self.largest_component_no_self_loops(G_subgraph_largestComponent)
#creates a path to add the files for this node
path_node = path + "\\" + str(i) + "-" + node
self.create_path(output_path + '\\' + path_node)
#get array with all edges for this top degree node
if len(G_subgraph) > 1:
arr_edges = self.concat_edges(G_subgraph)
self.create_analysis_file(G_subgraph,
output_path + '\\' + path_node,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
arr_edges=arr_edges)
i -= 1
#####################################
# Method: commty_analysis_files
# Description: calls methods to create files for each of the communities found
def commty_analysis_files(self, G, output_path, startDate_filter=None, endDate_filter=None):
print("\n******************************************************")
print("******** Louvain Communities ********")
if len(G.edges()) != 0:
# Choose which graph you want to run this for
Graph_to_analyze = G.copy()
#creates a folder to save the files for this analysis
path = output_path + "\\Communities_(Louvain)"
while os.path.exists(path):
path = path + "+"
self.create_path(path)
#calculate louvain community for largest connected component
Graph_to_analyze = self.largest_component_no_self_loops(Graph_to_analyze)
Graph_to_analyze, labels, k = self.calculate_louvain_clustering(Graph_to_analyze)
comm_att = 'community_louvain'
#find the number of communities in the graph
no_of_comm = max(nx.get_node_attributes(Graph_to_analyze, comm_att).values())+1
print("******************************************************")
print("Total # of Communities " + str(no_of_comm))
#loop through the communities and print their analysis files
for commty in range(no_of_comm):
#find subgraphs of this community
G_subgraph = Graph_to_analyze.subgraph([n for n,attrdict in Graph_to_analyze.node.items() if attrdict [comm_att] == commty ])
#only keep communities whose edge count meets the configured cutoff
if len(G_subgraph.edges()) >= self.commty_edge_size_cutoff:
G_subgraph_largestComponent = G_subgraph.copy()
G_subgraph_largestComponent = self.largest_component_no_self_loops(G_subgraph_largestComponent)
#creates a path to add the files for this community
path_community = path + "\\Community-" + str(commty+1)
self.create_path(path_community)
print("\n")
print("******************************************************")
print("****** Printing files for community " + str(commty+1) + " ******")
#self.print_Measures(G_subgraph, False, False, False, False, fileName_to_print = path_community + '\\G_' + str(commty+1) + '_Measures.txt')
print("\n")
if len(G_subgraph) > 1:
arr_edges = self.concat_edges(G_subgraph)
self.create_analysis_file(G_subgraph, path_community,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
arr_edges=arr_edges)
#####################################
# Method: get_time_series_df
# Description: query data in mongoDB for timeseries analysis
def get_time_series_df(
self,
ht_arr=None,
startDate_filter=None,
endDate_filter=None,
arr_edges=None,
arr_ht_edges=None):
"""
Method to query data in MongoDB for time series analysis given certain filters.
It returns a DataFrame of tweet timestamps (one column per hashtag when ht_arr is given,
or a single 'tweet' column otherwise) that is used to build the time series files.
Parameters
----------
ht_arr :
array of hashtags to filter the data from
startDate_filter : (Optional)
filter by a certain start date
endDate_filter : (Optional)
filter by a certain end date
arr_edges : (Optional)
    an array of concatenated edges used to filter specific connections only.
    The method concat_edges can be used to create that array.
arr_ht_edges : (Optional)
    an array of concatenated hashtag edges used to filter specific hashtag connections only.
    The method concat_edges can be used to create that array.
Examples
--------
>>> ...
"""
df = pd.DataFrame()
if ht_arr is not None:
#get timeseries for each of the top hashtags
for i, ht in enumerate(ht_arr):
arrData, file = self.queryData(exportType='tweet_ids_timeseries',
filepath='',
inc=0,
ht_to_filter=ht,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
is_bot_Filter=self.is_bot_Filter,
arr_edges=arr_edges,
arr_ht_edges=arr_ht_edges)
tweet_df = pd.DataFrame(list(arrData))
tweet_df.columns = ['tweet_created_at', ht]
df = pd.concat([df,tweet_df], axis=0, ignore_index=True)
else:
#get timeseries for all tweets
arrData, file = self.queryData(exportType='tweet_ids_timeseries',
filepath='', inc=0,
startDate_filter=startDate_filter,
endDate_filter=endDate_filter,
is_bot_Filter=self.is_bot_Filter,
arr_edges=arr_edges,
arr_ht_edges=arr_ht_edges)
tweet_df = pd.DataFrame(list(arrData))
tweet_df.columns = ['tweet_created_at', 'tweet']
df = | pd.concat([df,tweet_df], axis=0, ignore_index=True) | pandas.concat |
# coding=utf-8
# Author: <NAME> & <NAME>
# Date: Jan 06, 2021
#
# Description: Parse social-media timelines (Twitter, via the tweetline DB below) and extract dictionary matches
#
import os
import sys
#
include_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'include'))
# include_path = '/nfs/nfs7/home/rionbr/myaura/include'
sys.path.insert(0, include_path)
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import re
#
import db_init as db
import utils
from load_dictionary import load_dictionary, build_term_parser
from termdictparser import Sentences
if __name__ == '__main__':
#
# Init
#
dicttimestamp = '20180706'
data = set([
'clobazam', 'onfi',
'levetiracetam', 'keppra', 'Levetiracetamum',
'lamotrigine', 'lamictal', 'lamotrigina', 'lamotrigine', 'lamotriginum',
'lacosamide', 'vimpat', 'SPM927', 'erlosamide', 'harkoseride',
'carbamazepine', 'carbamazepen', 'carbamazepin', 'carbamazepina', 'carbamazepinum', 'carbamazépine',
'diazepam', 'valium', 'diastat',
'oxcarbazepine',
'seizuremeds',
])
# Load Dictionary
dfD = load_dictionary(dicttimestamp=dicttimestamp, server='cns-postgres-myaura')
# Build Parser Vocabulary
tdp = build_term_parser(dfD)
#
dict_token = dfD['token'].to_dict()
dict_id_parent = dfD['id_parent'].to_dict()
dict_parent = dfD['parent'].to_dict()
# dict_dictionary = dfD['dictionary'].to_dict()
dict_type = dfD['type'].to_dict()
# dict_source = dfD['source'].to_dict()
#
# Get Users from MongoDB
#
db_raw = 'ddi_cohort_epilepsy'
mongo_raw, _ = db.connectToMongoDB(server='mongo-tweetline', db=db_raw)
#
# Load Selected Timelines
#
dfP = pd.read_csv('../tmp-data/db-matches-epilepsy.csv', header=0, index_col=0)
# Remove everything after a retweet marker ('rt @user ...') so quoted content is not matched
dfP['text'] = dfP['text'].str.replace(r'rt @[a-z0-9_]+.+', '', flags=re.IGNORECASE)
# Repeat the removal with a pre-compiled, Unicode-aware pattern (the call above is redundant but harmless)
re_retweet = re.compile(r"rt @[a-z0-9_]+.+", re.IGNORECASE|re.UNICODE)
dfP['text'] = dfP['text'].str.replace(re_retweet, '')
re_tokenizer = re.compile(r"[\w']+", re.UNICODE)
def contains_match(x):
tokens = set(re_tokenizer.findall(x))
return any(tokens.intersection(data))
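# Illustrative behaviour (hypothetical text): contains_match('started keppra today') -> True,
# because matching intersects whole-word tokens with the drug set defined above; the comparison
# is case-sensitive here, so 'Keppra' only matches if the text was lower-cased upstream.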
# Post contains drug match
dfP['contains'] = dfP['text'].apply(contains_match)
# Keep only posts with mentions
dfP = dfP.loc[(dfP['contains'] == True), :]
#
dfU = dfP.groupby('user_id').agg({'_id': 'count'}).rename(columns={'_id': 'n-matched-posts'}).reset_index()
#
# Get Users Timelines
#
print('--- Requesting Mongo Data: `tweetline` ---')
mongo_raw, _ = db.connectToMongoDB(server='mongo-tweetline', db='tweetline')
#
n_users = dfU.shape[0]
#
# Parse Users
#
list_post_mentions = []
#
for idx, row in dfU.iterrows():
#
i = idx + 1
per = i / n_users
id_user = int(row['user_id'])
print("> Parsing User '{id_user:d}': {i:d} of {n:d} ({per:.2%})".format(id_user=id_user, i=i, n=n_users, per=per))
q = mongo_raw.tweet.find(
{
'user_id': id_user
},
{
'_id': True,
'datetime': True,
'user_id': True,
'text': True,
}
)
df = pd.json_normalize(list(q))
df = df[['_id', 'datetime', 'user_id', 'text']]
df.columns = ['id', 'created_time', 'user_id', 'text']
df['created_time'] = | pd.to_datetime(df['created_time'], format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
import torch, sys, os, argparse
sys.path.append('./modeling')
import embedding_models as em
import data_utils, model_utils, datasets
import model_layers as ml
import torch.optim as optim
import torch.nn as nn
import json
import pandas as pd
from itertools import chain
SEED = 0
NUM_GPUS = 1
use_cuda = False
ENC_TYPE='lstm'
USE_WORD=True
CONN_LOSS_FACTOR = 0.01
SCALE_ATT='scale'
dim2i = {'Social Val': 0, 'Polite': 1, 'Impact': 2, 'Fact': 3,
'Sent':4, 'Emo': 5}
WEIGHTS = [.3, .5, .3, 1/6., 1/6.] + \
[1., 1., 1., 1., 1., 3., 3., 1., 1., 3., 3.] + [3] # Emo is last # W7
CONN_LOSS = nn.CrossEntropyLoss
CONN_EMO_LOSS = nn.MultiLabelSoftMarginLoss
verbna_dim2i = {'Social Val': 0, 'Polite': 1, 'Impact': 2, 'Fact': 3, 'Sent':4,
'P(wt)': 5, 'P(wa)': 6, 'P(at)': 7, 'E(t)': 8, 'E(a)': 9, 'V(t)': 10, 'V(a)': 11,
'S(t)': 12, 'S(a)': 13, 'power': 14, 'agency': 15, 'Emo': 16}
NUM_VERB_DIMS = 9 # 12
def eval(model_handler, dev_data, class_wise=False):
'''
Evaluates the given model on the given data, by computing
macro-averaged F1, precision, and recall scores. Can also
compute class-wise scores. Prints the resulting scores
:param class_wise: whether to return class-wise scores. Default(False):
does not return class-wise scores.
:return: a dictionary from score names to the score values.
'''
model_handler.eval_and_print(data_name='TRAIN', class_wise=class_wise)
model_handler.eval_and_print(data=dev_data, data_name='DEV',
class_wise=class_wise)
def extract_embeddings(model_handler, data, dev_data, data_name='train'):
model_handler.get_embeddings(data=dev_data, data_name=data_name)
def save_predictions(model_handler, dev_data, out_name, class_wise=False, is_test=False):
trn_pred_lst, _ = model_handler.predict(class_wise=class_wise) # predict train
trn_out, trn_cols = predict_helper(trn_pred_lst, model_handler.dataloader.data)
| pd.DataFrame(trn_out, columns=trn_cols) | pandas.DataFrame |
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
with pytest.raises(TypeError):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
with pytest.raises(TypeError):
td.__rfloordiv__(dt64)
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
with pytest.raises(TypeError):
td.__rfloordiv__(np.float64(2.0))
with pytest.raises(TypeError):
td.__rfloordiv__(np.uint8(9))
with pytest.raises(TypeError, match="Invalid dtype"):
# deprecated GH#19761, enforced GH#29797
td.__rfloordiv__(np.int32(2.0))
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
with pytest.raises(TypeError, match="Invalid dtype"):
# Deprecated GH#19761, enforced GH#29797
# TODO: GH-19761. Change to TypeError.
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
with pytest.raises(TypeError):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = | Timedelta(minutes=3) | pandas.Timedelta |
import optuna
import pandas as pd
import numpy as np
from scipy.stats import rankdata
import pandas_ta as pta
from finta import TA as fta
import talib as tta
import re
import warnings
import pareto
warnings.filterwarnings("ignore")
from timeit import default_timer as timer
def col_name(function, study_best_params):
"""
Create consistent column names given string function and params
:param function: Function represented as string
:param study_best_params: Params for function
:return:
"""
# Optuna string of indicator
function_name = function.split("(")[0].replace(".", "_")
# Optuna string of parameters
params = re.sub('[^0-9a-zA-Z_:,]', '', str(study_best_params)).replace(",", "_").replace(":", "_")
# Concatenate name and params to define
col = f"{function_name}_{params}"
return col
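# Illustrative example (hypothetical indicator string and params):
#   col_name("pta.rsi(X.close, length=14)", {"length": 14}) -> "pta_rsi_length_14"
# i.e. a stable, filesystem-safe column name that encodes the function and its fitted parameters.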
def _weighted_pearson(y, y_pred, w=None, pearson=True):
"""Calculate the weighted Pearson correlation coefficient."""
if pearson:
if w is None:
w = np.ones(len(y))
# idx = ~np.logical_or(np.isnan(y_pred), np.isnan(y)) # Drop NAs w/boolean mask
# y = np.compress(idx, np.array(y))
# y_pred = np.compress(idx, np.array(y_pred))
# w = np.compress(idx, w)
with np.errstate(divide='ignore', invalid='ignore'):
y_pred_demean = y_pred - np.average(y_pred, weights=w)
y_demean = y - np.average(y, weights=w)
corr = ((np.sum(w * y_pred_demean * y_demean) / np.sum(w)) /
np.sqrt((np.sum(w * y_pred_demean ** 2) *
np.sum(w * y_demean ** 2)) /
(np.sum(w) ** 2)))
if np.isfinite(corr):
return np.abs(corr)
return 0.
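# The weighted Pearson computed above is
#   r_w = sum(w*(x - xbar_w)*(y - ybar_w)) / sqrt(sum(w*(x - xbar_w)^2) * sum(w*(y - ybar_w)^2)),
# with xbar_w, ybar_w the weighted means; the absolute value is returned, so only correlation
# strength (not sign) is rewarded, and non-finite results fall back to 0.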
def _weighted_spearman(y, y_pred, w=None):
"""Calculate the weighted Spearman correlation coefficient."""
# idx = ~np.logical_or(np.isnan(y_pred), np.isnan(y)) # Drop NAs w/boolean mask
# y = np.compress(idx, np.array(y))
# y_pred = np.compress(idx, np.array(y_pred))
# w = np.compress(idx, w)
y_pred_ranked = np.apply_along_axis(rankdata, 0, y_pred)
y_ranked = np.apply_along_axis(rankdata, 0, y)
return _weighted_pearson(y_pred_ranked, y_ranked, w, pearson=False)
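# Spearman here is simply the weighted Pearson above applied to the column-wise ranks of y and y_pred.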
def _trial(self, trial, X):
"""
Calculate indicator using best fitted trial over X
:param self: Optuna study
:param trial: Optuna trial
:param X: dataset
:return:
"""
# Evaluate TA defined as optuna trial string
res = eval(self.function)
# If return is tuple, convert to DF
if isinstance(res, tuple):
res = pd.DataFrame(res).T
# Index result with X index
res = pd.DataFrame(res, index=X.index)
# Create consistent column names with function string and params
name = col_name(self.function, trial.params)
# Append integer identifier to DF with multiple columns
if len(res.columns) > 1:
res.columns = [f"{name}_{i}" for i in range(len(res.columns))]
else:
res.columns = [f"{name}"]
return res
# Select the trial that maximizes mean correlation while minimizing its spread (std dev) across objectives
def _min_max(study):
"""
Multi-objective function to find best trial index with minimum deviation and max correlation
:param study: Optuna study
:return:
"""
# Iterate pareto-front trials storing mean correlation and std dev
df = []
for trial in study.best_trials:
df.append([trial.number, np.mean(trial.values), np.std(trial.values)])
# Sort dataframe ascending by the std dev of the correlations (column 2)
df = pd.DataFrame(df).sort_values(by=2, ascending=True)
# Sort df with best trial in first row
if len(df) > 1 and len(df.iloc[:, 1:3].drop_duplicates()) > 1:
# Create second pareto to maximize correlation and minimize stddev
# Epsilons define the precision, i.e. the dominance tolerance for each objective;
# here the stddev objective's tolerance is set to half the spread of the candidates' mean correlations
try:
nd = pareto.eps_sort([list(df.itertuples(False))], objectives=[1, 2],
epsilons=[1e-09, np.std(df[1])*.5], maximize=[1])
except:
# Something went wrong, return df
nd = df
# Sort remaining candidates
nd = | pd.DataFrame(nd) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use("svg")
import matplotlib.pyplot as plt
import seaborn as sns
from seaborn.palettes import blend_palette
from seaborn.utils import set_hls_values
def ci_error(lower, upper, truth):
below = np.maximum(lower - truth, 0)
above = np.maximum(truth - upper, 0)
return below + above
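# Illustrative values: ci_error(lower=2, upper=5, truth=7) -> 2 (truth exceeds the upper bound by 2);
# any truth inside [lower, upper] contributes 0, so only credible-interval misses are penalised.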
epsilon = 0.001
sns.set(style="ticks", palette="colorblind", context=snakemake.wildcards.context)
codebook = pd.read_table(snakemake.input.codebook, index_col=0)
errors = []
counts = []
ci_errors = []
for i, (mean, posterior_counts, raw_counts, known_counts) in enumerate(zip(
snakemake.params.means,
snakemake.input.posterior_counts,
snakemake.input.raw_counts,
snakemake.input.known_counts)):
posterior_estimates = pd.read_table(posterior_counts, index_col=[0, 1])
raw_counts = pd.read_table(raw_counts, index_col=[0, 1])
raw_counts = raw_counts["exact"] + raw_counts["corrected"]
known_counts = pd.read_table(known_counts, index_col=[0, 1])
known_counts = known_counts.reindex(codebook.index, level=1)
# remove PTPN14 because we have artificially increased its simulated expression
# (keeping it would bias these plots)
known_counts = known_counts[known_counts.index.get_level_values(1) != "PTPN14"]
raw_counts = raw_counts.reindex(known_counts.index, fill_value=0)
posterior_estimates = posterior_estimates.reindex(known_counts.index, fill_value=0)
dropouts = known_counts[(known_counts["count"] > 0) & (raw_counts == 0)]
print("dropouts", dropouts)
counts.append(pd.DataFrame({"known": known_counts["count"], "raw": raw_counts, "posterior": posterior_estimates["expr_map"]}))
errors.append(pd.DataFrame({"error": raw_counts - known_counts["count"], "mean": mean, "type": "raw", "known": known_counts["count"]}))
errors.append(pd.DataFrame({"error": posterior_estimates["expr_map"] - known_counts["count"], "mean": mean, "type": "posterior", "known": known_counts["count"]}))
errors.append(pd.DataFrame({
"error": ci_error(posterior_estimates["expr_ci_lower"], posterior_estimates["expr_ci_upper"], known_counts["count"]),
"mean": mean,
"type": "ci",
"known": known_counts["count"]}))
counts = | pd.concat(counts) | pandas.concat |
from datetime import date, datetime
import pandas
from typing import NoReturn, Tuple
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
from IMLearn.learners.regressors import LinearRegression
from IMLearn.utils import split_train_test
pio.templates.default = "simple_white"
POSITIVE_OR_ZERO_COLS = ["yr_renovated", "floors", "sqft_basement", "bathrooms"]
POSITIVE_COLS = ["sqft_living", "price", "sqft_living15", "sqft_above", "yr_built", "sqft_lot", "sqft_lot15"]
REDUNDANT_COLS = ["lat", "long"]
DATE_TIME_FORMAT = "%Y%m%dT%H%M%S%f"
MAX_ROOMS = 15
MAX_LOT_SQRT = 1250000
MAX_LOT_14_SQRT = 500000
REPEAT_FACTOR = 3
RESOLUTION = 0.01
RENOVATED_FACTOR = 0.25
def load_data(filename: str) -> pd.DataFrame:
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
return | pandas.read_csv(filename) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
import pg8000
from sqlalchemy import create_engine
import h5py
import datetime
engine = create_engine('postgresql+pg8000://user:[email protected]/ecco_biwa_db')
ebintsql = '''
select ebint from ecco_biwa_lakes_v_0_1
where
ccluster30 = 1 or
ccomsat = 1 or
cbathnose = 1 or
ebint in (select ebint from "bath_lake_poly_NOSEFI_v5") or
ebint in (select ebint_old from "bath_lake_poly_NOSEFI_v5")
'''
ee = pd.read_sql(ebintsql, engine)
mrrosA = ['Catchment_mrros_EUR-11_ICHEC-EC-EARTH_%s_r3i1p1_DMI-HIRHAM5_v1_day_%s0101-%s1231.h5' % (e, y, y+4) for e, y in zip(['historical', 'rcp45', 'rcp45', 'rcp45'], [2001, 2031, 2061, 2091])]
mrrosB = ['Catchment_mrros_EUR-11_ICHEC-EC-EARTH_%s_r3i1p1_DMI-HIRHAM5_v1_day_%s0101-%s1231.h5' % (e, y+5, y+9) for e, y in zip(['rcp45', 'rcp45', 'rcp45', 'rcp45'], [2001, 2031, 2061, 2091])]
# tasA = 'Catchment_tas_EUR-11_ICHEC-EC-EARTH_historical_r3i1p1_DMI-HIRHAM5_v1_day_20010101-20051231.h5'
# tasB = 'Catchment_tas_EUR-11_ICHEC-EC-EARTH_rcp45_r3i1p1_DMI-HIRHAM5_v1_day_20060101-20101231.h5'
ya0list = [2001, 2031, 2061, 2091]
ya1list = [y+4 for y in ya0list]
yb0list = [y+5 for y in ya0list]
yb1list = [y+9 for y in ya0list]
yyy = list(zip(ya0list, ya1list, yb0list, yb1list))  # materialise: a bare zip() iterator would be exhausted after the first lake
f = h5py.File(mrrosA[0], mode='r')
k = f.keys()
f.close()
for i, ebint in enumerate(ee['ebint']):
print('%s %s' % (i, ebint))
results = np.nan * np.arange(4)
for i, yy in enumerate(yyy):
ya0, ya1, yb0, yb1 = yy
print('%s %s' % (i, ebint))
h = hex(int(ebint)).rstrip('L').lstrip('0x')
if not (h in k):
continue
ndaysA = (datetime.date(ya1, 12, 31) - datetime.date(ya0, 1, 1)).days + 1
datesA = [datetime.date(ya0, 1, 1) + datetime.timedelta(d)
for d in range(ndaysA)]
ndaysB = (datetime.date(yb1, 12, 31) - datetime.date(yb0, 1, 1)).days + 1
datesB = [datetime.date(yb0, 1, 1) + datetime.timedelta(d)
for d in range(ndaysB)]
dfA = pd.DataFrame(datesA, columns = ['date'])
dfB = pd.DataFrame(datesB, columns = ['date'])
dfA['mrros'] = h5py.File(mrrosA[i], mode='r')[h][:]
dfB['mrros'] = h5py.File(mrrosB[i], mode='r')[h][:]
df = | pd.concat([dfA, dfB]) | pandas.concat |
# pylint: disable=too-many-instance-attributes,too-many-locals,unused-argument,no-self-use,wrong-import-order
"""
Taking Terra results, populate sample-metadata for NAGIM project.
Has 2 commands: transfer and parse.
The following command transfers CRAMs, GVCFs files along with corresponding indices,
and QC files from a Terra workspace to the GCP upload bucket.
```
python scripts/parse_nagim.py transfer \
--tmp-dir nagim-parse-tmp \
--use-batch
```
This can be run through the analysis runner with the `nagim` dataset, because
the hail nagim service accounts were added as readers to the Terra workspace using
[Terra tools](https://github.com/broadinstitute/terra-tools/tree/master/scripts/register_service_account)
as follows:
```
git clone https://github.com/broadinstitute/terra-tools
python terra-tools/scripts/register_service_account/register_service_account.py \
--json_credentials nagim-test-133-hail.json \
--owner_email <EMAIL>
```
(Assuming the `--json_credentials` value is the JSON key for the Hail "test" service
account - the same command should be repeated for the "standard" one - and the
`--owner_email` value is the email where notifications will be sent.)
Now, assuming the CRAMs and GVCFs are transferred, the following command populates the
sample metadata DB objects:
```
python scripts/parse_nagim.py parse \
--tmp-dir nagim-parse-tmp \
--skip-checking-objects \
--confirm
```
It writes each sample to its corresponding project.
The `--skip-checking-objects` flag tells the parser to skip checking the existence of
objects on buckets, which is useful to speed up execution as long as we trust the
transfer that happened in the previous `transfer` command. It also disables the md5
and file-size checks.
The script also has to be run under nagim-test or nagim-standard. The `standard`
access level is needed to populate data from a production run, which is controlled by
adding the `--prod` flag to both the `transfer` and `parse` commands.
"""
import logging
import subprocess
import tempfile
from dataclasses import dataclass, field
from os.path import join, exists, basename
from typing import List, Dict, Any, Optional, Tuple, Callable, Union
import json
import gcsfs
import click
import pandas as pd
from cpg_pipes.pipeline import setup_batch
from cpg_pipes.resources import DRIVER_IMAGE
from cpg_pipes.utils import can_reuse
from sample_metadata.models import (
AnalysisStatus,
AnalysisType,
AnalysisModel,
)
from sample_metadata.apis import SampleApi
from sample_metadata.parser.generic_parser import GenericParser, GroupedRow
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
NAGIM_PROJ_ID = 'nagim'
NAMESPACE = 'main'
# Mapping the KCCG project IDs to internal CPG project IDs
PROJECT_ID_MAP = {
'1KB': 'thousand-genomes',
'ALS': 'csiro-als',
'AMP-PD': 'amp-pd',
'HGDP': 'hgdp',
'MGRB': 'mgrb',
'TOB': 'tob-wgs',
'acute_care': 'acute-care',
}
# 2 columns: sample IDs used in the NAGIM run, and a project ID.
SAMPLE_TO_PROJECT_TSV_PATH = 'gs://cpg-nagim-main/metadata/nagim-terra-samples.tsv'
SRC_BUCKETS = {
'test': {
'Australia': [ # Australian Terra workspace
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/2232b739-5183-4935-bb84-452a631c31ea',
],
'US': [  # The US Terra workspace
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/1a9237ff-2e6e-4444-b67d-bd2715b8a156',
],
},
'main': {
'Australia': [
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/8b5c4805-a08c-4b22-9521-f003e1e02153',
'gs://fc-975676a8-4e21-46af-bc02-816044ad7448/1e968324-0d1d-4061-86d5-2f2678363e5a',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/376b7e6e-3e9a-4608-899b-3ae56f42b8ae',
'gs://fc-fa51701d-03df-4ca7-8408-5c859458759d/1c6b5f64-1b83-4f98-9ba8-0cc7918677a9',
'gs://fc-10674f84-3eed-440a-b6fd-f6b0a7a3f3d0/a521fa83-0974-4b0b-8ffd-de8bb7363adc',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/95b12dea-5d83-4e19-9a9d-4616d69ec9a3',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/4ee1f6ce-8045-49c5-8fd0-6409b3bd063f',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/bc178a03-ad33-4eba-8581-a5ee441d1370',
'gs://fc-f42ce9c2-17c2-4ae9-ac49-657ad9783280/2a991598-d7bc-4aea-af81-ff376d131c3b',
'gs://fc-30c132a7-2e19-4b73-9d70-e23c405740a2/9585ddb4-fa1c-499a-b424-32cf9def33a5',
'gs://fc-79767284-d7a5-4565-9816-61c6e28e9f7f/37959029-3ed9-4415-aa0a-f4c2337b9c14',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/ceaed9aa-9e17-4b19-9926-a320ee614d6e',
'gs://fc-7312af9d-7217-4eef-a6c0-c3637ade1662/d0bbd0be-3f66-4308-9376-34844d520073',
'gs://fc-79767284-d7a5-4565-9816-61c6e28e9f7f/65bca9dc-99b5-4eac-9e29-a82ef94c542c',
'gs://fc-fa51701d-03df-4ca7-8408-5c859458759d/fe652736-53aa-4fab-bc24-8fec9f7cea8e',
'gs://fc-ddb2e6d7-319a-4dc2-aa79-f640c2f889d3/defa7f3c-b04d-4a2d-ae80-16379be145e8',
'gs://fc-79cf62c1-c8c6-4934-93cd-dcd792d905d8/e47071c6-cc81-4c77-a860-56bd5fb75fff',
'gs://fc-3a36f1b1-761b-4d24-ba78-f8f72a55daab/d57f15fb-c7ae-45e2-bf17-f305493efa4a',
],
'US': [
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/153e4788-1c48-4a51-864e-9707dbae5c59',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/b4a00407-f6c6-4fd0-b71f-820e047f792c',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/914b7deb-9156-4cc8-8eb0-b13a6d008e2b',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/bfa7f93d-06c8-40d5-b1da-de68b390d8cf',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/b9fab668-3b28-4e58-8af2-5d443d7aae2f',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/884b65af-adba-4cbf-a068-48ea9e948524',
],
},
}
class Source:
"""
Type of files we pull (e.g. CRAM, GVCF, QC)
"""
def __init__(
self,
name: str,
search_pattern_by_ending: Dict[str, str],
upload_bucket: Union[str, Callable],
):
self.name = name
self.id = name.lower()
self.search_pattern_by_ending = search_pattern_by_ending
self._upload_bucket = upload_bucket
def get_upload_bucket(self, ending=None):
"""
Upload bucket can be a string or a lambda taking filename ending as an argument
"""
if isinstance(self._upload_bucket, str):
return self._upload_bucket
assert ending
return self._upload_bucket(ending)
def __repr__(self):
return self.name
def transfer(self, hbatch):
"""
Search files in buckets using search patterns and copy to CPG upload buckets
"""
for region, buckets in SRC_BUCKETS[NAMESPACE].items():
for bucket in buckets:
for ending, pattern in self.search_pattern_by_ending.items():
_add_batch_job(
cmd=(
f"gsutil ls '{bucket}/{pattern}'"
f' | gsutil -m cp -I {self.get_upload_bucket(ending)}/'
),
hbatch=hbatch,
job_name=(
f'{region}: transfer {self.name} {ending} files '
f'from {bucket}'
),
)
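    # For example (hypothetical bucket IDs), the CRAM source defined below expands to commands like
    #   gsutil ls 'gs://fc-.../**/call-ConvertToCram/**/*.cram' | gsutil -m cp -I gs://cpg-nagim-main-upload/cram/
    # with one Batch job created per (region, bucket, file-ending) combination.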
# Instantiating file sources
SOURCES = {
s.name: s
for s in [
Source(
name='CRAM',
search_pattern_by_ending={
'cram': '**/call-ConvertToCram/**/*.cram',
'cram.crai': '**/call-ConvertToCram/**/*.cram.crai',
'cram.md5': '**/call-ConvertToCram/**/*.cram.md5',
},
upload_bucket=f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-upload/cram',
),
Source(
name='GVCF',
search_pattern_by_ending={
'hard-filtered.g.vcf.gz': '**/call-MergeVCFs/**/*.hard-filtered.g.vcf.gz',
'hard-filtered.g.vcf.gz.tbi': '**/call-MergeVCFs/**/*.hard-filtered.g.vcf.gz.tbi',
},
upload_bucket=f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-upload/gvcf',
),
Source(
name='QC',
upload_bucket=lambda ending: f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-upload/QC/{ending}',
search_pattern_by_ending={
e: f'**/*.{e}'
for e in [
'alignment_summary_metrics',
'bait_bias_detail_metrics',
'bait_bias_summary_metrics',
'detail_metrics',
'duplicate_metrics',
'insert_size_metrics',
'pre_adapter_detail_metrics',
'pre_adapter_summary_metrics',
'quality_distribution_metrics',
'raw_wgs_metrics',
'summary_metrics',
'variant_calling_detail_metrics',
'variant_calling_summary_metrics',
'wgs_metrics',
'preBqsr.selfSM',
]
},
),
]
}
# Metrics we extract from MultiQC and put into Sequence.meta and Analysis
QC_METRICS = [
# id, multiqc id
('freemix', 'FREEMIX'),
('median_coverage', 'MEDIAN_COVERAGE'),
('pct_chimeras', 'PCT_CHIMERAS'),
('pct_30x', 'PCT_30X'),
('pct_reads_aligned_in_pairs', 'PCT_READS_ALIGNED_IN_PAIRS'),
('percent_duplication', 'PERCENT_DUPLICATION'),
('median_insert_size', 'summed_median'),
]
# Only process the following sources:
SOURCES_TO_PROCESS = [
'QC',
# 'GVCF',
# 'CRAM',
]
@dataclass
class Sample:
"""
Represent a parsed sample, so we can check that all required files for
a sample exist, and also populate and fix sample IDs.
"""
nagim_id: str
cpg_id: Optional[str] = None
ext_id: Optional[str] = None
project_id: Optional[str] = None
# File paths indexed by Source and file ending
files: Dict[Tuple[str, str], str] = field(default_factory=dict)
gvcf: Optional[str] = None
tbi: Optional[str] = None
cram: Optional[str] = None
crai: Optional[str] = None
cram_md5: Optional[str] = None
# File paths indexed by ending
qc_files: Dict[str, str] = field(default_factory=dict)
# QC stats indexed by ending
qc_values: Dict[str, str] = field(default_factory=dict)
@click.group()
def cli():
"""
Click group to handle multiple CLI commands defined further
"""
@cli.command()
@click.option('--tmp-dir', 'tmp_dir')
@click.option('--use-batch', is_flag=True, help='Use a Batch job to transfer data')
@click.option('--dry-run', 'dry_run', is_flag=True)
def transfer(
tmp_dir,
use_batch: bool,
dry_run: bool,
):
"""
Transfer data from the Terra workspaces to the GCP bucket. Must be run with
a personal account, because read permissions on the Terra buckets are granted
to the Terra user emails with whom the workspace is shared, so Hail service
accounts won't work here.
"""
if not tmp_dir:
tmp_dir = tempfile.gettempdir()
if use_batch:
hbatch = setup_batch(
title='Transferring NAGIM data',
keep_scratch=False,
tmp_bucket=f'cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-tmp',
analysis_project_name=NAGIM_PROJ_ID,
)
else:
hbatch = None
for source in SOURCES_TO_PROCESS:
SOURCES[source].transfer(hbatch)
if use_batch:
hbatch.run(wait=True, dry_run=dry_run)
if dry_run:
return
samples = _parse_sample_project_map(SAMPLE_TO_PROJECT_TSV_PATH)
# Find GVCFs, CRAMs and other files after transferring, and check that all
# of them have corresponding tbi/crai/md5 files.
_find_upload_files(
samples,
tmp_dir,
overwrite=True, # Force finding files
)
def _find_upload_files(samples: List[Sample], tmp_dir, overwrite=False):
"""
Populate fields for each sample and verify that every sample has an expected
set of files.
"""
sample_by_sid = {s.nagim_id: s for s in samples}
# Find files
for source_name in SOURCES_TO_PROCESS:
source = SOURCES[source_name]
for ending in source.search_pattern_by_ending:
paths = _cache_bucket_ls(
ending_to_search=ending,
source_bucket=source.get_upload_bucket(ending),
tmp_dir=tmp_dir,
overwrite=overwrite,
)
for path in paths:
assert path.endswith(f'.{ending}')
sid = basename(path)[: -len(f'.{ending}')]
if sid not in sample_by_sid:
continue
sample_by_sid[sid].files[(source.name, ending)] = path
# Tally found files
for source_name in SOURCES_TO_PROCESS:
source = SOURCES[source_name]
for ending in source.search_pattern_by_ending:
found_samples = len(
[s for s in sample_by_sid.values() if (source.name, ending) in s.files]
)
logger.info(
f'Found {found_samples}/{len(sample_by_sid)} '
f'{source.name}/{ending} files'
)
# For each sample, verify that the set of found files is consistent
for sample in sample_by_sid.values():
if 'GVCF' in SOURCES_TO_PROCESS:
sample.gvcf = sample.files.get(('GVCF', 'hard-filtered.g.vcf.gz'))
sample.tbi = sample.files.get(('GVCF', 'hard-filtered.g.vcf.gz.tbi'))
if sample.gvcf and not sample.tbi:
logger.warning(f'Found GVCF without TBI: {sample.nagim_id}')
elif sample.tbi and not sample.gvcf:
logger.warning(f'Found TBI without GVCF: {sample.nagim_id}')
elif not sample.gvcf:
logger.warning(f'Not found GVCF: {sample.nagim_id}')
if 'CRAM' in SOURCES_TO_PROCESS:
sample.cram = sample.files.get((SOURCES['CRAM'].name, 'cram'))
sample.crai = sample.files.get((SOURCES['CRAM'].name, 'cram.crai'))
sample.cram_md5 = sample.files.get((SOURCES['CRAM'].name, 'cram.md5'))
if sample.cram and not sample.crai:
logger.warning(f'Found CRAM without CRAI: {sample.nagim_id}')
if sample.cram and not sample.cram_md5:
logger.warning(f'Found CRAM without md5: {sample.nagim_id}')
if sample.crai and not sample.cram:
logger.warning(f'Found CRAI without CRAM: {sample.nagim_id}')
if 'QC' in SOURCES_TO_PROCESS:
for qc_ending in [
'alignment_summary_metrics',
'duplicate_metrics',
'insert_size_metrics',
'preBqsr.selfSM',
'wgs_metrics',
]:
no_qc = 0
key = (SOURCES['QC'].name, qc_ending)
if not sample.files.get(key):
if sample.gvcf:
logger.warning(
f'Found GVCF without QC {qc_ending}: {sample.nagim_id}'
)
no_qc += 1
continue
if no_qc:
logger.warning(f'Not found QC {qc_ending} for {no_qc} samples')
sample.qc_files[qc_ending] = sample.files[key]
@cli.command()
@click.option('--tmp-dir', 'tmp_dir')
@click.option(
'--confirm', is_flag=True, help='Confirm with user input before updating server'
)
@click.option('--dry-run', 'dry_run', is_flag=True)
@click.option(
'--overwrite-multiqc',
'overwrite_multiqc',
is_flag=True,
help='Redo MultiQC even if report/json exist',
)
@click.option(
'--skip-checking-objects',
'skip_checking_objects',
is_flag=True,
help='Do not check objects on buckets (existence, size, md5)',
)
def parse(
tmp_dir,
confirm: bool,
dry_run: bool,
overwrite_multiqc: bool,
skip_checking_objects: bool,
):
"""
Assuming the data is transferred to the CPG bucket, populate the SM projects.
"""
if not tmp_dir:
tmp_dir = tempfile.gettempdir()
samples = _parse_sample_project_map(SAMPLE_TO_PROJECT_TSV_PATH)
# Find GVCFs, CRAMs and other files after transferring, and check that all
# of them have corresponding tbi/crai/md5 files.
_find_upload_files(samples, tmp_dir)
# Some samples processed with Terra use CPG IDs; check whether we already
# have them in the SMDB and fix the external IDs accordingly.
_fix_sample_ids(samples)
multiqc_html_path = join(
f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-web/qc/multiqc.html'
)
multiqc_json_path = join(
f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-analysis/qc/multiqc_data.json'
)
if 'QC' in SOURCES_TO_PROCESS:
logger.info('Running MultiQC on QC files')
parsed_json_fpath = _run_multiqc(
samples, multiqc_html_path, multiqc_json_path, overwrite=overwrite_multiqc
)
gfs = gcsfs.GCSFileSystem()
with gfs.open(parsed_json_fpath) as f:
row_by_sample = json.load(f)
for s in samples:
if s.nagim_id in row_by_sample:
s.qc_values = row_by_sample[s.nagim_id]
# Creating a parser for each project separately, because `sample_metadata_project`
# is an initialization parameter, and we want to write to multiple projects.
for proj in PROJECT_ID_MAP.values():
sm_proj = _get_sm_proj_id(proj)
sample_tsv_file = join(tmp_dir, f'sm-nagim-parser-samples-{sm_proj}.csv')
rows = []
for s in samples:
if s.project_id != proj:
continue
row = dict(
cpg_id=s.cpg_id,
ext_id=s.ext_id,
gvcf=s.gvcf,
cram=s.cram,
project=s.project_id,
)
for metric, val in s.qc_values.items():
row[f'qc_value_{metric}'] = val
rows.append(row)
if len(rows) == 0:
logger.info(f'No samples for project {sm_proj} found, skipping')
continue
df = pd.DataFrame(rows)
df.to_csv(sample_tsv_file, index=False)
logger.info(
f'Processing {len(df)} samples for project {sm_proj}, '
f'sample manifest: {sample_tsv_file}'
)
parser = NagimParser(
path_prefix=None,
sample_metadata_project=sm_proj,
skip_checking_gcs_objects=skip_checking_objects,
verbose=False,
multiqc_html_path=multiqc_html_path,
multiqc_json_path=multiqc_json_path,
)
with open(sample_tsv_file) as f:
parser.parse_manifest(f, dry_run=dry_run, confirm=confirm)
def _run_multiqc(
samples: List[Sample],
html_fpath: str,
json_fpath: str,
overwrite: bool = False,
) -> str:
"""
Runs MultiQC on QC files from Picard and VerifyBAMID.
Generates an HTML report and puts it into the nagim web bucket.
Generates a JSON with metrics, extracts useful metrics into another JSON
indexed by sample, and returns path to this JSON.
"""
tmp_bucket = f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-tmp/qc'
row_by_sample_json_path = f'{tmp_bucket}/parsed-qc.json'
if can_reuse(row_by_sample_json_path, overwrite):
return row_by_sample_json_path
b = setup_batch(
title='Run MultiQC on NAGIM',
keep_scratch=False,
tmp_bucket=f'cpg-{NAGIM_PROJ_ID}-main-tmp',
analysis_project_name=NAGIM_PROJ_ID,
)
if not can_reuse([json_fpath, html_fpath], overwrite):
j = b.new_job('Run MultiQC')
j.image(DRIVER_IMAGE)
qc_endings = set()
qc_paths = []
for s in samples:
for qc_ending, qc_path in s.qc_files.items():
qc_paths.append(qc_path)
qc_endings.add(qc_ending)
file_list_path = f'{tmp_bucket}/multiqc-file-list.txt'
df = | pd.DataFrame({'_': path} for path in qc_paths) | pandas.DataFrame |
from sklearn.metrics import confusion_matrix, f1_score, roc_curve
import numpy as np
import pandas as pd
class analysis:
def __init__(self):
pass
def _getComplexParams(self, abs=False):
"""
Function for extracting the data associated with
the second component of the complex source.
To call:
_getComplexParams(abs)
Parameters:
abs Take the absolute value of the difference
Postcondition:
The flux of the second component, the differences
in phase and Faraday depth between the two components,
and the noise value are stored in the data
frame "self.dfComplex_".
The model's predicted probability that
the source is complex is also stored.
"""
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 1)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux of the second component
# ===================================================
flux = self.testFlux_[loc]
flux = np.asarray([f[1] for f in flux])
# ===================================================
# Compute the difference in phases
# ===================================================
chi = self.testChi_[loc]
chi = np.asarray([c[1] - c[0] for c in chi])
if abs: chi = np.abs(chi)
# ===================================================
# Compute the difference in Faraday depths
# ===================================================
depth = self.testDepth_[loc]
depth = np.asarray([d[1] - d[0] for d in depth])
if abs: depth = np.abs(depth)
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
chi = pd.Series(chi, name='chi')
depth = pd.Series(depth, name='depth')
flux = pd.Series(flux, name='flux')
prob = pd.Series(prob, name='prob')
sig = pd.Series(sig, name="sig")
# ===================================================
# Store the results in a dataframe
# ===================================================
self.dfComplex_ = pd.concat([chi, depth, flux, prob, sig], axis=1)
def _getSimpleParams(self):
"""
Function for extracting the data associated with
the simple sources.
To call:
_getSimpleParams()
Parameters:
None
Postcondition:
    The flux, phase, Faraday depth, noise value, and the model's
    predicted probability of being complex are extracted for the
    simple sources (mirroring _getComplexParams above).
"""
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 0)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux
# ===================================================
flux = self.testFlux_[loc]
# ===================================================
# Extract the phase
# ===================================================
chi = self.testChi_[loc]
# ===================================================
# Extract the Faraday depth
# ===================================================
depth = self.testDepth_[loc]
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
chi = pd.Series(chi, name='chi')
depth = pd.Series(depth, name='depth')
flux = pd.Series(flux, name='flux')
        prob  = pd.Series(prob,  name='prob')
        sig   = pd.Series(sig,   name="sig")
        # ===================================================
        #    Store the results in a dataframe
        # ===================================================
        self.dfSimple_ = pd.concat([chi, depth, flux, prob, sig], axis=1)
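# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the test* attribute
# names below are inferred from the methods above and are normally populated
# elsewhere in the class; here they are filled with small synthetic arrays.
def _demo_analysis_complex_params():
    a = analysis()
    a.testLabel_ = np.array([1, 0, 1, 1, 0])                # 1 marks a complex source
    a.testProb_  = np.array([0.9, 0.2, 0.8, 0.7, 0.1])      # predicted P(complex)
    a.testFlux_  = np.random.rand(5, 2)                     # fluxes of both components
    a.testChi_   = np.random.rand(5, 2)                     # phases of both components
    a.testDepth_ = np.random.randn(5, 2)                    # Faraday depths
    a.testSig_   = np.random.rand(5)                        # noise parameter
    a._getComplexParams(abs=True)
    return a.dfComplex_                                     # chi, depth, flux, prob, sig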
import pandas as pd
import numpy as np
import scipy.stats as stats
import xarray as xr
def min_subtract(traces):
return traces - traces.min(axis=1).reshape(-1,1)
def baseline_subtract(cut_traces, baseline_length):
baseline = cut_traces[:,:,:baseline_length].mean(axis=2)
psths_baselined = cut_traces - baseline.reshape(*cut_traces.shape[:2], 1)
return psths_baselined
def percentile_dff(traces, q=10):
f0s = np.percentile(traces, q, axis=1, keepdims=True)
traces = (traces-f0s)/f0s
return traces
def rolling_baseline_dff(traces, q=10, window=300):
f0s = pd.DataFrame(traces.T).rolling(window, min_periods=1, center=True).quantile(q/100)
f0s = f0s.values.T
traces = (traces-f0s)/f0s
return traces
def make_trialwise(traces, trial_lengths):
traces = np.split(traces, np.cumsum(trial_lengths[:-1]), axis=1)
shortest = min(map(lambda x: x.shape[1], traces))
traces = np.array([a[:, :shortest] for a in traces])
return traces
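# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): synthetic traces
# illustrating the expected shapes for the helpers above.
def _demo_trace_pipeline():
    n_cells, n_frames = 4, 90
    traces = np.random.rand(n_cells, n_frames) + 1.0             # cells x time, positive
    dff = percentile_dff(traces, q=10)                           # dF/F per cell
    trialwise = make_trialwise(dff, trial_lengths=[30, 30, 30])  # trial x cell x time
    baselined = baseline_subtract(trialwise, baseline_length=5)
    return baselined.shape                                       # -> (3, 4, 30)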
def stim_align_trialwise(trialwise_traces, times_trial, new_start):
"""
    Aligns to a stimulus onset that is synchronous for all cells (e.g. a visual stimulus).
    Takes trialwise data (e.g. a trial x cell x time array) and rolls the data around to the
    other side of the array. Use stim_align_cellwise for stimuli that are specific to each
    cell (e.g. a holographic stimulus like a stim-test).
    Args:
        trialwise_traces (array-like): trial x cell x time array of trace data, typically from make_trialwise
        times_trial (array-like): list of stim times, one per trial; must match exactly (nan handling is untested)
        new_start (int): frame number that the PSTHs will be aligned to
"""
psth = np.zeros_like(trialwise_traces)
for i in range(trialwise_traces.shape[0]):
psth[i,:,:] = np.roll(trialwise_traces[i,:,:], -int(times_trial[i])+new_start, axis=1)
return psth
def stim_align_cellwise(trialwise_traces, times_cell, new_start):
"""
Make stim-aligned PSTHs from trialwise data (eg. trial x cell x time array). The
advantage of doing it this way (trialwise) is the trace for each cell gets rolled around
to the other side of the array, thus eliminating the need for nan padding.
Args:
        trialwise_traces (array-like): trial x cell x time array of trace data, typically from make_trialwise
        times_cell (array-like): list of stim times, one per cell; must match exactly (nan handling is untested)
new_start (int): frame number where the psths will be aligned to
"""
psth = np.zeros_like(trialwise_traces)
for i in range(trialwise_traces.shape[0]):
psth[i,:,:] = np.array([np.roll(cell_trace, -amt+new_start) for cell_trace, amt in zip(trialwise_traces[i,:,:], times_cell.astype(int))])
return psth
def cut_psths(stim_aligned, length=25):
cut_psths = stim_aligned[:,:,:length]
return cut_psths
def make_dataframe(traces, fr, stim_id, stim_name='trial_id'):
# make the dataframe
df = xr.DataArray(traces.T).to_dataset(dim='dim_0').to_dataframe()
df = df.reset_index(level=['dim_1', 'dim_2'])
    df = pd.melt(df, ('dim_1', 'dim_2'))
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from datetime import datetime
import pytest
from pandas import read_sql_query, DataFrame
from edfi_canvas_extractor.api.enrollments import _sync_without_cleanup
from edfi_lms_extractor_lib.api.resource_sync import (
SYNC_COLUMNS_SQL,
SYNC_COLUMNS,
add_hash_and_json_to,
add_sourceid_to,
)
from tests.api.api_helper import prep_expected_sync_df, prep_from_sync_db_df
IDENTITY_COLUMNS = ["id"]
COLUMNS = [
"id",
"user_id",
"course_id",
"type",
"created_at",
"created_at_date",
"updated_at",
"updated_at_date",
"associated_user_id",
"start_at",
"end_at",
"course_section_id",
"root_account_id",
"limit_privileges_to_course_section",
"enrollment_state",
"role",
"role_id",
"last_activity_at",
"last_attended_at",
"total_activity_time",
"sis_import_id",
"grades",
"sis_account_id",
"sis_course_id",
"course_integration_id",
"sis_section_id",
"section_integration_id",
"sis_user_id",
"html_url",
"user",
"last_activity_at_date",
]
CHANGED_ENROLLMENT_BEFORE = [
"1",
"Changed Enrollment Before",
"11",
"111",
"1111",
"2020-11-01",
"11111",
"111111",
"1111111",
"2020-11-01",
"11111111",
"111111111",
"1111111111",
"11111111111",
"111111111111",
"1111111111111",
"11111111111111",
"111111111111111",
"1111111111111111",
"11111111111111111",
"111111111111111111",
"1111111111111111111",
"11111111111111111111",
"111111111111111111111",
"1111111111111111111111",
"11111111111111111111111",
"111111111111111111111111",
"1111111111111111111111111",
"11111111111111111111111111",
"111111111111111111111111111",
"1111111111111111111111111111",
]
CHANGED_ENROLLMENT_AFTER = [
"1",
"*Changed Enrollment After*",
"11",
"111",
"1111",
"2020-11-01",
"11111",
"111111",
"1111111",
"2020-11-01",
"11111111",
"111111111",
"1111111111",
"11111111111",
"111111111111",
"1111111111111",
"11111111111111",
"111111111111111",
"1111111111111111",
"11111111111111111",
"111111111111111111",
"1111111111111111111",
"11111111111111111111",
"111111111111111111111",
"1111111111111111111111",
"11111111111111111111111",
"111111111111111111111111",
"1111111111111111111111111",
"11111111111111111111111111",
"111111111111111111111111111",
"1111111111111111111111111111",
]
UNCHANGED_ENROLLMENT = [
"2",
"Unchanged Enrollment",
"22",
"222",
"2222",
"2020-01-02",
"22222",
"222222",
"2222222",
"2020-02-02",
"22222222",
"222222222",
"2222222222",
"22222222222",
"222222222222",
"2222222222222",
"22222222222222",
"222222222222222",
"2222222222222222",
"22222222222222222",
"222222222222222222",
"2222222222222222222",
"22222222222222222222",
"222222222222222222222",
"2222222222222222222222",
"22222222222222222222222",
"222222222222222222222222",
"2222222222222222222222222",
"22222222222222222222222222",
"222222222222222222222222222",
"2222222222222222222222222222",
]
OMITTED_FROM_SYNC_ENROLLMENT = [
"3",
"Omitted From Sync Enrollment",
"33",
"333",
"3333",
"2020-01-03",
"33333",
"333333",
"3333333",
"2020-03-03",
"33333333",
"333333333",
"3333333333",
"33333333333",
"333333333333",
"3333333333333",
"33333333333333",
"333333333333333",
"3333333333333333",
"33333333333333333",
"333333333333333333",
"3333333333333333333",
"33333333333333333333",
"333333333333333333333",
"3333333333333333333333",
"33333333333333333333333",
"333333333333333333333333",
"3333333333333333333333333",
"33333333333333333333333333",
"333333333333333333333333333",
"3333333333333333333333333333",
]
NEW_ENROLLMENT = [
"4",
"New Enrollment",
"44",
"444",
"4444",
"2020-01-04",
"44444",
"444444",
"4444444",
"2020-04-04",
"44444444",
"444444444",
"4444444444",
"44444444444",
"444444444444",
"4444444444444",
"44444444444444",
"444444444444444",
"4444444444444444",
"44444444444444444",
"444444444444444444",
"4444444444444444444",
"44444444444444444444",
"444444444444444444444",
"4444444444444444444444",
"44444444444444444444444",
"444444444444444444444444",
"4444444444444444444444444",
"44444444444444444444444444",
"444444444444444444444444444",
"4444444444444444444444444444",
]
SYNC_DATA = [CHANGED_ENROLLMENT_AFTER, UNCHANGED_ENROLLMENT, NEW_ENROLLMENT]
def describe_when_testing_sync_with_new_and_missing_and_updated_rows():
@pytest.fixture
def test_db_after_sync(test_db_fixture):
# arrange
INITIAL_ENROLLMENT_DATA = [
CHANGED_ENROLLMENT_BEFORE,
UNCHANGED_ENROLLMENT,
OMITTED_FROM_SYNC_ENROLLMENT,
]
enrollments_initial_df = DataFrame(INITIAL_ENROLLMENT_DATA, columns=COLUMNS)
enrollments_initial_df = add_hash_and_json_to(enrollments_initial_df)
add_sourceid_to(enrollments_initial_df, IDENTITY_COLUMNS)
dateToUse = datetime(2020, 9, 14, 12, 0, 0)
enrollments_initial_df["SyncNeeded"] = 0
enrollments_initial_df["CreateDate"] = dateToUse
enrollments_initial_df["LastModifiedDate"] = dateToUse
enrollments_initial_df = enrollments_initial_df[SYNC_COLUMNS]
enrollments_sync_df = DataFrame(SYNC_DATA, columns=COLUMNS)
with test_db_fixture.connect() as con:
con.execute("DROP TABLE IF EXISTS Enrollments")
con.execute(
f"""
CREATE TABLE IF NOT EXISTS Enrollments (
{SYNC_COLUMNS_SQL}
)
"""
)
enrollments_initial_df.to_sql(
"Enrollments", test_db_fixture, if_exists="append", index=False, chunksize=1000
)
# act
_sync_without_cleanup(enrollments_sync_df, test_db_fixture)
return test_db_fixture
def it_should_have_enrollments_table_with_updated_row_and_added_new_row(
test_db_after_sync,
):
EXPECTED_ENROLLMENT_DATA_AFTER_SYNC = [
UNCHANGED_ENROLLMENT,
OMITTED_FROM_SYNC_ENROLLMENT,
CHANGED_ENROLLMENT_AFTER,
NEW_ENROLLMENT,
]
with test_db_after_sync.connect() as con:
expected_enrollments_df = prep_expected_sync_df(
                DataFrame(EXPECTED_ENROLLMENT_DATA_AFTER_SYNC, columns=COLUMNS)
from __future__ import print_function, absolute_import
import argparse
import sqlite3
from collections import defaultdict
import re
import copy
from rdkit import Chem
from .do_fragment import parse_salt_remover
from . import command_support
from . import do_fragment
from . import fragment_algorithm
from . import index_algorithm
from . import environment
from . import smiles_syntax
from . import schema
from .config import DEFAULT_RULE_SELECTION_OPTIONS
from . import _compat
from . import config
import pandas as pd
class EvalError(Exception):
pass
# =====================================
def positive_int(value):
try:
value = int(value)
except ValueError:
raise argparse.ArgumentTypeError("must be a positive integer")
if value <= 0:
raise argparse.ArgumentTypeError("must be a positive integer")
return value
# =====================================
def nonnegative_int(value):
try:
value = int(value)
except ValueError:
raise argparse.ArgumentTypeError("must be a positive integer or zero")
if not (value >= 0):
raise argparse.ArgumentTypeError("must be a positive integer or zero")
return value
# =====================================
def open_database(dbfile):
conn = None
try:
conn = sqlite3.connect(dbfile)
return conn
except:
print("Problem in opening database file, exiting for now")
exit()
# =====================================
def get_cursor(db_conn):
return db_conn.cursor()
# =====================================
def get_fragment_options_from_args(args):
return config.FragmentOptions(
max_heavies=args.tmax_heavies,
max_rotatable_bonds=args.tmax_rotatable_bonds,
rotatable_smarts=args.trotatable_smarts, # otherwise it's unicode
cut_smarts=args.tcut_smarts, # otherwise it's unicode
num_cuts=args.tnum_cuts,
salt_remover=args.tsalt_remover,
method="chiral",
min_heavies_per_const_frag=args.tmin_heavies_per_const_frag,
min_heavies_total_const_frag=args.tmin_heavies_total_const_frag
)
# =====================================
def _get_one_or_none(result):
for row in result:
return row[0]
return None
# =====================================
def _get_all(result):
rows = []
for row in result:
rows.append(row)
return rows
# =====================================
def get_rule_smiles_id(smiles, cursor=None):
c = cursor.execute(
"SELECT id FROM fragment_smi WHERE fragsmi = ?", (smiles,))
return _get_one_or_none(c)
# =====================================
def get_fingerprint_id(fingerprint, cursor=None):
c = cursor.execute(
"SELECT id FROM environment_fp WHERE envfp = ?", (fingerprint,))
return _get_one_or_none(c)
# =====================================
def find_rule_environments_for_transform(smiles_id, possible_env_fps_ids, cursor, min_pairs=10, is_db_symmetric=False):
# (lhs, rhs, envfp, envsmi, frequency)
matching_rows = []
if is_db_symmetric:
# TO DO HANDLE SYMMETRIC DB
print("Symmetric db handling not implemented yet")
else:
assert len(possible_env_fps_ids) > 0, possible_env_fps_ids
for env_id in possible_env_fps_ids:
c = cursor.execute(
"select lhs_frag, rhs_frag, envfp, envsmi, frequency from transformations where lhs_frag == ? and envfp == ? and frequency >= ?",
(smiles_id, env_id, min_pairs))
matching_rows = matching_rows + _get_all(c)
# c = cursor.execute(
# "select rhs_frag, lhs_frag, envfp, envsmi, frequency from transformations where rhs_frag == ? and envfp == ? and frequency == ?",
# (smiles_id, env_id, min_pairs))
#matching_rows = matching_rows + _get_all(c)
if len(matching_rows) == 0:
return matching_rows
# Now getting the lhs, rhs smiles and envsmi smiles. First make unique list and then get smi
fragsmi_id = {row[0]: row[0] for row in matching_rows}
for row in matching_rows:
fragsmi_id[row[1]] = row[1]
envsmi_id = {row[3]: row[3] for row in matching_rows}
sql = "select id, fragsmi from fragment_smi where id == " + " or id == ".join(["?"] * len(fragsmi_id))
result = cursor.execute(sql, tuple(fragsmi_id.values()))
id_to_lhsORrhs_smi = {row[0]: row[1] for row in result}
sql = "select id, envsmi from environment_smi where id == " + " or id == ".join(["?"] * len(envsmi_id))
result = cursor.execute(sql, tuple(envsmi_id.values()))
id_to_envsmi = {row[0]: row[1] for row in result}
# final result preparation
matching_rows_output = []
for row in matching_rows:
lhs = id_to_lhsORrhs_smi[row[0]]
rhs = id_to_lhsORrhs_smi[row[1]]
envfp = row[2]
envsmi = id_to_envsmi[row[3]]
freq = row[4]
matching_rows_output.append((lhs, rhs, envfp, envsmi, freq))
return matching_rows_output
# =====================================
class Tool(object):
def __init__(self,
db_connection, fragment_options, fragment_filter):
self.db_connection = db_connection
self.fragment_options = fragment_options
self.fragment_filter = fragment_filter
# =====================================
def _get_tool(klass, db_connection, args):
fragment_options = get_fragment_options_from_args(args)
fragment_filter = do_fragment.get_fragment_filter(fragment_options)
return klass(
db_connection=db_connection,
fragment_options=fragment_options,
fragment_filter=fragment_filter
)
# =====================================
def get_transform_tool(
db_connection, args
):
return _get_tool(TransformTool, db_connection, args)
# =====================================
class TransformTool(Tool):
def fragment_transform_smiles(self, smiles):
# Figure out how I'm going to fragment the input --smiles
if "[H]" in smiles:
# User-specified transform location
record = do_fragment.make_hydrogen_fragment_record("query", smiles, self.fragment_filter)
else:
record = do_fragment.make_fragment_record_from_smiles(smiles, self.fragment_filter)
return record
def transform(self, transform_fragments, radius=0, min_pairs=0, min_variable_size=0,
max_variable_size=9999, min_constant_size=0,
substructure_pat=None,
pool=None, is_symmetric=False):
cursor = get_cursor(self.db_connection)
return make_transform(
self.db_connection, transform_fragments,
substructure_pat=substructure_pat,
radius=radius, min_pairs=min_pairs,
min_variable_size=min_variable_size, max_variable_size=max_variable_size,
min_constant_size=min_constant_size,
pool=pool,
cursor=cursor, is_symmetric=is_symmetric)
def expand_variable_symmetry(self, transform_record):
# Expand fragmentations of transform where the variable part is symmetric
symmetry_fragments = []
for fragment in transform_record.fragments:
if fragment.num_cuts == 1:
continue # No symmetry here
elif fragment.num_cuts == 2 and fragment.variable_symmetry_class == "11":
if fragment.constant_symmetry_class == "11":
continue # Both variable and constant are symmetric
new_fragment = copy.copy(fragment)
frag1, frag2 = new_fragment.constant_smiles.split(".")
new_fragment.constant_smiles = frag2 + "." + frag1
symmetry_fragments.append(new_fragment)
elif fragment.num_cuts == 3 and fragment.variable_symmetry_class == '111':
new_fragment = copy.copy(fragment)
frag1, frag2, frag3 = new_fragment.constant_smiles.split(".")
new_fragment.constant_smiles = frag1 + "." + frag3 + "." + frag2
symmetry_fragments.append(new_fragment)
new_fragment = copy.copy(fragment)
new_fragment.constant_smiles = frag2 + "." + frag1 + "." + frag3
symmetry_fragments.append(new_fragment)
new_fragment = copy.copy(fragment)
new_fragment.constant_smiles = frag2 + "." + frag3 + "." + frag1
symmetry_fragments.append(new_fragment)
new_fragment = copy.copy(fragment)
new_fragment.constant_smiles = frag3 + "." + frag1 + "." + frag2
symmetry_fragments.append(new_fragment)
new_fragment = copy.copy(fragment)
new_fragment.constant_smiles = frag3 + "." + frag2 + "." + frag1
symmetry_fragments.append(new_fragment)
elif fragment.num_cuts == 3 and fragment.variable_symmetry_class == '112':
change_idx1, change_idx2 = int(fragment.attachment_order[0]), int(fragment.attachment_order[1])
keep_idx = int(fragment.attachment_order[2])
new_fragment = copy.copy(fragment)
frags = new_fragment.constant_smiles.split(".")
new_frags = ['', '', '']
new_frags[keep_idx] = frags[keep_idx]
new_frags[change_idx1] = frags[change_idx2]
new_frags[change_idx2] = frags[change_idx1]
new_fragment.constant_smiles = new_frags[0] + "." + new_frags[1] + "." + new_frags[2]
symmetry_fragments.append(new_fragment)
elif fragment.num_cuts == 3 and fragment.variable_symmetry_class == '121':
change_idx1, change_idx2 = int(fragment.attachment_order[0]), int(fragment.attachment_order[2])
keep_idx = int(fragment.attachment_order[1])
new_fragment = copy.copy(fragment)
frags = new_fragment.constant_smiles.split(".")
new_frags = ['', '', '']
new_frags[keep_idx] = frags[keep_idx]
new_frags[change_idx1] = frags[change_idx2]
new_frags[change_idx2] = frags[change_idx1]
new_fragment.constant_smiles = new_frags[0] + "." + new_frags[1] + "." + new_frags[2]
symmetry_fragments.append(new_fragment)
elif fragment.num_cuts == 3 and fragment.variable_symmetry_class == '122':
change_idx1, change_idx2 = int(fragment.attachment_order[1]), int(fragment.attachment_order[2])
keep_idx = int(fragment.attachment_order[0])
new_fragment = copy.copy(fragment)
frags = new_fragment.constant_smiles.split(".")
new_frags = ['', '', '']
new_frags[keep_idx] = frags[keep_idx]
new_frags[change_idx1] = frags[change_idx2]
new_frags[change_idx2] = frags[change_idx1]
new_fragment.constant_smiles = new_frags[0] + "." + new_frags[1] + "." + new_frags[2]
symmetry_fragments.append(new_fragment)
for frag in symmetry_fragments:
transform_record.fragments.append(frag)
return transform_record
# Enumerate all of the ways that the canonical unlabeled SMILES
# might be turned into a non-canonical labeled SMILES.
_bracket_wildcard_pat = re.compile(re.escape("[*]"))
_organic_wildcard_pat = re.compile(re.escape("*"))
def enumerate_permutations(smiles, is_symmetric=False):
# RDKit pre-2018 used "[*]"; this changed to using a bare "*".
if "[*]" in smiles:
wildcard_pat = _bracket_wildcard_pat
wildcard = "[*]"
elif "*" in smiles:
wildcard_pat = _organic_wildcard_pat
wildcard = "*"
n = smiles.count("*")
if n == 1:
yield "1", smiles.replace(wildcard, "[*:1]")
return
if n == 2:
sub_terms = ["[*:2]", "[*:1]"]
yield "12", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
if is_symmetric:
return
sub_terms = ["[*:1]", "[*:2]"]
yield "21", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
return
if n == 3:
sub_terms = ["[*:3]", "[*:2]", "[*:1]"]
yield "123", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
if is_symmetric:
return
sub_terms = ["[*:2]", "[*:3]", "[*:1]"]
yield "132", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
sub_terms = ["[*:3]", "[*:1]", "[*:2]"]
yield "213", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
sub_terms = ["[*:1]", "[*:3]", "[*:2]"]
yield "231", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
sub_terms = ["[*:2]", "[*:1]", "[*:3]"]
yield "312", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
sub_terms = ["[*:1]", "[*:2]", "[*:3]"]
yield "321", wildcard_pat.sub(lambda pat: sub_terms.pop(), smiles)
return
raise AssertionError(smiles)
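# Hedged usage sketch (not part of the original module): for a two-cut fragment
# the generator yields both attachment orders unless the fragment is symmetric.
def _demo_enumerate_permutations():
    both = list(enumerate_permutations("*CO*"))
    # -> [('12', '[*:1]CO[*:2]'), ('21', '[*:2]CO[*:1]')]
    symmetric = list(enumerate_permutations("*CO*", is_symmetric=True))
    # -> [('12', '[*:1]CO[*:2]')]
    return both, symmetric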
# The LHS only has "*", the RHS has "*:1", "*:2", ...
_weld_cache = {}
def weld_fragments(frag1, frag2):
key = (frag1, frag2)
value = _weld_cache.get(key, None)
if value is not None:
return value
# Also cache the lhs and rhs parts because they can be reused.
# (It's about 4% faster overall runtime on one test.)
frag1_closures = _weld_cache.get(frag1, None)
if frag1_closures is None:
frag1_closures = smiles_syntax.convert_wildcards_to_closures(frag1, [1, 2, 3])
_weld_cache[frag1] = frag1_closures
frag2_closures = _weld_cache.get(frag2, None)
if frag2_closures is None:
frag2_closures = smiles_syntax.convert_labeled_wildcards_to_closures(frag2)
_weld_cache[frag2] = frag2_closures
welded_mol = Chem.MolFromSmiles(frag1_closures + "." + frag2_closures)
assert welded_mol is not None, (frag1, frag2, frag1_closures + "." + frag2_closures)
welded_smiles = Chem.MolToSmiles(welded_mol, isomericSmiles=True)
if len(_weld_cache) > 3000:
_weld_cache.clear()
_weld_cache[frag1] = frag1_closures
_weld_cache[frag2] = frag2_closures
value = (welded_smiles, welded_mol)
_weld_cache[key] = value
return value
def _weld_and_filter(item):
frag_constant_smiles, frag_variable_smiles, substructure_pat, row = item
lhs, other_variable_smiles, envfp, envsmi, frequency = row
product_smiles, new_mol = weld_fragments(frag_constant_smiles, str(other_variable_smiles))
if substructure_pat is not None:
# The input SMARTS can contain an explict [H],
# which in SMARTS only matches explicit hydrogens,
# not implicit hydrogens. It's easier to make all
# of the hydrogens explicit than it is to adjust
# any explicit [H] terms in the query.
test_mol = Chem.AddHs(new_mol)
passed_substructure_test = test_mol.HasSubstructMatch(substructure_pat)
else:
passed_substructure_test = True
return (frag_constant_smiles, frag_variable_smiles, row, product_smiles, passed_substructure_test)
def make_transform(
db_connection, transform_fragments,
substructure_pat=None,
radius=0, min_pairs=0, min_variable_size=0, min_constant_size=0,
max_variable_size=9999,
pool=None,
cursor=None, is_symmetric=False):
if cursor is None:
cursor = get_cursor(db_connection)
assert radius in (0, 1, 2, 3, 4, 5)
# Map from the destination SMILES to the set of rule environments
# The RHS set contains (rule_id, rule_environment_id, is_reversed) tuples.
output_table = {"transformed_smi": [], "constant_smi": [], "original_frag": [], "new_frag": [],
"envsmi": [], "rule_freq": []}
# Hold the welded molecules in case I need them for a substructure search
# For each variable fragment (single, double, or triple cut) and
# for each environment, extract all rules from the DB that start
# with the given fragment and that has the same environment as the
# query fragment (for all environment levels). Consider only
# environments with radius >= the radius given as input argument.
to_weld = []
# This includes the possible fragments of hydrogens
for frag in transform_fragments:
## Note on terminology:
# constant = [*]Br.[*]c1ccccc1
# variable = c1ccc(-c2sc(-c3ccc([*])cc3)pc2[*])cc1
print("Processing fragment %r", frag)
# Check if the fragmentation is allowed
if min_variable_size and frag.variable_num_heavies < min_variable_size:
print(" The %d heavy atoms of variable %r is below the --min-variable-size of %d. Skipping fragment.",
frag.variable_num_heavies, frag.variable_smiles, min_variable_size)
continue
if frag.variable_num_heavies > max_variable_size:
print(" The %d heavy atoms of variable %r is above the --max-variable-size of %d. Skipping fragment.",
frag.variable_num_heavies, frag.variable_smiles, max_variable_size)
continue
if min_constant_size and frag.constant_num_heavies < min_constant_size:
print(" The %d heavy atoms of constant %r is below the --min-constant-size of %d. Skipping fragment.",
frag.constant_num_heavies, frag.constant_smiles, min_constant_size)
continue
# XXX TODO: handle 'constant_with_H_smiles'?
# In case of multiple cuts, permute the constant smiles to match the attachment order
if frag.num_cuts > 1:
constant_fragments = frag.constant_smiles.split(".")
new_constant_smiles = constant_fragments[int(frag.attachment_order[0])]
new_constant_smiles += "." + constant_fragments[int(frag.attachment_order[1])]
if frag.num_cuts == 3:
new_constant_smiles += "." + constant_fragments[int(frag.attachment_order[2])]
frag.constant_smiles = new_constant_smiles
# The variable SMILES contains unlabeled attachment points, while the
# rule_smiles in the database contains labeled attachment points.
# The fragment [*]CO[*] can potentially match [*:1]CO[*:2] or [*:2]CO[*:1],
# so I need to enumerate all n! possibilities and find possible matches.
query_possibilities = []
for permutation, permuted_variable_smiles in enumerate_permutations(frag.variable_smiles, is_symmetric):
permuted_variable_smiles_id = get_rule_smiles_id(permuted_variable_smiles, cursor=cursor)
if permuted_variable_smiles_id is not None:
print(" variable %r matches SMILES %r (id %d)",
frag.variable_smiles, permuted_variable_smiles, permuted_variable_smiles_id)
query_possibilities.append((permutation, permuted_variable_smiles, permuted_variable_smiles_id))
else:
print(" variable %r not found as SMILES %r",
frag.variable_smiles, permuted_variable_smiles)
if not query_possibilities:
print(" No matching rule SMILES found. Skipping fragment.")
continue
print(" Evaluating %d possible rule SMILES: %s",
len(query_possibilities), sorted(x[0] for x in query_possibilities))
# We now have a canonical variable part, and the assignment to the constant part.
# Get the constant fingerprints.
all_center_fps = environment.compute_constant_center_fingerprints_atFixRadii(
frag.constant_smiles, radius)
# For each possible way to represent the variable SMILES:
# Find all of the pairs which use the same SMILES id as the variable
# (The pairs are ordered so the matching SMILES is the 'from' side of the transform)
# The transformed SMILES goes from variable+constant -> dest_smiles+constant
# so weld the destination SMILES (smi2) with the constant
for permutation, permuted_variable_smiles, permuted_variable_smiles_id in query_possibilities:
print(" Evaluate constant %r with permutation %r against rules using SMILES %s (%d)",
frag.constant_smiles, permutation, permuted_variable_smiles, permuted_variable_smiles_id)
possible_envs = environment.get_all_possible_fingerprints(
all_center_fps, frag.variable_symmetry_class, permutation)
envs_ids = [get_fingerprint_id(env, cursor) for env in possible_envs]
rows = find_rule_environments_for_transform(
permuted_variable_smiles_id, envs_ids, min_pairs=min_pairs, cursor=cursor)
to_weld.extend((frag.constant_smiles, frag.variable_smiles, substructure_pat, row) for row in rows)
if pool is None:
results = _compat.imap(_weld_and_filter, to_weld)
else:
# A chunk size of 20 seems to maximize performance.
# Too small and there's extra pickling overhead. (Larger chunks share the same SMARTS pickle.)
# Too large and only one process might be used for all of the welding.
results = pool.imap(_weld_and_filter, to_weld, 20)
for frag_constant_smiles, frag_variable_smiles, row, product_smiles, passed_substructure_test in results:
lhs, other_variable_smiles, envfp, envsmi, frequency = row
if not passed_substructure_test:
print(" Skip rule %r + %r -> %r; does not contain --substructure",
frag_constant_smiles, str(other_variable_smiles), product_smiles)
continue
output_table["transformed_smi"].append(product_smiles)
output_table["constant_smi"].append(frag_constant_smiles)
output_table["original_frag"].append(frag_variable_smiles)
output_table["new_frag"].append(other_variable_smiles)
output_table["envsmi"].append(envsmi)
output_table["rule_freq"].append(frequency)
    df = pd.DataFrame.from_dict(output_table)
    return df
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from math import sqrt
from dramkit.gentools import power
from dramkit.gentools import isnull
from dramkit.gentools import cal_pct
from dramkit.gentools import x_div_y
from dramkit.gentools import check_l_allin_l0
from dramkit.gentools import get_update_kwargs
from dramkit.gentools import con_count_ignore
from dramkit.gentools import replace_repeat_func_iter
from dramkit.datetimetools import diff_days_date
from dramkit.logtools.utils_logger import logger_show
from dramkit.plottools.plot_common import plot_series
from dramkit.plottools.plot_common import plot_series_conlabel
#%%
def signal_merge(data, sig1_col, sig2_col, merge_type=1):
'''
    Merge two trading signals into one signal
    Parameters
    ----------
    data : pandas.DataFrame
        Input data; must contain the columns given by ``sig1_col`` and ``sig2_col``
    sig1_col, sig2_col : str
        Signal columns; a value of -1 means buy (go long), 1 means sell (go short)
    merge_type : int
        How the two signals are merged:
        - 1: a signal from either column counts as a valid trade signal
        - 2: trade signals are derived from the summed holdings implied by the two
          signals (the returned signal does not support reverse opening)
        - 3: a trade signal is generated only when the two signals agree in direction
          (the returned signal does not support reverse opening)
    :returns: `pd.Series` - the merged signal
'''
df = data.reindex(columns=[sig1_col, sig2_col])
df.rename(columns={sig1_col: 'sig1', sig2_col: 'sig2'},
inplace=True)
if merge_type == 1:
df['sig'] = df['sig1'] + df['sig2']
df['sig'] = df['sig'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 2:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 3:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['hold'] = df['hold'].apply(lambda x: 1 if x == 2 else \
(-1 if x == -2 else 0))
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
#%%
def cal_cost_add(hold_vol, hold_cost, add_vol, add_price):
'''
    | Compute the average holding cost after adding to a position
    | hold_vol is the holding volume before the add, hold_cost the average holding
      cost before the add, and add_vol the added volume
'''
holdCost = hold_vol * hold_cost
totCost = holdCost + add_vol * add_price
return totCost / (hold_vol + add_vol)
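# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): holding 300 units at
# an average cost of 10.0 and buying 100 more at 12.0 gives
# (300*10.0 + 100*12.0) / 400 = 10.5.
def _demo_cal_cost_add():
    return cal_cost_add(300, 10.0, 100, 12.0)  # -> 10.5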
def get_mean_cost(trade_records, dirt_col, price_col, vol_col):
'''
    Compute the holding cost at every step from a trade record
    Parameters
    ----------
    trade_records : pd.DataFrame
        Trade records; must contain the columns given by ``dirt_col``, ``price_col`` and `vol_col`
    dirt_col : str
        Trade direction column: 1 for buy (long), -1 for sell (short)
    price_col : str
        Traded price column
    vol_col : str
        Traded volume column
    :returns: `pd.DataFrame` - trade_records with the added columns 'holdVol', 'holdCost', 'meanCost'
'''
df = trade_records.copy()
ori_idx = df.index
df.index = range(0, df.shape[0])
vol_col_ = vol_col + '_'
df[vol_col_] = df[dirt_col] * df[vol_col]
df['holdVol'] = df[vol_col_].cumsum().round(4)
df.loc[df.index[0], 'holdCost'] = df[price_col].iloc[0] * df[vol_col_].iloc[0]
df.loc[df.index[0], 'meanCost'] = df[price_col].iloc[0]
for k in range(1, df.shape[0]):
holdVol_pre = df['holdVol'].iloc[k-1]
holdCost_pre = df['holdCost'].iloc[k-1]
holdVol = df['holdVol'].iloc[k]
tradeVol = df[vol_col_].iloc[k]
if tradeVol == 0:
holdCost, meanCost = holdCost_pre, df['meanCost'].iloc[k-1]
        elif holdVol == 0:  # position fully closed
            holdCost, meanCost = 0, 0
        elif holdVol_pre >= 0 and holdVol > holdVol_pre:  # open or add to a long position
            tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
            holdCost = holdCost_pre + tradeVal
            meanCost = holdCost / holdVol
        elif holdVol_pre >= 0 and holdVol > 0 and holdVol < holdVol_pre:  # reduce a long position
            meanCost = df['meanCost'].iloc[k-1]
            holdCost = meanCost * holdVol
        elif holdVol_pre >= 0 and holdVol < 0:  # close long and reverse into short
            meanCost = df[price_col].iloc[k]
            holdCost = holdVol * meanCost
        elif holdVol_pre <= 0 and holdVol < holdVol_pre:  # open or add to a short position
            tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
            holdCost = holdCost_pre + tradeVal
            meanCost = holdCost / holdVol
        elif holdVol_pre <= 0 and holdVol < 0 and holdVol > holdVol_pre:  # reduce a short position
            meanCost = df['meanCost'].iloc[k-1]
            holdCost = meanCost * holdVol
        elif holdVol_pre <= 0 and holdVol > 0:  # close short and reverse into long
            meanCost = df[price_col].iloc[k]
            holdCost = holdVol * meanCost
df.loc[df.index[k], 'holdCost'] = holdCost
df.loc[df.index[k], 'meanCost'] = meanCost
df.index = ori_idx
return df
#%%
def cal_gain_con_futures(price_open, price_now, n, player,
fee=0.1/100, lever=100,
n_future2target=0.001):
'''
    P&L calculation for perpetual futures, e.g. Huobi BTC contracts
    Parameters
    ----------
    price_open : float
        Opening price
    price_now : float
        Current price
    n : int
        Number of contracts
    player : str
        Short or long side
    fee : float
        Fee rate
    lever : int
        Leverage
    n_future2target : float
        Amount of underlying per contract
    Returns
    -------
    gain_lever : float
        Profit/loss amount
    gain_pct : float
        Profit/loss ratio
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open * n_future2target / lever
price_now_ = price_now * n_future2target / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
def cal_gain_con_futures2(price_open, price_now, n, player,
fee=0.1/100, lever=100):
'''
    P&L calculation for perpetual futures, e.g. Binance ETH contracts
    Parameters
    ----------
    price_open : float
        Opening price
    price_now : float
        Current price
    n : int
        Quantity (in units of the underlying)
    player : str
        Short or long side
    fee : float
        Fee rate
    lever : int
        Leverage
    Returns
    -------
    gain_lever : float
        Profit/loss amount
    gain_pct : float
        Profit/loss ratio
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open / lever
price_now_ = price_now / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
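# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): long 2 units of the
# underlying opened at 1500, marked at 1530, 0.1% fee, 100x leverage.
def _demo_cal_gain_con_futures2():
    gain, gain_pct = cal_gain_con_futures2(1500, 1530, 2, 'buy',
                                           fee=0.1/100, lever=100)
    return gain, gain_pct  # roughly 53.9 profit, ~1.8% on the margin employed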
#%%
def cal_expect_return(hit_prob, gain_loss_ratio):
    '''Expected return given the hit rate and the gain/loss ratio'''
return hit_prob*gain_loss_ratio - (1-hit_prob)
def cal_gain_pct_log(price_cost, price, pct_cost0=1):
'''
    | Compute the log return
    | price_cost is the cost (entry) price
    | price is the current price
    | pct_cost0 is the value returned when the cost price_cost is 0'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost == 0:
return pct_cost0
elif price_cost > 0:
return np.log(price) - np.log(price_cost)
else:
raise ValueError('price_cost必须大于等于0!')
def cal_gain_pct(price_cost, price, pct_cost0=1):
'''
    | Compute the percentage return
    | price_cost is the cost (entry) price
    | price is the current price
    | pct_cost0 is the value returned when the cost price_cost is 0
    Note
    ----
    By convention the long (rights) side carries a positive cost price_cost
    (e.g. bought at 100 -> price_cost=100) and the short (obligation) side a
    negative cost (e.g. sold at 100 -> price_cost=-100)
'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost > 0:
return price / price_cost - 1
elif price_cost < 0:
return 1 - price / price_cost
else:
return pct_cost0
def cal_gain_pcts(price_series, gain_type='pct',
pct_cost0=1, logger=None):
'''
    | Compute the per-period return of the asset value series price_series (`pd.Series`)
    | gain_type:
    |     'pct': ordinary percentage returns
    |     'log': log returns (not usable when price_series contains values <= 0)
    |     'dif': simple differences (for cumulative/additive net values)
    | pct_cost0 is the return value assigned when the cost is 0
'''
if (price_series <= 0).sum() > 0:
gain_type = 'pct'
logger_show('存在小于等于0的值,将用百分比收益率代替对数收益率!',
logger, 'warning')
if gain_type == 'pct':
df = pd.DataFrame({'price_now': price_series})
df['price_cost'] = df['price_now'].shift(1)
df['pct'] = df[['price_cost', 'price_now']].apply(lambda x:
cal_gain_pct(x['price_cost'], x['price_now'],
pct_cost0=pct_cost0), axis=1)
return df['pct']
elif gain_type == 'log':
return price_series.apply(np.log).diff()
elif gain_type == 'dif':
return price_series.diff()
else:
raise ValueError('未识别的`gain_type`,请检查!')
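# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module).
def _demo_gain_pcts():
    single = cal_gain_pct(100, 110)                # bought at 100, now 110 -> 0.10
    series = pd.Series([100.0, 110.0, 99.0, 105.0])
    pcts = cal_gain_pcts(series, gain_type='pct')  # per-period returns, first is NaN
    return single, pcts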
#%%
def cal_beta(values_target, values_base, gain_type='pct', pct_cost0=1):
'''
    | Compute the beta coefficient
    | values_target and values_base are the target and benchmark value series
    | gain_type and pct_cost0 are as in :func:`dramkit.fintools.utils_gains.cal_gain_pcts`
    | References:
    | https://www.joinquant.com/help/api/help#api:风险指标
    | https://blog.csdn.net/thfyshz/article/details/83443783
'''
values_target = pd.Series(values_target)
values_base = pd.Series(values_base)
pcts_target = cal_gain_pcts(values_target, gain_type=gain_type, pct_cost0=pct_cost0)
pcts_base = cal_gain_pcts(values_base, gain_type=gain_type, pct_cost0=pct_cost0)
pcts_target = pcts_target.iloc[1:]
pcts_base = pcts_base.iloc[1:]
return np.cov(pcts_target, pcts_base)[0][1] / np.var(pcts_base, ddof=1)
def cal_alpha_beta(values_target, values_base, r0=3.0/100, nn=252,
gain_type='pct', rtype='exp', pct_cost0=1, logger=None):
'''
    | Compute the alpha and beta coefficients
    | See :func:`cal_beta` and :func:`cal_returns_period` for the parameters
'''
r = cal_returns_period(values_target, gain_type=gain_type, rtype=rtype,
nn=nn, pct_cost0=pct_cost0, logger=logger)
r_base = cal_returns_period(values_base, gain_type=gain_type, rtype=rtype,
nn=nn, pct_cost0=pct_cost0, logger=logger)
beta = cal_beta(values_target, values_base,
gain_type=gain_type, pct_cost0=pct_cost0)
return r - (r0 + beta*(r_base-r0)), beta
def cal_alpha_by_beta_and_r(r, r_base, beta, r0=3.0/100):
'''
    | Compute alpha from annualised returns and beta
    | r is the strategy's annualised return, r_base the benchmark's annualised
      return, r0 the risk-free rate and beta the strategy's beta
'''
return r - (r0 + beta*(r_base-r0))
#%%
def cal_return_period_by_gain_pct(gain_pct, n, nn=250, rtype='exp',
gain_pct_type='pct'):
'''
    Convert a final (total) return gain_pct into a periodised return
    Parameters
    ----------
    gain_pct : float
        The given final (total) return
    n : int
        Number of periods over which gain_pct was earned
    nn : int
        | Number of periods in one full cycle, e.g.
        | daily series, annualised return: nn is usually 252 (trading days per year)
        | daily series, monthly return: nn is usually 21 (trading days per month)
        | minute series, annualised return: nn is usually 252*240 (trading minutes per year)
    rtype : str
        Periodisation method: compound ('exp') or average ('mean')
    gain_pct_type : str
        | How the final return gain_pct was computed, either 'pct' or 'log'
        | Defaults to percentage returns; with log returns only the average
          method can be used for periodisation, not the compound method
    .. hint::
        | Percentage returns:
        |     compound (exponential): 1 + R = (1 + r) ^ (n / nn)  -->  r = (1 + R) ^ (nn / n) - 1
        |     simple (average):       1 + R = 1 + r * (n / nn)    -->  r = R * nn / n
        | Log returns:
        |     R = r * (n / nn)  -->  r = R * nn / n (annualising log returns can only use the average method)
    Returns
    -------
    r : float
        The periodised return; its period is determined by nn
    References
    ----------
https://zhuanlan.zhihu.com/p/112211063
'''
if gain_pct_type in ['log', 'ln', 'lg']:
        rtype = 'mean'  # log returns can only be periodised with the average method
if rtype == 'exp':
r = power(1 + gain_pct, nn / n) - 1
elif rtype == 'mean':
r = nn * gain_pct / n
return r
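# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): annualising a 12%
# total return earned over 63 trading days (nn=252 periods per year).
def _demo_annualise():
    r_exp = cal_return_period_by_gain_pct(0.12, 63, nn=252, rtype='exp')
    # compound: (1 + 0.12) ** (252 / 63) - 1 ~= 0.574
    r_mean = cal_return_period_by_gain_pct(0.12, 63, nn=252, rtype='mean')
    # simple: 0.12 * 252 / 63 = 0.48
    return r_exp, r_mean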
def cal_ext_return_period_by_gain_pct(gain_pct, gain_pct_base, n,
nn=250, rtype='exp',
gain_pct_type='pct',
ext_type=1):
'''
    | Compute the periodised excess return from a return and a benchmark return
    | rtype is the periodisation method: 'exp', 'mean' or 'log'
    | ext_type selects how the excess return is computed:
    |     1: periodise each return separately, then take the difference
    |     2: take the difference first, then periodise it
    |     3: recover both actual net values, then periodise the return relative to the benchmark net value
    | Other parameters are as in :func:`cal_return_period_by_gain_pct`
    | Reference:
| https://xueqiu.com/1930958059/167803003?page=1
'''
if rtype == 'log':
ext_type = 3
if ext_type == 1:
p1 = cal_return_period_by_gain_pct(gain_pct, n, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
p2 = cal_return_period_by_gain_pct(gain_pct_base, n, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
return p1 - p2
elif ext_type == 2:
p = cal_return_period_by_gain_pct(gain_pct-gain_pct_base, n, nn=nn,
rtype=rtype, gain_pct_type=gain_pct_type)
return p
if ext_type == 3:
if gain_pct_type in ['log', 'ln', 'lg']:
p = np.exp(gain_pct)
p_base = np.exp(gain_pct_base)
elif gain_pct_type == 'pct':
p = 1 + gain_pct
p_base = 1 + gain_pct_base
if rtype == 'exp':
return power(p / p_base, nn / n) - 1
elif rtype == 'mean':
return (p / p_base - 1) * nn / n
elif rtype == 'log':
return (np.log(p) - np.log(p_base)) * nn / n
else:
raise ValueError('未识别的ext_type参数,请检查!')
def cal_ext_return_period(values, values_base, gain_type='pct', rtype='exp',
nn=250, pct_cost0=1, ext_type=1, logger=None):
'''
    | Compute the excess return from a price/value series values and a benchmark series values_base
    | pct_cost0 is as in :func:`cal_gain_pct` and :func:`cal_gain_pct_log`
    | Other parameters are as in :func:`cal_ext_return_period_by_gain_pct`
'''
values, values_base = np.array(values), np.array(values_base)
n1, n0 = len(values), len(values_base)
if n1 != n0:
raise ValueError('两个序列长度不相等,请检查!')
if gain_type == 'log':
if (values[0] <= 0 or values_base[-1] <= 0) or \
(values_base[0] <= 0 or values_base[-1] <= 0):
logger_show('发现开始值或结束值为负,用百分比收益率代替对数收益率!',
logger, 'warning')
p1 = cal_gain_pct(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct(values_base[0], values_base[-1], pct_cost0=pct_cost0)
gain_pct_type = 'pct'
else:
p1 = cal_gain_pct_log(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct_log(values_base[0], values_base[-1], pct_cost0=pct_cost0)
            rtype = 'mean'  # annualising log returns can only use the average method
gain_pct_type = 'log'
elif gain_type == 'pct':
p1 = cal_gain_pct(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct(values_base[0], values_base[-1], pct_cost0=pct_cost0)
gain_pct_type = 'pct'
elif gain_type == 'dif':
p1 = values[-1] - values[0]
        p0 = values_base[-1] - values_base[0]
rtype = 'mean'
gain_pct_type = 'pct'
else:
raise ValueError('未识别的`gain_gype`,请检查!')
extr = cal_ext_return_period_by_gain_pct(p1, p0, n1, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type,
ext_type=ext_type)
return extr
def cal_returns_period(price_series, gain_type='pct', rtype='exp',
nn=252, pct_cost0=1, logger=None):
'''
    Compute the periodised (e.g. annualised) return
    Parameters
    ----------
    price_series : pd.Series, np.array, list
        Asset value series (log returns cannot be used if it contains negative values)
    gain_type : str
        | Return calculation method
        | 'pct': ordinary percentage returns
        | 'log': log returns (not usable when price_series contains values <= 0)
        | 'dif': simple differences (for cumulative/additive net values)
    rtype : str
        | Periodisation method: compound ('exp') or average ('mean')
        | (annualising log returns can only use the average method)
    nn : int
        | Number of periods in one full cycle, e.g.
        | daily series, annualised return: nn is usually 252 (trading days per year)
        | daily series, monthly return: nn is usually 21 (trading days per month)
        | minute series, annualised return: nn is usually 252*240 (trading minutes per year)
    pct_cost0 : float
        Return value assigned when the cost is 0, see :func:`cal_gain_pct` and :func:`cal_gain_pct_log`
    Returns
    -------
    r : float
        The periodised return; its period is determined by nn
See Also
--------
:func:`cal_return_period_by_gain_pct`
'''
price_series = np.array(price_series)
n_ = len(price_series)
if gain_type == 'log':
if price_series[0] <= 0 or price_series[-1] <= 0:
logger_show('发现开始值或结束值为负,用百分比收益率代替对数收益率!',
logger, 'warning')
gain_pct = cal_gain_pct(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
gain_pct_type = 'pct'
else:
gain_pct = cal_gain_pct_log(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
            rtype = 'mean'  # annualising log returns can only use the average method
gain_pct_type = 'log'
elif gain_type == 'pct':
gain_pct = cal_gain_pct(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
gain_pct_type = 'pct'
elif gain_type == 'dif':
gain_pct = price_series[-1] - price_series[0]
gain_pct_type = 'pct'
rtype = 'mean'
else:
raise ValueError('未识别的`gain_type`,请检查!')
r = cal_return_period_by_gain_pct(gain_pct, n_, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
return r
def cal_returns_period_mean(price_series, gain_type='pct', nn=252,
pct_cost0=1, logger=None):
'''
    | Compute the periodised return by directly averaging the per-period returns
    | price_series is the asset value series, pd.Series, list or np.array
      (log returns cannot be used if it contains negative values)
    | gain_type and pct_cost0 are as in :func:`cal_gain_pcts`
    | nn is the number of periods in one full cycle, e.g.
    |     daily series, annualised return: nn is usually 252 (trading days per year)
    |     daily series, monthly return: nn is usually 21 (trading days per month)
    |     minute series, annualised return: nn is usually 252*240 (trading minutes per year)
'''
price_series = pd.Series(price_series)
price_series.name = 'series'
df = pd.DataFrame(price_series)
    # per-period returns
df['gain_pct'] = cal_gain_pcts(price_series, gain_type=gain_type,
pct_cost0=pct_cost0, logger=logger)
return nn * df['gain_pct'].mean()
def cal_volatility(price_series, gain_type='pct', nn=252,
pct_cost0=0, logger=None):
'''
    | Compute the periodised volatility of the price series price_series
    | price_series is the asset value series, pd.Series, list or np.array
      (log returns cannot be used if it contains negative values)
    | gain_type and pct_cost0 are as in :func:`cal_gain_pcts`
    | nn is the number of periods in one full cycle, e.g.
    |     daily series, annualised volatility: nn is usually 252 (trading days per year)
    |     daily series, monthly volatility: nn is usually 21 (trading days per month)
    |     minute series, annualised volatility: nn is usually 252*240 (trading minutes per year)
    | Returns the return volatility; its period is determined by nn
    | Reference:
    | https://wiki.mbalib.com/wiki/%E5%8E%86%E5%8F%B2%E6%B3%A2%E5%8A%A8%E7%8E%87
'''
price_series = pd.Series(price_series)
price_series.name = 'series'
df = pd.DataFrame(price_series)
    # per-period returns
df['gain_pct'] = cal_gain_pcts(price_series, gain_type=gain_type,
pct_cost0=pct_cost0, logger=logger)
    # volatility
    r = df['gain_pct'].std(ddof=1) * sqrt(nn)  # ddof=1: use n-1 in the denominator of the std
return r
def cal_sharpe(values, r=3/100, nn=252, gain_type='pct',
ann_rtype='exp', pct_cost0=1, logger=None):
'''
    | Compute the Sharpe ratio: first the expected return and the volatility, then the Sharpe
    | r is the risk-free rate
    | For the remaining parameters see :func:`cal_returns_period` and :func:`cal_volatility`
'''
return_ann = cal_returns_period(values, gain_type=gain_type,
rtype=ann_rtype, nn=nn,
pct_cost0=pct_cost0, logger=logger)
volatility = cal_volatility(values, gain_type=gain_type,
nn=nn, pct_cost0=pct_cost0,
logger=logger)
sharpe = (return_ann - r) / volatility
return sharpe
def cal_sharpe2(values, r=3/100, nn=252, gain_type='pct',
pct_cost0=1, logger=None):
'''
    Compute the Sharpe ratio
    Parameters
    ----------
    values : pd.Series
        Asset value series
    r : float
        Risk-free rate
    nn : int
        | Number of values periods contained in the period of the risk-free rate r, e.g.
        | daily values and an annual risk-free rate r: nn is usually 252 (trading days per year)
        | daily values and a monthly risk-free rate r: nn is usually 21 (trading days per month)
        | minute values and an annual risk-free rate r: nn is usually 252*240 (trading minutes per year)
    gain_type : str
        | Return calculation method
        | 'pct': ordinary percentage returns
        | 'log': log returns (not usable when the series contains values <= 0)
        | 'dif': simple differences (for cumulative/additive net values)
    References
    ----------
    - https://www.joinquant.com/help/api/help?name=api#风险指标
    - https://www.zhihu.com/question/348938505/answer/1848898074
    - https://blog.csdn.net/thfyshz/article/details/83443783
    - https://www.jianshu.com/p/363aa2dd3441 (the Sharpe calculation there looks questionable)
'''
df = pd.DataFrame({'values': values})
df['gains'] = cal_gain_pcts(df['values'], gain_type=gain_type,
                                pct_cost0=pct_cost0, logger=logger)  # per-period return series
    df['gains_ex'] = df['gains'] - r/nn  # excess return over the per-period risk-free rate
return sqrt(nn) * df['gains_ex'].mean() / df['gains_ex'].std()
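# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a synthetic daily
# net-value curve with ~0.05% mean daily return and ~1% daily volatility.
def _demo_vol_and_sharpe():
    np.random.seed(0)
    rets = np.random.normal(0.0005, 0.01, 252)
    values = pd.Series((1 + rets).cumprod())
    vol = cal_volatility(values, gain_type='pct', nn=252)           # annualised volatility
    sharpe = cal_sharpe2(values, r=3/100, nn=252, gain_type='pct')  # annualised Sharpe
    return vol, sharpe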
def get_maxdown(values, return_idx=True, abs_val=False):
'''
    Maximum drawdown calculation
    Parameters
    ----------
    values : list, np.array, pd.Series
        Asset value series
    return_idx : bool
        Whether to return the start/end positions of the max-drawdown interval;
        if False the positions are returned as None
    abs_val : bool
        If True, the drawdown is measured as an absolute loss instead of a loss ratio
    Returns
    -------
    maxdown : float
        Maximum drawdown magnitude (positive value)
    start_end_idxs : tuple
        Start/end positions of the max drawdown (start_idx, end_idx);
        (None, None) if return_idx is False
    References
    ----------
    - https://www.cnblogs.com/xunziji/p/6760019.html
    - https://blog.csdn.net/changyan_123/article/details/80994170
'''
n = len(values)
data = np.array(values)
if not return_idx:
maxdown, tmp_max = 0, -np.inf
for k in range(1, n):
tmp_max = max(tmp_max, data[k-1])
if not abs_val:
maxdown = min(maxdown, data[k] / tmp_max - 1)
else:
maxdown = min(maxdown, data[k] - tmp_max)
start_end_idxs = (None, None)
return -maxdown, start_end_idxs
else:
Cmax, Cmax_idxs = np.zeros(n-1), [0 for _ in range(n-1)]
tmp_max = -np.inf
tmp_idx = 0
for k in range(1, n):
if data[k-1] > tmp_max:
tmp_max = data[k-1]
tmp_idx = k-1
Cmax[k-1] = tmp_max
Cmax_idxs[k-1] = tmp_idx
maxdown = 0.0
start_idx, end_idx = 0, 0
for k in range(1, n):
if not abs_val:
tmp = data[k] / Cmax[k-1] - 1
else:
tmp = data[k] - Cmax[k-1]
if tmp < maxdown:
maxdown = tmp
start_idx, end_idx = Cmax_idxs[k-1], k
start_end_idxs = (start_idx, end_idx)
return -maxdown, start_end_idxs
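# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the largest
# peak-to-trough loss below runs from 120 (index 1) to 90 (index 2),
# i.e. 1 - 90/120 = 0.25.
def _demo_get_maxdown():
    values = [100, 120, 90, 95, 130, 110]
    maxdown, (start_idx, end_idx) = get_maxdown(values)
    return maxdown, start_idx, end_idx  # -> (0.25, 1, 2)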
def get_maxdown_all(values, abs_val=False):
    '''Compute the running maximum drawdown at every point of the interval'''
n = len(values)
data = np.array(values)
maxdown_all= [0.0]
maxdown, tmp_max = 0, -np.inf
for k in range(1, n):
tmp_max = max(tmp_max, data[k-1])
if not abs_val:
maxdown = min(maxdown, data[k] / tmp_max - 1)
maxdown_all.append(maxdown)
else:
maxdown = min(maxdown, data[k] - tmp_max)
maxdown_all.append(maxdown)
return np.array(maxdown_all)
def get_maxdown_dy(values, abs_val=False):
    '''Compute the dynamic drawdown (drawdown from the running high up to each point)'''
data = pd.DataFrame({'values': values})
data['cummax'] = data['values'].cummax()
if not abs_val:
data['dyMaxDown'] = data['values'] / data['cummax'] - 1
else:
data['dyMaxDown'] = data['values'] - data['cummax']
return np.array(data['dyMaxDown'])
def get_maxup(values, return_idx=True, abs_val=False):
'''
    Maximum run-up calculation (the counterpart of the maximum drawdown
    :func:`dramkit.fintools.utils_gains.get_maxdown`, i.e. the max drawdown of a short position)
'''
n = len(values)
data = np.array(values)
if not return_idx:
maxup, tmp_min = 0, np.inf
for k in range(1, n):
tmp_min = min(tmp_min, data[k-1])
if not abs_val:
maxup = max(maxup, data[k] / tmp_min - 1)
else:
maxup = max(maxup, data[k] - tmp_min)
return maxup, (None, None)
else:
Cmin, Cmin_idxs = np.zeros(n-1), [0 for _ in range(n-1)]
tmp_min = np.inf
tmp_idx = 0
for k in range(1, n):
if data[k-1] < tmp_min:
tmp_min = data[k-1]
tmp_idx = k-1
Cmin[k-1] = tmp_min
Cmin_idxs[k-1] = tmp_idx
maxup = 0.0
start_idx, end_idx = 0, 0
for k in range(1, n):
if not abs_val:
tmp = data[k] / Cmin[k-1] - 1
else:
tmp = data[k] - Cmin[k-1]
if tmp > maxup:
maxup = tmp
start_idx, end_idx = Cmin_idxs[k-1], k
return maxup, (start_idx, end_idx)
def get_maxdown_pd(series, abs_val=False):
'''
    Maximum drawdown computed with pandas
    Parameters
    ----------
    series : pd.Series, np.array, list
        Asset value series
    abs_val : bool
        If True, the drawdown is measured as an absolute loss instead of a loss ratio
    Returns
    -------
    maxdown : float
        Maximum drawdown magnitude (positive value)
    start_end_idxs : tuple
        Start/end index labels of the max drawdown: (start_idx, end_idx)
    start_end_iloc : tuple
        Start/end integer positions of the max drawdown: (start_iloc, end_iloc)
'''
df = pd.DataFrame(series)
df.columns = ['val']
df['idx'] = range(0, df.shape[0])
df['Cmax'] = df['val'].cummax()
if not abs_val:
df['maxdown_now'] = df['val'] / df['Cmax'] - 1
else:
df['maxdown_now'] = df['val'] - df['Cmax']
maxdown = -df['maxdown_now'].min()
end_iloc = df['maxdown_now'].argmin()
end_idx = df.index[end_iloc]
start_idx = df[df['val'] == df.loc[df.index[end_iloc], 'Cmax']].index[0]
start_iloc = df[df['val'] == df.loc[df.index[end_iloc], 'Cmax']]['idx'][0]
start_end_idxs = (start_idx, end_idx)
start_end_iloc = (start_iloc, end_iloc)
return maxdown, start_end_idxs, start_end_iloc
def cal_n_period_1year(df, col_date='date', n1year=252):
    '''Estimate the number of trading periods per year from the trade records and their date column'''
days = diff_days_date(df[col_date].max(), df[col_date].min())
days_trade = days * (n1year / 365)
nPerTradeDay = df.shape[0] / days_trade
n_period = int(nPerTradeDay * n1year)
return n_period
#%%
def get_netval_prod(pct_series):
'''
    Cumulative-product net value; pct_series (`pd.Series`) is the return series (note: in percent)
'''
return (1 + pct_series / 100).cumprod()
def get_netval_sum(pct_series):
'''
    Cumulative-sum net value; pct_series (`pd.Series`) is the return series (note: in percent)
'''
return (pct_series / 100).cumsum() + 1
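# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): note the unit of
# pct_series is percent, not a fraction.
def _demo_netval():
    pcts = pd.Series([1.0, -0.5, 2.0])   # +1%, -0.5%, +2%
    nv_prod = get_netval_prod(pcts)      # compounded net value
    nv_sum = get_netval_sum(pcts)        # additive net value
    return nv_prod, nv_sum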
def cal_pct_by_cost_gain(cost, gain, vcost0=1):
'''
    Compute the profit/loss percentage for a cost of cost and a profit of gain
    - vcost0 is the value returned when cost is 0 and gain is positive; its sign is flipped when gain is negative
'''
if isnull(cost) or isnull(gain):
return np.nan
if cost == 0:
if gain == 0:
return 0
elif gain > 0:
return vcost0
elif gain < 0:
return -vcost0
elif cost > 0:
return gain / cost
elif cost < 0:
return -gain / cost
def get_gains_act(df_settle, act_gain_pct_method=2):
'''
    | Compute the actual total P&L (%) from deposit/withdrawal and total-asset records
    | df_settle must contain the columns ['转入', '转出', '资产总值']
      (deposits, withdrawals, total asset value)
    | The returned df contains the columns ['累计转入', '累计转出', '累计净流入',
      '累计总值', '累计盈亏', '实际总盈亏%']
'''
assert act_gain_pct_method in [1, 2, 3]
df = df_settle.reindex(columns=['转入', '转出', '资产总值'])
df['累计转入'] = df['转入'].cumsum()
df['累计转出'] = df['转出'].cumsum()
df['累计净流入'] = df['累计转入'] - df['累计转出']
df['累计总值'] = df['资产总值'] + df['累计转出']
df['累计盈亏'] = df['资产总值'] - df['累计净流入']
if act_gain_pct_method == 1:
df['实际总盈亏%'] = 100* df[['累计转入', '累计总值']].apply(lambda x:
cal_pct(x['累计转入'], x['累计总值']), axis=1)
elif act_gain_pct_method == 3:
df['实际总盈亏%'] = 100 * df[['累计净流入', '累计盈亏']].apply(lambda x:
cal_pct_by_cost_gain(x['累计净流入'], x['累计盈亏']), axis=1)
elif act_gain_pct_method == 2:
df['累计净流入_pre'] = df['累计净流入'].shift(1, fill_value=0)
df['累计投入'] = df['转入'] + df['累计净流入_pre']
df['实际总盈亏%'] = 100 * df[['累计投入', '累计盈亏']].apply(lambda x:
cal_pct_by_cost_gain(x['累计投入'], x['累计盈亏']), axis=1)
df = df.reindex(columns=['累计转入', '累计转出', '累计净流入',
'累计总值', '累计盈亏', '实际总盈亏%'])
return df
def get_fundnet(df_settle, when='before'):
'''
    Compute the net asset value (fund-NAV method) from deposit/withdrawal and total-asset records
    Parameters
    ----------
    df_settle : pd.DataFrame
        Must contain the three columns ['转入', '转出', '资产总值']
        (deposits, withdrawals, total asset value)
    when : str
        | Specifies which NAV is used to convert the current period's deposits/withdrawals into shares:
        | 'before': flows happen before that day's NAV settlement (shares are priced at the previous NAV)
        | 'after': flows happen after that day's NAV settlement (shares are priced at the settled NAV)
        | None: use 'before' when deposits exceed withdrawals and 'after' otherwise
        | 'when': taken from a 'when' column in df_settle whose values must be 'before' or 'after'
    :returns: `pd.DataFrame` - with the three columns ['新增份额', '份额', '净值'] (new shares, shares, NAV)
'''
assert when in [None, 'before', 'after', 'when']
if when != 'when':
df = df_settle.reindex(columns=['转入', '转出', '资产总值'])
if when is None:
df['when'] = df[['转入', '转出']].apply(lambda x:
'before' if x['转入'] >= x['转出'] else 'after', axis=1)
else:
df['when'] = when
else:
df = df_settle.reindex(columns=['转入', '转出', '资产总值', when])
assert check_l_allin_l0(df['when'].tolist(), ['before', 'after'])
ori_index = df.index
df.reset_index(drop=True, inplace=True)
df['净流入'] = df['转入'] - df['转出']
df['份额'] = np.nan
df['净值'] = np.nan
for k in range(0, df.shape[0]):
if k == 0:
df.loc[df.index[k], '新增份额'] = df.loc[df.index[k], '净流入']
df.loc[df.index[k], '份额'] = df.loc[df.index[k], '新增份额']
df.loc[df.index[k], '净值'] = x_div_y(df.loc[df.index[k], '资产总值'],
df.loc[df.index[k], '份额'], v_y0=1, v_xy0=1)
else:
when = df.loc[df.index[k], 'when']
if when == 'before':
df.loc[df.index[k], '新增份额'] = df.loc[df.index[k], '净流入'] / \
df.loc[df.index[k-1], '净值']
df.loc[df.index[k], '份额'] = df.loc[df.index[k-1], '份额'] + \
df.loc[df.index[k], '新增份额']
df.loc[df.index[k], '净值'] = x_div_y(df.loc[df.index[k], '资产总值'],
df.loc[df.index[k], '份额'], v_y0=1, v_xy0=1)
else:
total = df.loc[df.index[k], '资产总值'] - df.loc[df.index[k], '净流入']
df.loc[df.index[k], '净值'] = x_div_y(total,
df.loc[df.index[k-1], '份额'], v_y0=1, v_xy0=1)
df.loc[df.index[k], '新增份额'] = df.loc[df.index[k], '净流入'] / \
df.loc[df.index[k], '净值']
df.loc[df.index[k], '份额'] = df.loc[df.index[k-1], '份额'] + \
df.loc[df.index[k], '新增份额']
df.index = ori_index
return df.reindex(columns=['新增份额', '份额', '净值'])
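# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): three settlement
# records (deposit 100k, deposit 50k, withdraw 30k) with the Chinese column
# names the functions above expect (转入 = deposit, 转出 = withdrawal,
# 资产总值 = total asset value).
def _demo_fundnet_and_act():
    df_settle = pd.DataFrame({
        '转入': [100000, 50000, 0],
        '转出': [0, 0, 30000],
        '资产总值': [101000, 154000, 125000],
    })
    nav = get_fundnet(df_settle, when='before')   # 新增份额 / 份额 / 净值
    act = get_gains_act(df_settle)                # includes 实际总盈亏%
    return nav, act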
def get_gains(df_settle, gain_types=['act', 'fundnet'], when=None,
act_gain_pct_method=2):
'''
    | P&L statistics of several types
    | gain_types selects the cumulative-return methods, any of:
    | ['act' actual total P&L, 'prod' cumulative product, 'sum' cumulative sum, 'fundnet' fund-NAV method]
    | Note:
    | the 'prod' and 'sum' methods require a '盈亏%' (P&L %) column in df_settle
    | the 'act' and 'fundnet' methods require the ['转入', '转出', '资产总值'] columns in df_settle
'''
df_gain = df_settle.copy()
ori_index = df_gain.index
df_gain.reset_index(drop=True, inplace=True)
if 'act' in gain_types:
cols = ['转入', '转出', '资产总值']
if any([x not in df_settle.columns for x in cols]):
raise ValueError('计算实际盈亏要求包含[`转入`, `转出`, `资产总值`]列!')
df_act = get_gains_act(df_gain, act_gain_pct_method=act_gain_pct_method)
df_gain = pd.merge(df_gain, df_act, how='left', left_index=True,
right_index=True)
if 'prod' in gain_types:
if not '盈亏%' in df_settle.columns:
raise ValueError('累乘法要求包含`盈亏%`列!')
df_gain['累乘净值'] = get_netval_prod(df_gain['盈亏%'])
if 'sum' in gain_types:
if not '盈亏%' in df_settle.columns:
raise ValueError('累加法要求包含`盈亏%`列!')
df_gain['累加净值'] = get_netval_sum(df_gain['盈亏%'])
if 'fundnet' in gain_types:
cols = ['转入', '转出', '资产总值']
if any([x not in df_settle.columns for x in cols]):
raise ValueError('基金净值法要求包含[`转入`, `转出`, `资产总值`]列!')
df_net = get_fundnet(df_gain, when=when)
df_gain = pd.merge(df_gain, df_net, how='left', left_index=True,
right_index=True)
df_gain.index = ori_index
return df_gain
def plot_gain_act(df_gain, time_col='日期', n=None, **kwargs_plot):
'''
    | Plot the actual total P&L curve
    | df_gain must contain the columns [time_col, '实际总盈亏%']
    | n sets the number of periods to plot
    | \*\*kwargs_plot are parameters accepted by plot_series
'''
n = df_gain.shape[0] if n is None or n < 1 or n > df_gain.shape[0] else n
df = df_gain.reindex(columns=[time_col, '实际总盈亏%'])
if n == df_gain.shape[0]:
df.sort_values(time_col, ascending=True, inplace=True)
tmp = pd.DataFrame(columns=['日期', '实际总盈亏%'])
tmp.loc['tmp'] = ['start', 0]
df = pd.concat((tmp, df), axis=0)
else:
df = df.sort_values(time_col, ascending=True).iloc[-n-1:, :]
df.set_index(time_col, inplace=True)
if not 'title' in kwargs_plot:
if n == df_gain.shape[0]:
kwargs_plot['title'] = '账户实际总盈亏(%)走势'
else:
kwargs_plot['title'] = '账户近{}个交易日实际总盈亏(%)走势'.format(n)
plot_series(df, {'实际总盈亏%': '-ro'}, **kwargs_plot)
def plot_gain_prod(df_gain, time_col='日期', n=None, show_gain=True,
**kwargs_plot):
'''
    | Plot the P&L net-value curve
    | df_gain must contain the columns [time_col, '盈亏%']
    | n sets the number of periods to plot
    | \*\*kwargs are parameters accepted by plot_series
'''
n = df_gain.shape[0] if n is None or n < 1 or n > df_gain.shape[0] else n
df = df_gain.reindex(columns=[time_col, '盈亏%'])
if n >= df.shape[0]:
df.sort_values(time_col, ascending=True, inplace=True)
tmp = pd.DataFrame(columns=[time_col, '盈亏%'])
tmp.loc['tmp'] = ['start', 0]
df = pd.concat((tmp, df), axis=0)
else:
df = df.sort_values(time_col, ascending=True).iloc[-n-1:, :]
df.set_index(time_col, inplace=True)
df.loc[df.index[0], '盈亏%'] = 0
df['净值'] = (1 + df['盈亏%'] / 100).cumprod()
gain_pct = round(100 * (df['净值'].iloc[-1]-1), 2)
if not 'title' in kwargs_plot:
if n == df_gain.shape[0]:
kwargs_plot['title'] = '账户净值曲线\n(收益率: {}%)'.format(gain_pct) \
if show_gain else '账户净值曲线'
else:
kwargs_plot['title'] = \
'账户近{}个交易日净值曲线\n(收益率: {}%)'.format(n, gain_pct) \
if show_gain else '账户近{}个交易日净值曲线'.format(n)
plot_series(df, {'净值': '-ro'}, **kwargs_plot)
#%%
def cal_sig_gains(data, sig_col, sig_type=1, shift_lag=0,
col_price='close', col_price_buy='close',
col_price_sel='close', settle_after_act=False,
func_vol_add='base_1', func_vol_sub='base_1',
func_vol_stoploss='hold_1', func_vol_stopgain='hold_1',
stop_no_same=True, ignore_no_stop=False,
hold_buy_max=None, hold_sel_max=None,
limit_min_vol=100, base_money=200000, base_vol=None,
init_cash=0.0, fee=1.5/1000, sos_money=1000,
max_loss=None, max_gain=None, max_down=None,
add_loss_pct=None, add_gain_pct=None,
stop_sig_order='both', add_sig_order='offset',
force_final0='settle', del_begin0=True,
gap_repeat=False, nshow=None, logger=None):
'''
统计信号收益情况(没考虑杠杆,适用A股)
Note
----
仅考虑市场价格为正,不考虑市场价格为负值的极端情况
.. caution::
若报错,检查数据中是否存在无效值等
Parameters
----------
data : pd.DataFrame
| 行情数据,须包含 ``sig_col``, ``col_price``, ``col_price_buy``,
``col_price_sel`` 指定的列。
| ``col_price`` 为结算价格列;``col_price_buy`` 和 ``col_price_sel``
分别为做多(买入)和做空(卖出)操作的价格列。
.. note::
当结算价格的出现时间晚于操作价格时 ``settle_after_act``
应设置为True,否则 ``settle_after_act`` 设置为False。
| ``sig_col`` 列为信号列,其值规则应满足:
| - 当 ``sig_type`` =1时, ``sig_col`` 列的值只能包含-1、1和0,
其中1为做空(卖出)信号,-1为做多(买入)信号,0为不操作
| - 当 ``sig_type`` =2时, ``sig_col`` 列的值为正|负整数或0,
其中正整数表示买入(做多)交易量,负整数表示卖出(做空)交易量,0表示不交易
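        | 例如: ``sig_type`` =1时 ``sig_col`` 取值形如[0, -1, 0, 0, 1, 0, ...];
          ``sig_type`` =2时取值形如[0, 200, 0, 0, -100, 0, ...](均为假设数值,仅作示意)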
func_vol_add : str, function
| 自定义开仓/加仓操作时的交易量函数,其输入和输出格式应为:
| def func_vol_add(base_vol, holdVol, cash, Price):
| # Parameters:
| # base_vol:底仓量
| # holdVol:当前持仓量
| # cash: 可用现金
| # Price: 交易价格
| # Returns:
| # tradeVol:计划交易量
| ......
| return tradeVol
| - 当 ``func_vol_add`` 指定为'base_x'时,使用预定义函数 ``get_AddTradeVol_baseX`` ,
其交易计划为:开底仓的x倍
| - 当 ``func_vol_add`` 指定为'hold_x'时,使用预定义函数 ``get_AddTradeVol_holdX`` ,
其交易计划为:无持仓时开底仓,有持仓时开持仓的x倍
| - 当 ``func_vol_add`` 指定为'all'时,使用预定义函数 ``get_AddTradeVol_all`` ,
其交易计划为:以账户当前可用资金为限额开全仓
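        | 例如,自定义"无持仓开底仓、有持仓时加持仓量一半"的交易量函数(仅作示意,非预定义函数):
        | def my_func_vol_add(base_vol, holdVol, cash, Price):
        |     return base_vol if holdVol == 0 else int(abs(holdVol) / 2)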
func_vol_sub : str, function
| 自定义平仓/减仓操作时的交易量函数,其输入和输出格式应为:
| def func_vol_sub_stop(base_vol, holdVol, cash, Price, holdCost):
| # Parameters:
| # base_vol:底仓量
| # holdVol:当前持仓量(正负号表示持仓方向)
| # cash: 可用现金(不包含平|减仓释放的资金)
| # Price: 交易价格
| # holdCost: 当前持仓总成本(用于计算平|减仓释放资金量)
| # Returns:
| # tradeVol:计划交易量
| ......
| return tradeVol
| - 当指定为'base_x'时,使用预定义函数 ``get_SubStopTradeVol_baseX`` ,
其交易计划为:减底仓的x倍(若超过了持仓量相当于平仓后反向开仓)
| - 当指定为'hold_x'时,使用预定义函数 ``get_SubStopTradeVol_holdX`` ,
其交易计划为:减持仓的x倍(x大于1时相当于平仓后反向开仓)
| - 当指定为'hold_base_x'时,使用预定义函数 ``get_SubStopTradeVol_holdbaseX`` ,
其交易计划为:平仓后反向以base_vol的x倍反向开仓
| - 当指定为'hold_all'时,使用预定义函数 ``get_SubStopTradeVol_holdAll`` ,
其交易计划为:平仓后以账户当前可用资金(包含平仓释放资金)为限额反向开全仓
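        | 例如,自定义"每次减半仓"的交易量函数(仅作示意,非预定义函数):
        | def my_func_vol_sub(base_vol, holdVol, cash, Price, holdCost):
        |     return int(abs(holdVol) / 2)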
func_vol_stoploss : str, function
自定义止损操作时的交易量函数,格式同 ``func_vol_sub`` 参数
func_vol_stopgain : str, function
自定义止盈操作时的交易量函数,格式同 ``func_vol_sub`` 参数
stop_no_same : bool
当止盈止损和操作信号同时出现,是否忽略同向操作信号
(做多|空止损|盈后是否禁止继续做多|空)
ignore_no_stop : bool
当有持仓且没有触及止盈止损条件时是否忽略信号,为True时忽略,为False时不忽略
hold_buy_max : int, float
买入(做多)持仓量最大值限制,若信号导致持仓量超限,将被忽略
hold_sel_max : int, float
卖出(做空)持仓量最大值限制,若信号导致持仓量超限,将被忽略
limit_min_vol : int, float
最少开仓量限制(比如股票至少买100股)
base_money : float
开底仓交易限额
base_vol : int, float
开底仓交易限量
.. note::
同时设置 ``base_money`` 和 ``base_vol`` 时以 ``base_money`` 为准
init_cash : float
账户初始资金额
fee : float
单向交易综合成本比例(双向收费)
max_loss : float
止损比例
max_gain : float
止盈比例
max_down : float
平仓最大回撤比例
add_loss_pct : float
当前价格比上次同向交易价格亏损达到 ``add_loss_pct`` 时加仓,
加仓方式由 ``func_vol_add`` 决定
add_gain_pct : float
当前价格比上次同向交易价格盈利达到 ``add_gain_pct`` 时加仓,
加仓方式由 ``func_vol_add`` 决定
stop_sig_order : str
| - 若为`sig_only`,当止盈止损和操作信号同时出现时,忽略止盈止损信号
| - 若为`stop_only`,当止盈止损和操作信号同时出现时,忽略操作信号
| - 若为`stop_first`,当止盈止损和操作信号同时出现时,先考虑止盈止损及反向再开新仓
| - 若为`sig_first`,当止盈止损和操作信号同时出现时,先考虑操作信号再进行剩余止盈止损及反向
| - 若为`both`,则止盈止损及反向量和信号交易量同时考虑
add_sig_order : str
| - 若为`offset`,当加仓信号与操作信号相反时,两者抵消
| - 若为`sig_only`,当加仓信号与操作信号相反时,以操作信号为准
| - 若为`add_only`,当加仓信号与操作信号相反时,以加仓信号为准
force_final0 : str, bool
| 最后一个时间强平价格设置:
| - 若为False,则不强平,按结算价结算账户信息
| - 若为'trade',则按col_price_sel或col_price_buy强平
| - 若为'settle',则按结算价col_price强平
sos_money : float
账户本金亏损完,补资金时保证账户本金最少为 ``sos_money``
del_begin0 : bool
是否删除数据中第一个信号之前没有信号的部分数据
gap_repeat : bool, None, int
| 重复同向信号处理设置
(见 :func:`dramkit.gentools.replace_repeat_func_iter` 函数中的 ``gap`` 参数):
| - 为False时不处理
| - 为None时重复同向信号只保留第一个
| - 为整数gap时,重复同向信号每隔gap保留一个
Returns
-------
trade_gain_info : dict
返回各个收益评价指标
df : pd.DataFrame
包含中间过程数据
TODO
----
- 增加根据固定盈亏点位止盈止损/减仓(比如IF赚30个点止盈,亏20个点止损)
- 增加根据固定盈亏点位加仓(比如IF赚20个点或亏20个点加仓)
- 止盈止损信号增加动态确定函数(止盈止损或最大回撤百分比不固定,根据持仓时间和盈亏等情况确定)
- 增加平仓条件设置(比如分段回撤平仓(不同盈利水平不同回撤容忍度),参数设计成函数)
- 止损参数和加减仓设计成函数(输入为盈亏比例、最大回撤、盈亏金额、盈亏点位、最近一次交易信息等)
- 增加浮盈/亏加仓价格确定方式(例如根据距离上次的亏损比例确定)
- 重写开仓盈亏信息计算函数(先拆单再统计)
- 正常信号加仓和浮盈/亏加仓方式分开处理(目前是合并一致处理)
- 固定持仓时间平仓
- fee买入和卖出分开设置
- func_vol_add, func_vol_sub, func_vol_stoploss, func_vol_stopgain买入和卖出分开设置
(考虑股票不能卖空情况,目前可通过设置 hold_sel_max=0控制禁止买空)
- 添加更多的止盈止损交易量确定方式设置,比如:
| 1) 分段止盈止损
| (eg. 亏10%止损一半仓位,亏20%强平)(需要记录交易过程中的止盈止损历史记录)
| 2) 止盈止损和加减仓交易量函数参数扩展(加入持仓周期,盈亏比率等参数)
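    Examples
    --------
    | 以下为假设数据的最小调用示例(数据与信号均为随意构造,仅演示参数用法,非实际策略):
    | import numpy as np
    | import pandas as pd
    | np.random.seed(0)
    | data = pd.DataFrame({'close': 10 + np.cumsum(np.random.randn(200)) * 0.1})
    | data['signal'] = 0
    | data.loc[data.index % 40 == 0, 'signal'] = -1   # 做多|买入信号
    | data.loc[data.index % 40 == 20, 'signal'] = 1   # 做空|卖出信号
    | trade_gain_info, df = cal_sig_gains(data, 'signal', sig_type=1,
    |                                     base_money=100000, fee=1.5/1000,
    |                                     max_loss=5/100, max_gain=10/100)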
'''
def get_base_vol(Price):
'''计算底仓交易量'''
if not isnull(base_money):
Vol_ = limit_min_vol * (base_money // (limit_min_vol * Price * (1+fee)))
if Vol_ == 0:
raise ValueError('base_money不够开底仓,请检查设置!')
return Vol_
elif isnull(base_vol):
raise ValueError('base_money和base_vol必须设置一个!')
else:
return base_vol
def get_AddTradeVol_baseX(base_vol, x):
'''开底仓的x倍'''
return base_vol * x
def get_AddTradeVol_holdX(base_vol, holdVol, x):
'''无持仓则开底仓,有持仓则开持仓的x倍'''
if abs(holdVol) == 0:
return base_vol
return abs(holdVol * x)
def get_AddTradeVol_all(cash, Price):
'''以cash为限额计算最大可交易量,Price为交易价格'''
return limit_min_vol * (cash // (limit_min_vol * Price * (1+fee)))
def get_SubStopTradeVol_baseX(base_vol, x):
'''减底仓的x倍(超过持仓即为反向开仓)'''
return base_vol * x
def get_SubStopTradeVol_holdX(holdVol, x):
'''减持仓的x倍(x大于1即为反向开仓)'''
return abs(holdVol * x)
def get_SubStopTradeVol_holdbaseX(base_vol, holdVol, x):
'''平仓后反向开底仓的x倍'''
return abs(holdVol) + base_vol * x
def get_SubSellReleaseCash(Vol, Price, Cost):
'''计算平卖方时释放的资金量'''
cashput = abs(Vol) * Price * (1+fee)
gain = abs(Cost) - cashput
cash_release = abs(Cost) + gain
return cash_release
def get_SubStopTradeVol_holdAll(holdVol, cash, Price, holdCost):
'''平仓后反向开全仓交易量(包含平仓量)'''
if holdVol >= 0:
cash_ = holdVol * Price * (1-fee)
cashall = cash + cash_
vol_ = limit_min_vol * (cashall // (limit_min_vol * Price * (1+fee)))
else:
cash_ = get_SubSellReleaseCash(holdVol, Price, holdCost)
cashall = cash + cash_
if cashall <= 0:
vol_ = 0
else:
vol_ = limit_min_vol * (cashall // (limit_min_vol * Price * (1+fee)))
return abs(holdVol) + vol_
def get_AddTradeVol(Price, func_vol_add, hold_vol, cash):
'''开/加仓量计算'''
base_vol = get_base_vol(Price)
if isinstance(func_vol_add, str) and 'base' in func_vol_add:
x = int(float(func_vol_add.split('_')[-1]))
tradeVol = get_AddTradeVol_baseX(base_vol, x)
elif isinstance(func_vol_add, str) and 'hold' in func_vol_add:
x = int(float(func_vol_add.split('_')[-1]))
tradeVol = get_AddTradeVol_holdX(base_vol, hold_vol, x)
elif func_vol_add == 'all':
tradeVol = get_AddTradeVol_all(cash, Price)
else:
tradeVol = func_vol_add(base_vol, hold_vol, cash, Price)
return tradeVol
def get_SubStopTradeVol(Price, VolF_SubStop, hold_vol, hold_cost, cash):
'''平/减仓量计算'''
base_vol = get_base_vol(Price)
if isinstance(VolF_SubStop, str) and 'hold_base' in VolF_SubStop:
x = int(float(VolF_SubStop.split('_')[-1]))
tradeVol = get_SubStopTradeVol_holdbaseX(base_vol, hold_vol, x)
elif VolF_SubStop == 'hold_all':
tradeVol = get_SubStopTradeVol_holdAll(hold_vol, cash, Price, hold_cost)
elif isinstance(VolF_SubStop, str) and 'base' in VolF_SubStop:
x = int(float(VolF_SubStop.split('_')[-1]))
tradeVol = get_SubStopTradeVol_baseX(base_vol, x)
elif isinstance(VolF_SubStop, str) and 'hold' in VolF_SubStop:
x = int(float(VolF_SubStop.split('_')[-1]))
tradeVol = get_SubStopTradeVol_holdX(hold_vol, x)
else:
tradeVol = VolF_SubStop(base_vol, hold_vol, cash, Price, hold_cost)
return tradeVol
def get_tradeVol(sig, act_stop, holdVol_pre, holdCost_pre,
buyPrice, selPrice, cash):
'''
根据操作信号、止盈止损信号、加仓信号、当前持仓量、持仓成本、交易价格和
可用资金计算交易方向和计划交易量
'''
if sig == 0:
if holdVol_pre == 0 or act_stop == 0:
return 0, 0
else:
if holdVol_pre > 0: # 做多止损或止盈
if act_stop == 0.5: # 做多止损
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else: # 做多止盈
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return 1, StopTradeVol
elif holdVol_pre < 0: # 做空止损或止盈
if act_stop == -0.5: # 做空止损
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else: # 做空止盈
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return -1, StopTradeVol
elif holdVol_pre == 0:
if sig_type == 1:
tradePrice = buyPrice if sig == -1 else selPrice
tradeVol = get_AddTradeVol(tradePrice, func_vol_add,
holdVol_pre, cash)
return sig, tradeVol
elif sig_type == 2:
dirt = -1 if sig > 0 else 1
return dirt, abs(sig)
elif holdVol_pre > 0: # 持有做多仓位
if (sig_type == 1 and sig == 1) or (sig_type == 2 and sig < 0):
if act_stop == 0:
if not ignore_no_stop: # 正常减/平做多仓位
if sig_type == 1:
selVol = get_SubStopTradeVol(selPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
selVol = abs(sig)
return 1, selVol
else: # 不触及止盈止损时忽略信号(不操作)
return 0, 0
else: # 做多仓位止盈止损信号与做空信号结合
if stop_sig_order == 'sig_only':
if sig_type == 1:
selVol = get_SubStopTradeVol(selPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
selVol = abs(sig)
return 1, selVol
elif stop_sig_order == 'stop_only':
if act_stop == 0.5:
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else:
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return 1, StopTradeVol
elif stop_sig_order == 'stop_first':
if act_stop == 0.5: # 先止损后再开做空仓位
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else: # 先止盈后再开做空仓位
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
if sig_type == 1:
cash_ = cash + holdVol_pre * selPrice * (1-fee)
selVol = get_AddTradeVol(selPrice, func_vol_add, 0, cash_)
elif sig_type == 2:
selVol = abs(sig)
return 1, max(StopTradeVol, selVol+holdVol_pre)
elif stop_sig_order == 'sig_first':
if sig_type == 1:
selVol = get_SubStopTradeVol(selPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
selVol = abs(sig)
LeftVol = holdVol_pre - selVol # 剩余做多仓位
if LeftVol <= 0: # 信号卖出量已经超过持仓量
return 1, selVol
cash_ = cash + selVol * selPrice * (1-fee)
holdCost_pre_ = holdCost_pre * (LeftVol / holdVol_pre)
if act_stop == 0.5: # 需要止损剩余仓位
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stoploss, LeftVol, holdCost_pre_, cash_)
else: # 需要止盈剩余仓位
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stopgain, LeftVol, holdCost_pre_, cash_)
return 1, StopTradeVol+selVol
elif stop_sig_order == 'both':
if sig_type == 1:
selVol = get_SubStopTradeVol(selPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
selVol = abs(sig)
if act_stop == 0.5:
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else:
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return 1, max(StopTradeVol, selVol)
else:
raise ValueError('`stop_sig_order`参数设置错误!')
elif (sig_type == 1 and sig == -1) or (sig_type == 2 and sig > 0):
if act_stop == 0:
if not ignore_no_stop: # 正常加做多仓位
if sig_type == 1:
buyVol = get_AddTradeVol(buyPrice, func_vol_add,
holdVol_pre, cash)
elif sig_type == 2:
buyVol = abs(sig)
return -1, buyVol
else: # 不触及止盈止损时忽略信号(不操作)
return 0, 0
else: # 做多仓位止盈止损信号与做多信号结合
if stop_no_same or stop_sig_order == 'stop_only': # 做多止盈止损后禁止做多
if act_stop == 0.5: # 止损
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else: # 止盈
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return 1, StopTradeVol
if sig_type == 1:
buyVol = get_AddTradeVol(buyPrice, func_vol_add,
holdVol_pre, cash)
elif sig_type == 2:
buyVol = abs(sig)
if stop_sig_order == 'sig_only':
return -1, buyVol
if act_stop == 0.5:
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else:
StopTradeVol = get_SubStopTradeVol(selPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
if buyVol == StopTradeVol:
return 0, 0
elif buyVol > StopTradeVol:
return -1, buyVol-StopTradeVol
else:
return 1, StopTradeVol-buyVol
elif holdVol_pre < 0: # 持有做空仓位
if (sig_type == 1 and sig == 1) or (sig_type == 2 and sig < 0):
if act_stop == 0:
if not ignore_no_stop: # 正常加做空仓位
if sig_type == 1:
selVol = get_AddTradeVol(selPrice, func_vol_add,
holdVol_pre, cash)
elif sig_type == 2:
selVol = abs(sig)
return 1, selVol
else: # 不触及止盈止损时忽略信号(不操作)
return 0, 0
else: # 做空仓位止盈止损信号与做空信号结合
if stop_no_same or stop_sig_order == 'stop_only': # 做空止盈止损后禁止做空
if act_stop == 0.5: # 止损
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else: # 止盈
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return -1, StopTradeVol
if sig_type == 1:
selVol = get_AddTradeVol(selPrice, func_vol_add,
holdVol_pre, cash)
elif sig_type == 2:
selVol = abs(sig)
if stop_sig_order == 'sig_only':
return 1, selVol
if act_stop == -0.5:
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else:
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
if selVol == StopTradeVol:
return 0, 0
elif selVol > StopTradeVol:
return 1, selVol-StopTradeVol
else:
return -1, StopTradeVol-selVol
elif (sig_type == 1 and sig == -1) or (sig_type == 2 and sig > 0):
if act_stop == 0:
if not ignore_no_stop: # 正常减/平做空仓位
if sig_type == 1:
buyVol = get_SubStopTradeVol(buyPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
buyVol = abs(sig)
return -1, buyVol
else: # 不触及止盈止损时忽略信号(不操作)
return 0, 0
else: # 做空仓位止盈止损信号与做多信号结合
if stop_sig_order == 'sig_only':
if sig_type == 1:
buyVol = get_SubStopTradeVol(buyPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
buyVol = abs(sig)
return -1, buyVol
elif stop_sig_order == 'stop_only':
if act_stop == -0.5:
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else:
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return -1, StopTradeVol
elif stop_sig_order == 'stop_first':
if act_stop == -0.5: # 先止损再开做多仓位
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else: # 先止盈再开做多仓位
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
if sig_type == 1:
cash_ = cash + get_SubSellReleaseCash(holdVol_pre,
buyPrice, holdCost_pre)
buyVol = get_AddTradeVol(buyPrice, func_vol_add,
0, cash_)
elif sig_type == 2:
buyVol = abs(sig)
return -1, max(StopTradeVol, buyVol+abs(holdVol_pre))
elif stop_sig_order == 'sig_first':
if sig_type == 1:
buyVol = get_SubStopTradeVol(buyPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
buyVol = abs(sig)
LeftVol = holdVol_pre + buyVol # 剩余做空仓位
if LeftVol >= 0: # 信号买入量已经超过持仓量
return -1, buyVol
cash_ = cash + get_SubSellReleaseCash(buyVol, buyPrice,
holdCost_pre * (buyVol / holdVol_pre))
holdCost_pre_ = holdCost_pre * (LeftVol / holdVol_pre)
if act_stop == -0.5: # 需要止损剩余仓位
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stoploss, LeftVol, holdCost_pre_, cash_)
else: # 需要止盈剩余仓位
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stopgain, LeftVol, holdCost_pre_, cash_)
return -1, StopTradeVol+buyVol
elif stop_sig_order == 'both':
if sig_type == 1:
buyVol = get_SubStopTradeVol(buyPrice, func_vol_sub,
holdVol_pre, holdCost_pre, cash)
elif sig_type == 2:
buyVol = abs(sig)
if act_stop == -0.5:
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stoploss, holdVol_pre, holdCost_pre, cash)
else:
StopTradeVol = get_SubStopTradeVol(buyPrice,
func_vol_stopgain, holdVol_pre, holdCost_pre, cash)
return -1, max(StopTradeVol, buyVol)
else:
raise ValueError('`stop_sig_order`参数设置错误!')
def buy_act(df, k, buy_price, buy_vol, hold_vol_pre, hold_cost_pre,
hold_cash_pre):
'''买入操作记录'''
df.loc[df.index[k], 'act_price'] = buy_price
df.loc[df.index[k], 'buyVol'] = buy_vol
df.loc[df.index[k], 'holdVol'] = buy_vol + hold_vol_pre
cashPut = buy_vol * buy_price * (1+fee)
df.loc[df.index[k], 'cashPut'] = cashPut
if hold_vol_pre >= 0: # 做多加仓或开仓
df.loc[df.index[k], 'holdCost'] = hold_cost_pre + cashPut
if cashPut >= hold_cash_pre:
cashNeed = cashPut - hold_cash_pre
holdCash = 0
else:
cashNeed = 0
holdCash = hold_cash_pre - cashPut
else:
# 之前持有的平均成本
hold_cost_mean_pre = hold_cost_pre / hold_vol_pre
if buy_vol <= abs(hold_vol_pre): # 减或平做空仓位
if buy_vol == abs(hold_vol_pre):
df.loc[df.index[k], 'holdCost'] = 0
else:
df.loc[df.index[k], 'holdCost'] = hold_cost_mean_pre * \
(buy_vol + hold_vol_pre)
# df.loc[df.index[k], 'holdCost'] = hold_cost_pre + cashPut
# cashGet_pre为空单开仓时的成本金
# (注:默认空单成本为负值,没考虑成本为正值情况)
cashGet_pre = buy_vol * hold_cost_mean_pre
gain = cashGet_pre - cashPut # 空单盈亏
df.loc[df.index[k], 'act_gain_Cost'] = -cashGet_pre
df.loc[df.index[k], 'act_gain_Val'] = -cashPut
# 若空单赚钱,盈利和占用资金转化为现金
# 若空单亏钱,平仓可能需要补资金,占用资金除去亏损剩余部分转化为现金
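                # 数值示例(假设): 开空时收到现金10000(holdCost=-10000), 平空支出cashPut=9000,
                # 则gain=1000, 并入可用现金的释放资金为10000+1000=11000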
if gain >= 0:
cashNeed = 0
holdCash = (cashGet_pre + gain) + hold_cash_pre
elif gain < 0:
cashNeed_ = abs(gain) - cashGet_pre
if cashNeed_ >= hold_cash_pre:
cashNeed = cashNeed_ - hold_cash_pre
holdCash = 0
else:
cashNeed = 0
holdCash = hold_cash_pre + (cashGet_pre + gain)
elif buy_vol > abs(hold_vol_pre): # 平做空仓位后反向开做多仓位
# buyNeed_反向开多单需要的资金
buyNeed_ = (buy_vol + hold_vol_pre) * buy_price * (1+fee)
df.loc[df.index[k], 'holdCost'] = buyNeed_
# gain空单盈亏
val_sub = hold_vol_pre*buy_price*(1+fee)
gain = val_sub - hold_cost_pre
cash_ = hold_cash_pre + (gain-hold_cost_pre)
if buyNeed_ >= cash_:
cashNeed = buyNeed_ - cash_
holdCash = 0
else:
cashNeed = 0
holdCash = cash_ - buyNeed_
df.loc[df.index[k], 'act_gain_Cost'] = hold_cost_pre
df.loc[df.index[k], 'act_gain_Val'] = val_sub
df.loc[df.index[k], 'act_gain'] = gain
if buy_price*(1+fee) <= hold_cost_mean_pre:
df.loc[df.index[k], 'act_gain_label'] = 1
elif buy_price*(1+fee) > hold_cost_mean_pre:
df.loc[df.index[k], 'act_gain_label'] = -1
if k == 0:
df.loc[df.index[k], 'cashNeed'] = max(init_cash,
cashNeed+init_cash)
else:
df.loc[df.index[k], 'cashNeed'] = cashNeed
df.loc[df.index[k], 'holdCash'] = holdCash
return df
def sel_act(df, k, sel_price, sel_vol, hold_vol_pre, hold_cost_pre,
hold_cash_pre):
'''卖出操作记录'''
df.loc[df.index[k], 'act_price'] = sel_price
df.loc[df.index[k], 'selVol'] = sel_vol
df.loc[df.index[k], 'holdVol'] = hold_vol_pre - sel_vol
cashGet = sel_vol * sel_price * (1-fee)
df.loc[df.index[k], 'cashGet'] = cashGet
if hold_vol_pre <= 0: # 做空加仓或开仓
df.loc[df.index[k], 'holdCost'] = hold_cost_pre - cashGet
cashNeed_ = sel_vol * sel_price * (1+fee)
else:
# 之前持有的平均成本
hold_cost_mean_pre = hold_cost_pre / hold_vol_pre
if sel_vol <= hold_vol_pre: # 减或平做多仓位
if sel_vol == hold_vol_pre:
df.loc[df.index[k], 'holdCost'] = 0
else:
df.loc[df.index[k], 'holdCost'] = hold_cost_mean_pre * \
(hold_vol_pre - sel_vol)
# df.loc[df.index[k], 'holdCost'] = hold_cost_pre - cashGet
cashNeed_ = -cashGet
cashPut_pre = hold_cost_mean_pre * sel_vol
gain = cashGet - cashPut_pre
df.loc[df.index[k], 'act_gain_Cost'] = cashPut_pre
df.loc[df.index[k], 'act_gain_Val'] = cashGet
elif sel_vol > hold_vol_pre: # 平做多仓位后反向开做空仓位
df.loc[df.index[k], 'holdCost'] = \
(hold_vol_pre-sel_vol) * sel_price * (1-fee)
cashGet_pre = hold_vol_pre * sel_price * (1-fee)
cashNeed_ = (sel_vol-hold_vol_pre) * sel_price * (1+fee) \
- cashGet_pre
gain = cashGet_pre - hold_cost_pre
df.loc[df.index[k], 'act_gain_Cost'] = hold_cost_pre
df.loc[df.index[k], 'act_gain_Val'] = cashGet_pre
df.loc[df.index[k], 'act_gain'] = gain
if sel_price*(1-fee) >= hold_cost_mean_pre:
df.loc[df.index[k], 'act_gain_label'] = 1
elif sel_price*(1-fee) < hold_cost_mean_pre:
df.loc[df.index[k], 'act_gain_label'] = -1
if cashNeed_ >= hold_cash_pre:
if k == 0:
df.loc[df.index[k], 'cashNeed'] = max(init_cash,
init_cash + cashNeed_ - hold_cash_pre)
else:
df.loc[df.index[k], 'cashNeed'] = cashNeed_ - hold_cash_pre
df.loc[df.index[k], 'holdCash'] = 0
else:
if k == 0:
df.loc[df.index[k], 'cashNeed'] = init_cash
else:
df.loc[df.index[k], 'cashNeed'] = 0
df.loc[df.index[k], 'holdCash'] = hold_cash_pre - cashNeed_
return df
if sig_type == 1:
act_types = list(data[sig_col].unique())
if not check_l_allin_l0(act_types, [0, 1, -1]):
raise ValueError('data.{}列的值只能是0或1或-1!'.format(sig_col))
assert (data[sig_col] == 0).sum() < data.shape[0], '{}信号列全为0!'.format(sig_col)
if force_final0:
if force_final0 not in ['trade', 'settle']:
raise ValueError('请设置`force_final0`为False或`trade`或`settle`')
cols = list(set([sig_col, col_price, col_price_buy, col_price_sel]))
df = data.reindex(columns=cols)
for col in cols:
if df[col].isna().sum() > 0:
raise ValueError('{}列存在无效值,请检查!'.format(col))
df[sig_col] = df[sig_col].shift(shift_lag)
df[sig_col] = df[sig_col].fillna(0)
if del_begin0:
k = 0
while k < df.shape[0] and df[sig_col].iloc[k] == 0:
k += 1
df = df.iloc[k:, :].copy()
if gap_repeat != False:
df[sig_col] = replace_repeat_func_iter(df[sig_col],
lambda x: x > 0,
lambda x: 0,
gap=gap_repeat)
df[sig_col] = replace_repeat_func_iter(df[sig_col],
lambda x: x < 0,
lambda x: 0,
gap=gap_repeat)
# 新增一条记录用于最终强平(避免最后一条记录有信号时与强平混淆)
iend = '{}_'.format(df.index[-1])
if df[sig_col].iloc[-1] != 0:
logger_show('将新增一条记录处理最终结算|强平(会略微影响收益评价指标)。',
logger, level='warn')
df.loc[iend, :] = df.iloc[-1, :]
df.loc[iend, sig_col] = 0
ori_index = df.index
df.reset_index(drop=True, inplace=True)
df['buyVol'] = 0 # 做多(买入)量
df['selVol'] = 0 # 做空(卖出)量
df['holdVol'] = 0 # 持仓量(交易完成后)
df['holdVal'] = 0 # 持仓价值(交易完成后)
df['cashPut'] = 0 # 现金流出(买入做多算手续费后成本资金)
df['cashGet'] = 0 # 现金流入(卖出做空算手续费后成本资金)
df['cashNeed'] = 0 # 交易时账户需要转入的资金(当笔交易)
df.loc[df.index[0], 'cashNeed'] = init_cash
df['holdCash'] = 0 # 账户持有现金(交易完成后)
df['holdCost'] = 0 # 现有持仓总成本(交易完成后)
df['holdCost_mean'] = 0 # 现有持仓平均成本(交易完成后)
df['holdPreGainPct'] = 0 # 持仓盈亏(交易完成前)
df['holdPreGainPctMax'] = 0 # 持仓达到过的最高收益(交易完成前)
df['holdPreLossPctMax'] = 0 # 持仓达到过的最大亏损(交易完成前)
df['holdPreMaxDown'] = 0 # 持仓最大回撤(交易完成前)
df['act_stop'] = 0 # 止盈止损标注(0.5多止损,1.5多止盈,-0.5空止损,-1.5空止盈)
df['act'] = 0 # 实际操作(1做空,-1做多)(用于信号被过滤时进行更正)
df['act_price'] = np.nan # 交易价格
df['act_gain'] = 0 # 平|减仓盈亏
df['act_gain_Cost'] = 0 # 减|平仓位的总成本
df['act_gain_Val'] = 0 # 减|平仓位的交易额
df['act_gain_label'] = 0 # 若操作为平仓或减仓,记录盈亏标志(1为盈利,-1为亏损)
df['holdGainPct'] = 0 # 现有持仓盈亏(交易完成后)
last_act = 0 # 上一个操作类型
last_acted = np.nan # 上一个操作类型,只有操作才更新
    last_actPrice = np.nan # 上一个操作价格,只有操作才更新
for k in range(0, df.shape[0]):
if nshow and k % nshow == 0:
logger_show('simTrading: {} / {}, {} ...'.format(
k, df.shape[0], ori_index[k]),
logger)
# 交易前持仓量
if k == 0:
holdVol_pre = 0
holdCost_pre = 0
act_stop = 0
act_add = 0
holdPreGainPct = 0
holdPreGainPctMax = 0
holdPreLossPctMax = 0
holdPreMaxDown = 0
holdCash_pre = init_cash
Price_settle = np.nan
else:
holdVol_pre = df.loc[df.index[k-1], 'holdVol']
holdCost_pre = df.loc[df.index[k-1], 'holdCost']
holdCash_pre = df.loc[df.index[k-1], 'holdCash']
if holdVol_pre == 0:
act_stop = 0
holdPreGainPct = 0
holdPreGainPctMax = 0
holdPreLossPctMax = 0
holdPreMaxDown = 0
else:
# 检查止盈止损及加仓条件是否触及
if settle_after_act:
Price_settle = df.loc[df.index[k-1], col_price]
else:
Price_settle = df.loc[df.index[k], col_price]
if holdVol_pre > 0:
holdVal_pre = holdVol_pre * Price_settle * (1-fee)
elif holdVol_pre < 0:
holdVal_pre = holdVol_pre * Price_settle * (1+fee)
holdPreGainPct = cal_gain_pct(holdCost_pre, holdVal_pre, pct_cost0=0)
# 若前一次有操作,则计算持仓盈利和回撤(交易前)须重新计算
if last_act == 0:
holdPreGainPctMax = max(holdPreGainPct,
df.loc[df.index[k-1], 'holdPreGainPctMax'])
holdPreLossPctMax = min(holdPreGainPct,
df.loc[df.index[k-1], 'holdPreLossPctMax'])
else:
holdPreGainPctMax = max(holdPreGainPct, 0)
holdPreLossPctMax = min(holdPreGainPct, 0)
# 最大回撤
# holdPreMaxDown = holdPreGainPctMax - holdPreGainPct
if holdCost_pre > 0:
holdValMax_pre = holdCost_pre * (1 + holdPreGainPctMax)
elif holdCost_pre < 0:
holdValMax_pre = holdCost_pre * (1 - holdPreGainPctMax)
holdPreMaxDown = abs(cal_gain_pct(holdValMax_pre, holdVal_pre,
pct_cost0=0))
# 没有止盈止损
if isnull(max_loss) and isnull(max_gain) and isnull(max_down):
act_stop = 0
# 固定比率止盈止损
elif not isnull(max_loss) or not isnull(max_gain):
# 止损
if not isnull(max_loss) and holdPreGainPct <= -max_loss:
if holdCost_pre > 0:
act_stop = 0.5 # 做多止损
elif holdCost_pre < 0:
act_stop = -0.5 # 做空止损
else:
act_stop = 0
# 止盈
elif not isnull(max_gain) and holdPreGainPct >= max_gain:
if holdCost_pre > 0:
act_stop = 1.5 # 做多止盈
elif holdCost_pre < 0:
act_stop = -1.5 # 做空止盈
else:
act_stop = 0
else:
act_stop = 0
# 最大回撤平仓
elif not isnull(max_down):
if holdPreMaxDown < max_down:
act_stop = 0
else:
if holdCost_pre > 0:
# act_stop = 0.5 # 做多平仓
if holdPreGainPct < 0:
act_stop = 0.5 # 做多止损
elif holdPreGainPct > 0:
act_stop = 1.5 # 做多止盈
else:
act_stop = 0
elif holdCost_pre < 0:
# act_stop = -0.5 # 做空平仓
if holdPreGainPct < 0:
act_stop = -0.5 # 做空止损
elif holdPreGainPct > 0:
act_stop = -1.5 # 做空止盈
else:
act_stop = 0
else:
act_stop = 0
# 没有加仓
if (isnull(add_loss_pct) and isnull(add_gain_pct)) \
or holdVol_pre == 0 or last_acted == 0:
act_add = 0
else:
# 比上次同向操作时价格涨跌幅
pct_lastacted = cal_pct(last_actPrice, Price_settle)
# 浮盈加仓|浮亏加仓
if holdVol_pre > 0 and last_acted == -1 and \
not isnull(add_gain_pct) and pct_lastacted >= add_gain_pct:
act_add = -1 # 做多浮盈加仓
elif holdVol_pre < 0 and last_acted == 1 and \
                not isnull(add_gain_pct) and pct_lastacted <= -add_gain_pct:
act_add = 1 # 做空浮盈加仓
elif holdVol_pre > 0 and last_acted == -1 and \
not isnull(add_loss_pct) and pct_lastacted <= -add_loss_pct:
act_add = -1 # 做多浮亏加仓
elif holdVol_pre < 0 and last_acted == 1 and \
not isnull(add_loss_pct) and pct_lastacted >= add_loss_pct:
act_add = 1 # 做空浮亏加仓
else:
act_add = 0
df.loc[df.index[k], 'act_stop'] = act_stop
df.loc[df.index[k], 'holdPreGainPct'] = holdPreGainPct
df.loc[df.index[k], 'holdPreGainPctMax'] = holdPreGainPctMax
df.loc[df.index[k], 'holdPreLossPctMax'] = holdPreLossPctMax
df.loc[df.index[k], 'holdPreMaxDown'] = holdPreMaxDown
buyPrice = df.loc[df.index[k], col_price_buy]
selPrice = df.loc[df.index[k], col_price_sel]
sig = df.loc[df.index[k], sig_col] # 操作信号
if sig == 0:
sig = act_add
elif sig + act_add == 0:
if add_sig_order == 'offset':
sig = 0
elif add_sig_order == 'add_only':
sig = act_add
elif add_sig_order == 'sig_only':
                pass  # 以操作信号为准,sig保持不变
else:
raise ValueError('`add_sig_order`参数设置错误!')
# 确定交易计划
if k == df.shape[0]-1 and force_final0:
if holdVol_pre > 0:
act, tradeVol = 1, holdVol_pre # 强平多仓
last_force = 'sel'
if force_final0 == 'settle':
selPrice = df.loc[df.index[k], col_price]
elif holdVol_pre < 0:
act, tradeVol = -1, abs(holdVol_pre) # 强平空仓
last_force = 'buy'
if force_final0 == 'settle':
buyPrice = df.loc[df.index[k], col_price]
else:
act, tradeVol = 0, 0
last_force = None
else:
act, tradeVol = get_tradeVol(sig, act_stop, holdVol_pre,
holdCost_pre, buyPrice, selPrice, holdCash_pre)
# 检查交易后是否会导致持仓量超限,更正交易量
if act == -1:
if not isnull(hold_buy_max):
if holdVol_pre >= hold_buy_max:
act, tradeVol = 0, 0
elif holdVol_pre + tradeVol > hold_buy_max:
act, tradeVol = -1, hold_buy_max-holdVol_pre
elif act == 1:
if not isnull(hold_sel_max):
if -holdVol_pre >= hold_sel_max:
act, tradeVol = 0, 0
elif -holdVol_pre + tradeVol > hold_sel_max:
act, tradeVol = 1, hold_sel_max+holdVol_pre
if tradeVol == 0:
act = 0
# 更新实际操作方向
df.loc[df.index[k], 'act'] = act
# 交易执行
if act == 0:
df.loc[df.index[k], 'holdVol'] = holdVol_pre
df.loc[df.index[k], 'holdCost'] = holdCost_pre
df.loc[df.index[k], 'holdCash'] = holdCash_pre
elif act == -1:
df = buy_act(df, k, buyPrice, tradeVol, holdVol_pre, holdCost_pre,
holdCash_pre)
elif act == 1:
df = sel_act(df, k, selPrice, tradeVol, holdVol_pre, holdCost_pre,
holdCash_pre)
# 持仓信息更新
holdVol = df.loc[df.index[k], 'holdVol']
Price = df.loc[df.index[k], col_price]
if holdVol > 0:
df.loc[df.index[k], 'holdVal'] = holdVol * Price * (1-fee)
elif holdVol < 0:
df.loc[df.index[k], 'holdVal'] = holdVol * Price * (1+fee)
df.loc[df.index[k], 'holdGainPct'] = cal_gain_pct(
df.loc[df.index[k], 'holdCost'], df.loc[df.index[k], 'holdVal'], 0)
# 盈亏和资金占用更新
if k == 0:
df.loc[df.index[k], 'cashGet_cum'] = df.loc[df.index[k], 'cashGet']
df.loc[df.index[k], 'cashPut_cum'] = df.loc[df.index[k], 'cashPut']
df.loc[df.index[k], 'cashUsedtmp'] = df.loc[df.index[k], 'cashNeed']
else:
df.loc[df.index[k], 'cashGet_cum'] = \
df.loc[df.index[k-1], 'cashGet_cum'] + \
df.loc[df.index[k], 'cashGet']
df.loc[df.index[k], 'cashPut_cum'] = \
df.loc[df.index[k-1], 'cashPut_cum'] + \
df.loc[df.index[k], 'cashPut']
df.loc[df.index[k], 'cashUsedtmp'] = \
df.loc[df.index[k-1], 'cashUsedtmp'] + \
df.loc[df.index[k], 'cashNeed']
df.loc[df.index[k], 'gain_cum'] = \
df.loc[df.index[k], 'cashGet_cum'] + \
df.loc[df.index[k], 'holdVal'] - \
df.loc[df.index[k], 'cashPut_cum']
df.loc[df.index[k], 'tmpValue'] = \
df.loc[df.index[k], 'gain_cum'] + \
df.loc[df.index[k], 'cashUsedtmp']
# 若存在空单本金亏完的情况,需要补资金
if df.loc[df.index[k], 'tmpValue'] <= 0:
df.loc[df.index[k], 'cashNeed'] = \
df.loc[df.index[k], 'cashNeed'] - \
df.loc[df.index[k], 'tmpValue'] + sos_money
df.loc[df.index[k], 'holdCash'] = sos_money
if k == 0:
df.loc[df.index[k], 'cashUsedtmp'] = df.loc[df.index[k], 'cashNeed']
else:
df.loc[df.index[k], 'cashUsedtmp'] = \
df.loc[df.index[k-1], 'cashUsedtmp'] + \
df.loc[df.index[k], 'cashNeed']
last_act = act
last_acted = act if act != 0 else last_acted
if act == 1:
last_actPrice = selPrice
elif act == -1:
last_actPrice = buyPrice
# 现有持仓平均成本(交易完成后)
df['holdCost_mean'] = (df['holdCost'] / df['holdVol']).fillna(0)
df['holdCost_mean'] = df[['holdCost_mean', 'holdVol']].apply(lambda x:
x['holdCost_mean'] if x['holdVol'] >= 0 else \
-x['holdCost_mean'], axis=1)
# 减|平仓位盈亏百分比(用于计算百分比盈亏比和赢率)
df['act_gain_pct'] = df[['act_gain_Cost', 'act_gain_Val']].apply(
lambda x: cal_gain_pct(x['act_gain_Cost'], x['act_gain_Val'], 0), axis=1)
df['cashUsedMax'] = df['cashNeed'].cumsum() # 实际最大资金占用
df['pctGain_maxUsed'] = df[['gain_cum', 'cashUsedMax']].apply( lambda x:
x_div_y(x['gain_cum'], x['cashUsedMax'], v_xy0=0), axis=1) # 收益/最大占用
# 最大占用资金最大值
cashUsedMax = df['cashUsedMax'].iloc[-1]
# 账户总值(总投入+盈利)
# 实际总值(用于计算基金净值)
df['AccountValue_act'] = df['gain_cum'] + df['cashUsedMax']
df['AccountValue_act_net'] = df['AccountValue_act'] / df['cashUsedMax']
# 按最大成本算总值(用于根据账户总价值计算年化收益率、夏普、最大回撤等)
df['AccountValue_maxUsed'] = df['gain_cum'] + cashUsedMax
df['AccountValue_maxUsed_net'] = df['AccountValue_maxUsed'] / cashUsedMax
df['pct_mkt'] = df[col_price].pct_change()
# 持有仓位当期收益率(可用于累加和累乘收益计算)
df['holdGainPct_cur'] = df['holdVal'].rolling(2).apply(lambda x:
cal_gain_pct(x.iloc[0], x.iloc[1], pct_cost0=0))
df.loc[df.index[0], 'holdGainPct_cur'] = df['holdGainPct'].iloc[0]
for k in range(1, df.shape[0]):
buyVol_cur = df['buyVol'].iloc[k]
selVol_cur = df['selVol'].iloc[k]
if buyVol_cur == 0 and selVol_cur == 0:
continue
else:
preHoldVal = df['holdVal'].iloc[k-1] # 前期持仓总价值
preHoldVol = df['holdVol'].iloc[k-1] # 前期持仓总量
curHoldVal = df['holdVal'].iloc[k] # 当前持仓总价值
tradeVol_cur = buyVol_cur - selVol_cur # 当期交易量
if preHoldVol == 0: # 开仓
df.loc[df.index[k], 'holdGainPct_cur'] = \
df.loc[df.index[k], 'holdGainPct']
elif preHoldVol > 0 and tradeVol_cur > 0: # buy加仓
CostTotal = preHoldVal + df.loc[df.index[k], 'cashPut']
df.loc[df.index[k], 'holdGainPct_cur'] = cal_gain_pct(
CostTotal, curHoldVal, pct_cost0=0)
elif preHoldVol < 0 and tradeVol_cur < 0: # sel加仓
CostTotal = preHoldVal - df.loc[df.index[k], 'cashGet']
df.loc[df.index[k], 'holdGainPct_cur'] = cal_gain_pct(
CostTotal, curHoldVal, pct_cost0=0)
elif preHoldVol > 0 and tradeVol_cur < 0 and \
preHoldVol + tradeVol_cur >= 0: # buy减|平仓
ValTotal = curHoldVal + df.loc[df.index[k], 'cashGet']
df.loc[df.index[k], 'holdGainPct_cur'] = cal_gain_pct(
preHoldVal, ValTotal, pct_cost0=0)
elif preHoldVol < 0 and tradeVol_cur > 0 and \
preHoldVol + tradeVol_cur <= 0: # sel减|平仓
ValTotal = curHoldVal - df.loc[df.index[k], 'cashPut']
df.loc[df.index[k], 'holdGainPct_cur'] = cal_gain_pct(
preHoldVal, ValTotal, pct_cost0=0)
elif preHoldVol > 0 and tradeVol_cur < 0 and \
preHoldVol + tradeVol_cur < 0: # 平buy反向sel
ValTotal = df.loc[df.index[k], 'cashGet']
CostTotal = preHoldVal - curHoldVal
df.loc[df.index[k], 'holdGainPct_cur'] = cal_gain_pct(
CostTotal, ValTotal, pct_cost0=0)
elif preHoldVol < 0 and tradeVol_cur > 0 and \
preHoldVol + tradeVol_cur > 0: # 平sel反向buy
CostTotal = df.loc[df.index[k], 'cashPut']
ValTotal = curHoldVal - preHoldVal
df.loc[df.index[k], 'holdGainPct_cur'] = cal_gain_pct(
CostTotal, ValTotal, pct_cost0=0)
totalGain = df['gain_cum'].iloc[-1] # 总收益额
pctGain_maxUsed = df['pctGain_maxUsed'].iloc[-1]
# 交易次数
Nbuy = df[df['buyVol'] != 0].shape[0]
Nsel = df[df['selVol'] != 0].shape[0]
if not force_final0:
if df['holdVol'].iloc[-1] > 0 and df['selVol'].iloc[-1] == 0:
Nsel += 1
elif df['holdVol'].iloc[-1] < 0 and df['buyVol'].iloc[-1] == 0:
Nbuy += 1
Nacts = Nbuy + Nsel
# 平均赢率(总交易次数)
Nliqs = df[df['act_gain_label'] != 0].shape[0]
Nliqs_gain = df[df['act_gain_label'] == 1].shape[0]
if not force_final0:
if df['holdVol'].iloc[-1] > 0 and df['selVol'].iloc[-1] == 0:
Nliqs += 1
if df['holdVal'].iloc[-1] >= df['holdCost'].iloc[-1]:
Nliqs_gain += 1
elif df['holdVol'].iloc[-1] < 0 and df['buyVol'].iloc[-1] == 0:
Nliqs += 1
if df['holdVal'].iloc[-1] >= df['holdCost'].iloc[-1]:
Nliqs_gain += 1
# 盈亏比
TotGain = df[df['act_gain'] > 0]['act_gain'].sum()
TotLoss = df[df['act_gain'] < 0]['act_gain'].sum()
if not force_final0:
if df['holdVal'].iloc[-1] >= df['holdCost'].iloc[-1]:
TotGain += (df['holdVal'].iloc[-1] - df['holdCost'].iloc[-1])
else:
TotLoss += (df['holdVal'].iloc[-1] - df['holdCost'].iloc[-1])
TotLoss = abs(TotLoss)
MeanGain = x_div_y(TotGain, Nliqs_gain, v_x0=0, v_y0=0, v_xy0=0)
MeanLoss = x_div_y(TotLoss, Nliqs-Nliqs_gain, v_x0=0, v_y0=0, v_xy0=0)
gain_loss_rate = x_div_y(MeanGain, MeanLoss, v_x0=0, v_y0=np.inf)
# 百分比盈亏比
TotGainPct = df[df['act_gain_pct'] > 0]['act_gain_pct'].sum()
TotLossPct = df[df['act_gain_pct'] < 0]['act_gain_pct'].sum()
if not force_final0:
if df['holdVal'].iloc[-1] >= df['holdCost'].iloc[-1]:
TotGainPct += cal_gain_pct(df['holdCost'].iloc[-1],
df['holdVal'].iloc[-1], 0)
else:
TotLossPct += cal_gain_pct(df['holdCost'].iloc[-1],
df['holdVal'].iloc[-1], 0)
TotLossPct = abs(TotLossPct)
MeanGainPct = x_div_y(TotGainPct, Nliqs_gain, v_x0=0, v_y0=0, v_xy0=0)
MeanLossPct = x_div_y(TotLossPct, Nliqs-Nliqs_gain, v_x0=0, v_y0=0, v_xy0=0)
gain_loss_rate_pct = x_div_y(MeanGainPct, MeanLossPct, v_x0=0, v_y0=np.inf)
# 百分比平均赢率
meanGainPct_pct = (TotGainPct - TotLossPct) / Nliqs
# 计算开仓统计量时先对最后一个时间的强平价格做更正
df_ = df.copy()
if force_final0 == 'settle':
if last_force == 'buy':
df_.loc[df.index[-1], col_price_buy] = \
df_.loc[df.index[-1], col_price]
elif last_force == 'sel':
df_.loc[df.index[-1], col_price_sel] = \
df_.loc[df.index[-1], col_price]
trade_gain_info_open = get_open_gain_info(df_, col_price=col_price,
col_price_buy=col_price_buy, col_price_sel=col_price_sel, fee=fee,
force_final0=force_final0, nshow=nshow, logger=logger)
# 最大连续开仓次数
df['act_n'] = df[['act', 'holdVol']].apply(lambda x:
np.nan if x['act'] != 0 and x['holdVol'] == 0 else \
(-1 if x['act'] == -1 and x['holdVol'] > 0 else \
(1 if x['act'] == 1 and x['holdVol'] < 0 else 0)), axis=1)
df['con_buy_n'] = con_count_ignore(df['act_n'], lambda x: x == -1,
func_ignore=lambda x: x == 0)
df['con_sel_n'] = con_count_ignore(df['act_n'], lambda x: x == 1,
func_ignore=lambda x: x == 0)
trade_gain_info = {
'收益/最大占用比': pctGain_maxUsed,
'总收益': totalGain,
'最大占用': cashUsedMax,
'buy次数': Nbuy,
'sel次数': Nsel,
'总交易次数': Nacts,
'平均赢率(总交易次数)': 2 * pctGain_maxUsed / Nacts,
'百分比平均赢率(总交易次数)':
((TotGainPct-TotLossPct) + \
(trade_gain_info_open['盈利百分比和(开仓)']-\
trade_gain_info_open['亏损百分比和(开仓)'])) / \
(Nliqs+trade_gain_info_open['开仓次数']),
'平或减仓次数': Nliqs,
'盈利次数(平仓或减仓)': Nliqs_gain,
'亏损次数(平仓或减仓)': Nliqs - Nliqs_gain,
'胜率(平仓或减仓)': Nliqs_gain / Nliqs,
'平均赢率(平仓或减仓)': pctGain_maxUsed / Nliqs,
'平均赢率(开仓)': pctGain_maxUsed / trade_gain_info_open['开仓次数'],
'百分比平均赢率(平仓或减仓)': meanGainPct_pct,
'盈亏比(平仓)': gain_loss_rate,
'百分比盈亏比(平仓或减仓)': gain_loss_rate_pct,
'总盈利额(平仓)': TotGain,
'总亏损额(平仓)': TotLoss,
'盈利百分比和(平仓或减仓)': TotGainPct,
'亏损百分比和(平仓或减仓)': TotLossPct,
'平均盈利(平仓)': MeanGain,
'平均亏损(平仓)': MeanLoss,
'平均盈利百分比(平仓或减仓)': MeanGainPct,
'平均亏损百分比(平仓或减仓)': MeanLossPct,
'单笔最大回撤(平仓或减仓)': df['holdPreMaxDown'].max(),
'单笔最大亏损(平仓或减仓)': df['holdPreLossPctMax'].min(),
'开仓频率': df.shape[0] / trade_gain_info_open['开仓次数'],
'最大连续buy开(加)仓次数': df['con_buy_n'].max(),
'最大连续sel开(加)次数': df['con_sel_n'].max()
}
trade_gain_info.update(trade_gain_info_open)
df.index = ori_index
return trade_gain_info, df
#%%
def get_open_gain_info(df_gain, col_price='close', col_price_buy='close',
col_price_sel='close', fee=1.5/1000,
force_final0='settle', nshow=None, logger=None):
'''
| 以开仓为统计口径,计算盈亏指标
| ``df_gain`` 为 :func:`cal_sig_gains` 函数的输出 ``df``
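    | 用法示例(假设df为 :func:`cal_sig_gains` 返回的中间过程数据,仅作示意):
    | info_open = get_open_gain_info(df, col_price='close', fee=1.5/1000)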
'''
cols = list(set([col_price, col_price_buy, col_price_sel]))
df = df_gain.reindex(columns=['buyVol', 'selVol', 'holdVol']+cols)
holdVolLast = df['holdVol'].iloc[-1]
if holdVolLast < 0:
df.loc[df.index[-1], 'buyVol'] = df['buyVol'].iloc[-1] - holdVolLast
df.loc[df.index[-1], 'holdVol'] = 0
if force_final0 != 'trade':
df.loc[df.index[-1], col_price_buy] = \
df.loc[df.index[-1], col_price]
elif holdVolLast > 0:
df.loc[df.index[-1], 'selVol'] = df['selVol'].iloc[-1] + holdVolLast
df.loc[df.index[-1], 'holdVol'] = 0
if force_final0 != 'trade':
df.loc[df.index[-1], col_price_sel] = \
df.loc[df.index[-1], col_price]
df['Tidx'] = range(0, df.shape[0])
df['BuySelVol'] = df['buyVol'] - df['selVol']
# df = df[df['BuySelVol'] != 0].copy()
n_open, n_gain, n_loss = 0, 0, 0 # 记录开仓次数, 盈利次数, 亏损次数
Vgain, Vloss, PctGain, PctLoss = 0, 0, 0, 0 # 记录总盈利和总亏损额及百分比之和
i, N = 0, df.shape[0]
hold_times_all = [] # 记录持仓周期
maxdowns, maxlosss = [], [] # 记录分笔最大回撤和最大亏损
while i < N:
if nshow and i % nshow == 0:
logger_show('GetOpenGainInfo: {} / {} ...'.format(i, df.shape[0]),
logger)
volOpen = df['BuySelVol'].iloc[i] # 开仓量
if volOpen == 0:
i += 1
continue
n_open += 1
# 开仓总成本
if volOpen > 0:
costOpen = df[col_price_buy].iloc[i] * volOpen * (1+fee)
elif volOpen < 0:
costOpen = df[col_price_sel].iloc[i] * volOpen * (1-fee)
# 寻找对应平仓位置和量
valLiqs = 0 # 平仓总值
volLiq = 0
maxdown, maxloss, maxgain = 0, 0, 0 # 最大回撤、最大亏损和最大盈利跟踪
volLeft = abs(volOpen) - abs(volLiq)
j = i+1
hold_times = []
while j < N and volLeft > 0:
# 回撤更新
Price_settle = df[col_price].iloc[j-1]
if volOpen > 0:
Value_settle = valLiqs - volLeft * Price_settle * (1-fee)
else:
Value_settle = valLiqs + volLeft * Price_settle * (1+fee)
pct = cal_gain_pct(costOpen, -Value_settle, pct_cost0=0)
maxloss = min(pct, maxloss)
maxgain = max(pct, maxgain)
if volOpen > 0:
valMax = costOpen * (1 + maxgain)
elif volOpen < 0:
valMax = costOpen * (1 - maxgain)
maxdown = abs(cal_gain_pct(valMax, -Value_settle, pct_cost0=0))
if df['BuySelVol'].iloc[j] == 0:
j += 1
continue
# 操作方向相反则纳入平仓量
vol_ = df['BuySelVol'].iloc[j]
if volOpen * vol_ < 0:
volLiq += df['BuySelVol'].iloc[j]
volLeft = abs(volOpen) - abs(volLiq)
if volLeft == 0: # 刚好完成平仓
df.loc[df.index[j], 'BuySelVol'] = 0
if volOpen > 0:
valLiqs += df[col_price_sel].iloc[j] * vol_ * (1-fee)
elif volOpen < 0:
valLiqs += df[col_price_buy].iloc[j] * vol_ * (1+fee)
if costOpen + valLiqs > 0:
n_loss += 1
Vloss += (costOpen + valLiqs)
pct = cal_gain_pct(costOpen, -valLiqs, pct_cost0=0)
PctLoss += abs(pct)
maxloss = min(pct, maxloss)
maxgain = max(pct, maxgain)
if volOpen > 0:
valMax = costOpen * (1 + maxgain)
elif volOpen < 0:
valMax = costOpen * (1 - maxgain)
maxdown = abs(cal_gain_pct(valMax, -valLiqs, pct_cost0=0))
else:
n_gain += 1
Vgain += abs(costOpen + valLiqs)
PctGain += cal_gain_pct(costOpen, -valLiqs, pct_cost0=0)
hold_times.append(df.loc[df.index[j], 'Tidx']- \
df.loc[df.index[i], 'Tidx'])
elif volLeft > 0: # 未完成平仓
df.loc[df.index[j], 'BuySelVol'] = 0
if volOpen > 0:
valLiqs += df[col_price_sel].iloc[j] * vol_ * (1-fee)
elif volOpen < 0:
valLiqs += df[col_price_buy].iloc[j] * vol_ * (1+fee)
hold_times.append(df.loc[df.index[j], 'Tidx']- \
df.loc[df.index[i], 'Tidx'])
j += 1
elif volLeft < 0: # 完成平仓且开新仓
if volOpen > 0:
df.loc[df.index[j], 'BuySelVol'] = volLeft
vol__ = vol_ - volLeft
valLiqs += df[col_price_sel].iloc[j] * vol__ * (1-fee)
elif volOpen < 0:
df.loc[df.index[j], 'BuySelVol'] = -volLeft
vol__ = vol_ + volLeft
valLiqs += df[col_price_buy].iloc[j] * vol__ * (1+fee)
if costOpen + valLiqs > 0:
n_loss += 1
Vloss += (costOpen + valLiqs)
pct = cal_gain_pct(costOpen, -valLiqs, pct_cost0=0)
PctLoss += abs(pct)
maxloss = min(pct, maxloss)
maxgain = max(pct, maxgain)
if volOpen > 0:
valMax = costOpen * (1 + maxgain)
elif volOpen < 0:
valMax = costOpen * (1 - maxgain)
maxdown = abs(cal_gain_pct(valMax, -valLiqs, pct_cost0=0))
else:
n_gain += 1
Vgain += abs(costOpen + valLiqs)
PctGain += cal_gain_pct(costOpen, -valLiqs, pct_cost0=0)
hold_times.append(df.loc[df.index[j], 'Tidx']- \
df.loc[df.index[i], 'Tidx'])
else:
j += 1
hold_times_all.append(hold_times)
maxdowns.append(maxdown)
maxlosss.append(maxloss)
i += 1
Mgain = x_div_y(Vgain, n_gain, v_x0=0, v_y0=0, v_xy0=0)
Mloss = x_div_y(Vloss, n_loss, v_x0=0, v_y0=0, v_xy0=0)
Mgain_pct = x_div_y(PctGain, n_gain, v_x0=0, v_y0=0, v_xy0=0)
Mloss_pct = x_div_y(PctLoss, n_loss, v_x0=0, v_y0=0, v_xy0=0)
mean_hold_time1 = sum([sum(x) for x in hold_times_all]) / \
sum([len(x) for x in hold_times_all])
mean_hold_time2 = np.mean([np.mean(x) for x in hold_times_all])
gain_info = {'开仓次数': n_open,
'盈利次数(开仓)': n_gain,
'亏损次数(开仓)': n_loss,
'胜率(开仓)': n_gain / n_open,
'总盈利额(开仓)': Vgain,
'总亏损额(开仓)': Vloss,
'盈利百分比和(开仓)': PctGain,
'亏损百分比和(开仓)': PctLoss,
'平均盈利(开仓)': Mgain,
'平均亏损(开仓)': Mloss,
'平均盈利百分比(开仓)': Mgain_pct,
'平均亏损百分比(开仓)': Mloss_pct,
'盈亏比(开仓)': x_div_y(Mgain, Mloss, v_x0=0, v_y0=np.inf),
'百分比盈亏比(开仓)': x_div_y(Mgain_pct, Mloss_pct,
v_x0=0, v_y0=np.inf),
'百分比平均赢率(开仓)': (PctGain-PctLoss) / n_open,
'平均持仓周期(所有平均)': mean_hold_time1,
'平均持仓周期(每次平均)': mean_hold_time2,
'单笔最大回撤(开仓)': max(maxdowns),
'单笔最大亏损(开仓)': min(maxlosss)}
return gain_info
#%%
def get_yield_curve(data, sig_col, nn=252, ext_type=1,
net_type='fundnet', gain_type='pct', rtype='exp',
show_sigs=True, show_dy_maxdown=False, show_key_infos=True,
logger=None, plot=True, kwargs_plot={'figsize': (11, 7)},
**kwargs_gain):
'''
根据信号生成收益曲线
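    | 除本函数签名中的参数外, \*\*kwargs_gain为 :func:`cal_sig_gains` 可接受参数
    | 用法示例(假设data含close价格列和signal信号列,仅演示调用方式):
    | gain_info, df_gain = get_yield_curve(data, 'signal', nn=252,
    |                                      net_type='fundnet', plot=False,
    |                                      base_money=100000, fee=1.5/1000)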
'''
assert net_type in ['fundnet', 'prod', 'sum']
if not logger is None:
kwargs_gain['logger'] = logger
trade_gain_info, df_gain = cal_sig_gains(data, sig_col, **kwargs_gain)
cols_to_plot = []
for col_key in ['cols_styl_up_left', 'cols_styl_up_right',
'cols_styl_low_left', 'cols_styl_low_right',
'cols_to_label_info', 'xparls_info']:
if col_key in kwargs_plot.keys():
if col_key == 'cols_to_label_info':
for col_name in kwargs_plot[col_key].keys():
cols_to_plot.append(col_name)
for lbl_info in kwargs_plot[col_key][col_name]:
                        cols_to_plot.append(lbl_info[0])
else:
for col_name in kwargs_plot[col_key]:
cols_to_plot.append(col_name)
cols_to_plot = list(set(cols_to_plot))
for col_name in cols_to_plot:
if col_name in df_gain.columns:
logger_show('{}列画图数据被更新!'.format(col_name), logger, 'warn')
continue
df_gain[col_name] = data[col_name]
if 'col_price' in kwargs_gain.keys():
col_price = kwargs_gain['col_price']
else:
col_price = 'close'
if net_type == 'sum':
df_gain['value_mkt'] = \
1 + df_gain[col_price].pct_change().cumsum().fillna(0)
df_gain['AccountValue_maxUsed_net1'] = \
1 + df_gain['AccountValue_maxUsed'].pct_change().cumsum().fillna(0)
else:
df_gain['value_mkt'] = df_gain[col_price] / df_gain[col_price].iloc[0]
df_gain['AccountValue_maxUsed_net1'] = df_gain['AccountValue_maxUsed_net'].copy()
df_gain['转入'] = df_gain['cashNeed']
df_gain['转出'] = 0
df_gain['资产总值'] = df_gain['AccountValue_act']
df_gain['盈亏%'] = df_gain['holdGainPct_cur'] * 100
df_gain = get_gains(df_gain, gain_types=['fundnet', 'prod', 'sum', 'act'])
df_gain['基金净值'] = df_gain['净值'].copy()
if net_type == 'prod':
df_gain['净值'] = df_gain['累乘净值'].copy()
elif net_type == 'sum':
df_gain['净值'] = df_gain['累加净值'].copy()
gain_type = 'pct'
rtype = 'mean'
MDabsV = True if net_type == 'sum' else False
if net_type in ['sum', 'prod']:
# 按百分比算每期盈亏比和每期平均赢率
TotGain = df_gain[df_gain['holdGainPct_cur'] > 0]['holdGainPct_cur'].sum()
TotLoss = abs(df_gain[df_gain['holdGainPct_cur'] < 0]['holdGainPct_cur'].sum())
Nhit = df_gain[df_gain['holdGainPct_cur'] > 0].shape[0]
Nlos = df_gain[df_gain['holdGainPct_cur'] < 0].shape[0]
MeanGain = x_div_y(TotGain, Nhit, v_x0=0, v_y0=0, v_xy0=0)
MeanLoss = x_div_y(TotLoss, Nlos, v_x0=0, v_y0=0, v_xy0=0)
gain_loss_rate_pct = x_div_y(MeanGain, MeanLoss, v_x0=0, v_y0=np.inf)
trade_gain_info.update(
{'盈亏比(百分比每期)': gain_loss_rate_pct,
'平均赢率(百分比每期)': (TotGain-TotLoss) / (Nhit+Nlos)})
# 年化收益
return_ann_maxUsed = cal_returns_period(df_gain['AccountValue_maxUsed_net1'],
gain_type=gain_type, rtype=rtype, nn=nn)
return_ann_net = cal_returns_period(df_gain['净值'], gain_type=gain_type,
rtype=rtype, nn=nn)
return_ann_mkt = cal_returns_period(df_gain['value_mkt'], gain_type=gain_type,
rtype=rtype, nn=nn)
# 夏普
sharpe_maxUsed = cal_sharpe(df_gain['AccountValue_maxUsed_net1'], r=3/100,
nn=nn, gain_type=gain_type, ann_rtype=rtype)
sharpe_net = cal_sharpe(df_gain['净值'], r=3/100, nn=nn,
gain_type=gain_type, ann_rtype=rtype)
sharpe_mkt = cal_sharpe(df_gain['value_mkt'], r=3/100, nn=nn,
gain_type=gain_type, ann_rtype=rtype)
# 最大回撤
maxDown_maxUsed, (strt_idx_maxUsed, end_idx_maxUsed) = get_maxdown(
df_gain['AccountValue_maxUsed_net1'], abs_val=MDabsV)
maxDown_net, (strt_idx_net, end_idx_net) = get_maxdown(df_gain['净值'],
abs_val=MDabsV)
maxDown_mkt, (strt_idx_mkt, end_idx_mkt) = get_maxdown(df_gain['value_mkt'],
abs_val=MDabsV)
ori_index = df_gain.index
df_gain.index = range(0, df_gain.shape[0])
# 回撤标注
df_gain['inMaxDown_net'] = 0
df_gain.loc[df_gain.index[strt_idx_net: end_idx_net+1], 'inMaxDown_net'] = 1
df_gain['inMaxDown_mkt'] = 0
df_gain.loc[df_gain.index[strt_idx_mkt: end_idx_mkt+1], 'inMaxDown_mkt'] = 1
# 动态最大回撤
df_gain['dyMaxDown'] = get_maxdown_dy(df_gain['净值'])
df_gain.index = ori_index
if plot:
plot_series(df_gain,
{'AccountValue_act_net': ('-m', '账户净值(价值/实际最大占用)'),
'AccountValue_maxUsed_net': ('-r', '账户净值(价值/最终最大占用)'),
'基金净值': ('-b', '账户净值(基金份额法)'),
'累加净值': ('-c', '账户净值(累加净值)'),
'累乘净值': ('-y', '账户净值(累乘净值)'),
'value_mkt': ('-k', '市场净值(价格/初始价格)')},
**kwargs_plot)
if show_sigs:
sigs_label_info = {
'value_mkt':
[['act', (-1, 1), ('r^', 'gv'), ('买', '卖')],
['act_stop', (-1.5, 1.5, -0.5, 0.5), ('r*', 'g*', 'mo', 'bo'),
('做空止盈', '做多止盈', '做空止损', '做多止损')]]}
else:
sigs_label_info = {}
conlabel_info = {
'净值':
[['inMaxDown_net', (1, 0), ('-m', '-b'), ('最大回撤区间', '账户净值')]],
'value_mkt':
[['inMaxDown_mkt', (1, 0), ('-m', '-k'), (False, 'market')]]
}
conlabel_info, kwargs_plot = get_update_kwargs('conlabel_info',
conlabel_info, kwargs_plot)
cols_to_label_info, kwargs_plot = get_update_kwargs('cols_to_label_info',
sigs_label_info, kwargs_plot)
cols_styl_up_left = {'value_mkt': ('-w', False),
'净值': ('-w', False)}
cols_styl_up_left, kwargs_plot = get_update_kwargs('cols_styl_up_left',
cols_styl_up_left, kwargs_plot)
if show_dy_maxdown:
cols_styl_up_right = {'dyMaxDown': ('-c', '动态最大回撤',
{'alpha': 0.2})}
cols_to_fill_info = {'dyMaxDown': {'color': 'c', 'alpha': 0.2}}
else:
cols_styl_up_right = {}
cols_to_fill_info = {}
cols_styl_up_right, kwargs_plot = get_update_kwargs('cols_styl_up_right',
cols_styl_up_right, kwargs_plot)
cols_to_fill_info, kwargs_plot = get_update_kwargs('cols_to_fill_info',
cols_to_fill_info, kwargs_plot)
plot_series_conlabel(df_gain, conlabel_info=conlabel_info,
cols_styl_up_left=cols_styl_up_left,
cols_styl_up_right=cols_styl_up_right,
cols_to_label_info=cols_to_label_info,
cols_to_fill_info=cols_to_fill_info,
**kwargs_plot)
gain_stats = pd.DataFrame({
'账户(最大占用)': [return_ann_maxUsed, sharpe_maxUsed, maxDown_maxUsed],
'账户(净值)': [return_ann_net, sharpe_net, maxDown_net],
'市场': [return_ann_mkt, sharpe_mkt, maxDown_mkt]})
gain_stats.index = ['年化收益', '夏普', '最大回撤']
# 超额收益
extr = cal_ext_return_period(df_gain['净值'], df_gain['value_mkt'],
gain_type=gain_type, rtype=rtype,
nn=nn, ext_type=ext_type)
trade_gain_info.update({'年化超额': extr})
trade_gain_info.update({'最大回撤区间长度': df_gain['inMaxDown_net'].sum()})
# alpha,beta
alpha, beta = cal_alpha_beta(df_gain['净值'], df_gain['value_mkt'],
gain_type=gain_type, rtype=rtype,
nn=nn, logger=logger)
trade_gain_info.update({
'年化收益(净值)': gain_stats.loc['年化收益', '账户(净值)'],
'年化收益(最大占用)': gain_stats.loc['年化收益', '账户(最大占用)'],
'年化收益(市场)': gain_stats.loc['年化收益', '市场'],
'夏普(净值)': gain_stats.loc['夏普', '账户(净值)'],
'夏普(最大占用)': gain_stats.loc['夏普', '账户(最大占用)'],
'夏普(市场)': gain_stats.loc['夏普', '市场'],
'最大回撤(净值)': gain_stats.loc['最大回撤', '账户(净值)'],
'最大回撤(最大占用)': gain_stats.loc['最大回撤', '账户(最大占用)'],
'最大回撤(市场)': gain_stats.loc['最大回撤', '市场'],
'alpha': alpha,
'beta': beta
})
if show_key_infos:
_print_key_infos(trade_gain_info, net_type=net_type)
return trade_gain_info, df_gain
#%%
def _print_key_infos(trade_gain_info, net_type='fundnet'):
print('年化收益率:{};'.format(round(trade_gain_info['年化收益(净值)'], 4)) + \
'年化收益率(市场):{}'.format(round(trade_gain_info['年化收益(市场)'], 4)))
print('年化收益率(超额):{}'.format(round(trade_gain_info['年化超额'], 4)))
print('最大回撤:{};'.format(round(trade_gain_info['最大回撤(净值)'], 4)) + \
'最大回撤(市场):{}'.format(round(trade_gain_info['最大回撤(市场)'], 4)))
print('胜率(平):{};'.format(round(trade_gain_info['胜率(平仓或减仓)'], 4)) + \
'胜率(开):{}'.format(round(trade_gain_info['胜率(开仓)'], 4)))
if net_type in ['prod', 'sum']:
print('盈亏比(平): {};'.format(round(trade_gain_info['百分比盈亏比(平仓或减仓)'], 4)) + \
'平均赢率:{}'.format(round(trade_gain_info['百分比平均赢率(总交易次数)'], 4)))
else:
print('盈亏比(平): {};'.format(round(trade_gain_info['盈亏比(平仓)'], 4)) + \
'平均赢率:{}'.format(round(trade_gain_info['平均赢率(总交易次数)'], 4)))
print('夏普比率:{};'.format(round(trade_gain_info['夏普(净值)'], 4)) + \
'夏普比率(市场):{}'.format(round(trade_gain_info['夏普(市场)'], 4)))
print('单笔最大回撤(平): {};'.format(round(trade_gain_info['单笔最大回撤(平仓或减仓)'], 4)) + \
'单笔最大亏损(平):{}'.format(round(trade_gain_info['单笔最大亏损(平仓或减仓)'], 4)))
print('最大回撤区间长度: {}'.format(round(trade_gain_info['最大回撤区间长度'], 4)))
print('平均持仓周期: {};'.format(round(trade_gain_info['平均持仓周期(所有平均)'], 4)) + \
'开仓频率: {}'.format(round(trade_gain_info['开仓频率'], 4)))
#%%
def get_yield_curve2(data, col_gain, col_cost, col_price=None, nn=252,
net_type='fundnet', gain_type='pct', rtype='exp',
ext_type=1, show_mkt=False, logger=None,
show_dy_maxdown=False, show_key_infos=True, plot=True,
kwargs_plot={}):
'''根据每期收益和成本/资金占用计算收益曲线'''
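    # 用法示例(假设的数据与列名,仅作示意):
    # df = pd.DataFrame({'gain': [100, -50, 200], 'cost': [10000, 10000, 10000]})
    # gain_info, df_curve = get_yield_curve2(df, 'gain', 'cost', plot=False)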
assert net_type in ['fundnet', 'prod', 'sum']
# 价格列检查
if isnull(col_price) and \
(show_mkt or (ext_type is not False and not isnull(ext_type))):
logger_show('未识别价格列,收益曲线无法与市场基准比较!', logger, 'warn')
if isnull(col_price):
show_mkt = False
df = data.reindex(columns=[col_gain, col_cost])
else:
df = data.reindex(columns=[col_gain, col_cost, col_price])
df[col_gain] = df[col_gain].fillna(0)
df[col_cost] = df[col_cost].fillna(0)
ori_idx = df.index
df.index = range(0, df.shape[0])
# 净值及收益计算
df['转入'] = 0
df.loc[df.index[0], '转入'] = df[col_cost].iloc[0]
df['资产总值'] = 0
df.loc[df.index[0], '资产总值'] = df['转入'].iloc[0] + df[col_gain].iloc[0]
for k in range(1, df.shape[0]):
if df['资产总值'].iloc[k-1] >= df[col_cost].iloc[k]:
df.loc[df.index[k], '转入'] = 0
else:
df.loc[df.index[k], '转入'] = df[col_cost].iloc[k] - \
df['资产总值'].iloc[k-1]
df.loc[df.index[k], '资产总值'] = df['转入'].iloc[k] + \
df['资产总值'].iloc[k-1] + df[col_gain].iloc[k]
df['转出'] = 0
df['GainPct'] = df[[col_gain, col_cost]].apply(lambda x:
x_div_y(x[col_gain], x[col_cost], v_x0=0, v_y0=0, v_xy0=0),
axis=1)
df['盈亏%'] = df['GainPct'] * 100
df = get_gains(df, gain_types=['fundnet', 'prod', 'sum', 'act'])
df['基金净值'] = df['净值'].copy()
if net_type == 'prod':
df['净值'] = df['累乘净值'].copy()
elif net_type == 'sum':
df['净值'] = df['累加净值']
gain_type = 'pct'
rtype = 'mean'
MDabsV = True if net_type == 'sum' else False
df['cashUsedMax'] = df['转入'].cumsum() # 实际最大资金占用
df['AccountValue_act_net'] = df['资产总值'] / df['cashUsedMax']
cashUsedMax = df['cashUsedMax'].iloc[-1]
df['AccountValue_maxUsed'] = df[col_gain].cumsum() + cashUsedMax
df['AccountValue_maxUsed_net'] = df['AccountValue_maxUsed'] / cashUsedMax
if net_type == 'sum':
df['AccountValue_maxUsed_net1'] = \
1 + df['AccountValue_maxUsed'].pct_change().cumsum().fillna(0)
else:
df['AccountValue_maxUsed_net1'] = df['AccountValue_maxUsed_net'].copy()
# 年化收益
return_ann = cal_returns_period(df['净值'], nn=nn,
gain_type=gain_type, rtype=rtype)
return_ann_maxUsed = cal_returns_period(df['AccountValue_maxUsed_net1'],
nn=nn, gain_type=gain_type, rtype=rtype)
# 夏普
sharpe = cal_sharpe(df['净值'], r=3/100, nn=nn,
gain_type=gain_type, ann_rtype=rtype)
sharpe_maxUsed = cal_sharpe(df['AccountValue_maxUsed_net1'], r=3/100,
nn=nn, gain_type=gain_type, ann_rtype=rtype)
# 最大回撤
maxDown, (strt_idx, end_idx) = get_maxdown(df['净值'], abs_val=MDabsV)
df['inMaxDown'] = 0
df.loc[df.index[strt_idx: end_idx+1], 'inMaxDown'] = 1
maxDown_maxUsed, (strt_idx_maxUsed, end_idx_maxUsed) = get_maxdown(
df['AccountValue_maxUsed_net1'], abs_val=MDabsV)
# 动态最大回撤
df['dyMaxDown'] = get_maxdown_dy(df['净值'])
if not isnull(col_price):
df['value_mkt'] = df[col_price] / df[col_price].iloc[0]
if net_type == 'sum':
df['value_mkt'] = \
1 + df[col_price].pct_change().cumsum().fillna(0)
return_ann_mkt = cal_returns_period(df['value_mkt'], nn=nn,
gain_type=gain_type, rtype=rtype)
sharpe_mkt = cal_sharpe(df['value_mkt'], r=3/100, nn=nn,
gain_type=gain_type, ann_rtype=rtype)
maxDown_mkt, (strt_idx_mkt, end_idx_mkt) = get_maxdown(df['value_mkt'],
abs_val=MDabsV)
df['inMaxDown_mkt'] = 0
df.loc[df.index[strt_idx_mkt: end_idx_mkt+1], 'inMaxDown_mkt'] = 1
extr = cal_ext_return_period(df['净值'], df['value_mkt'],
gain_type=gain_type, rtype=rtype,
nn=nn, ext_type=ext_type)
else:
return_ann_mkt = np.nan
sharpe_mkt = np.nan
maxDown_mkt = np.nan
extr = np.nan
Nhit = df[(df[col_cost] != 0) & (df[col_gain] >= 0)].shape[0]
Nlos = df[(df[col_cost] != 0) & (df[col_gain] < 0)].shape[0]
Ntot = Nhit + Nlos
hit_rate = Nhit / Ntot
sumGain = df[df[col_gain] > 0][col_gain].sum()
sumLoss = abs(df[df[col_gain] < 0][col_gain].sum())
if net_type in ['prod', 'sum']:
TotGain = df[df[col_gain] > 0]['GainPct'].sum()
TotLoss = abs(df[df[col_gain] < 0]['GainPct'].sum())
else:
TotGain = sumGain
TotLoss = sumLoss
MeanGain = x_div_y(TotGain, Nhit, v_x0=0, v_y0=0, v_xy0=0)
MeanLoss = x_div_y(TotLoss, Nlos, v_x0=0, v_y0=0, v_xy0=0)
gain_loss_rate = x_div_y(MeanGain, MeanLoss, v_x0=0, v_y0=np.inf)
maxUsed = df['转入'].sum()
finalGain = df[col_gain].sum()
if net_type in ['prod', 'sum']:
meanGainPct = df['GainPct'].sum() / Ntot
else:
meanGainPct = df[col_gain].sum() / df['转入'].sum() / Ntot
gain_info = {'年化收益': return_ann,
'夏普': sharpe,
'最大回撤': maxDown,
'年化收益(市场)': return_ann_mkt,
'夏普(市场)': sharpe_mkt,
'最大回撤(市场)': maxDown_mkt,
'年化收益(最大占用)': return_ann_maxUsed,
'夏普(最大占用)': sharpe_maxUsed,
'最大回撤(最大占用)': maxDown_maxUsed,
'年化超额': extr,
'胜率': hit_rate,
'盈亏比': gain_loss_rate,
'平均赢率': meanGainPct,
'最大回撤区间长度': df['inMaxDown'].sum(),
'收益/最大占用比': finalGain / maxUsed,
'总收益': finalGain,
'最大占用': maxUsed,
'总(交易)期数': Ntot,
'盈利期数': Nhit,
'亏损期数': Nlos,
'总盈利额': sumGain,
'总亏损额': sumLoss,
'平均赢率额': x_div_y(sumGain, Nhit, v_x0=0, v_y0=0, v_xy0=0),
'平均亏损额': x_div_y(sumLoss, Nlos, v_x0=0, v_y0=0, v_xy0=0),
'单期最大亏损': df['GainPct'].min()}
df.index = ori_idx
if plot:
cols_styl_up_left = \
{'AccountValue_act_net': ('-m', '账户净值(价值/实际最大占用)'),
'AccountValue_maxUsed_net': ('-r', '账户净值(价值/最终最大占用)'),
'基金净值': ('-b', '账户净值(基金份额法)'),
'累加净值': ('-c', '账户净值(累加净值)'),
'累乘净值': ('-y', '账户净值(累乘净值)')}
if not isnull(col_price):
cols_styl_up_left.update(
{'value_mkt': ('-k', '市场净值(价格/初始价格)')})
plot_series(df, cols_styl_up_left, **kwargs_plot)
cols_to_plot = []
for col_key in ['cols_styl_up_left', 'cols_styl_up_right',
'cols_styl_low_left', 'cols_styl_low_right',
'cols_to_label_info', 'xparls_info']:
if col_key in kwargs_plot.keys():
if col_key == 'cols_to_label_info':
for col_name in kwargs_plot[col_key].keys():
cols_to_plot.append(col_name)
for lbl_info in kwargs_plot[col_key][col_name]:
                            cols_to_plot.append(lbl_info[0])
else:
for col_name in kwargs_plot[col_key]:
cols_to_plot.append(col_name)
cols_to_plot = list(set(cols_to_plot))
for col_name in cols_to_plot:
if col_name in df.columns:
logger_show('{}列画图数据被更新!'.format(col_name), logger, 'warn')
continue
df[col_name] = data[col_name]
conlabel_info = {'净值':
[['inMaxDown', (1, 0), ('-m', '-b'), ('最大回撤区间', '账户净值')]]}
if not isnull(col_price) and show_mkt:
conlabel_info.update({'value_mkt':
[['inMaxDown_mkt', (1, 0), ('-m', '-k'), (False, 'market')]]})
conlabel_info, kwargs_plot = get_update_kwargs(
'conlabel_info', conlabel_info, kwargs_plot)
cols_styl_up_left = {'净值': ('-w', False)}
if not isnull(col_price) and show_mkt:
cols_styl_up_left.update({'value_mkt': ('-w', False)})
cols_styl_up_left, kwargs_plot = get_update_kwargs('cols_styl_up_left',
cols_styl_up_left, kwargs_plot)
cols_styl_up_right = {}
cols_to_fill_info = {}
if show_dy_maxdown:
cols_styl_up_right.update({'dyMaxDown': ('-c', '动态最大回撤',
{'alpha': 0.2})})
cols_to_fill_info.update({'dyMaxDown': {'color': 'c', 'alpha': 0.2}})
cols_styl_up_right, kwargs_plot = get_update_kwargs('cols_styl_up_right',
cols_styl_up_right, kwargs_plot)
cols_to_fill_info, kwargs_plot = get_update_kwargs('cols_to_fill_info',
cols_to_fill_info, kwargs_plot)
plot_series_conlabel(df, conlabel_info=conlabel_info,
cols_styl_up_left=cols_styl_up_left,
cols_styl_up_right=cols_styl_up_right,
cols_to_fill_info=cols_to_fill_info,
**kwargs_plot)
if show_key_infos:
if not isnull(col_price) and show_mkt:
print('年化收益率:{};'.format(round(gain_info['年化收益'], 4)) + \
'年化收益率(市场):{}'.format(round(gain_info['年化收益(市场)'], 4)))
print('年化收益率(超额):{}'.format(round(gain_info['年化超额'], 4)))
print('最大回撤:{};'.format(round(gain_info['最大回撤'], 4)) + \
'最大回撤(市场):{}'.format(round(gain_info['最大回撤(市场)'], 4)))
print('胜率:{};'.format(round(gain_info['胜率'], 4)) + \
'盈亏比: {}'.format(round(gain_info['盈亏比'], 4)))
print('平均赢率:{}'.format(round(gain_info['平均赢率'], 4)))
print('夏普比率:{};'.format(round(gain_info['夏普'], 4)) + \
'夏普比率(市场):{}'.format(round(gain_info['夏普(市场)'], 4)))
print('最大回撤区间长度: {}'.format(round(gain_info['最大回撤区间长度'], 4)))
else:
print('年化收益率:{}'.format(round(gain_info['年化收益'], 4)))
print('最大回撤:{}'.format(round(gain_info['最大回撤'], 4)))
print('胜率:{};'.format(round(gain_info['胜率'], 4)) + \
'盈亏比: {}'.format(round(gain_info['盈亏比'], 4)))
print('平均赢率:{}'.format(round(gain_info['平均赢率'], 4)))
print('夏普比率:{}'.format(round(gain_info['夏普'], 4)))
print('最大回撤区间长度: {}'.format(round(gain_info['最大回撤区间长度'], 4)))
return gain_info, df
#%%
if __name__ == '__main__':
import time
from pprint import pprint
from dramkit import load_csv
strt_tm = time.time()
#%%
    # Max drawdown test
values = [1.0, 1.01, 1.05, 1.1, 1.11, 1.07, 1.03, 1.03, 1.01, 1.02, 1.04,
1.05, 1.07, 1.06, 1.05, 1.06, 1.07, 1.09, 1.12, 1.18, 1.15,
1.15, 1.18, 1.16, 1.19, 1.17, 1.17, 1.18, 1.19, 1.23, 1.24,
1.25, 1.24, 1.25, 1.24, 1.25, 1.24, 1.25, 1.24, 1.27, 1.23,
1.22, 1.18, 1.2, 1.22, 1.25, 1.25, 1.27, 1.26, 1.31, 1.32, 1.31,
1.33, 1.33, 1.36, 1.33, 1.35, 1.38, 1.4, 1.42, 1.45, 1.43, 1.46,
1.48, 1.52, 1.53, 1.52, 1.55, 1.54, 1.53, 1.55, 1.54, 1.52,
1.53, 1.53, 1.5, 1.45, 1.43, 1.42, 1.41, 1.43, 1.42, 1.45, 1.45,
1.49, 1.49, 1.51, 1.54, 1.53, 1.56, 1.52, 1.53, 1.58, 1.58,
1.58, 1.61, 1.63, 1.61, 1.59]
data = | pd.DataFrame(values, columns=['values']) | pandas.DataFrame |
import time
import threading
import argparse
import tushare as ts
import numpy as np
import pandas as pd
from datetime import datetime as dt
from tqdm import tqdm
from utils import *
with open('../../tushare_token.txt', 'r') as f:
token = f.readline()
ts.set_token(token)
tushare_api = ts.pro_api()
# Concept classification table
df_all = tushare_api.concept(src='ts')
# Concept constituent details table
df = pd.DataFrame()
for code in tqdm(df_all['code'].values):
df_i = safe_get(
tushare_api.concept_detail,
id=code,
fields='id, concept_name, ts_code, name, in_date, out_date'
)
df_i = df_i.drop_duplicates()
df_i.insert(0, 'code', [c[:6] for c in df_i['ts_code']])
df = df.append(df_i)
df = df.reset_index(drop=True)
df.to_csv('../../data/financial_statements/'
'concept_details_sheet_by_concept.csv', index=False)
# Stock list
df_list = []
for list_status in ['L', 'D', 'P']:
df_i = tushare_api.stock_basic(
exchange='',
list_status=list_status,
fields='ts_code')
df_list.append(df_i)
df_all = pd.concat(df_list)
# Concept constituent details table
interval = 0.16
df = | pd.DataFrame() | pandas.DataFrame |
from pandas import DataFrame
import numpy as np
import nltk
from collections import Counter
from collections import OrderedDict
from sklearn.feature_extraction.text import TfidfVectorizer
def extract_sim_words(model, brand, result_path, freq_dist, min_count, save=True, topn=20):
df = DataFrame(columns=[['word', 'sim', 'freq']])
result = model.most_similar([model.docvecs[brand]], topn=topn)
if save:
for tup in result:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
df.to_csv(result_path + 'keywords/' + brand + "_sim_words.csv", index=False)
return
else:
for tup in result:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
return df
def extract_sim_brand(model, brand, result_path, save=True, topn=20):
df = DataFrame(columns=[['word', 'sim']])
result = model.docvecs.most_similar(brand, topn=topn)
if save:
for tup in result:
df.loc[len(df)] = [tup[0], tup[1]]
df.to_csv(result_path + 'keywords/' + brand + "_sim_brands.csv", index=False)
return
else:
for tup in result:
df.loc[len(df)] = [tup[0], tup[1]]
return df
def cal_mean_cluster(df_result, cluster_idx, doc2vec_model, group_name='Cluster'):
df = df_result[df_result[group_name] == cluster_idx]
names = list(df['Name'].unique())
all_arr = np.zeros((doc2vec_model.vector_size, len(names)))
for index, name in enumerate(names):
all_arr[:, index] = doc2vec_model.docvecs[name]
return all_arr.mean(axis=1)
def print_result(vector, model, freq_dist, min_count, topn=50):
df = | DataFrame(columns=[['word','cos','freq']]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.cm as cm
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import matplotlib.transforms as transforms
import matplotlib.colors as colors
import os
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##----------------SECTION ONE: HOURLY AND MONTHLY AVERAGES--------------------##
#------------------------------------------------------------------------------
# Code motivation --------------------------------------------------------------
"""
Program to obtain the hourly and monthly averages of radiation and power
within the recording period of the panels.
"""
################################################################################
## ---------LIMITING THE DATES BY DAY AND MONTH TO SELECT THE DATA----------- ##
################################################################################
fi_m = 3
fi_d = 23
ff_m = 12
ff_d = 20
##############################################################################
## ---------------------READING THE EXPERIMENT DATA------------------------ ##
##############################################################################
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
def lectura_datos_piranometro(df):
df['Fecha_hora'] = df.index
df.index = pd.to_datetime(df.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
    ## ------------------LIMITING THE DATA TO VALID VALUES------------------- ##
df = df[df['radiacion'] > 0]
df = df[(df['NI'] >= 0) & (df['strength'] >= 0)& (df['strength'] <= 100)]
    ## ---------------------FILTERING THE DATA BY QUALITY-------------------- ##
if 'calidad' in df.columns:
df = df[df['calidad']<100]
df = df.between_time('06:00', '17:00')
    ## ----------------------GROUPING THE DATA BY HOUR----------------------- ##
df_h = df.groupby(pd.Grouper(freq="H")).mean()
df_h = df_h.between_time('06:00', '17:00')
return df_h, df
df_P975_h, df_P975 = lectura_datos_piranometro(df_P975)
df_P350_h, df_P350 = lectura_datos_piranometro(df_P350)
df_P348_h, df_P348 = lectura_datos_piranometro(df_P348)
df_P975_h = df_P975_h[(df_P975_h.index.date >= pd.to_datetime('2019-'+str(fi_m)+ '-' +str(fi_d)).date()) & (df_P975_h.index.date <= pd.to_datetime('2019-'+str(ff_m)+ '-'+str(ff_d)).date())]
df_P350_h = df_P350_h[(df_P350_h.index.date >= pd.to_datetime('2019-'+str(fi_m)+ '-' +str(fi_d)).date()) & (df_P350_h.index.date <= pd.to_datetime('2019-'+str(ff_m)+ '-'+str(ff_d)).date())]
df_P348_h = df_P348_h[(df_P348_h.index.date >= pd.to_datetime('2019-'+str(fi_m)+ '-' +str(fi_d)).date()) & (df_P348_h.index.date <= pd.to_datetime('2019-'+str(ff_m)+ '-'+str(ff_d)).date())]
##############################################################################
## ---------------TEMPORAL STANDARD DEVIATIONS AND AVERAGES---------------- ##
##############################################################################
df_P975_season_mean = df_P975.groupby([df_P975.index.month, df_P975.index.hour]).mean()
df_P350_season_mean = df_P350.groupby([df_P350.index.month, df_P350.index.hour]).mean()
df_P348_season_mean = df_P348.groupby([df_P348.index.month, df_P348.index.hour]).mean()
df_P975_season_std = df_P975.groupby([df_P975.index.month, df_P975.index.hour]).std()
df_P350_season_std = df_P350.groupby([df_P350.index.month, df_P350.index.hour]).std()
df_P348_season_std = df_P348.groupby([df_P348.index.month, df_P348.index.hour]).std()
##############################################################################
## ----------PLOT OF THE TEMPORAL STANDARD DEVIATIONS AND AVERAGES--------- ##
##############################################################################
Hour = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
Month = [ 'Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun', 'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic']
colors_list = ['#800000', '#e6194B', '#f58231', '#9A6324', '#bfef45', '#3cb44b', '#42d4f4', '#469990', '#000075', '#4363d8', '#911eb4', '#f032e6']
plt.close('all')
fig = plt.figure(figsize=(13,23))
ax1 = fig.add_subplot(3, 2, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P350_season_mean.iloc[df_P350_season_mean.index.get_level_values(0) == i].radiacion.values
ax1.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax1.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax1.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax1.set_ylabel(u"$[W/m^{2}]$", fontproperties = prop_1, fontsize=20)
ax1.set_xticks(range(6, 18), minor=False)
ax1.set_xticklabels(Hour, minor=False, rotation = 20)
ax1.set_ylim(0, 1000)
ax1.set_title(u'Promedios horarios de radiacion \n por cada mes en el Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax2 = fig.add_subplot(3, 2, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P350_season_std.iloc[df_P350_season_std.index.get_level_values(0) == i].radiacion.values
ax2.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax2.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax2.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax2.set_ylabel(u"$[W/m^{2}]$", fontproperties = prop_1, fontsize=20)
ax2.set_xticks(range(6, 18), minor=False)
ax2.set_xticklabels(Hour, minor=False, rotation = 20)
ax2.set_ylim(0, 600)
ax2.set_title(u'Desviaciones estándar de radiacion \n por cada mes en el Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax3 = fig.add_subplot(3, 2, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P975_season_mean.iloc[df_P975_season_mean.index.get_level_values(0) == i].radiacion.values
ax3.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax3.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax3.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax3.set_ylabel(u"$[W/m^{2}]$", fontproperties = prop_1, fontsize=20)
ax3.set_xticks(range(6, 18), minor=False)
ax3.set_xticklabels(Hour, minor=False, rotation = 20)
ax3.set_ylim(0, 1000)
ax3.set_title(u'Promedios horarios de radiacion \n por cada mes en el Centro-Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax4 = fig.add_subplot(3, 2, 4)
ax4.spines['top'].set_visible(False)
ax4.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P975_season_std.iloc[df_P975_season_std.index.get_level_values(0) == i].radiacion.values
ax4.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax4.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax4.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax4.set_ylabel(u"$[W/m^{2}]$", fontproperties = prop_1, fontsize=20)
ax4.set_xticks(range(6, 18), minor=False)
ax4.set_xticklabels(Hour, minor=False, rotation = 20)
ax4.set_ylim(0, 600)
ax4.set_title(u'Desviaciones estándar de radiacion \n por cada mes en el Centro-Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax5 = fig.add_subplot(3, 2, 5)
ax5.spines['top'].set_visible(False)
ax5.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P348_season_mean.iloc[df_P348_season_mean.index.get_level_values(0) == i].radiacion.values
ax5.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax5.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax5.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax5.set_ylabel(u"$[W/m^{2}]$", fontproperties = prop_1, fontsize=20)
ax5.set_xticks(range(6, 18), minor=False)
ax5.set_xticklabels(Hour, minor=False, rotation = 20)
ax5.set_ylim(0, 1000)
ax5.set_title(u'Promedios horarios de radiacion \n por cada mes en el Este', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax6 = fig.add_subplot(3, 2, 6)
ax6.spines['top'].set_visible(False)
ax6.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P348_season_std.iloc[df_P348_season_std.index.get_level_values(0) == i].radiacion.values
ax6.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax6.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax6.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax6.set_ylabel(u"$[W/m^{2}]$", fontproperties = prop_1, fontsize=20)
ax6.set_xticks(range(6, 18), minor=False)
ax6.set_xticklabels(Hour, minor=False, rotation = 20)
ax6.set_ylim(0, 600)
ax6.set_title(u'Desviaciones estándar de radiacion \n por cada mes en el Este', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
plt.subplots_adjust(left=0.125, bottom=0.085, right=0.9, top=0.95, wspace=0.2, hspace=0.25)
plt.savefig('/home/nacorreasa/Escritorio/Figuras/PromDesv_radiacion.pdf', format='pdf', transparent=True)
os.system('scp /home/nacorreasa/Escritorio/Figuras/PromDesv_radiacion.pdf [email protected]:/var/www/nacorreasa/Graficas_Resultados/Estudio')
Hour = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
Month = [ 'Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun', 'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic']
colors_list = ['#800000', '#e6194B', '#f58231', '#9A6324', '#bfef45', '#3cb44b', '#42d4f4', '#469990', '#000075', '#4363d8', '#911eb4', '#f032e6']
plt.close('all')
fig = plt.figure(figsize=(13,23))
ax1 = fig.add_subplot(3, 2, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P350_season_mean.iloc[df_P350_season_mean.index.get_level_values(0) == i].strength.values
ax1.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax1.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax1.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax1.set_ylabel(u"$[W]$", fontproperties = prop_1, fontsize=20)
ax1.set_xticks(range(6, 18), minor=False)
ax1.set_xticklabels(Hour, minor=False, rotation = 20)
ax1.set_ylim(0, 80)
ax1.set_title(u'Promedios horarios de potencia \n por cada mes en el Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax2 = fig.add_subplot(3, 2, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P350_season_std.iloc[df_P350_season_std.index.get_level_values(0) == i].strength.values
ax2.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax2.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax2.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax2.set_ylabel(u"$[W]$", fontproperties = prop_1, fontsize=20)
ax2.set_xticks(range(6, 18), minor=False)
ax2.set_xticklabels(Hour, minor=False, rotation = 20)
ax2.set_ylim(0, 50)
ax2.set_title(u'Desviaciones estándar de potencia \n por cada mes en el Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax3 = fig.add_subplot(3, 2, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P975_season_mean.iloc[df_P975_season_mean.index.get_level_values(0) == i].strength.values
ax3.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax3.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax3.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax3.set_ylabel(u"$[W]$", fontproperties = prop_1, fontsize=20)
ax3.set_xticks(range(6, 18), minor=False)
ax3.set_xticklabels(Hour, minor=False, rotation = 20)
ax3.set_ylim(0, 80)
ax3.set_title(u'Promedios horarios de potencia \n por cada mes en el Centro-Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax4 = fig.add_subplot(3, 2, 4)
ax4.spines['top'].set_visible(False)
ax4.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P975_season_std.iloc[df_P975_season_std.index.get_level_values(0) == i].strength.values
ax4.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax4.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax4.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax4.set_ylabel(u"$[W]$", fontproperties = prop_1, fontsize=20)
ax4.set_xticks(range(6, 18), minor=False)
ax4.set_xticklabels(Hour, minor=False, rotation = 20)
ax4.set_ylim(0, 50)
ax4.set_title(u'Desviaciones estándar de potencia \n por cada mes en el Centro-Oeste', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax5 = fig.add_subplot(3, 2, 5)
ax5.spines['top'].set_visible(False)
ax5.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P348_season_mean.iloc[df_P348_season_mean.index.get_level_values(0) == i].strength.values
ax5.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax5.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax5.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax5.set_ylabel(u"$[W]$", fontproperties = prop_1, fontsize=20)
ax5.set_xticks(range(6, 18), minor=False)
ax5.set_xticklabels(Hour, minor=False, rotation = 20)
ax5.set_ylim(0, 80)
ax5.set_title(u'Promedios horarios de potencia \n por cada mes en el Este', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
ax6 = fig.add_subplot(3, 2, 6)
ax6.spines['top'].set_visible(False)
ax6.spines['right'].set_visible(False)
for i in range(3,len(Hour)+1):
serie = df_P348_season_std.iloc[df_P348_season_std.index.get_level_values(0) == i].strength.values
ax6.plot(Hour, serie, color = colors_list[i-1], lw=1.5, label = Month[i-1])
ax6.scatter(Hour, serie, marker='.', c = colors_list[i-1], s=30)
ax6.set_xlabel('Horas del dia', fontproperties = prop_1, fontsize=17)
ax6.set_ylabel(u"$[W]$", fontproperties = prop_1, fontsize=20)
ax6.set_xticks(range(6, 18), minor=False)
ax6.set_xticklabels(Hour, minor=False, rotation = 20)
ax6.set_ylim(0, 50)
ax6.set_title(u'Desviaciones estándar de potencia \n por cada mes en el Este', loc = 'center', fontproperties = prop, fontsize=18)
plt.legend()
plt.subplots_adjust(left=0.125, bottom=0.085, right=0.9, top=0.95, wspace=0.2, hspace=0.25)
plt.savefig('/home/nacorreasa/Escritorio/Figuras/PromDesv_potencia.pdf', format='pdf', transparent=True)
os.system('scp /home/nacorreasa/Escritorio/Figuras/PromDesv_potencia.pdf [email protected]:/var/www/nacorreasa/Graficas_Resultados/Estudio')
##------------SECTION TWO: AVERAGES FOR CLEAR AND CLOUDY CASES---------------##
#------------------------------------------------------------------------------
# Code motivation --------------------------------------------------------------
"""
Program to establish the averages of power and radiation under clear and
cloudy conditions.
"""
###############################################################################
## ----------------READING THE THEORETICAL RADIATION DATA------------------- ##
###############################################################################
df_Theoric = pd.read_csv("/home/nacorreasa/Maestria/Datos_Tesis/RadiacionTeorica_DataFrames/df_GIS.csv", sep=',', index_col =0)
df_Theoric.index = | pd.to_datetime(df_Theoric.index, format="%Y-%m-%d %H:%M:%S", errors='coerce') | pandas.to_datetime |
import numpy as np
import pandas as pd
from typing import Tuple
from sklearn.metrics import mean_absolute_percentage_error as fmape, \
mean_squared_error as fmse, \
mean_absolute_error as fmae, \
r2_score as fr2, \
f1_score as ff1, \
precision_score as fprecision, \
recall_score as frecall, \
accuracy_score as faccuracy, \
roc_auc_score as fauroc, \
confusion_matrix, cohen_kappa_score
def tunar_hiperparams(
tipo_modelo,
selec_hiperparams: Tuple[pd.Index],
X_train: np.array or pd.DataFrame or pd.Series,
X_test: np.array or pd.DataFrame or pd.Series,
y_train: np.array or pd.Series,
y_test: np.array or pd.Series,
fixed_hiperparams: dict or None = None,
tipo_dado: str = 'quantitativo',
metricas: dict or list or None = None,
scaler = None,
func_metricas_kwargs: dict = dict()
) -> pd.DataFrame:
if metricas is None and tipo_dado not in [ 'quantitativo', 'qualitativo']:
raise NameError("'tipo_dado' deve ser 'qualitativo' ou 'quantitativo'.")
if scaler is not None:
sc = scaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
if fixed_hiperparams is None:
fixed_hiperparams = dict()
# create test cases
comb_idx = | pd.MultiIndex.from_product(selec_hiperparams) | pandas.MultiIndex.from_product |
import operator
from typing import Optional, Dict, Tuple, List, Union
import pandas as pd
import pytest
import sklearn
import numpy as np
import plotly.graph_objs as go
from trelawney.base_explainer import BaseExplainer
class FakeExplainer(BaseExplainer):
def fit(self, model: sklearn.base.BaseEstimator, x_train: pd.DataFrame, y_train: pd.DataFrame):
return super().fit(model, x_train, y_train)
@staticmethod
def _regularize(importance_dict: List[Tuple[str, float]]) -> List[Tuple[str, float]]:
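        # Normalize each importance by the grand total and alternate the sign
        # (+ for even indices, - for odd ones) so the fake explainer returns
        # signed importances whose absolute values sum to 1.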
total = sum(map(operator.itemgetter(1), importance_dict))
return [
(key, -(2 * (i % 2) - 1) * (value / total))
for i, (key, value) in enumerate(importance_dict)
]
def feature_importance(self, x_explain: pd.DataFrame, n_cols: Optional[int] = None):
importance = self._regularize(sorted(
((col, np.mean(np.abs(x_explain.loc[:, col]))) for col in x_explain.columns),
key=operator.itemgetter(1),
reverse=True
))
total_mvmt = sum(map(operator.itemgetter(1), importance))
res = dict(importance[:n_cols])
res['rest'] = total_mvmt - sum(res.values())
return res
def explain_local(self, x_explain: pd.DataFrame, n_cols: Optional[int] = None):
res = []
for sample_explanation in x_explain.abs().to_dict(orient='records'):
importance = self._regularize(sorted(sample_explanation.items(), key=operator.itemgetter(1), reverse=True))
total_mvmt = sum(map(operator.itemgetter(1), importance))
res_ind = dict(importance[:n_cols])
res_ind['rest'] = total_mvmt - sum(res_ind.values())
res.append(res_ind)
return res
def _float_error_resilient_compare(left: Union[List[Dict], Dict], right: Union[List[Dict], Dict]):
assert len(left) == len(right)
if isinstance(left, list):
return [_float_error_resilient_compare(ind_left, ind_right) for ind_left, ind_right in zip(left, right)]
for key, value in left.items():
assert key in right
assert abs(value - right[key]) < 0.0001
def test_explainer_basic():
explainer = FakeExplainer()
_float_error_resilient_compare(
explainer.feature_importance(pd.DataFrame([[10, 0], [0, -5]], columns=['var_1', 'var_2'])),
{'var_1': 5 / 7.5, 'var_2': -2.5 / 7.5, 'rest': 0.}
)
_float_error_resilient_compare(
explainer.feature_importance(pd.DataFrame([[10, 0], [0, -5]], columns=['var_1', 'var_2']), n_cols=1),
{'var_1': 5. / 7.5, 'rest': -2.5 / 7.5}
)
_float_error_resilient_compare(
explainer.explain_local(pd.DataFrame([[10, 0], [0, -5]], columns=['var_1', 'var_2'])),
[{'var_1': 1., 'var_2': 0., 'rest': 0.}, {'var_2': 1., 'var_1': 0., 'rest': 0.}]
)
_float_error_resilient_compare(
explainer.explain_local(pd.DataFrame([[10, 0], [0, -5]], columns=['var_1', 'var_2']), n_cols=1),
[{'var_1': 1., 'rest': 0.},{'var_2': 1, 'rest': 0.}]
)
def test_explainer_filter():
explainer = FakeExplainer()
_float_error_resilient_compare(
explainer.filtered_feature_importance(pd.DataFrame(
[[10, 0, 4], [0, -5, 3]], columns=['var_1', 'var_2', 'var_3']), cols=['var_1', 'var_3']
),
{'var_1': 10 / 22, 'var_3': -7 / 22, 'rest': 5 / 22}
)
_float_error_resilient_compare(
explainer.filtered_feature_importance(
pd.DataFrame([[10, 0, 4], [0, -5, 3]], columns=['var_1', 'var_2', 'var_3']), n_cols=1,
cols=['var_1', 'var_3']
),
{'var_1': 10 / 22, 'rest': -2 / 22}
)
_float_error_resilient_compare(
explainer.explain_filtered_local(
| pd.DataFrame([[10, 0, 4], [0, -5, 3]], columns=['var_1', 'var_2', 'var_3']) | pandas.DataFrame |
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
import utils
from challenge.dataset import EXP_TRAIN
from utils.neighbors import k_neighbors_classify, k_neighbors_classify_scores
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', nargs='+', type=str, required=True, help='path to configuration file')
parser.add_argument('-n', '--n_neighbors', nargs='+', type=int, required=False, default=[20, 20, 20, 20],
help='number of neighbors')
parser.add_argument('--use_valid', action='store_true', help='whether to use valid for test predictions')
return parser.parse_args()
def get_group_scores(embeddings_train: np.ndarray,
labels_train: np.ndarray,
groups_train: np.ndarray,
embeddings_test: np.ndarray,
n_neighbors: int) -> np.ndarray:
scores = np.zeros(4, dtype=float)
for group in range(4):
mask = groups_train == group
_, scores_ = k_neighbors_classify(
X_train=embeddings_train[mask],
y_train=labels_train[mask],
X_test=embeddings_test,
n_neighbors=n_neighbors
)
scores[group] = scores_.mean()
return scores
def get_train_group_mapping(root: Path) -> dict:
# Mapping from the first sirna in group to group number
sirna_to_group = {0: 0, 1: 1, 2: 2, 4: 3}
df_train = | pd.read_csv(root / 'train.csv') | pandas.read_csv |
### Data
import pandas as pd
import numpy as np
import pickle
### Graphing
import plotly.graph_objects as go
### Dash
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Output, Input
## Navbar
from navbar import Navbar
from dash import no_update
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# metrics: classification_report, roc_curve
from sklearn.metrics import classification_report,roc_curve
pkl_filename="Hotel_Bookings.pkl"
with open(pkl_filename, 'rb') as file:
df_dummies = pickle.load(file)
X = df_dummies.drop(columns=['is_canceled'])
X = X.drop(columns=['arrival_date_year', 'arrival_date_week_number',
'arrival_date_day_of_month', 'stays_in_weekend_nights',
'stays_in_week_nights', 'is_repeated_guest', 'previous_cancellations',
'previous_bookings_not_canceled', 'booking_changes','required_car_parking_spaces',
'total_of_special_requests', 'total_guest','reservation_status_Canceled','reservation_status_Check-Out',
'reservation_status_No-Show'])
Y = df_dummies['is_canceled'].values
x_train, x_test, y_train, y_test = train_test_split(X,Y,test_size = 0.3,random_state=0)
def generate_table(dataframe, max_rows=10):
return html.Table(
# Header
[html.Tr([html.Th(col) for col in dataframe.columns])] +
# Body
[html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))]
)
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.UNITED,'https://codepen.io/chriddyp/pen/bWLwgP.css'])
server=app.server
#app = dash.Dash(__name__, external_stylesheets=[dbc.themes.UNITED])
app.config.suppress_callback_exceptions = True
nav = Navbar()
header = html.H3(
'Random Forest'
)
header9 = html.H6('Because of space and server constraints, please use one of the parameter combinations given below:')
para = html.P('1- bootstrap=True | max_depth = 100 | max_features = 0.5 | min_samples_leaf = 1 | min_samples_split = 4 | n_estimators=300')
para1 = html.P('2- bootstrap=True | max_depth = 100 | max_features = 0.5 | min_samples_leaf = 3 | min_samples_split = 2 | n_estimators=100')
para2 = html.P('3- bootstrap=True | max_depth = 10 | max_features = 0.5 | min_samples_leaf = 1 | min_samples_split = 2 | n_estimators=100')
para3 = html.P('4- bootstrap=True | max_depth = 10 | max_features = 0.1 | min_samples_leaf = 1 | min_samples_split = 2 | n_estimators=100')
para4 = html.P('5- bootstrap=True | max_depth = 50 | max_features = 1 | min_samples_leaf = 5 | min_samples_split = 8 | n_estimators=200')
dropdown = html.Div([
html.Label('Bootstrap'),
dcc.Dropdown(
id = 'bs',
options = [{'label': 'True', 'value': 'True'},
{'label': 'False', 'value': 'False'}],value='True'
),
html.Label('Max number of features'),
dcc.Dropdown(
id = 'maxf',
options = [{'label': '0.5', 'value': 0.5},{'label': '1.0', 'value': 1}],value=0.5
),
html.Label('Max Depth: '),
dcc.Dropdown(
id = 'maxd',
options = [{'label': '4', 'value': 4},
{'label': '10', 'value': 10},
{'label':'100','value':100}],value=4
),
html.Label('minimum samples leaf: '),
dcc.Dropdown(
id = 'min_samples_leaf',
options = [{'label': '1', 'value': 1},
{'label': '3', 'value': 3}],value=1
),
html.Label('No. of estimators: '),
dcc.Dropdown(
id = 'n_est',
options = [{'label': '100', 'value': 100}],value=100
),
html.Label('Minimum samples split: '),
dcc.Dropdown(
id = 'min_samples_split',
options = [{'label': '2', 'value': 2},
{'label': '4', 'value': 4}],value=2
),
])
output = html.Div(id = 'output',
children = [],
)
header1 = html.H3('Classification Report')
output7 = html.Div(id='rforest')
header2 = html.H3('TPR v/s FPR')
output1 = html.Div([dcc.Graph(id='rfgraph')])
def App():
layout = html.Div([
nav,
header,
header9,
para,
para1,
para2,
para3,
para4,
dropdown,
output,
header1,
output7,
header2,
output1,
])
return layout
def randomForest(bs, maxd, maxf, min_samples_leaf,n_est,min_samples_split):
if bs=='True' and maxd==4 and maxf==0.5 and min_samples_leaf==1 and n_est==100 and min_samples_split==4:
print("1")
pkl1_filename = "rf.pkl"
with open(pkl1_filename, 'rb') as file1:
rf_model = pickle.load(file1)
y_pred = rf_model.predict(x_test)
report = classification_report(y_test, y_pred, output_dict=True)
df = pd.DataFrame(report).transpose()
elif bs=='True' and maxd==100 and maxf==0.5 and min_samples_leaf==3 and n_est==100 and min_samples_split==2:
print("2")
pkl1_filename = "rf1.pkl"
with open(pkl1_filename, 'rb') as file1:
rf_model = pickle.load(file1)
y_pred = rf_model.predict(x_test)
report = classification_report(y_test, y_pred, output_dict=True)
df = pd.DataFrame(report).transpose()
elif bs == 'True' and maxd == 10 and maxf == 0.5 and min_samples_leaf == 1 and n_est == 100 and min_samples_split == 2:
print("3")
pkl1_filename = "rf2.pkl"
with open(pkl1_filename, 'rb') as file1:
rf_model = pickle.load(file1)
y_pred = rf_model.predict(x_test)
report = classification_report(y_test, y_pred, output_dict=True)
df = pd.DataFrame(report).transpose()
elif bs == 'True' and maxd == 10 and maxf == 0.1 and min_samples_leaf == 1 and n_est == 100 and min_samples_split == 2:
print("4")
pkl1_filename = "rf3.pkl"
with open(pkl1_filename, 'rb') as file1:
rf_model = pickle.load(file1)
y_pred = rf_model.predict(x_test)
report = classification_report(y_test, y_pred, output_dict=True)
df = | pd.DataFrame(report) | pandas.DataFrame |
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
def BestNEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate mean forecast for ensemble of models."""
# id_list = list(ensemble_params['models'].keys())
# does it handle missing models well?
# model_indexes = [x for x in forecasts.keys() if x in id_list]
model_count = len(forecasts.keys())
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
ens_df = ens_df + x
ens_df = ens_df / model_count
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in lower_forecasts.items():
ens_df_lower = ens_df_lower + x
ens_df_lower = ens_df_lower / model_count
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in upper_forecasts.items():
ens_df_upper = ens_df_upper + x
ens_df_upper = ens_df_upper / model_count
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
    Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
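# Illustrative usage sketch (not part of the original module); the series names
# and model ids below are hypothetical:
#   df_train = pd.DataFrame(np.random.randn(100, 3),
#                           columns=['series_a', 'series_b', 'series_c'])
#   known = {'series_a': 'model_1', 'series_b': 'model_2'}  # 'series_c' unknown
#   full_map = horizontal_classifier(df_train, known)
#   # full_map now assigns a model id to all three series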
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
        available_models (list): models actually available
        full_models (list): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unavailable models or unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
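# Illustrative sketch (hypothetical ids): series matched to a model that is not
# available are dropped from the known set, and any series left unmatched is
# classified against the models available for every series (full_models).
#   matches = {'series_a': 'model_1', 'series_b': 'model_gone'}
#   all_series = generalize_horizontal(df_train, matches,
#                                      available_models=['model_1', 'model_2'],
#                                      full_models=['model_1'])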
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
# this is meant to fill in any failures
available_models = list(forecasts.keys())
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
    forecast_df = forecast_df.reindex(columns=org_idx)
    u_forecast_df = u_forecast_df.reindex(columns=org_idx)
    l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
ens_runtime = datetime.timedelta(0)
for idx, x in forecasts_runtime.items():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df2 = pd.concat([l_forecast_df2, c_fore], axis=1)
forecast_df = pd.concat(
[forecast_df.head(dist_n), forecast_df2.tail(dist_last)], axis=0
)
u_forecast_df = pd.concat(
[u_forecast_df.head(dist_n), u_forecast_df2.tail(dist_last)], axis=0
)
l_forecast_df = pd.concat(
[l_forecast_df.head(dist_n), l_forecast_df2.tail(dist_last)], axis=0
)
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in list(mod_dic.values()):
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def EnsembleForecast(
ensemble_str,
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Return PredictionObject for given ensemble method."""
s3list = ['best3', 'best3horizontal', 'bestn']
if ensemble_params['model_name'].lower().strip() in s3list:
ens_forecast = BestNEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
if ensemble_params['model_name'].lower().strip() == 'dist':
ens_forecast = DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
hlist = ['horizontal', 'probabilistic']
if ensemble_params['model_name'].lower().strip() in hlist:
ens_forecast = HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
if ensemble_params['model_name'].lower().strip() == 'hdist':
ens_forecast = HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
def EnsembleTemplateGenerator(
initial_results, forecast_length: int = 14, ensemble: str = "simple"
):
"""Generate ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
if 'simple' in ensemble:
ens_temp = initial_results.model_results.drop_duplicates(subset='ID')
ens_temp = ens_temp[ens_temp['Ensemble'] == 0]
# best 3, all can be of same model type
best3nonunique = ens_temp.nsmallest(3, columns=['Score'])
n_models = best3nonunique.shape[0]
if n_models == 3:
ensemble_models = {}
for index, row in best3nonunique.iterrows():
temp_dict = {
'Model': row['Model'],
'ModelParameters': row['ModelParameters'],
'TransformationParameters': row['TransformationParameters'],
}
ensemble_models[row['ID']] = temp_dict
best3nu_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'BestN',
'model_count': n_models,
'model_metric': 'best_score',
'models': ensemble_models,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
best3nu_params = pd.DataFrame(best3nu_params, index=[0])
ensemble_templates = pd.concat([ensemble_templates, best3nu_params], axis=0)
# best 3, by SMAPE, RMSE, SPL
bestsmape = ens_temp.nsmallest(1, columns=['smape_weighted'])
bestrmse = ens_temp.nsmallest(2, columns=['rmse_weighted'])
bestmae = ens_temp.nsmallest(3, columns=['spl_weighted'])
best3metric = pd.concat([bestsmape, bestrmse, bestmae], axis=0)
best3metric = best3metric.drop_duplicates().head(3)
n_models = best3metric.shape[0]
if n_models == 3:
ensemble_models = {}
for index, row in best3metric.iterrows():
temp_dict = {
'Model': row['Model'],
'ModelParameters': row['ModelParameters'],
'TransformationParameters': row['TransformationParameters'],
}
ensemble_models[row['ID']] = temp_dict
best3m_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'BestN',
'model_count': n_models,
'model_metric': 'mixed_metric',
'models': ensemble_models,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
best3m_params = pd.DataFrame(best3m_params, index=[0])
ensemble_templates = | pd.concat([ensemble_templates, best3m_params], axis=0) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from sklearn import preprocessing
# ### About dataset
# This dataset is about past loans. The __Loan_train.csv__ data set includes details of 346 customers whose loans are already paid off or defaulted. It includes the following fields:
#
# | Field | Description |
# |----------------|---------------------------------------------------------------------------------------|
# | Loan_status | Whether a loan is paid off on in collection |
# | Principal | Basic principal loan amount at the |
# | Terms          | Origination terms, which can be a weekly (7 days), biweekly, or monthly payoff schedule |
# | Effective_date | When the loan got originated and took effect |
# | Due_date       | Since it’s a one-time payoff schedule, each loan has one single due date |
# | Age | Age of applicant |
# | Education | Education of applicant |
# | Gender | The gender of applicant |
# ### Load Data From CSV File
# In[ ]:
df = pd.read_csv("../../../input/zhijinzhai_loandata/Loan payments data.csv")
df.head()
# In[ ]:
df.shape
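# A quick, optional sanity check (not part of the original notebook): inspect the
# label field described above, assuming the CSV exposes it as a column named
# `loan_status`:
# In[ ]:
df['loan_status'].value_counts()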
# ### Convert to date time object
# In[ ]:
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = | pd.to_datetime(df['effective_date']) | pandas.to_datetime |
"""
Do volume, salt, and salt-squared budgets for user-specified volumes.
This is a fairly complicated task because it involves coordinating
the net salt and volume time series from flux_[get,lowpass]_s.py,
the TEF transports through any open sections of the volume,
and all the rivers flowing into the volume.
Run with a command like:
run flux_salt_budget
and then it runs for all volumes in vol_list and all years in year_list.
Currently need to do more extractions to support 2018 and 2019.
"""
import os; import sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import tef_fun
import flux_fun
from importlib import reload
reload(flux_fun)
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
from datetime import datetime, timedelta
import argparse
# debugging imports
import netCDF4 as nc
from time import time
import zfun
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gridname', type=str, default='cas6')
parser.add_argument('-t', '--tag', type=str, default='v3')
parser.add_argument('-x', '--ex_name', type=str, default='lo8b')
parser.add_argument('-test', '--testing', type=zfun.boolean_string, default=False)
args = parser.parse_args()
testing = args.testing
old_dt = False
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
gtagex = args.gridname + '_' + args.tag + '_' + args.ex_name
if testing:
year_list = [2019]
vol_list = ['Puget Sound']
save_figs = False
else:
year_list = [2017, 2018, 2019]
vol_list = ['Salish Sea', 'Puget Sound', 'Hood Canal']
save_figs = True
# make DataFrames to hold error statistics
err_df_vol = pd.DataFrame(index=year_list, columns=vol_list)
err_df_salt = pd.DataFrame(index=year_list, columns=vol_list)
err_df_salt2 = pd.DataFrame(index=year_list, columns=vol_list)
plt.close('all')
for which_vol in vol_list:
for year in year_list:
year_str = str(year)
# select input/output location
run_name = gtagex+'_'+year_str+'.01.01_'+year_str+'.12.31'
indir00 = Ldir['LOo'] + 'tef2/'
indir0 = indir00 + run_name + '/'
outdir = indir00 + 'salt_budget_plots/'
Lfun.make_dir(outdir)
# load low passed segment volume, net salt, and other DataFrames
v_lp_df = pd.read_pickle(indir0 + 'flux/daily_segment_volume.p')
sv_lp_df = pd.read_pickle(indir0 + 'flux/daily_segment_net_salt.p')
s2v_lp_df = | pd.read_pickle(indir0 + 'flux/daily_segment_net_salt2.p') | pandas.read_pickle |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = | Period(freq='D', year=2006, month=12, day=31) | pandas.tseries.period.Period |
from datetime import datetime,timedelta
import ntpath
import pytz
import logging
import re
import pandas as pd
import numpy as np
logger = logging.getLogger(__name__)
def is_month_complete(start,end):
if end.month == (end + timedelta(days=1)).month:
return False
if start.day == 1:
return True
else:
return False
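# Illustrative sketch (not part of the original module) of how is_month_complete
# behaves; the dates below are made up purely for the example.
#
#   from datetime import datetime
#   is_month_complete(datetime(2021, 3, 1), datetime(2021, 3, 31))   # True: starts on the 1st, ends on the month's last day
#   is_month_complete(datetime(2021, 3, 5), datetime(2021, 3, 31))   # False: the period does not start on the 1st
#   is_month_complete(datetime(2021, 3, 1), datetime(2021, 3, 30))   # False: March 31 is missing, so the month is not over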
def _clean_grunddaten_from_sheet(sheets):
rechnung_grunddaten = {}
kad = sheets['Kunden-Absender Daten']
rechnung_grunddaten['rechnungsnummer'] = kad[kad['key'] == 'Rechnungsnummer:'].value.item()
rechnung_grunddaten['rahmenvertragsnummer'] = kad[kad['key'] == 'Rahmenvertragsnummer:'].value.item()
rechnung_grunddaten['umsatzsteuer'] = float(kad[kad['key'] == 'Umsatzsteuer:'].value.item()[:-2].replace(',','.'))
rechnung_grunddaten['abrechnungsperiode_start'] = pytz.utc.localize(datetime.strptime(kad[kad['key'] == 'Beginn der Abrechnungsperiode:'].value.item(), '%d.%m.%Y'))
rechnung_grunddaten['abrechnungsperiode_ende'] = pytz.utc.localize(datetime.strptime(kad[kad['key'] == 'Ende der Abrechnungsperiode:'].value.item(), '%d.%m.%Y'))
rechnung_grunddaten['rechnungsmonat_komplett'] = is_month_complete(rechnung_grunddaten['abrechnungsperiode_start'],rechnung_grunddaten['abrechnungsperiode_ende'])
rs = sheets['Rechnungssummen']
rechnung_grunddaten['rechnung_betrag_dtag_netto'] = float(rs[rs['text'] == 'Betrag Telekom Deutschland GmbH']['summen_betrag_netto'].item())
rechnung_grunddaten['rechnung_betrag_dtag_brutto'] = float(rs[rs['text'] == 'Betrag Telekom Deutschland GmbH']['brutto_betrag'].item())
rechnung_grunddaten['rechnung_betrag_drittanbieter_brutto'] = float(rs[rs['text'] == 'Genutzte Angebote']['betrag'].sum())
rechnung_grunddaten['rechnung_summe_netto'] = float(rs[rs['text'] == 'Rechnungsbetrag']['summen_betrag_netto'].item())
rechnung_grunddaten['rechnung_summe_brutto'] = float(rs[rs['text'] == 'Zu zahlender Betrag']['brutto_betrag'].item())
rp = sheets['Rechnungspositionen']
rechnung_grunddaten['rechnung_betrag_vda_brutto'] = rp[(rp['service'] == "VDA") & (rp['rechnungsbereich'] == "Telekom Deutschland GmbH")]['summen_nettobetrag'].sum()
zusatzangaben = rp[rp['rechnungsbereich'] == "Zusatzangaben zum Rechnungsbetrag"]
if not zusatzangaben['rechnungsposition'].empty:
regex = r'^([0-9,]+)% Vergünstigung auf\s(.*)$'
match = re.search(regex, zusatzangaben['rechnungsposition'].item())
        rechnung_grunddaten["rechnung_zusatzangaben_auf_rechnungsbereich"] = match.group(2)
        rechnung_grunddaten["rechnung_zusatzangaben_prozent"] = match.group(1)
rechnung_grunddaten = pd.DataFrame(rechnung_grunddaten, index=[0])
return rechnung_grunddaten
def _clean_summen_der_verguenstigungen(sheets):
    # Compute the totals of the discounts:
    # first collect all the individual line items
    # and sum them up at the end of the function
rp_sheet = sheets['Rechnungspositionen']
rp = rp_sheet[(rp_sheet['service'] == "Telefonie")
& (rp_sheet['eur_netto'].notnull())
& (rp_sheet['summen_brutto_betraege'].isnull())
& (rp_sheet['andere_leistungen_eur_brutto'].isnull())]
df = pd.DataFrame()
regex = r'^[0-9,]+% auf\s?(Grundpreis\s(.*)|(TwinBill - Aufpreis))$'
tmp = rp['rechnungsposition'].str.extractall(regex).droplevel(-1)
df['kartennummer'] = rp['kartennummer']
df['rufnummer'] = rp['rufnummer']
df['verguenstigung_grundpreis_art'] = tmp[1].combine_first(tmp[2])
df['verguenstigung_grundpreis_art'].replace(['TwinBill - Aufpreis'], 'TwinBill Aufpreis', inplace=True)
df['verguenstigung_grundpreis_summe'] = pd.to_numeric(rp['eur_netto'], errors='coerce')
    # Rows without a "verguenstigung_grundpreis_art" are undiscounted base prices
    # and therefore have to be filtered out when computing the discounted base prices
df = df.dropna(axis=0)
df = df.groupby(['kartennummer','rufnummer']).sum()
return df
def _clean_berechne_echte_grundpreise_u_variable_kosten(sheets, df1):
rp_sheet = sheets['Rechnungspositionen']
    # DTAG costs and the calculated base prices including discounts;
    # from these the variable costs can then be derived
df2 = rp_sheet[(rp_sheet['service'] == "Telefonie")
& (rp_sheet['summen_nettobetrag'].notnull())
& (rp_sheet['kartennummer'].notna())]
df2 = df2.groupby(['kartennummer','kostenstelle','kostenstellennutzer','rufnummer','rechnungsbereich'], dropna=False).sum()
df2 = df2.reset_index()
df2 = df2.pivot(index=['kartennummer','kostenstelle','kostenstellennutzer','rufnummer'], columns=['rechnungsbereich'], values='summen_nettobetrag')
df2 = df2[['Grundpreise','Telekom Deutschland GmbH']]
df2 = df2.reset_index()
df2 = df2.set_index(['kartennummer','rufnummer'])
df = pd.concat((df1, df2), axis=1)
df = df.reset_index()
cols = ['Grundpreise','Telekom Deutschland GmbH','verguenstigung_grundpreis_summe']
df[cols] =df[cols].apply(pd.to_numeric, errors='coerce')
df[cols] =df[cols].fillna(0)
df['grundpreise_echt'] = df['Grundpreise']+df['verguenstigung_grundpreis_summe']
df['variable_kosten'] = df['Telekom Deutschland GmbH'] - df['grundpreise_echt']
df = df.drop(columns=['Grundpreise','verguenstigung_grundpreis_summe'])
return df
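# A small worked example (assumed numbers, not taken from any real invoice) for the
# two derived columns above: if the listed base price ("Grundpreise") is 20.00, the
# discount sum is -5.00 and the Telekom total is 40.00, then
#   grundpreise_echt = 20.00 + (-5.00) = 15.00   (effective base price)
#   variable_kosten  = 40.00 - 15.00   = 25.00   (usage-dependent remainder)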
def _rechnungspositionen_komplett(sheets):
    # Import the invoice line items in full, without any processing
rp = sheets['Rechnungspositionen']
cols = ['beginn_datum','ende_datum']
rp['beginn_datum'] = pd.to_datetime(rp['beginn_datum'],utc=True, format='%d.%m.%Y')
rp['ende_datum'] = pd.to_datetime(rp['ende_datum'],utc=True, format='%d.%m.%Y')
rp[cols] = rp[cols].astype(object)
#rp = rp.where(pd.notnull(rp), None)
rp = rp[rp['rufnummer'].notnull()]
return rp
def _drittanbieterkosten(sheets):
rp_sheet = sheets['Rechnungspositionen']
df3 = rp_sheet[rp_sheet['summen_brutto_betraege'].notnull()]
df3 = df3.rename(columns={'summen_brutto_betraege': 'drittanbieterkosten'})
df3 = df3[['kartennummer','rufnummer','drittanbieterkosten']]
df3 = df3.set_index(['kartennummer','rufnummer'])
return df3['drittanbieterkosten']
def validate_or_process_invoice(data):
head, tail = ntpath.split(data.name)
filename = tail or ntpath.basename(data.name)
file = data.file
if not re.match(r'^Rechnung_',filename):
raise ValueError(('Der Dateiname \"%s\" beginnt nicht mit \"Rechnung_\". Ist es wirklich eine Telekom-Rechnung?' % filename))
sheets = pd.read_excel(file, sheet_name = None, dtype = object)
if ('Kunden-Absender Daten' not in sheets):
raise ValueError(('Das Excel-Sheet "Kunden-Absender Daten" fehlt.'))
if ('Rechnungssummen' not in sheets):
raise ValueError(('Das Excel-Sheet "Rechnungssummen" fehlt.'))
if ('Rechnungspositionen' not in sheets):
raise ValueError(('Das Excel-Sheet "Rechnungspositionen" fehlt.'))
if ('Optionen' not in sheets):
raise ValueError(('Das Excel-Sheet "Optionen" fehlt.'))
    # Rename all columns in all sheets
sheets['Kunden-Absender Daten'] = sheets['Kunden-Absender Daten'].rename(columns={
'Ihre Mobilfunk-Rechnung': "key",
'Unnamed: 1': "value"
}
)
sheets['Rechnungssummen'] = sheets['Rechnungssummen'].rename(columns={
'Anbieter': 'anbieter',
'Text': 'text',
'Betrag': 'betrag',
'Summen-betrag Netto': 'summen_betrag_netto',
'USt-Betrag': 'ust_betrag',
'Brutto-Betrag': 'brutto_betrag',
}
)
sheets['Rechnungspositionen'] = sheets['Rechnungspositionen'].rename(columns={
'Karten-/Profilnummer': 'kartennummer',
'Kostenstelle': 'kostenstelle',
'Kostenstellennutzer': 'kostenstellennutzer',
'Rufnummer': 'rufnummer',
'Service': 'service',
'Rechnungsbereich': 'rechnungsbereich',
'Rechnungsposition': 'rechnungsposition',
'Menge': 'menge',
'Infomenge': 'infomenge',
'Einheit': 'einheit',
'Beginn-Datum': 'beginn_datum',
'Ende-Datum': 'ende_datum',
'Info-Betrag': 'info_betrag',
'EUR (Netto)': 'eur_netto',
'Summen Nettobetrag': 'summen_nettobetrag',
'Andere Leistungen EUR (brutto)': 'andere_leistungen_eur_brutto',
'Summen Brutto-beträge': 'summen_brutto_betraege'
}
)
sheets['Optionen'] = sheets['Optionen'].rename(columns={
'Karten-/Profilnummer': 'kartennummer',
'Rufnummer': 'rufnummer',
'Dienst-bezeichnung': 'dienst_bezeichnung',
'Option': 'option',
'Bemerkung': 'bemerkung',
'gültig ab': 'gueltig_ab',
'gültig bis': 'gueltig_bis',
}
)
    # Convert all numeric columns in all sheets with to_numeric
cols = ['menge', 'infomenge', 'info_betrag',
'eur_netto','summen_nettobetrag',
'andere_leistungen_eur_brutto',
'summen_brutto_betraege']
sheets['Rechnungspositionen'][cols] = sheets['Rechnungspositionen'][cols].apply(pd.to_numeric, errors='coerce', axis=1)
cols = ['betrag', 'summen_betrag_netto', 'ust_betrag', 'brutto_betrag']
sheets['Rechnungssummen'][cols] = sheets['Rechnungssummen'][cols].apply(pd.to_numeric, errors='coerce', axis=1)
    # Convert date columns to datetime (UTC)
sheets['Rechnungspositionen']['beginn_datum'] = pd.to_datetime(sheets['Rechnungspositionen']['beginn_datum'],utc=True, format='%d.%m.%Y')
sheets['Rechnungspositionen']['ende_datum'] = pd.to_datetime(sheets['Rechnungspositionen']['ende_datum'],utc=True, format='%d.%m.%Y')
sheets['Optionen']['gueltig_ab'] = pd.to_datetime(sheets['Optionen']['gueltig_ab'],utc=True, format='%d.%m.%Y')
sheets['Optionen']['gueltig_bis'] = pd.to_datetime(sheets['Optionen']['gueltig_bis'],utc=True, format='%d.%m.%Y')
rp = _rechnungspositionen_komplett(sheets)
rechnung_grunddaten = _clean_grunddaten_from_sheet(sheets)
sum_rabatt = _clean_summen_der_verguenstigungen(sheets)
df = _clean_berechne_echte_grundpreise_u_variable_kosten(sheets,sum_rabatt)
df = df.set_index(['kartennummer','rufnummer'])
df['drittanbieterkosten'] = _drittanbieterkosten(sheets)
df['drittanbieterkosten'] = df['drittanbieterkosten'].fillna(value=0)
    # Apply the "Zusatzangaben" discount & VAT to the variable costs, fixed costs and the total
if ('rechnung_zusatzangaben_auf_rechnungsbereich' in rechnung_grunddaten.columns):
if(rechnung_grunddaten["rechnung_zusatzangaben_auf_rechnungsbereich"].item() == "Betrag Telekom Deutschland GmbH"):
abzug = 1 - float(rechnung_grunddaten['rechnung_zusatzangaben_prozent'].item())/100
df['variable_kosten'] = df['variable_kosten'] * abzug
df['grundpreise_echt'] = df['grundpreise_echt'] * abzug
df['Telekom Deutschland GmbH'] = df['Telekom Deutschland GmbH'] * abzug
rechnung_grunddaten['rechnung_betrag_vda_brutto'] = rechnung_grunddaten['rechnung_betrag_vda_brutto'] * abzug
if (rechnung_grunddaten["umsatzsteuer"].item()):
steuer = 1 + float(rechnung_grunddaten['umsatzsteuer'].item())/100
df['variable_kosten'] = df['variable_kosten'] * steuer
df['grundpreise_echt'] = df['grundpreise_echt'] * steuer
df['Telekom Deutschland GmbH'] = df['Telekom Deutschland GmbH'] * steuer
rechnung_grunddaten['rechnung_betrag_vda_brutto'] = rechnung_grunddaten['rechnung_betrag_vda_brutto'] * steuer
df['summe_komplett_brutto'] = df['Telekom Deutschland GmbH']+df['drittanbieterkosten']
cols = ['Telekom Deutschland GmbH', 'grundpreise_echt', 'variable_kosten', 'drittanbieterkosten', 'summe_komplett_brutto']
df[cols] = df[cols].fillna(0)
    # Check the individual deviations
df['einzel_abweichung'] = df['Telekom Deutschland GmbH'] - (df['variable_kosten'] + df['grundpreise_echt'])
df['einzel_abweichung'] = df['einzel_abweichung'].abs()
anzahl_einzel_abweichung = len(df[df['einzel_abweichung']> 1e-6])
if (anzahl_einzel_abweichung):
#print(df[df['einzel_abweichung']> 1e-6].round(2))
raise ValueError("Rechnung konnte nicht importiert werden, da bei %i Positionen die Summe zwischen den berechneten Kosten (variable Kosten + Fixkosten) nicht mit den aus der Datei übereinstimmen" % anzahl_einzel_abweichung)
df = df.drop(columns=['einzel_abweichung'])
    # Check the grand total
rechnung_grunddaten['rechnung_summe_brutto_berechnet'] = round(df['summe_komplett_brutto'].sum() + rechnung_grunddaten['rechnung_betrag_vda_brutto'],2)
abweichung = float(np.absolute(rechnung_grunddaten['rechnung_summe_brutto_berechnet'] - rechnung_grunddaten['rechnung_summe_brutto']))
if (abweichung >= 0.02):
raise ValueError("Rechnung konnte nicht importiert werden, da die maximale Abweichung (2 Cent) zwischen der berechneten und der importierten Gesamtsumme zu hoch ist: {:.2f} €".format(abweichung))
    # Check for negative variable costs
anzahl_negative_variable_kosten = len(df[df['variable_kosten']<-1e-6])
if anzahl_negative_variable_kosten:
raise ValueError("Rechnung konnte nicht importiert werden, da bei %i Positionen negative variable Kosten berechnet wurden" % anzahl_negative_variable_kosten)
    # All checks passed
    # Assemble a list of new DataFrames for further processing
rechnung_grunddaten = rechnung_grunddaten.reset_index()
df = df.reset_index()
rp = rp.reset_index()
df_list = list()
df_list.append(rechnung_grunddaten)
df = df.round(2)
    # (SQLite | Django)? does not support NaN
df = df.where(pd.notnull(df), None)
rp = rp.replace({np.datetime64('NaT'): None})
rp = rp.replace({np.nan: None})
df_list.append(df)
df_list.append(rp)
return df_list
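# Minimal usage sketch (assumption: `data` is an uploaded-file-like object exposing
# `.name` and `.file`, e.g. a Django UploadedFile; the file name below is made up):
#
#   from types import SimpleNamespace
#   with open("Rechnung_2021_01.xlsx", "rb") as fh:
#       upload = SimpleNamespace(name="Rechnung_2021_01.xlsx", file=fh)
#       grunddaten, kosten_je_karte, positionen = validate_or_process_invoice(upload)
#
# The returned list holds the invoice master data, the per-card cost summary and the
# raw invoice line items, in that order.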
def validate_or_process_gutschrift(data):
filename = data.name
file = data.file
if not re.match(r'^Gutschrift_',filename):
raise ValueError(('Der Dateiname \"%s\" beginnt nicht mit \"Gutschrift_\". Ist es wirklich eine Telekom-Gutschrift?' % filename))
sheets = pd.read_excel(file, sheet_name = None, dtype = object)
if ('Kunden-Absender Daten' not in sheets):
raise ValueError(('Das Excel-Sheet "Kunden-Absender Daten" fehlt.'))
if ('Summen' not in sheets):
raise ValueError(('Das Excel-Sheet "Summen" fehlt.'))
if ('Zusatzangaben' not in sheets):
        raise ValueError(('Das Excel-Sheet "Zusatzangaben" fehlt.'))
if ('Grund der Gutschrift' not in sheets):
        raise ValueError(('Das Excel-Sheet "Grund der Gutschrift" fehlt.'))
    # Rename all columns in all sheets
sheets['Kunden-Absender Daten'] = sheets['Kunden-Absender Daten'].rename(columns={
'Ihre Mobilfunk-Gutschrift': "key",
'Unnamed: 1': "value"
}
)
sheets['Summen'] = sheets['Summen'].rename(columns={
'Anbieter': 'anbieter',
'Text': 'text',
'Betrag': 'betrag',
'Summen-betrag Netto': 'summen_betrag_netto',
'USt-Betrag': 'ust_betrag',
'Brutto-Betrag': 'brutto_betrag',
}
)
sheets['Zusatzangaben'] = sheets['Zusatzangaben'].rename(columns={
'Rechnungsnummer': 'rechnungsnummer',
'Rechnungsdatum': 'rechnungsdatum',
'Karten-/Profilnummer': 'kartennummer',
'Kostenstelle': 'kostenstelle',
'Kostenstellennutzer': 'kostenstellennutzer',
'Rufnummer': 'rufnummer',
'Text': 'text',
'EUR (Netto)': 'eur_netto',
'EUR (Brutto)': 'eur_brutto',
}
)
sheets['Grund der Gutschrift'] = sheets['Grund der Gutschrift'].rename(columns={
'Text': 'text',
}
)
    # Convert all numeric columns in all sheets with to_numeric
cols = ['betrag', 'summen_betrag_netto', 'ust_betrag',
'brutto_betrag']
sheets['Summen'][cols] = sheets['Summen'][cols].apply(pd.to_numeric, errors='coerce', axis=1)
cols = ['eur_netto', 'eur_brutto']
sheets['Zusatzangaben'][cols] = sheets['Zusatzangaben'][cols].apply(pd.to_numeric, errors='coerce', axis=1)
    # Convert date columns to datetime (UTC)
sheets['Zusatzangaben']['rechnungsdatum'] = pd.to_datetime(sheets['Zusatzangaben']['rechnungsdatum'],utc=True, format='%d.%m.%Y')
df = sheets['Kunden-Absender Daten']
df = df.dropna()
gs = dict(zip(df.key.str.replace(r':$', '', regex=True), df.value))
gs['Gutschriftsdatum'] = pytz.utc.localize(datetime.strptime(gs['Gutschriftsdatum'], "%d.%m.%Y"))
sheet_summen = sheets['Summen']
    # Check that only Telekom credit notes are present. Third-party credit notes are not supported
if not sheet_summen[(sheet_summen['anbieter']!="Telekom Deutschland GmbH")]['anbieter'].dropna().empty:
_drittanbieter = sheet_summen[(sheet_summen['anbieter']!="Telekom Deutschland GmbH")]['anbieter'].dropna().to_json()
raise ValueError('Die Gutschrift enhält nicht unterstützte Drittanbieter: "%s"' % _drittanbieter)
_summen = sheet_summen[(sheet_summen['text']=="Gutschriftsbetrag inkl. Umsatzsteuer")][['summen_betrag_netto','ust_betrag','brutto_betrag']].to_dict('records')
if _summen.__len__() > 1:
raise ValueError('Die Gutschrift enhält %s Einzelgutschriften. Das wird nicht unterstützt' % _summen.__len__())
gs.update(_summen[0])
gz = sheets['Zusatzangaben']
if len(gz.index) > 1:
raise ValueError('Die Gutschrift enhält %s Einzelgutschriften. Das wird zur Zeit nicht unterstützt.' % len(gz.index))
if gs['summen_betrag_netto'] != gz['eur_netto'].item():
raise ValueError('Die Beträge in Zusatzangaben (%s) und Summen (%s) stimmen nicht überein.' % (gz['eur_netto'].item(), gs['summen_betrag_netto']))
gz['gutschriftsnummer'] = gs['Gutschriftsnummer']
gz['eur_brutto'] = gs['brutto_betrag']
gz['grund'] = sheets['Grund der Gutschrift']['text'].item()
return gz
def validate_or_process_masterreport(data):
filename = data.name
file = data.file
#if DataFile.objects.filter(data=filename).exists():
# raise forms.ValidationError(('Die Datei "%s" wurde bereits hochgeladen' % filename), code='file_already_exists')
if not re.match(r'^\d{4}\d{2}\d{2}',filename):
        raise ValueError('Der Dateiname "%s" beginnt nicht mit einem Datum (Ymd). Ist es wirklich ein Masterreport?' % filename)
sheets = pd.read_excel(file, sheet_name = None, dtype = object)
if ('Kundennummer' not in sheets):
        raise ValueError('Das Excel-Sheet "Kundennummer" fehlt. Ist es wirklich ein Masterreport?')
df = sheets['Kundennummer']
auswahl_spalten = [
'Rufnummer',
'Kostenstelle',
'Kostenstellennutzer',
'GP/Organisationseinheit',
'Rahmenvertrag',
'Kundennummer',
'Daten Optionen',
'Voice Optionen',
'Mischoptionen (Voice, Data, SMS)',
'Mehrkarten Optionen',
'Roaming Optionen',
'Sonstige Optionen',
'Karten-/Profilnummer',
'EVN',
'Vertragsbeginn',
'Bindefristende',
'Bindefrist',
'Tarif',
'Sperren',
'Sperrgrund',
'Stillegung',
'Letzte Vertragsverlängerung',
'VVL Grund',
'VVL Berechtigung',
'Kündigungstermin',
'Kündigungseingang'
]
df = df[auswahl_spalten]
    # Convert date columns to datetime (UTC)
df['Vertragsbeginn']= | pd.to_datetime(df['Vertragsbeginn'],utc=True, format='%d.%m.%Y') | pandas.to_datetime |
#------------------------------------------------------------------------------------------#
import numpy as np
import pandas as pd
from scipy.spatial import distance
import scipy
import h5py, time, random, os, sys, pyflagser, warnings, datetime
import matplotlib.pyplot as plt
from tqdm import tqdm
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from itertools import repeat
from morphological_types import *
from pyflagser import flagser_count_unweighted as fcu
from pyflagser import flagser_unweighted as fu
import matplotlib
#------------------------------------------------------------------------------------------#
t = time.process_time()
#------------------------------------------------------------------------------------------#
''' data '''
def king_file(mc):
mc_file = h5py.File(f'../data/average/cons_locs_pathways_mc{mc}_Column.h5', 'r')
populations, connections = mc_file.get('populations'), mc_file.get('connectivity')
return populations, connections
#------------------------------------------------------------------------------------------#
''' Model Constructions '''
def Bio_M(m_type, populations, connections):
for M_a in tqdm(m_type):
for M_b in m_type:
            # spatial coordinates of the neurons in each neuronal m-type
L_a = pd.DataFrame(np.matrix(populations[M_a]['locations']), columns = ['x', 'y', 'z'])
L_b = pd.DataFrame(np.matrix(populations[M_b]['locations']), columns = ['x', 'y', 'z'])
# distances between each neuron pathway group
D_ = scipy.spatial.distance.cdist(L_a, L_b, 'euclidean')
# Bins
bins = np.arange(1, D_.max(), 75) - np.concatenate([[0],
np.array(np.ones(len(np.arange(1, D_.max(), 75)) - 1))])
# Matrix of distance bins
C_ = np.array(np.digitize(D_, bins))
# Bin groups in matrix
groups = np.array(range(len(bins))) + 1
# Actual connections matrix
a = np.array(connections[M_a][M_b]['cMat'])
ab = | pd.DataFrame(a) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:core_acc] *
# language: python
# name: conda-env-core_acc-py
# ---
# # Explore exoS/exoU genes
#
# This notebook specifically explores the exoS (PAO1) and exoU (PA14) accessory-accessory modules to determine if there is an interesting biological story here.
#
# _P. aeruginosa_ uses a type III secretion system (T3SS) to promote development of severe disease, particularly in patients with impaired immune defenses. _P. aeruginosa_ uses a type III secretion system to inject toxic effector proteins into the cytoplasm of eukaryotic cells. ExoU, ExoS, and ExoT, three effector proteins secreted by this system. ExoU and ExoS are usually secreted by different strains.
#
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC529154/
#
# TO DO:
# * Expand description to all exo's
# * Might move this analysis to a new location if we want to include core genes as well
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
import scipy
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scripts import utils, paths, annotations
np.random.seed(1)
# +
# Clustering method
method_name = "affinity"
# Gene subset
gene_subset = "acc"
processed = "spell"
# Select modules containing exoS (module 17) and exoU (module 6)
exoS_module_id = 17
exoU_module_id = 6
# -
# ## Load correlation matrix
# Load correlation matrix
pao1_corr_filename = paths.PAO1_CORR_LOG_SPELL_ACC
pa14_corr_filename = paths.PA14_CORR_LOG_SPELL_ACC
pao1_corr = pd.read_csv(pao1_corr_filename, sep="\t", index_col=0, header=0)
pa14_corr = pd.read_csv(pa14_corr_filename, sep="\t", index_col=0, header=0)
# ## Load module membership
pao1_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR, f"pao1_modules_{method_name}_{gene_subset}_{processed}.tsv"
)
pa14_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR, f"pa14_modules_{method_name}_{gene_subset}_{processed}.tsv"
)
pao1_membership = pd.read_csv(pao1_membership_filename, sep="\t", header=0, index_col=0)
pa14_membership = pd.read_csv(pa14_membership_filename, sep="\t", header=0, index_col=0)
# ## Select genes associated with modules of interest
exoS_module_df = pao1_membership[pao1_membership["module id"] == exoS_module_id]
exoU_module_df = pa14_membership[pa14_membership["module id"] == exoU_module_id]
exoS_module_df.head()
exoU_module_df.head()
# ## Examine exoS and exoU modules
# Get gene id for exoS and exoU
exoS_id = "PA3841"
exoU_id = "PA14_51530"
exoS_module_gene_ids = list(exoS_module_df.index)
exoU_module_gene_ids = list(exoU_module_df.index)
# +
# Import gene metadata
pao1_gene_annot_filename = paths.GENE_PAO1_ANNOT
pa14_gene_annot_filename = paths.GENE_PA14_ANNOT
pao1_gene_annot = pd.read_csv(pao1_gene_annot_filename, index_col=0, header=0)
pa14_gene_annot = pd.read_csv(pa14_gene_annot_filename, index_col=0, header=0)
# -
# Get df with gene ids as indices and gene names as a column
# Having the data in a df instead of a series will just allow me to do my merges that are in the notebook
pao1_gene_annot = pao1_gene_annot["Name"].to_frame("gene name")
pa14_gene_annot = pa14_gene_annot["Name"].to_frame("gene name")
pao1_gene_annot.head()
# Add gene names to heatmap
def add_gene_name_to_index(annot_df, corr_df):
# Only consider genes in the correlation matrix
annot_index_df = annot_df.loc[corr_df.index]
annot_columns_df = annot_df.loc[corr_df.columns]
new_index = []
for gene_id in annot_index_df.index:
if pd.isnull(annot_index_df.loc[gene_id, "gene name"]):
new_index.append(gene_id)
else:
new_index.append(annot_index_df.loc[gene_id, "gene name"])
new_columns = []
for gene_id in annot_columns_df.index:
if pd.isnull(annot_columns_df.loc[gene_id, "gene name"]):
new_columns.append(gene_id)
else:
new_columns.append(annot_columns_df.loc[gene_id, "gene name"])
# Rename index and columns
corr_df.index = new_index
corr_df.columns = new_columns
return corr_df
# +
exoS_corr_module = pao1_corr.loc[exoS_module_gene_ids, exoS_module_gene_ids]
exoU_corr_module = pa14_corr.loc[exoU_module_gene_ids, exoU_module_gene_ids]
exoS_corr_module_names = add_gene_name_to_index(pao1_gene_annot, exoS_corr_module)
exoU_corr_module_names = add_gene_name_to_index(pa14_gene_annot, exoU_corr_module)
# -
# %%time
f1 = sns.clustermap(exoS_corr_module_names, cmap="BrBG", center=0)
f1.ax_heatmap.set_xticklabels(f1.ax_heatmap.get_xmajorticklabels(), fontsize=20)
f1.ax_heatmap.set_yticklabels(f1.ax_heatmap.get_ymajorticklabels(), fontsize=20)
f1.fig.suptitle("Correlation of exoS module", y=1.05, fontsize=24)
# %%time
g1 = sns.clustermap(exoU_corr_module_names, cmap="BrBG", center=0)
g1.ax_heatmap.set_xticklabels(g1.ax_heatmap.get_xmajorticklabels(), fontsize=20)
g1.ax_heatmap.set_yticklabels(g1.ax_heatmap.get_ymajorticklabels(), fontsize=20)
g1.fig.suptitle("Correlation of exoU module", y=1.05, fontsize=24)
# **Takeaway**
# We've aggregated the information from this notebook into a [google sheet](https://docs.google.com/spreadsheets/d/1AuD1Q4lHhWNp5xzgW-hi8mHkHFyd91rmOksXXuAwk4Q/edit#gid=533448426) to easily share with collaborators. This sheet contains Uniprot annotations for each gene within the exoS and exoU modules. The sheet also contains a sorted matrix of genes and how correlated they are with exoS and exoU.
#
# * Genes within exoS module appear to be more highly correlated with exoS (see sheet) and each other (see heatmap) unlike exoU module
# * What might this mean about exoS, exoU?
# * Despite being part of the same T3S system, their relationship to other accessory genes is different
# * Based on gene annotations, is there a different mechanism by which exoS contributes to virulence compared to exoU?
# * This is difficult to answer with so many unannotated genes
#
# Some more reading will need to be done to determine the biological motivation here:
# * What is known about the mechanism by which these genes contribute to virulence?
# * What can we learn from module composition?
# * What can we learn from most co-expressed genes?
# ## Other relationships between exoS/U and other genes
#
# Get co-expression relationship between exoS/U and all other genes (both core and accessory)
# +
# Read in correlation for all genes
pao1_all_corr_filename = paths.PAO1_CORR_LOG_SPELL
pa14_all_corr_filename = paths.PA14_CORR_LOG_SPELL
pao1_all_corr = pd.read_csv(pao1_all_corr_filename, sep="\t", index_col=0, header=0)
pa14_all_corr = | pd.read_csv(pa14_all_corr_filename, sep="\t", index_col=0, header=0) | pandas.read_csv |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length= | pd.Timedelta('1h') | pandas.Timedelta |
'''
create REMC Regional R16 Error summary Section data in the column sequence
Regional Solar, Regional Wind, Regional Combined (Solar+wind), ISTS Solar, ISTS Wind, ISTS Combined (Solar+wind)
'''
import pandas as pd
from data_fetchers.remc_data_store import getRemcPntData, FCA_FORECAST_VS_ACTUAL_STORE_NAME
from utils.remcFormulas import calcNrmsePerc, calcMapePerc
from utils.excel_utils import append_df_to_excel
from utils.printUtils import printWithTs
def populateRemcRegionalR16ErrSectionData(configFilePath, configSheetName, outputFilePath, outputSheetName, truncateSheet=False):
sectionDataDf = getRemcRegionalR16ErrSectionDataDf(
configFilePath, configSheetName)
# dump data to excel
append_df_to_excel(outputFilePath, sectionDataDf, sheet_name=outputSheetName,
startrow=None, truncate_sheet=truncateSheet, index=False, header=False)
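# Minimal usage sketch (the workbook paths and sheet names below are assumptions,
# not part of this module): the call reads the point configuration, computes the
# MAPE/NRMSE rows and appends them to the output workbook.
#
#   populateRemcRegionalR16ErrSectionData(
#       configFilePath="report_config.xlsx", configSheetName="r16_err_conf",
#       outputFilePath="weekly_report.xlsx", outputSheetName="regional_r16_err",
#       truncateSheet=True)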
def getRemcRegionalR16ErrSectionDataDf(configFilePath, configSheetName):
# get conf dataframe
confDf = pd.read_excel(configFilePath, sheet_name=configSheetName)
# confDf columns should be
# name,r16_pnt,actual_pnt,cuf_pnt,avc_pnt,type
for stripCol in 'name,r16_pnt,actual_pnt,avc_pnt,type'.split(','):
confDf[stripCol] = confDf[stripCol].str.strip()
# initialize results
resValsList = []
    # add a placeholder result row for every dummy row in the config
for rowIter in range(len(confDf)):
confRow = confDf.iloc[rowIter]
rowType = confRow['type']
if rowType == 'dummy':
resValsList.append({"name": confRow['name'],
"solar": None, "wind": None,
"combined": None})
# get regional rows
solarConf = confDf[confDf['name'] == 'solar'].squeeze()
windConf = confDf[confDf['name'] == 'wind'].squeeze()
combinedConf = confDf[confDf['name'] == 'combined'].squeeze()
# get data values
regSolActVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, solarConf['actual_pnt'])
regSolR16Vals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, solarConf['r16_pnt'])
regSolAvcVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, solarConf['avc_pnt'])
regWindActVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, windConf['actual_pnt'])
regWindR16Vals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, windConf['r16_pnt'])
regWindAvcVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, windConf['avc_pnt'])
regCombActVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, combinedConf['actual_pnt'])
regCombR16Vals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, combinedConf['r16_pnt'])
regCombAvcVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, combinedConf['avc_pnt'])
# get ISTS rows
solarConf = confDf[confDf['name'] == 'ists_solar'].squeeze()
windConf = confDf[confDf['name'] == 'ists_wind'].squeeze()
combinedConf = confDf[confDf['name'] == 'ists_combined'].squeeze()
# get data values
istsSolActVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, solarConf['actual_pnt'])
istsSolR16Vals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, solarConf['r16_pnt'])
istsSolAvcVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, solarConf['avc_pnt'])
istsWindActVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, windConf['actual_pnt'])
istsWindR16Vals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, windConf['r16_pnt'])
istsWindAvcVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, windConf['avc_pnt'])
istsCombActVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, combinedConf['actual_pnt'])
istsCombR16Vals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, combinedConf['r16_pnt'])
istsCombAvcVals = getRemcPntData(
FCA_FORECAST_VS_ACTUAL_STORE_NAME, combinedConf['avc_pnt'])
# calculate the output rows for region
# calculate regional solar mape for r16
regSolR16MapePerc = calcMapePerc(regSolActVals, regSolR16Vals, regSolAvcVals)
# calculate regional solar nrmse for r16
regSolR16NrmsePerc = calcNrmsePerc(
regSolActVals, regSolR16Vals, regSolAvcVals)
# calculate regional wind mape for r16
regWindR16MapePerc = calcMapePerc(
regWindActVals, regWindR16Vals, regWindAvcVals)
# calculate regional wind nrmse for r16
regWindR16NrmsePerc = calcNrmsePerc(
regWindActVals, regWindR16Vals, regWindAvcVals)
# calculate regional combined mape for r16
regCombR16MapePerc = calcMapePerc(
regCombActVals, regCombR16Vals, regCombAvcVals)
# calculate regional combined nrmse for r16
regCombR16NrmsePerc = calcNrmsePerc(
regCombActVals, regCombR16Vals, regCombAvcVals)
# calculate the output rows for ists
# calculate ists solar mape for r16
istsSolR16MapePerc = calcMapePerc(
istsSolActVals, istsSolR16Vals, istsSolAvcVals)
# calculate ists solar nrmse for r16
istsSolR16NrmsePerc = calcNrmsePerc(
istsSolActVals, istsSolR16Vals, istsSolAvcVals)
# calculate ists wind mape for r16
istsWindR16MapePerc = calcMapePerc(
istsWindActVals, istsWindR16Vals, istsWindAvcVals)
# calculate ists wind nrmse for r16
istsWindR16NrmsePerc = calcNrmsePerc(
istsWindActVals, istsWindR16Vals, istsWindAvcVals)
# calculate ists combined mape for r16
istsCombR16MapePerc = calcMapePerc(
istsCombActVals, istsCombR16Vals, istsCombAvcVals)
# calculate ists combined nrmse for r16
istsCombR16NrmsePerc = calcNrmsePerc(
istsCombActVals, istsCombR16Vals, istsCombAvcVals)
printWithTs('Processing REMC Regional R16 Error summary Section at row {0}'.format(
len(resValsList)+1))
# create result dataframe rows
resValsList.extend([
{"name": "MAPE",
"solar": regSolR16MapePerc, "wind": regWindR16MapePerc, "combined": regCombR16MapePerc,
"istsSolar": istsSolR16MapePerc, "istsWind": istsWindR16MapePerc, "istsCombined": istsCombR16MapePerc
},
{"name": "NRMSE",
"solar": regSolR16NrmsePerc, "wind": regWindR16NrmsePerc, "combined": regCombR16NrmsePerc,
"istsSolar": istsSolR16NrmsePerc, "istsWind": istsWindR16NrmsePerc, "istsCombined": istsCombR16NrmsePerc
}
])
return | pd.DataFrame(resValsList) | pandas.DataFrame |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import pandas
from modin.pandas.utils import to_pandas
import modin.pandas as pd
from pathlib import Path
import pyarrow as pa
import os
import sys
from .utils import df_equals
# needed to resolve ray-project/ray#3744
pa.__version__ = "0.11.0"
pd.DEFAULT_NPARTITIONS = 4
PY2 = sys.version_info[0] == 2
TEST_PARQUET_FILENAME = "test.parquet"
TEST_CSV_FILENAME = "test.csv"
TEST_JSON_FILENAME = "test.json"
TEST_HTML_FILENAME = "test.html"
TEST_EXCEL_FILENAME = "test.xlsx"
TEST_FEATHER_FILENAME = "test.feather"
TEST_READ_HDF_FILENAME = "test.hdf"
TEST_WRITE_HDF_FILENAME_MODIN = "test_write_modin.hdf"
TEST_WRITE_HDF_FILENAME_PANDAS = "test_write_pandas.hdf"
TEST_MSGPACK_FILENAME = "test.msg"
TEST_STATA_FILENAME = "test.dta"
TEST_PICKLE_FILENAME = "test.pkl"
TEST_SAS_FILENAME = os.getcwd() + "/data/test1.sas7bdat"
TEST_FWF_FILENAME = "test_fwf.txt"
TEST_GBQ_FILENAME = "test_gbq."
SMALL_ROW_SIZE = 2000
def modin_df_equals_pandas(modin_df, pandas_df):
return to_pandas(modin_df).sort_index().equals(pandas_df.sort_index())
def setup_parquet_file(row_size, force=False):
if os.path.exists(TEST_PARQUET_FILENAME) and not force:
pass
else:
pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
).to_parquet(TEST_PARQUET_FILENAME)
def create_test_ray_dataframe():
df = pd.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
def create_test_pandas_dataframe():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
@pytest.fixture
def test_files_eq(path1, path2):
with open(path1, "rb") as file1, open(path2, "rb") as file2:
file1_content = file1.read()
file2_content = file2.read()
if file1_content == file2_content:
return True
else:
return False
def teardown_test_file(test_path):
if os.path.exists(test_path):
os.remove(test_path)
def teardown_parquet_file():
if os.path.exists(TEST_PARQUET_FILENAME):
os.remove(TEST_PARQUET_FILENAME)
@pytest.fixture
def make_csv_file():
"""Pytest fixture factory that makes temp csv files for testing.
Yields:
Function that generates csv files
"""
filenames = []
def _make_csv_file(
filename=TEST_CSV_FILENAME,
row_size=SMALL_ROW_SIZE,
force=False,
delimiter=",",
encoding=None,
):
if os.path.exists(filename) and not force:
pass
else:
dates = pandas.date_range("2000", freq="h", periods=row_size)
df = pandas.DataFrame(
{
"col1": np.arange(row_size),
"col2": [str(x.date()) for x in dates],
"col3": np.arange(row_size),
"col4": [str(x.time()) for x in dates],
}
)
df.to_csv(filename, sep=delimiter, encoding=encoding)
filenames.append(filename)
return df
# Return function that generates csv files
yield _make_csv_file
# Delete csv files that were created
for filename in filenames:
if os.path.exists(filename):
os.remove(filename)
def setup_json_file(row_size, force=False):
if os.path.exists(TEST_JSON_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_json(TEST_JSON_FILENAME)
def teardown_json_file():
if os.path.exists(TEST_JSON_FILENAME):
os.remove(TEST_JSON_FILENAME)
def setup_html_file(row_size, force=False):
if os.path.exists(TEST_HTML_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_html(TEST_HTML_FILENAME)
def teardown_html_file():
if os.path.exists(TEST_HTML_FILENAME):
os.remove(TEST_HTML_FILENAME)
def setup_clipboard(row_size, force=False):
df = pandas.DataFrame({"col1": np.arange(row_size), "col2": np.arange(row_size)})
df.to_clipboard()
def setup_excel_file(row_size, force=False):
if os.path.exists(TEST_EXCEL_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_excel(TEST_EXCEL_FILENAME)
def teardown_excel_file():
if os.path.exists(TEST_EXCEL_FILENAME):
os.remove(TEST_EXCEL_FILENAME)
def setup_feather_file(row_size, force=False):
if os.path.exists(TEST_FEATHER_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_feather(TEST_FEATHER_FILENAME)
def teardown_feather_file():
if os.path.exists(TEST_FEATHER_FILENAME):
os.remove(TEST_FEATHER_FILENAME)
def setup_hdf_file(row_size, force=False, format=None):
if os.path.exists(TEST_READ_HDF_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_hdf(TEST_READ_HDF_FILENAME, key="df", format=format)
def teardown_hdf_file():
if os.path.exists(TEST_READ_HDF_FILENAME):
os.remove(TEST_READ_HDF_FILENAME)
def setup_msgpack_file(row_size, force=False):
if os.path.exists(TEST_MSGPACK_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_msgpack(TEST_MSGPACK_FILENAME)
def teardown_msgpack_file():
if os.path.exists(TEST_MSGPACK_FILENAME):
os.remove(TEST_MSGPACK_FILENAME)
def setup_stata_file(row_size, force=False):
if os.path.exists(TEST_STATA_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_stata(TEST_STATA_FILENAME)
def teardown_stata_file():
if os.path.exists(TEST_STATA_FILENAME):
os.remove(TEST_STATA_FILENAME)
def setup_pickle_file(row_size, force=False):
if os.path.exists(TEST_PICKLE_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_pickle(TEST_PICKLE_FILENAME)
def teardown_pickle_file():
if os.path.exists(TEST_PICKLE_FILENAME):
os.remove(TEST_PICKLE_FILENAME)
@pytest.fixture
def make_sql_connection():
"""Sets up sql connections and takes them down after the caller is done.
Yields:
Factory that generates sql connection objects
"""
filenames = []
def _sql_connection(filename, table=""):
# Remove file if exists
if os.path.exists(filename):
os.remove(filename)
filenames.append(filename)
# Create connection and, if needed, table
conn = "sqlite:///{}".format(filename)
if table:
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3, 4, 5, 6],
"col2": [7, 8, 9, 10, 11, 12, 13],
"col3": [14, 15, 16, 17, 18, 19, 20],
"col4": [21, 22, 23, 24, 25, 26, 27],
"col5": [0, 0, 0, 0, 0, 0, 0],
}
)
df.to_sql(table, conn)
return conn
yield _sql_connection
# Takedown the fixture
for filename in filenames:
if os.path.exists(filename):
os.remove(filename)
def setup_fwf_file():
if os.path.exists(TEST_FWF_FILENAME):
return
fwf_data = """id8141 360.242940 149.910199 11950.7
id1594 444.953632 166.985655 11788.4
id1849 364.136849 183.628767 11806.2
id1230 413.836124 184.375703 11916.8
id1948 502.953953 173.237159 12468.3"""
with open(TEST_FWF_FILENAME, "w") as f:
f.write(fwf_data)
def teardown_fwf_file():
if os.path.exists(TEST_FWF_FILENAME):
os.remove(TEST_FWF_FILENAME)
def test_from_parquet():
setup_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME)
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_parquet_file()
def test_from_parquet_with_columns():
setup_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_parquet_file()
def test_from_json():
setup_json_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_json(TEST_JSON_FILENAME)
modin_df = pd.read_json(TEST_JSON_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_json_file()
def test_from_html():
setup_html_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_html(TEST_HTML_FILENAME)[0]
modin_df = pd.read_html(TEST_HTML_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_html_file()
@pytest.mark.skip(reason="No clipboard on Travis")
def test_from_clipboard():
setup_clipboard(SMALL_ROW_SIZE)
pandas_df = pandas.read_clipboard()
modin_df = pd.read_clipboard()
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_excel():
setup_excel_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_excel(TEST_EXCEL_FILENAME)
modin_df = pd.read_excel(TEST_EXCEL_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_excel_file()
# @pytest.mark.skip(reason="Arrow version mismatch between Pandas and Feather")
def test_from_feather():
setup_feather_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_feather(TEST_FEATHER_FILENAME)
modin_df = pd.read_feather(TEST_FEATHER_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_feather_file()
def test_from_hdf():
setup_hdf_file(SMALL_ROW_SIZE, format=None)
pandas_df = pandas.read_hdf(TEST_READ_HDF_FILENAME, key="df")
modin_df = pd.read_hdf(TEST_READ_HDF_FILENAME, key="df")
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_hdf_file()
def test_from_hdf_format():
setup_hdf_file(SMALL_ROW_SIZE, format="table")
pandas_df = pandas.read_hdf(TEST_READ_HDF_FILENAME, key="df")
modin_df = pd.read_hdf(TEST_READ_HDF_FILENAME, key="df")
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_hdf_file()
def test_from_msgpack():
setup_msgpack_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_msgpack(TEST_MSGPACK_FILENAME)
modin_df = pd.read_msgpack(TEST_MSGPACK_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_msgpack_file()
def test_from_stata():
setup_stata_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_stata(TEST_STATA_FILENAME)
modin_df = pd.read_stata(TEST_STATA_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_stata_file()
def test_from_pickle():
setup_pickle_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_pickle(TEST_PICKLE_FILENAME)
modin_df = pd.read_pickle(TEST_PICKLE_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_pickle_file()
def test_from_sql(make_sql_connection):
filename = "test_from_sql.db"
table = "test_from_sql"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_df = pandas.read_sql(query, conn)
modin_df = pd.read_sql(query, conn)
assert modin_df_equals_pandas(modin_df, pandas_df)
with pytest.warns(UserWarning):
pd.read_sql_query(query, conn)
with pytest.warns(UserWarning):
pd.read_sql_table(table, conn)
@pytest.mark.skip(reason="No SAS write methods in Pandas")
def test_from_sas():
pandas_df = pandas.read_sas(TEST_SAS_FILENAME)
modin_df = pd.read_sas(TEST_SAS_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME)
modin_df = pd.read_csv(TEST_CSV_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
if not PY2:
pandas_df = pandas.read_csv(Path(TEST_CSV_FILENAME))
modin_df = pd.read_csv(Path(TEST_CSV_FILENAME))
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_chunksize(make_csv_file):
make_csv_file()
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=500)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=500)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
assert modin_df_equals_pandas(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
assert modin_df_equals_pandas(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
assert modin_df_equals_pandas(modin_df, pd_df)
def test_from_csv_delimiter(make_csv_file):
make_csv_file(delimiter="|")
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, sep="|")
modin_df = pd.read_csv(TEST_CSV_FILENAME, sep="|")
assert modin_df_equals_pandas(modin_df, pandas_df)
modin_df = pd.DataFrame.from_csv(
TEST_CSV_FILENAME, sep="|", parse_dates=False, header="infer", index_col=None
)
pandas_df = pandas.DataFrame.from_csv(
TEST_CSV_FILENAME, sep="|", parse_dates=False, header="infer", index_col=None
)
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_skiprows(make_csv_file):
make_csv_file()
pandas_df = | pandas.read_csv(TEST_CSV_FILENAME, skiprows=2) | pandas.read_csv |
import json
import math
import operator
from operator import getitem
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import roc_curve, auc
from tqdm.auto import tqdm
from data_prep import *
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
buffer = 1
gini_cutoff = 0.57
class NoResultsError(Exception):
pass
# Importance Metric
def sd_matrix(df):
"""
Calculates the standard deviation of each column and returns a list of
the positions (column labels) whose standard deviation is above the mean.
"""
df_std = df.std(axis=0, skipna=True)
df_std = df_std[df_std > df_std.mean()]
df_index = [ind for ind in df_std.index]
return df_index
def gini_weight(df):
"""
Calculates the Gini Coefficient which is a measure of statistical dispersion.
Gini = 1 means maximal inequality.
Gini = 0 means perfect equality, where all the values are the same.
"""
d = {}
# df = df ** 2
for col in df.columns:
col_list = df[col].to_numpy()
mad = np.abs(np.subtract.outer(col_list, col_list)).mean()
rmad = mad / col_list.mean()
g1 = 0.5 * rmad
d[col] = g1
d_df = pd.DataFrame.from_dict(d, orient="index")
d_df = d_df.round(decimals=2)
return d_df
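# Illustrative sketch (added for clarity, not part of the original pipeline): a
# column whose weights are all equal should give a Gini of ~0, while a column
# dominated by a single value should approach 1. The helper name is hypothetical.
def _demo_gini_weight():
    uniform = pd.DataFrame({0: [0.25, 0.25, 0.25, 0.25]})
    skewed = pd.DataFrame({0: [0.97, 0.01, 0.01, 0.01]})
    print(gini_weight(uniform))  # column 0 -> 0.00
    print(gini_weight(skewed))   # column 0 -> 0.72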
def weight_matrix(df):
"""
Calculates the gini column weights and creates a new weighted PSSM
"""
df_weighted = df * gini_weight(df)
df_weighted = df_weighted.round(decimals=2)
return df_weighted
# Calculation of the peptide window
def calc_brute_force_window(df1, df2):
"""
Calculates a list of possible windows for the comparison of two PSSMs.
"""
max_diff = 0
if len(df1.columns) > len(df2.columns):
max_diff = len(df2.columns)
else:
max_diff = len(df1.columns)
return [x for x in range(1, max_diff + 1)]
def gini_window_index(df):
"""
Finds all the important positions of a PSSM. Important positions are all
the positions whose Gini is larger than gini_cutoff; if none qualify, the
two positions with the highest Gini are returned.
"""
gini_df = gini_weight(df)
# gini_df_sorted = gini_df.sort_values(by=0, ascending=False)
# gini_window = []
# select_indices = [0, 1]
# gini_window = gini_df_sorted.iloc[select_indices].index.tolist()
# gini_window_index = [ind for ind in gini_window.index]
# print("This is the gini window index ", gini_window_index)
gini_window = gini_df[gini_df > gini_cutoff]
gini_window.dropna(inplace=True)
# print("This is the gini_window: ",gini_window)
if len(gini_window) == 0:
gini_df_sorted = gini_df.sort_values(by=0, ascending=False)
gini_window = []
select_indices = [0, 1]
gini_window_index = gini_df_sorted.iloc[select_indices].index.tolist()
# print("This is the new gini index: ", gini_window_index, " calc with 2 max")
else:
gini_window_index = [ind for ind in gini_window.index]
# print("This is the gini index: ", gini_window_index, " calc with gini cutoff")
# gini_window_index.sort()
# gini_window_index = [ind for ind in gini_window.index]
# print("This is the gini window index ", gini_window_index)
# # if len(gini_window) == 0:
# # df = df ** 2
# # gini_df = gini_weight(df)
# # gini_window = gini_df[gini_df > gini_df.mean() + gini_df.std()]
# # gini_window.dropna(inplace=True)
# # if len(gini_window) == 0:
# # gini_window = gini_df[gini_df > gini_df.mean()]
# # gini_window.dropna(inplace=True)
# gini_window_index = [ind for ind in gini_window.index]
# if len(gini_window_index) == 0:
# gini_window = gini_df[gini_df > gini_df.mean()]
# gini_window.dropna(inplace=True)
# gini_window_index = [ind for ind in gini_window.index]
return gini_window_index
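# Hypothetical usage sketch: positions whose Gini exceeds gini_cutoff (0.57) are
# treated as important; a flat column is dropped. The toy PSSM below is made up.
def _demo_gini_window_index():
    pssm = pd.DataFrame({0: [0.97, 0.01, 0.01, 0.01],   # high Gini -> kept
                         1: [0.25, 0.25, 0.25, 0.25]})  # Gini 0 -> dropped
    print(gini_window_index(pssm))  # [0]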
def calc_gini_windows(df1, df2):
"""
Calculates the list of window sizes for the comparison of the 2 PSSMs.
The important positions are printed for reference, but the returned list
currently falls back to the brute-force range of window sizes.
"""
index_1 = gini_window_index(df1)
index_2 = gini_window_index(df2)
# sdt_deviation = sd_matrix(df1)
print("Index_1: ", index_1)
print("Index_2: ", index_2)
windows_list = []
# if len(df1.columns) <= len(df2.columns):
# window = len(df1.columns)
# else:
# window = len(df2.columns)
# windows_list.append(window)
windows_list = calc_brute_force_window(df1, df2)
# if len(index_1) != 0 and len(index_2) != 0:
# if len(index_1) == 1:
# window = max(index_2) - min(index_2) + buffer
# windows_list.append(window)
# elif len(index_2) == 1:
# window = max(index_1) - min(index_1) + buffer
# windows_list.append(window)
# else:
# if len(df1.columns) <= len(df2.columns):
# min_window = max(index_1) - min(index_1) + buffer
# # min_window = max(sdt_deviation) - min(sdt_deviation) + buffer
# max_window = max(index_2) - min(index_2) + buffer
# if min_window > max_window:
# max_window, min_window = min_window, max_window
# else:
# min_window = max(index_2) - min(index_2) + buffer
# max_window = max(index_1) - min(index_1) + buffer
# if min_window > max_window:
# max_window, min_window = min_window, max_window
# windows_list = [x for x in range(min_window, max_window + 1)]
# elif len(index_1) == 0 or len(index_2) == 0:
# cindex = index_1 + index_2
# max_window = min_window = max(cindex) - min(cindex) + buffer
# windows_list = [x for x in range(min_window, max_window + 1)]
# else:
# windows_list = calc_brute_force_window(df1, df2)
print("This is the windows_list: ", windows_list)
return windows_list
def get_df_window(df1, df2, pep_window, i, j):
"""
Slices the dataframes according to a given window size.
"""
a = df1.loc[:, i : i + pep_window - 1]
b = df2.loc[:, j : j + pep_window - 1]
a.columns = [ind for ind in range(0, len(a.columns))]
b.columns = [ind for ind in range(0, len(b.columns))]
return a, b
def find_motif(df):
"""
Finds the motif of the pssm using the important positions.
"""
motif = ""
motif_l = []
gini_index = gini_window_index(df)
st_index = sd_matrix(df)
if len(gini_index) != 0:
motif_range = [x for x in range(min(gini_index), max(gini_index) + 1)]
else:
motif_range = [x for x in range(0, len(df) + 1)]
for col in motif_range:
if col in st_index:
Index_label = df[df[col] > df[col].mean()].index.tolist()
if len(Index_label) == 1:
motif_l.append(Index_label[0])
else:
Index_label = df[df[col] == df[col].max()].index.tolist()
motif_l.append(Index_label[0])
else:
motif_l.append("x")
print("This is the motif: ", motif.join(motif_l))
return motif.join(motif_l)
# Similarity Metrics
def matrix_equal(df1, df2):
"""
Returns a boolean whether two matrices are equal
"""
return df2.equals(df1)
def sum_squared_distance_matrix(df1, df2):
"""
Calculates the squared distances of two matrices and returns the sum value
"""
adf1, adf2 = df1.align(df2, join="outer", axis=1)
full_ssd = (adf1 - adf2) ** 2
full_ssd = full_ssd.dropna(axis=1, how="all")
full_ssd_val = full_ssd.fillna(0).values.sum()
return full_ssd_val
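# Minimal sketch (made-up matrices): only the columns shared by both dataframes
# contribute, because columns that are entirely NaN after the outer alignment
# are dropped before the squared differences are summed.
def _demo_sum_squared_distance_matrix():
    a = pd.DataFrame({0: [1.0, 0.0], 1: [0.5, 0.5]})
    b = pd.DataFrame({1: [0.0, 1.0], 2: [1.0, 1.0]})
    print(sum_squared_distance_matrix(a, b))  # (0.5)**2 + (-0.5)**2 = 0.5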
def euclidian_distance(df1, df2):
"""
Calculates the Euclidean distance of the two matrices. Sort will be ascending.
"""
ed_df = (df1 - df2) ** 2
ed_df = ed_df.dropna(axis=1, how="all")
full_eu = math.sqrt(ed_df.fillna(0).values.sum())
return full_eu
def correlation_coefficient(df1, df2):
"""
Calculates and return the correlation coefficient of two matrices.
Sort will be decreasing.
"""
mean1 = sum(df1.mean())
mean2 = sum(df2.mean())
summerA = (df1 - mean1) * (df2 - mean2)
summerB = (df1 - mean1) ** 2
summerC = (df2 - mean2) ** 2
return sum(summerA) / math.sqrt((sum(summerB) * sum(summerC)))
def calc_sum_of_squared_distance(dfi, dfj):
"""
Calculates the square distance between 2 dataframes and returns their sum.
Order is ascending.
"""
ssd = (dfi - dfj) ** 2
ssd_sum = ssd.sum().round(decimals=3)
return ssd_sum
def calc_dot_product(dfi, dfj):
"""
Calculates the dot product between 2 dataframes and returns their sum.
Order is ascending.
"""
dot_product = dfi.values * dfj.values
dot_product_sum = dot_product.sum().round(decimals=3)
return dot_product_sum
def calc_Kullback_Leibler_distance(dfi, dfj):
"""
Calculates the Kullback-Leibler distance of the two matrices.
As defined in Aerts et al. (2003), where it is referred to as mutual information.
Sort will be ascending.
Epsilon is used here to avoid conditional code for checking that neither P nor Q is equal to 0.
"""
epsilon = 0.00001
P = dfi + epsilon
Q = dfj + epsilon
divergence = np.sum(P * np.log2(P / Q))
return divergence
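# Hedged example (hypothetical columns): identical distributions give ~0 and the
# divergence grows as the two columns disagree; epsilon keeps the log2 finite.
def _demo_kl_distance():
    p = pd.Series([0.5, 0.3, 0.2])
    q = pd.Series([0.5, 0.3, 0.2])
    r = pd.Series([0.1, 0.1, 0.8])
    print(calc_Kullback_Leibler_distance(p, q))  # ~0.0
    print(calc_Kullback_Leibler_distance(p, r))  # > 0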
def calc_pearson_correlation(dfi, dfj):
"""
Calculates Pearson's correlation between two series. The rescaling from
[-1, 1] to [0, 1] is currently commented out, so the raw coefficient is returned.
Order is decreasing.
"""
pearson = dfi.corr(dfj)
pearson = pearson.round(decimals=3)
# Turning the correlation coefficient scale from -1 - 1 to 0-1
# pearson_scale = (pearson + 1) / 2
# pearson_scale = pearson_scale.round(decimals=3)
return pearson
def calc_spearmans_correlation(dfi, dfj):
"""
Calculates Spearman's correlation between two series. The rescaling from
[-1, 1] to [0, 1] is currently commented out, so the raw coefficient is returned.
Order is decreasing.
"""
spearman = dfi.corr(dfj, method="spearman")
spearman = round(spearman, 3)
# Turning the correlation coefficient scale from -1 - 1 to 0-1
# spearmans_scale = (spearman + 1) / 2
# spearmans_scale = round(spearmans_scale, 3)
return spearman
def calc_kendall_correlation(dfi, dfj):
"""
Calculates Kendall's correlation between two series. The rescaling from
[-1, 1] to [0, 1] is currently commented out, so the raw coefficient is returned.
Order is decreasing.
"""
kendall = dfi.corr(dfj, method="kendall")
kendall = round(kendall, 3)
# Turning the correlation coefficient scale from -1 - 1 to 0-1
# kendalls_scale = (kendall + 1) / 2
# kendalls_scale = round(kendalls_scale, 3)
return kendall
def calculate_similarity(df1, df2):
"""
Calculates all the similarity measures column-wise and returns a one-row dataframe per measure.
"""
kendalls = []
pearsons = []
spearmans = []
dots = []
ssds = []
kls = []
for i in range(0, len(df1.columns)):
dfi = df1.iloc[:, i]
dfj = df2.iloc[:, i]
kendall = calc_kendall_correlation(dfi, dfj)
pearson = calc_pearson_correlation(dfi, dfj)
spearman = calc_spearmans_correlation(dfi, dfj)
dot_product = calc_dot_product(dfi, dfj)
ssd = calc_sum_of_squared_distance(dfi, dfj)
kl = calc_Kullback_Leibler_distance(dfi, dfj)
kendalls.append(kendall)
pearsons.append(pearson)
spearmans.append(spearman)
dots.append(dot_product)
ssds.append(ssd)
kls.append(kl)
kendalls_df = convert_to_df(kendalls, "Kendall")
pearsons_df = convert_to_df(pearsons, "Pearson")
spearmans_df = convert_to_df(spearmans, "Spearman")
dots_df = convert_to_df(dots, "Dot")
ssds_df = convert_to_df(ssds, "SSD")
kls_df = convert_to_df(kls, "KL")
return kendalls_df, pearsons_df, spearmans_df, dots_df, ssds_df, kls_df
# Comparisons
def compare_matrix_windowed(df1, df2, pep_window):
"""
Compares two matrices using a sliding window and returns the sorted comparison
results together with the per-window similarity measures and Gini weights of the
best-scoring window.
"""
results = {}
results_sorted = None
ssd_sum_dic = {}
pearsons = {}
spearmans = {}
kendalls = {}
dot_products = {}
kl_divergence = {}
ginis_1 = {}
ginis_2 = {}
it_window_a = len(df1.columns) - pep_window
it_window_b = len(df2.columns) - pep_window
for i in range(0, it_window_a + 1):
for j in range(0, it_window_b + 1):
key = "{}:{} - {}:{}".format(i, i + pep_window - 1, j, j + pep_window - 1)
a, b = get_df_window(df1, df2, pep_window, i, j)
a_gini_df = gini_weight(a)
a_gini_df = a_gini_df.T
b_gini_df = gini_weight(b)
b_gini_df = b_gini_df.T
(
kendalls_cor,
pearsons_cor,
spearmans_cor,
dot_product,
ssd,
kl,
) = calculate_similarity(a, b)
# TODO make this configurable
comparison = pd.DataFrame(
pearsons_cor.values * a_gini_df.values * b_gini_df.values,
columns=pearsons_cor.columns,
index=pearsons_cor.index,
)
# Suggested way:
# SDF as sum(SSD * (1 - gini1) * (1 - gini2))
# and sum(SSD * (1 - gini1 * gini2))
results[key] = comparison.values.sum().round(decimals=3)
ssd_sum_dic[key] = ssd
pearsons[key] = pearsons_cor
spearmans[key] = spearmans_cor
kendalls[key] = kendalls_cor
dot_products[key] = dot_product
kl_divergence[key] = kl
ginis_1[key] = a_gini_df
ginis_2[key] = b_gini_df
# TODO make order configurable
results_sorted = sorted(
results.items(), key=operator.itemgetter(1), reverse=True
)
if results_sorted is None:
raise NoResultsError(
"window a: {} , window b: {}".format(it_window_a, it_window_b)
)
res_0 = results_sorted[0]
if res_0[0] in ssd_sum_dic:
ssd_res = ssd_sum_dic[res_0[0]]
pearsons_res = pearsons[res_0[0]]
spearmans_res = spearmans[res_0[0]]
kendalls_res = kendalls[res_0[0]]
dot_product_res = dot_products[res_0[0]]
kl_divergence_res = kl_divergence[res_0[0]]
ginis_1_res = ginis_1[res_0[0]]
ginis_2_res = ginis_2[res_0[0]]
return (
results_sorted,
ssd_res,
pearsons_res,
spearmans_res,
kendalls_res,
dot_product_res,
kl_divergence_res,
ginis_1_res,
ginis_2_res,
)
def compare_two_files(base_file, second_file, pep_window):
"""
Calculate all the comparisons for two PSSMs
"""
df1 = prepare_matrix(base_file)
df1_norm = normalise_matrix(df1)
df1_sd = sd_matrix(df1)
# df1_weigthed = weight_matrix(df1_norm)
df2 = prepare_matrix(second_file)
df2_sd = sd_matrix(df2)
df2_norm = normalise_matrix(df2)
# df2_weigthed = weight_matrix(df2_norm)
ssd_global = sum_squared_distance_matrix(df1_norm, df2_norm)
equality = matrix_equal(df1_norm, df2_norm)
(
comparison_results,
ssd,
pearsons,
spearmans,
kendalls,
dot_products,
kl_divergence,
gini_a,
gini_b,
) = compare_matrix_windowed(df1_norm, df2_norm, pep_window)
return (
equality,
df1_sd,
df2_sd,
ssd_global,
comparison_results,
df1_norm,
df2_norm,
ssd,
pearsons,
spearmans,
kendalls,
dot_products,
kl_divergence,
gini_a,
gini_b,
)
def compare_combined_file(base_file, combined_file, pep_window):
"""
Calculate all the comparisons for a PSSM and a .json file containing multiple
PSSMs
"""
results = []
one_window = 0
with open(combined_file) as json_file:
data = json.load(json_file)
# TODO find a way not have to use this
index_names = []
for elm in data:
index_names.append(data[elm]["motif"])
# "ELM",
col_names = [
"Quality",
"No Important Positions",
"Windows",
"Comparison Results",
"Norm. Window",
"Motif 1",
"Motif 2",
"Consensus",
"Comparison Windows",
"Gini 1",
"Gini 2",
"Similarity",
]
df = pd.DataFrame(columns=col_names, index=index_names)
# df = pd.DataFrame(columns=col_names)
# i = 0
for pssm in tqdm(data):
try:
# json_pssm = json.dumps(data[pssm]["pssm"])
json_pssm = json.dumps(data[pssm]["other_scoring_methods"]["log odds"])
print("-----> ", data[pssm]["motif"])
pep_windows = []
if pep_window == 0:
df1 = prepare_matrix(base_file)
df1 = normalise_matrix(df1)
df2 = prepare_matrix(json_pssm)
df2 = normalise_matrix(df2)
gini_1 = gini_weight(df1)
gini_2 = gini_weight(df2)
pep_windows = calc_gini_windows(df1, df2)
index_1 = gini_window_index(df1)
# std_dev = sd_matrix(df1)
min_window = max(index_1) - min(index_1) + 1
# min_window_dev = max(std_dev) - min(std_dev) +1
# if min_window > min_window_dev:
# min_window = min_window_dev
print("min_window is ", min_window)
if min_window > len(df2.columns):
min_window = len(df2.columns)
print("Or better the window is ", min_window)
index_2 = gini_window_index(df2)
if len(index_2) == 1:
one_window += 1
motif_1 = find_motif(df1)
motif_2 = find_motif(df2)
else:
pep_windows.append(int(pep_window))
print("pep_windows: ", pep_windows)
for window in pep_windows:
# i += 1
if window >= min_window:
res = {}
(
equality,
_,
_,
ssd_global,
comparison_results,
df1,
df2,
ssd,
pearsons,
spearmans,
kendalls,
dot_products,
kl_divergence,
gini_a,
gini_b,
) = compare_two_files(base_file, json_pssm, window)
res["equality"] = equality
res["base"] = base_file
res["second"] = data[pssm]["motif"]
res["ssd_global"] = ssd_global
res["comparison_results"] = comparison_results[0]
res["df1"] = df1
res["df2"] = df2
res["ssd"] = ssd
res["pearsons"] = pearsons
res["spearmans"] = spearmans
res["kendalls"] = kendalls
res["dot_products"] = dot_products
res["kl_divergence"] = kl_divergence
res["pep_window"] = window
res["norm_window"] = comparison_results[0][1] / window
res["gini_1"] = gini_a
res["gini_2"] = gini_b
res["motif_1"] = motif_1
res["motif_2"] = motif_2
results.append(res)
print(
"second: ",
res["second"],
"window ",
window,
" comparison ",
comparison_results[0][1],
)
print("Norm_window: ", res["norm_window"])
if (
res["norm_window"] > df.at[res["second"], "Norm. Window"]
) or pd.isna(df.at[res["second"], "Norm. Window"]):
print("adding...")
df.loc[res["second"]] = [
data[pssm]["quality"],
len(index_2),
window,
comparison_results[0][1],
res["norm_window"],
res["motif_1"],
res["motif_2"],
data[pssm]["consensus"],
comparison_results[0][0],
convert_df_to_string(res["gini_1"]),
convert_df_to_string(res["gini_2"]),
convert_df_to_string(res["pearsons"])
]
# df.loc[i] = [
# res["second"],
# data[pssm]["quality"],
# len(index_2),
# window,
# comparison_results[0][1],
# res["norm_window"],
# res["motif_1"],
# res["motif_2"],
# data[pssm]["consensus"],
# comparison_results[0][0],
# convert_df_to_string(res["gini_1"]),
# convert_df_to_string(res["gini_2"]),
# convert_df_to_string(res["pearsons"]),
# ]
except TypeError as ex:
print("error: {} on pssm: {}".format(ex, pssm))
except IndexError as ex:
print("error: {} on pssm: {}, res: {} ".format(ex, pssm, res))
except NoResultsError as ex:
print("error: {} on pssm: {}".format(ex, pssm))
continue
df = df.sort_values(by=["Norm. Window"], ascending=False)
df.to_csv("TANK_vs_ELM_min_len_-.csv")
results.sort(key=lambda x: float(x["norm_window"]), reverse=True)
print("Results with 1 important position: ", one_window)
for res in results:
print(
"second: ",
res["second"],
"windows: ",
comparison_results[0][0],
"Norm Window Values: ",
res["norm_window"],
"Original window: ",
res["pep_window"],
)
print("\n")
return results
def compare_two_combined(file_1, file_2, pep_window):
results = []
one_window = 0
with open(file_1) as json_file_1, open(file_2) as json_file_2:
data1 = json.load(json_file_1)
data2 = json.load(json_file_2)
# TODO find a way not have to use this
index_names = []
for pssm in data1:
index_names.append(data1[pssm]["motif"])
col_names = [
"ELM",
"Quality",
"No Important Positions",
"Windows",
"Comparison Results",
"Norm. Window",
"Motif 1",
"Motif 2",
"Consensus",
"Comparison Windows",
"Gini 1",
"Gini 2",
"Similarity",
]
df = pd.DataFrame(columns=col_names, index=index_names)
# df = pd.DataFrame(columns=col_names)
# i = 0
for base_pssm in tqdm(data1):
base_file = json.dumps(data1[base_pssm]["pssm"])
print("-----> ", data1[base_pssm]["motif"])
for pssm in tqdm(data2):
try:
# json_pssm = json.dumps(data2[pssm]["pssm"])
json_pssm = json.dumps(data2[pssm]["other_scoring_methods"]["log odds"])
print("-----> ", data2[pssm]["motif"])
pep_windows = []
if pep_window == 0:
df1 = prepare_matrix(base_file)
df1 = normalise_matrix(df1)
df2 = prepare_matrix(json_pssm)
df2 = normalise_matrix(df2)
gini_1 = gini_weight(df1)
gini_2 = gini_weight(df2)
pep_windows = calc_gini_windows(df1, df2)
print("Windows ", pep_windows)
index_1 = gini_window_index(df1)
# std_dev = sd_matrix(df1)
min_window = max(index_1) - min(index_1) + 1
# min_window_dev = max(std_dev) - min(std_dev) +1
# if min_window > min_window_dev:
# min_window = min_window_dev
print("min_window is ", min_window)
if min_window > len(df2.columns):
min_window = len(df2.columns)
print("Or better the window is ", min_window)
index_2 = gini_window_index(df2)
if len(index_2) == 1:
one_window += 1
motif_1 = find_motif(df1)
motif_2 = find_motif(df2)
else:
pep_windows.append(int(pep_window))
print("pep_windows: ", pep_windows)
for window in pep_windows:
# i += 1
if window >= min_window:
res = {}
(
equality,
_,
_,
ssd_global,
comparison_results,
df1,
df2,
ssd,
pearsons,
spearmans,
kendalls,
dot_products,
kl_divergence,
gini_a,
gini_b,
) = compare_two_files(base_file, json_pssm, window)
res["equality"] = equality
res["base"] = base_file
res["base_name"] = data1[base_pssm]["motif"]
res["second"] = data2[pssm]["motif"]
res["ssd_global"] = ssd_global
res["comparison_results"] = comparison_results[0]
res["df1"] = df1
res["df2"] = df2
res["ssd"] = ssd
res["pearsons"] = pearsons
res["spearmans"] = spearmans
res["kendalls"] = kendalls
res["dot_products"] = dot_products
res["kl_divergence"] = kl_divergence
res["pep_window"] = window
res["norm_window"] = comparison_results[0][1] / window
res["gini_1"] = gini_a
res["gini_2"] = gini_b
res["motif_1"] = motif_1
res["motif_2"] = motif_2
results.append(res)
print(
"second: ",
res["second"],
"window ",
window,
" comparison ",
comparison_results[0][1],
)
print("Norm_window: ", res["norm_window"])
print(".........",res["base_name"])
print("....", | pd.isna(df.at[res["base_name"], "Norm. Window"]) | pandas.isna |
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 15:08:25 2018
@author: fiorito_l
"""
import logging
from functools import reduce
import pandas as pd
import numpy as np
from .utils import BaseFile, Xs
from ..settings import SandyError
__author__ = "<NAME>"
__all__ = ["Errorr"]
class Errorr(BaseFile):
Format = "errorr"
def read_section(self, mat, mf, mt):
"""
Parse MAT/MF/MT section
"""
if mf == 1:
from .mf1 import read_errorr as read
elif mf == 3:
from .mf3 import read_errorr as read
elif mf == 33 or mf == 31 or mf == 35:
from .mf33 import read_errorr as read
else:
raise SandyError("SANDY cannot parse section MAT{}/MF{}/MT{}".format(mat,mf,mt))
if (mat,mf,mt) not in self.index:
raise SandyError("section MAT{}/MF{}/MT{} is not in tape".format(mat,mf,mt))
return read(self.loc[mat,mf,mt].TEXT)
def get_xs(self, listmat=None, listmt=None, **kwargs):
"""
Extract xs from errorr file into Xs instance.
"""
condition = self.index.get_level_values("MF") == 3
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
mat = self.index.get_level_values("MAT")[0]
eg = self.read_section(mat,1,451)["EG"]
ListXs = []
for ix,text in tape.TEXT.iteritems():
mat,mf,mt = ix
X = self.read_section(*ix)
xs = pd.Series(X["XS"], index=eg[:-1], name=(X["MAT"],X["MT"])).rename_axis("E").to_frame()
ListXs.append(xs)
if not ListXs:
logging.warn("requested cross sections were not found")
return | pd.DataFrame() | pandas.DataFrame |
from collections import Counter
import dateutil
from nltk import sent_tokenize, word_tokenize
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from numpy import dot
from numpy.linalg import norm
EMAIL_BODY_FIELD = 'emailbody'
EMAIL_SUBJECT_FIELD = 'emailsubject'
EMAIL_HTML_FIELD = 'emailbodyhtml'
FROM_FIELD = 'emailfrom'
FROM_DOMAIN_FIELD = 'fromdomain'
PREPROCESSED_EMAIL_BODY = 'preprocessedemailbody'
PREPROCESSED_EMAIL_SUBJECT = 'preprocessedemailsubject'
MERGED_TEXT_FIELD = 'mereged_text'
EMAIL_TO_FIELD = 'emailto'
EMAIL_CC_FIELD = 'emailcc'
EMAIL_BCC_FIELD = 'emailbcc'
MIN_CAMPAIGN_SIZE = int(demisto.args().get("minIncidentsForCampaign", 3))
MIN_UNIQUE_RECIPIENTS = int(demisto.args().get("minUniqueRecipients", 2))
DUPLICATE_SENTENCE_THRESHOLD = 0.95
KEYWORDS = ['#1', '100%', 'access', 'accordance', 'account', 'act', 'action', 'activate', 'ad', 'affordable', 'amazed',
'amazing', 'apply', 'asap', 'asked', 'attach', 'attached', 'attachment', 'attachments', 'attention',
'authorize', 'authorizing', 'avoid', 'bank', 'bargain', 'billing', 'bonus', 'boss', 'bucks', 'bulk', 'buy',
"can't", 'cancel', 'candidate', 'capacity', 'card', 'cards', 'cash', 'casino', 'caution', 'cents',
'certified', 'chance', 'charges', 'claim', 'claims', 'clearance', 'click', 'collect', 'confidentiality',
'confirm', 'confirmation', 'confirmed', 'congratulations', 'consideration', 'consolidate', 'consultation',
'contact', 'contract', 'credentials', 'credit', 'day', 'days', 'deadline', 'deal', 'deals', 'dear', 'debt',
'delivered', 'delivery', 'deposit', 'detected', 'dhl', 'disabled', 'discount', 'discounts', 'document',
'documents', 'dollar', 'dollars', 'dropbox', 'drugs', 'due', 'earn', 'earnings', 'enlarge', 'enlargement',
'equity', 'erection', 'erections', 'exclusive', 'expire', 'expires', 'fedex', 'fees', 'file', 'finance',
'financial', 'fraud', 'free', 'friend', 'from', 'funds', 'gas', 'gift', 'gimmick', 'giveaway', 'great',
'growth', 'guarantee', 'guaranteed', 'hack', 'hacked', 'hacker', 'hormone', 'hosting', 'hours', 'hurry',
'immediate', 'immediately', 'important', 'income', 'increase', 'instant', 'interest', 'investment',
'invoice', 'kindly', 'last', 'lender', 'lenders', 'lifetime', 'limited', 'loan', 'loans', 'login', 'lose',
'loss', 'luxury', 'market', 'marketing', 'mass', 'mastrubate', 'mastrubating', 'med', 'medications',
'medicine', 'meds', 'member', 'membership', 'million', 'millions', 'miracle', 'money', 'monthly',
'months', 'mortgage', 'newsletter', 'notification', 'notify', 'obligation', 'offer', 'offers', 'oil',
'only', 'open', 'opt', 'order', 'package', 'paid', 'parcel', 'partners', 'password', 'passwords',
'payment', 'payments', 'paypal', 'payroll', 'pdf', 'penis', 'pennies', 'permanently', 'pharmacy', 'pics',
'pictures', 'pill', 'pills', 'porn', 'porno', 'postal', 'potential', 'pre-approved', 'presently',
'preview', 'price', 'prize', 'profit', 'promise', 'promotion', 'purchase', 'pure', 'qualifies', 'qualify',
'quote', 'rates', 'receipt', 'record', 'recorded', 'recording', 'refund', 'request', 'requested',
'requires', 'reserve', 'reserves', 'review', 'risk', 'sales', 'satisfactin', 'satisfaction', 'satisfied',
'save', 'scam', 'security', 'sensitive', 'sex', 'share', 'shared', 'sharing', 'shipment', 'shipping',
'sir', 'spam', 'special', 'spend', 'spending', 'started', 'starting', 'stock', 'success', 'supplies',
'supply', 'suspended', 'temporarily', 'terms', 'trader', 'trading', 'traffic', 'transaction', 'transfer',
'trial', 'unlimited', 'unsecured', 'unsolicited', 'unsubscribe', 'update', 'ups', 'urgent', 'user', 'usps',
'valium', 'verification', 'verify', 'viagra', 'vicodin', 'videos', 'vids', 'viedo', 'virus', 'waiting',
'wallet', 'warranty', 'web', 'weight', 'win', 'winner', 'winning', 'wire', 'xanax']
STATUS_DICT = {
0: "Pending",
1: "Active",
2: "Closed",
3: "Archive",
}
def return_outputs_custom(readable_output, outputs=None, tag=None):
return_entry = {
"Type": entryTypes["note"],
"HumanReadable": readable_output,
"ContentsFormat": formats['json'],
"Contents": outputs,
"EntryContext": outputs,
}
if tag is not None:
return_entry["Tags"] = ['campaign_{}'.format(tag)]
demisto.results(return_entry)
def add_context_key(entry_context):
new_context = {}
for k, v in entry_context.items():
new_context['{}.{}'.format('EmailCampaign', k)] = v
return new_context
def create_context_for_campaign_details(campaign_found=False, incidents_df=None):
if not campaign_found:
return {
'isCampaignFound': campaign_found,
}
else:
incident_id = demisto.incident()['id']
incident_df = incidents_df[
['id', 'similarity', FROM_FIELD, FROM_DOMAIN_FIELD]] # lgtm [py/hash-unhashable-value]
incident_df = incident_df[incident_df['id'] != incident_id]
incident_df.rename({FROM_DOMAIN_FIELD: 'emailfromdomain'}, axis=1, inplace=True)
incidents_context = incident_df.fillna(1).to_dict(orient='records')
return {
'isCampaignFound': campaign_found,
'involvedIncidentsCount': len(incidents_df) if incidents_df is not None else 0,
'incidents': incidents_context
}
def create_context_for_indicators(indicators_df=None):
if indicators_df is None:
indicators_context = []
else:
indicators_df.rename({'Value': 'value'}, axis=1, inplace=True)
indicators_df = indicators_df[['id', 'value']]
indicators_context = indicators_df.to_dict(orient='records')
return {'indicators': indicators_context}
def create_empty_context():
context = create_context_for_campaign_details(campaign_found=False)
context = add_context_key(context)
return context
def is_number_of_incidents_too_low(res, incidents):
if not res["EntryContext"]['isDuplicateIncidentFound'] or \
len(incidents) < MIN_CAMPAIGN_SIZE:
return_outputs_custom('No possible campaign was detected', create_empty_context())
return True
return False
def is_number_of_unique_recipients_is_too_low(incidents):
unique_recipients = Counter([str(i.get(EMAIL_TO_FIELD, 'None')) for i in incidents])
unique_recipients += Counter([str(i[EMAIL_CC_FIELD]) for i in incidents if EMAIL_CC_FIELD in i])
unique_recipients += Counter([str(i[EMAIL_BCC_FIELD]) for i in incidents if EMAIL_BCC_FIELD in i])
missing_recipients = unique_recipients['None']
unique_recipients.pop('None', None)
if (len(unique_recipients) < MIN_UNIQUE_RECIPIENTS and missing_recipients == 0) or \
(0 < len(unique_recipients) < MIN_UNIQUE_RECIPIENTS and missing_recipients > 0):
msg = 'Similar emails were found, but the number of their unique recipients is too low to consider them as ' \
'campaign.\n '
msg += 'If you wish to consider similar emails as campaign even with low number of unique recipients, ' \
'please change *minUniqueRecipients* argument\'s value.\n'
msg += 'Details:\n'
msg += '* Found {} similar incidents.\n'.format(len(incidents))
msg += '* Those incidents have {} unique recipients'.format(len(unique_recipients))
msg += ' ({}).\n'.format(', '.join(unique_recipients))
msg += '* The minimum number of unique recipients for similar emails as campaign: ' \
'{}\n'.format(MIN_UNIQUE_RECIPIENTS)
if missing_recipients > 0:
msg += '* Could not find email recipient for {}/{} incidents ' \
'(*Email To* field is empty)'.format(missing_recipients, len(incidents))
return_outputs_custom(msg, create_empty_context())
return True
return False
def get_str_representation_top_n_values(values_list, counter_tuples_list, top_n):
domains_counter_top = counter_tuples_list[:top_n]
if len(counter_tuples_list) > top_n:
domains_counter_top += [('Other', len(values_list) - sum(x[1] for x in domains_counter_top))]
return ', '.join('{} ({})'.format(domain, count) for domain, count in domains_counter_top)
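# Illustrative call (made-up values): the top-N items keep their own counts and
# everything else is folded into an "Other" bucket.
def _demo_top_n_representation():
    values = ['a.com'] * 3 + ['b.com'] * 2 + ['c.com', 'd.com']
    counts = Counter(values).most_common()
    return get_str_representation_top_n_values(values, counts, 2)
    # -> 'a.com (3), b.com (2), Other (2)'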
def calculate_campaign_details_table(incidents_df, fields_to_display):
n_incidents = len(incidents_df)
similarities = incidents_df['similarity'].dropna().to_list()
max_similarity = max(similarities)
min_similarity = min(similarities)
headers = []
contents = []
headers.append('Details')
contents.append('Found possible campaign of {} similar emails'.format(n_incidents))
if max_similarity > min_similarity + 10 ** -3:
headers.append('Similarity range')
contents.append("{:.1f}%-{:.1f}%".format(min_similarity * 100, max_similarity * 100))
else:
headers.append('Similarity')
contents.append("{:.1f}%".format(max_similarity * 100))
incidents_df['created_dt'] = incidents_df['created'].apply(lambda x: dateutil.parser.parse(x)) # type: ignore
datetimes = incidents_df['created_dt'].dropna() # type: ignore
min_datetime, max_datetime = min(datetimes), max(datetimes)
if (max_datetime - min_datetime).days == 0:
headers.append('Date')
contents.append(max_datetime.strftime("%B %d, %Y"))
else:
headers.append('Date range')
contents.append('{} - {}'.format(min_datetime.strftime("%B %d, %Y"), max_datetime.strftime("%B %d, %Y")))
senders = incidents_df[FROM_FIELD].dropna().replace('', np.nan).tolist()
senders_counter = Counter(senders).most_common() # type: ignore
senders_domain = incidents_df[FROM_DOMAIN_FIELD].replace('', np.nan).dropna().tolist()
domains_counter = Counter(senders_domain).most_common() # type: ignore
if EMAIL_TO_FIELD in incidents_df.columns:
recipients = incidents_df[EMAIL_TO_FIELD].replace('', np.nan).dropna().tolist()
if EMAIL_CC_FIELD in incidents_df.columns:
recipients += incidents_df[EMAIL_CC_FIELD].replace('', np.nan).dropna().tolist()
if EMAIL_BCC_FIELD in incidents_df.columns:
recipients += incidents_df[EMAIL_BCC_FIELD].replace('', np.nan).dropna().tolist()
recipients_counter = Counter(recipients).most_common() # type: ignore
if len(senders_counter) == 1:
domain_header = "Sender domain"
sender_header = "Sender address"
elif len(senders_counter) > 1 and len(domains_counter) == 1:
domain_header = "Senders domain"
sender_header = "Senders addresses"
else:
domain_header = "Senders domains"
sender_header = "Senders addresses"
top_n = 3
domain_value = get_str_representation_top_n_values(senders_domain, domains_counter, top_n)
sender_value = get_str_representation_top_n_values(senders, senders_counter, top_n)
recipients_value = get_str_representation_top_n_values(recipients, recipients_counter, len(recipients_counter))
headers.append(domain_header)
contents.append(domain_value)
headers.append(sender_header)
contents.append(sender_value)
headers.append('Recipients')
contents.append(recipients_value)
for field in fields_to_display:
if field in incidents_df.columns:
field_values = get_non_na_empty_values(incidents_df, field)
if len(field_values) > 0:
field_values_counter = Counter(field_values).most_common() # type: ignore
field_value_str = get_str_representation_top_n_values(field_values, field_values_counter, top_n)
headers.append(field)
contents.append(field_value_str)
hr = tableToMarkdown('Possible Campaign Detected', {header: value for header, value in zip(headers, contents)},
headers=headers)
return hr
def get_non_na_empty_values(incidents_df, field):
field_values = incidents_df[field].replace('', None).dropna().tolist()
field_values = [x for x in field_values if len(str(x).strip()) > 0]
return field_values
def cosine_sim(a, b):
return dot(a, b) / (norm(a) * norm(b))
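# Quick numeric check (hypothetical vectors): parallel vectors score ~1.0 and
# orthogonal vectors score 0.0.
def _demo_cosine_sim():
    print(cosine_sim(np.array([1, 1, 0]), np.array([2, 2, 0])))  # ~1.0
    print(cosine_sim(np.array([1, 0, 0]), np.array([0, 1, 0])))  # 0.0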
def summarize_email_body(body, subject, nb_sentences=3, subject_weight=1.5, keywords_weight=1.5):
corpus = sent_tokenize(body)
cv = CountVectorizer(stop_words=list(stopwords.words('english')))
body_arr = cv.fit_transform(corpus).toarray()
subject_arr = cv.transform(sent_tokenize(subject)).toarray()
word_list = cv.get_feature_names()
count_list = body_arr.sum(axis=0) + subject_arr.sum(axis=0) * subject_weight
duplicate_sentences = [i for i, arr in enumerate(body_arr) if
any(cosine_sim(arr, arr2) > DUPLICATE_SENTENCE_THRESHOLD
for arr2 in body_arr[:i])]
word_frequency = dict(zip(word_list, count_list))
val = sorted(word_frequency.values())
max_frequency = val[-1]
for word in word_frequency.keys():
word_frequency[word] = (word_frequency[word] / max_frequency)
for word in KEYWORDS:
if word in word_frequency:
word_frequency[word] *= keywords_weight
sentence_rank = [0] * len(corpus)
for i, sent in enumerate(corpus):
if i in duplicate_sentences:
continue
for word in word_tokenize(sent):
if word.lower() in word_frequency.keys():
sentence_rank[i] += word_frequency[word.lower()]
sentence_rank[i] = sentence_rank[i] / len(word_tokenize(sent)) # type: ignore
top_sentences_indices = np.argsort(sentence_rank)[::-1][:nb_sentences].tolist()
summary = []
for sent_i in sorted(top_sentences_indices):
sent = corpus[sent_i].strip().replace('\n', ' ')
if sent_i == 0 and sent_i + 1 not in top_sentences_indices:
sent = sent + ' ...'
elif sent_i + 1 == len(corpus) and sent_i - 1 not in top_sentences_indices:
sent = '... ' + sent
elif sent_i - 1 not in top_sentences_indices and sent_i + 1 not in top_sentences_indices:
sent = '... ' + sent + ' ...'
summary.append(sent)
return '\n'.join(summary)
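# Hedged usage sketch (the email text is made up, and the NLTK corpora used above
# are assumed to be available): the summary keeps the highest-ranked sentences,
# boosting words that also appear in the subject or in KEYWORDS.
def _demo_summarize_email_body():
    body = ("Your account was suspended. "
            "Please click the link to verify your password immediately. "
            "This is a routine notification from the support team.")
    return summarize_email_body(body, "Urgent: verify your account", nb_sentences=2)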
def create_email_summary_hr(incidents_df):
hr_email_summary = ''
clean_email_subject = incidents_df.iloc[0][PREPROCESSED_EMAIL_SUBJECT]
email_summary = 'Subject: ' + clean_email_subject.replace('\n', '')
clean_email_body = incidents_df.iloc[0][PREPROCESSED_EMAIL_BODY]
email_summary += '\n' + summarize_email_body(clean_email_body, clean_email_subject)
for word in KEYWORDS:
for cased_word in [word.lower(), word.title(), word.upper()]:
email_summary = re.sub(r'(?<!\w)({})(?!\w)'.format(cased_word), '**{}**'.format(cased_word), email_summary)
hr_email_summary += '\n\n' + '### Current Incident\'s Email Snippets'
hr_email_summary += '\n ##### ' + email_summary
context = add_context_key(create_context_for_campaign_details(campaign_found=True, incidents_df=incidents_df))
return context, hr_email_summary
def return_campaign_details_entry(incidents_df, fields_to_display):
hr_campaign_details = calculate_campaign_details_table(incidents_df, fields_to_display)
context, hr_email_summary = create_email_summary_hr(incidents_df)
hr = '\n'.join([hr_campaign_details, hr_email_summary])
return return_outputs_custom(hr, context, tag='campaign_details')
def return_no_mututal_indicators_found_entry():
hr = '### Mutual Indicators' + '\n'
hr += 'No mutual indicators were found.'
return_outputs_custom(hr, add_context_key(create_context_for_indicators()), tag='indicators')
def return_indicator_entry(incidents_df):
indicators_query = 'investigationIDs:({})'.format(' '.join('"{}"'.format(id_) for id_ in incidents_df['id']))
fields = ['id', 'indicator_type', 'investigationIDs', 'relatedIncCount', 'score', 'value']
indicators_args = {'query': indicators_query, 'limit': '150', 'populateFields': ','.join(fields)}
res = demisto.executeCommand('GetIndicatorsByQuery', args=indicators_args)
if is_error(res):
return_error(res)
indicators = res[0]['Contents']
indicators_df = pd.DataFrame(data=indicators)
if len(indicators_df) == 0:
return_no_mututal_indicators_found_entry()
return indicators_df
indicators_df = indicators_df[indicators_df['relatedIncCount'] < 150]
indicators_df['Involved Incidents Count'] = \
indicators_df['investigationIDs'].apply(lambda x: sum(id_ in x for id_ in incidents_df['id']))
indicators_df = indicators_df[indicators_df['Involved Incidents Count'] > 1]
if len(indicators_df) == 0:
return_no_mututal_indicators_found_entry()
return indicators_df
indicators_df['Id'] = indicators_df['id'].apply(lambda x: "[%s](#/indicator/%s)" % (x, x))
indicators_df = indicators_df.sort_values(['score', 'Involved Incidents Count'], ascending=False)
indicators_df['Reputation'] = indicators_df['score'].apply(scoreToReputation)
indicators_df.rename({'value': 'Value', 'indicator_type': 'Type'}, axis=1, inplace=True)
indicators_headers = ['Id', 'Value', 'Type', 'Reputation', 'Involved Incidents Count']
hr = tableToMarkdown('Mutual Indicators', indicators_df.to_dict(orient='records'),
headers=indicators_headers)
return_outputs_custom(hr, add_context_key(create_context_for_indicators(indicators_df)), tag='indicators')
return indicators_df
def get_comma_sep_list(value):
res = [x.strip() for x in value.split(",")]
return [x for x in res if x != '']
def get_reputation(id_, indicators_df):
if len(indicators_df) == 0:
max_reputation = 0
else:
relevant_indicators_df = indicators_df[indicators_df['investigationIDs'].apply(lambda x: id_ in x)]
if len(relevant_indicators_df) > 0:
max_reputation = max(relevant_indicators_df['score'])
else:
max_reputation = 0
return scoreToReputation(max_reputation)
def return_involved_incidents_entry(incidents_df, indicators_df, fields_to_display):
incidents_df['Id'] = incidents_df['id'].apply(lambda x: "[%s](#/Details/%s)" % (x, x))
incidents_df = incidents_df.sort_values('created', ascending=False).reset_index(drop=True)
incidents_df['created_dt'] = incidents_df['created'].apply(lambda x: dateutil.parser.parse(x)) # type: ignore
incidents_df['Created'] = incidents_df['created_dt'].apply(lambda x: x.strftime("%B %d, %Y"))
incidents_df['similarity'] = incidents_df['similarity'].fillna(1)
incidents_df['similarity'] = incidents_df['similarity'].apply(lambda x: '{:.1f}%'.format(x * 100))
current_incident_id = demisto.incident()['id']
incidents_df['DBot Score'] = incidents_df['id'].apply(lambda id_: get_reputation(id_, indicators_df))
# add a mark at current incident, at its similarity cell
incidents_df['similarity'] = incidents_df.apply(
lambda x: '{} (current)'.format(x['similarity']) if x['id'] == current_incident_id else x['similarity'], axis=1)
incidents_df['status'] = incidents_df['status'].apply(lambda x: STATUS_DICT[x] if x in STATUS_DICT else '')
incidents_df.rename({
'name': 'Name',
FROM_FIELD: 'Email From',
'similarity': 'Similarity to Current Incident',
'status': 'Status'},
axis=1, inplace=True)
incidents_headers = ['Id', 'Created', 'Name', 'Status', 'Email From', 'DBot Score',
'Similarity to Current Incident']
if fields_to_display is not None:
fields_to_display = [f for f in fields_to_display if f in incidents_df.columns]
incidents_df[fields_to_display] = incidents_df[fields_to_display].fillna('')
fields_to_display = [f for f in fields_to_display if len(get_non_na_empty_values(incidents_df, f)) > 0]
incidents_headers += fields_to_display
hr = '\n\n' + tableToMarkdown('Involved Incidents', incidents_df[incidents_headers].to_dict(orient='records'),
headers=incidents_headers)
return_outputs_custom(hr, tag='incidents')
def draw_canvas(incidents, indicators):
incident_ids = set(map(lambda x: x['id'], incidents))
filtered_indicators = []
for indicator in indicators:
investigations = indicator.get('investigationIDs', [])
mutual_incidents_in_canvas = len(set(investigations).intersection(incident_ids))
if mutual_incidents_in_canvas >= 2:
filtered_indicators.append(indicator)
try:
res = demisto.executeCommand('DrawRelatedIncidentsCanvas', {'relatedIncidentsIDs': list(incident_ids),
'indicators': filtered_indicators,
'overrideUserCanvas': 'true'
})
if not is_error(res):
res[-1]['Tags'] = ['canvas']
demisto.results(res)
except Exception:
pass
def analyze_incidents_campaign(incidents, fields_to_display):
incidents_df = | pd.DataFrame(incidents) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 13:30:38 2019
@author: Prasad
"""
# Artificial Neural Network
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13]
y = dataset.iloc[:, 13]
#Create dummy variables
geography=pd.get_dummies(X["Geography"],drop_first=True)
gender= | pd.get_dummies(X['Gender'],drop_first=True) | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""ANN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17YNvbIkWjUPYQysH9nTDsyLVi1qOTFYq
"""
import tensorflow as tf
import keras
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13]
y = dataset.iloc[:, 13]
geography = pd.get_dummies(X['Geography'],drop_first=True)
gender = | pd.get_dummies(X['Gender'],drop_first=True) | pandas.get_dummies |
# -*- coding: utf-8 -*-
import re
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import Selector
from scrapy import signals
from fooltrader.api.quote import get_security_list
from fooltrader.consts import DEFAULT_KDATA_HEADER
from fooltrader.contract.files_contract import get_finance_report_event_path
from fooltrader.utils.utils import index_df_with_time
class StockFinanceReportEventSpider(scrapy.Spider):
name = "stock_finance_report_event"
custom_settings = {
'DOWNLOAD_DELAY': 2,
'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
'SPIDER_MIDDLEWARES': {
'fooltrader.middlewares.FoolErrorMiddleware': 1000,
}
}
def start_requests(self):
security_item = self.settings.get("security_item")
if security_item is not None:
for request in self.yield_request(security_item):
yield request
else:
for _, item in get_security_list().iterrows():
for request in self.yield_request(item):
yield request
def yield_request(self, item):
# Q1 report, half-year report, Q3 report, annual report
for period_type in ['yjdbg', 'zqbg', 'sjdbg', 'ndbg']:
url = self.get_finance_report_event_url(item['code'], period_type)
yield Request(url=url, headers=DEFAULT_KDATA_HEADER,
meta={'item': item,
'period_type': period_type},
callback=self.download_fi_report_event_data)
@staticmethod
def report_period_from_title(title, period_type, report_event_date):
try:
year = re.match('.*(\d{4}).*', title).group(1)
report_event_year = pd.Timestamp(report_event_date).date().year
if int(year) < int(report_event_year) - 2 or int(year) > int(report_event_year):
raise Exception('wrong report year')
except Exception as e:
year = | pd.Timestamp(report_event_date) | pandas.Timestamp |
# Generic helper functions for operating on a dataframe
import pandas as pd
import numpy as np
import datetime as dt
import copy
import logging
from urllib.request import urlopen
import json
from dateutil.relativedelta import relativedelta
#------------------------------------------------------------------------------
# START OF CONFIGURATION
#------------------------------------------------------------------------------
# logging.basicConfig(
# level=logging.DEBUG,
# format='(%(threadName)-10s) %(message)s'
# )
#------------------------------------------------------------------------------
# END OF CONFIGURATION
#------------------------------------------------------------------------------
def MultiplicarDataframe(DF,numero):
"""
Description:
-----------
Multiplies a dataframe by a scalar.
The multiplication is only applied to columns of type float64 or int64.
Parameters
----------
df : pandas dataframe, required
Dataframe to be multiplied by a scalar.
numero : int, required
Scalar by which all numeric columns are multiplied.
Returns
-------
Dataframe with its numeric columns multiplied by the scalar.
"""
df = DF.copy()
for columna in df.columns:
# only multiply when the column content is float or integer
if(df[columna].dtype == np.float64 or df[columna].dtype == np.int64):
# df[columna] = df[columna].apply(lambda x: x*numero)  # lambda version
df.loc[:, columna] = df[columna] * numero
return df
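# Brief usage sketch (hypothetical dataframe): only numeric columns are scaled,
# text columns pass through untouched.
def _demo_multiplicar_dataframe():
    df = pd.DataFrame({"ticker": ["GGAL", "YPF"], "close": [10.0, 20.0]})
    return MultiplicarDataframe(df, 2)  # "close" becomes [20.0, 40.0]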
def DividirDataframe(DF,numero):
"""
Description:
-----------
Divides a dataframe by a scalar.
The division is only applied to columns of type float64 or int64.
Parameters
----------
df : pandas dataframe, required
Dataframe to be divided by a scalar.
numero : int, required
Scalar by which all numeric columns are divided.
Returns
-------
Dataframe with its numeric columns divided by the scalar.
"""
df = DF.copy()
for columna in df.columns:
# only divide when the column content is float or integer
if(df[columna].dtype == np.float64 or df[columna].dtype == np.int64):
# df[columna] = df[columna].apply(lambda x: x/numero)  # lambda version
df.loc[:, columna] = df[columna] / numero
return df
def NormalizarSerie(SR):
"""
Description:
-----------
Normalizes a series based on its maximum and minimum,
so that its values fall between 0 and 1.
A typical use is to normalize the DPO and trade
at 0.2 for the buy and 0.5 for the sell (long-only strategy).
Parameters
----------
SR : pandas series, required
Series to be normalized.
Usage
-------
from DataframeUtils import NormalizarSerie \n
df_stocks["GGAL"]["DPO_NORMALIZADO"] = NormalizarSerie(df_stocks["GGAL"]["DPO"])\n
Returns
-------
Normalized series.
"""
sr = SR.copy()
sr_norm =(sr - sr.min())/(sr.max()-sr.min())
return sr_norm
def DataframeDesde(DF,dias):
"""
Description:
-----------
Returns a dataframe starting from a given date in the past.
The date is computed from 'dias' (days), e.g. 365 is one year back.
Parameters
----------
df : pandas dataframe, required
Dataframe; it must have a datetime index.
dias : int, required
Number of days used to compute the starting date.
Returns
-------
Dataframe with the rows from the computed date onwards.
"""
df = DF.copy()
df = df.reset_index()
desde = df.iloc[-1,0] - | pd.Timedelta(days=dias) | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 19 11:06:44 2017
@author: <NAME>
"""
import pandas as pd
from pandas import DataFrame
import random
from copy import deepcopy
import math
import numpy as np
# Backtrader
import backtrader as bt
from backtrader.indicators import EMA
# PyQuantTrader
from PyQuantTrader import strategy as pqt_strategy
from PyQuantTrader import validation as pqt_val
from PyQuantTrader import analyzers as pqt_ana
from PyQuantTrader import indicators as pqt_ind
from PyQuantTrader import observers as pqt_obs
from PyQuantTrader import sizers as pqt_sizers
# OandaAPI
import oandapy
# Keras
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
def load_data(data, seq_len, normalise_window):
sequence_length = seq_len + 1
result = []
for index in range(len(data) - sequence_length):
result.append(data[index: index + sequence_length])
if normalise_window:
result = normalise_windows(result)
result = np.array(result)
row = round(0.9 * result.shape[0])
train = result[:int(row), :]
np.random.shuffle(train)
x_train = train[:, :-1]
y_train = train[:, -1]
x_test = result[int(row):, :-1]
y_test = result[int(row):, -1]
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
return [x_train, y_train, x_test, y_test]
def normalise_windows(window_data):
normalised_data = []
for window in window_data:
normalised_window = [((float(p) / float(window[0])) - 1) for p in window]
normalised_data.append(normalised_window)
return normalised_data
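# Worked example (assumed prices): each window is rescaled relative to its first
# value, so [10, 11, 12] becomes roughly [0.0, 0.1, 0.2].
def _demo_normalise_windows():
    return normalise_windows([[10.0, 11.0, 12.0]])  # [[0.0, ~0.1, ~0.2]]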
def build_model(layers, dropout):
model = Sequential()
model.add(LSTM(
input_dim=layers[0],
output_dim=layers[1],
return_sequences=True))
model.add(Dropout(dropout))
model.add(LSTM(
layers[2],
return_sequences=False))
model.add(Dropout(dropout))
model.add(Dense(
output_dim=layers[3]))
model.add(Activation("linear"))
model.compile(loss="mse", optimizer="rmsprop")
return model
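# Hedged sketch: a two-layer stacked LSTM with dropout and a single linear
# output. The call below mirrors the defaults of the MachineLearningInd
# indicator defined later (one input feature, nodes=25, dropout=0.1); note the
# Keras 1.x-style arguments used above.
def _demo_build_model():
    return build_model([1, 25, 25, 1], 0.1)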
def predict_point_by_point(model, data):
#Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
predicted = model.predict(data)
predicted = np.reshape(predicted, (predicted.size,))
return predicted
# Strategy
class MyStrategy(bt.Strategy):
params = dict(
hold = [8,8],
)
def notify_order(self, order):
if order.status == order.Submitted:
return
dt, dn = self.datetime.datetime(), order.data._name
print('{} {} Order {} Status {}'.format(
dt, dn, order.ref, order.getstatusname())
)
def log(self, txt, dt=None):
''''' Logging function fot this strategy'''
dt = dt or self.datas[0].datetime.datetime(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
lstmInd = MachineLearningInd()
self.lstmPred = lstmInd.lines.lstmPred
def start(self):
print("the world call me!")
def prenext(self):
print("not mature")
def next(self):
for i, d in enumerate(self.datas):
if self.lstmPred[0] > 0:
# go Short
if i == 0:
self.buy(data=d)
elif i == 1:
self.sell(data=d)
elif self.lstmPred[0] < 0:
# go Long
if i == 0:
self.sell(data=d)
elif i == 1:
self.buy(data=d)
self.log('LSTM: %.4f' % (self.lstmPred[0]))
class MachineLearningInd(bt.ind.PeriodN):
_mindatas = 2
packages = (('pandas','pd'),
('numpy','np'),
('sklearn', 'sk'),
('statsmodels.api', 'sm'),
)
lines = ('lstmPred',)
params = dict(
lookbacks = 4800,
seq_len = 50,
normalise_window = True,
batch_size = 64,
epochs = 2,
validation_split = 0.25,
dropout = 0.1,
nodes = 25,
)
def __init__(self):
self.addminperiod(self.params.lookbacks)
def next(self):
p0 = np.array(self.data0.get(size=self.p.lookbacks))
p1 = np.array(self.data1.get(size=self.p.lookbacks))
data = p0-p1
X_train, y_train, X_test, y_test = load_data(data, self.p.seq_len, self.p.normalise_window)
model = build_model([1, self.p.nodes, self.p.nodes, 1], self.p.dropout)
model.fit(
X_train,
y_train,
batch_size=self.p.batch_size,
nb_epoch=self.p.epochs,
validation_split=self.p.validation_split)
predictions = predict_point_by_point(model, X_test)
self.lines.lstmPred[0] = predictions[-1]
# Run Strategy
def runstrat(args=None):
# Oanda data
account = "101-011-6029361-001"
access_token="8153764443276ed6230c2d8a95dac609-e9e68019e7c1c51e6f99a755007914f7"
account_type = "practice"
# Register APIs
oanda = oandapy.API(environment=account_type, access_token=access_token)
# Get historical prices
hist = oanda.get_history(instrument = "AUD_USD", granularity = "M15", count = 5000, candleFormat = "midpoint")
dataframe = pd.DataFrame(hist['candles'])
dataframe['openinterest'] = 0
dataframe = dataframe[['time', 'openMid', 'highMid', 'lowMid', 'closeMid', 'volume', 'openinterest']]
dataframe['time'] = pd.to_datetime(dataframe['time'])
dataframe = dataframe.set_index('time')
dataframe = dataframe.rename(columns={'openMid': 'open', 'highMid': 'high', 'lowMid': 'low', 'closeMid': 'close'})
AUDUSD = bt.feeds.PandasData(dataname=dataframe)
hist = oanda.get_history(instrument = "USD_CAD", granularity = "M15", count = 5000, candleFormat = "midpoint")
dataframe = | pd.DataFrame(hist['candles']) | pandas.DataFrame |
import pandas as pd
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
df = pd.read_csv("sphist.csv")
df["Date"] = | pd.to_datetime(df["Date"]) | pandas.to_datetime |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 10:24:34 2019
@author: labadmin
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 21:05:32 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier as GBC
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.over_sampling import SMOTENC
data_ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset1.csv",skiprows=4)
data_ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset2.csv",skiprows=4)
data_ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset3.csv",skiprows=4)
data_ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset4.csv",skiprows=4)
data_ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset5.csv",skiprows=4)
data_ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset6.csv",skiprows=4)
data_ben7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset7.csv",skiprows=4)
frames_ben1 = [data_ben1,data_ben2,data_ben3,data_ben4,data_ben5,data_ben6,data_ben7]
result_ben1 = pd.concat(frames_ben1)
result_ben1.index=range(3360)
df_ben1 = pd.DataFrame({'label': [1]},index=range(0,3360))
dat_ben1=pd.concat([result_ben1,df_ben1],axis=1)
#-------------------------------------------------------------------------------------------------
data__ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset1.csv",skiprows=4)
data__ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset2.csv",skiprows=4)
data__ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset3.csv",skiprows=4)
data__ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset4.csv",skiprows=4)
data__ben4=data__ben4['# Columns: time'].str.split(expand=True)
data__ben4.columns=['# Columns: time','avg_rss12','var_rss12','avg_rss13','var_rss13','avg_rss23','var_rss23']
data__ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset5.csv",skiprows=4)
data__ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset6.csv",skiprows=4)
frames_ben2 = [data__ben1,data__ben2,data__ben3,data__ben4,data__ben5,data__ben6]
result_ben2 = pd.concat(frames_ben2)
result_ben2.index=range(2880)
df_ben2 = pd.DataFrame({'label': [2]},index=range(0,2880))
dat__ben2=pd.concat([result_ben2,df_ben2],axis=1)
#-----------------------------------------------------------------------------------------------------
data_cyc1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset1.csv",skiprows=4)
data_cyc2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset2.csv",skiprows=4)
data_cyc3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset3.csv",skiprows=4)
data_cyc4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset4.csv",skiprows=4)
data_cyc5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset5.csv",skiprows=4)
data_cyc6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset6.csv",skiprows=4)
data_cyc7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset7.csv",skiprows=4)
data_cyc8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset8.csv",skiprows=4)
data_cyc9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset99.csv",skiprows=4)
data_cyc10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset10.csv",skiprows=4)
data_cyc11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset11.csv",skiprows=4)
data_cyc12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset12.csv",skiprows=4)
data_cyc13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset13.csv",skiprows=4)
data_cyc14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset144.csv",skiprows=4)
data_cyc15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset15.csv",skiprows=4)
frames_cyc = [data_cyc1,data_cyc2,data_cyc3,data_cyc4,data_cyc5,data_cyc6,data_cyc7,data_cyc8,data_cyc9,data_cyc10,data_cyc11,data_cyc12,data_cyc13,data_cyc14,data_cyc15]
result_cyc = pd.concat(frames_cyc)
result_cyc.index=range(7200)
df_cyc = pd.DataFrame({'label': 3},index=range(0,7200))
data_cyc=pd.concat([result_cyc,df_cyc],axis=1)
#----------------------------------------------------------------------------------------------
data_ly1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset1.csv",skiprows=4)
data_ly2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset2.csv",skiprows=4)
data_ly3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset3.csv",skiprows=4)
data_ly4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset4.csv",skiprows=4)
data_ly5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset5.csv",skiprows=4)
data_ly6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset6.csv",skiprows=4)
data_ly7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset7.csv",skiprows=4)
data_ly8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset8.csv",skiprows=4)
data_ly9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset9.csv",skiprows=4)
data_ly10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset10.csv",skiprows=4)
data_ly11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset11.csv",skiprows=4)
data_ly12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset12.csv",skiprows=4)
data_ly13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset13.csv",skiprows=4)
data_ly14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset14.csv",skiprows=4)
data_ly15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset15.csv",skiprows=4)
frames_ly = [data_ly1,data_ly2,data_ly3,data_ly4,data_ly5,data_ly6,data_ly7,data_ly8,data_ly9,data_ly10,data_ly11,data_ly12,data_ly13,data_ly14,data_ly15]
result_ly = pd.concat(frames_ly)
result_ly.index=range(7200)
df_ly = pd.DataFrame({'label': 4},index=range(0,7200))
data_ly=pd.concat([result_ly,df_ly],axis=1)
#-------------------------------------------------------------------------------------------------
data_sit1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset1.csv",skiprows=4)
data_sit2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset2.csv",skiprows=4)
data_sit3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset3.csv",skiprows=4)
data_sit4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset4.csv",skiprows=4)
data_sit5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset5.csv",skiprows=4)
data_sit6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset6.csv",skiprows=4)
data_sit7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset7.csv",skiprows=4)
data_sit8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset8.csv",skiprows=4)
data_sit9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset9.csv",skiprows=4)
data_sit10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset10.csv",skiprows=4)
data_sit11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset11.csv",skiprows=4)
data_sit12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset12.csv",skiprows=4)
data_sit13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset13.csv",skiprows=4)
data_sit14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset14.csv",skiprows=4)
data_sit15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset15.csv",skiprows=4)
frames_sit= [data_sit1,data_sit2,data_sit3,data_sit4,data_sit5,data_sit6,data_sit7,data_sit8,data_sit9,data_sit10,data_sit11,data_sit12,data_sit13,data_sit14,data_sit15]
result_sit = pd.concat(frames_sit)
result_sit.index=range(7199)
df_sit= pd.DataFrame({'label': 5},index=range(0,7199))
data_sit=pd.concat([result_sit,df_sit],axis=1)
#----------------------------------------------------------------------------------------------------
data_sta1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset1.csv",skiprows=4)
data_sta2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset2.csv",skiprows=4)
data_sta3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset3.csv",skiprows=4)
data_sta4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset4.csv",skiprows=4)
data_sta5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset5.csv",skiprows=4)
data_sta6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset6.csv",skiprows=4)
data_sta7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset7.csv",skiprows=4)
data_sta8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset8.csv",skiprows=4)
data_sta9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset9.csv",skiprows=4)
data_sta10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset10.csv",skiprows=4)
data_sta11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset11.csv",skiprows=4)
data_sta12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset12.csv",skiprows=4)
data_sta13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset13.csv",skiprows=4)
data_sta14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset14.csv",skiprows=4)
data_sta15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset15.csv",skiprows=4)
frames_sta= [data_sta1,data_sta2,data_sta3,data_sta4,data_sta5,data_sta6,data_sta7,data_sta8,data_sta9,data_sta10,data_sta11,data_sta12,data_sta13,data_sta14,data_sta15]
result_sta = pd.concat(frames_sta)
result_sta.index=range(7200)
df_sta= pd.DataFrame({'label': 6},index=range(0,7200))
data_sta=pd.concat([result_sta,df_sta],axis=1)
#---------------------------------------------------------------------------------------------------------------
data_wa1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset1.csv",skiprows=4)
data_wa2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset2.csv",skiprows=4)
data_wa3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset3.csv",skiprows=4)
data_wa4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset4.csv",skiprows=4)
data_wa5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset5.csv",skiprows=4)
data_wa6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset6.csv",skiprows=4)
data_wa7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset7.csv",skiprows=4)
data_wa8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset8.csv",skiprows=4)
data_wa9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset9.csv",skiprows=4)
data_wa10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset10.csv",skiprows=4)
data_wa11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset11.csv",skiprows=4)
data_wa12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset12.csv",skiprows=4)
data_wa13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset13.csv",skiprows=4)
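# The nearly identical blocks above could equivalently be produced by a small helper such as the
# sketch below. This is illustrative only and not part of the original script: it assumes pandas
# is already imported as pd (as used above), that every folder follows the dataset1.csv ... datasetN.csv
# naming, and it does not reproduce the special cases handled above (e.g. the re-split of
# bending2/dataset4.csv or oddly named files such as dataset99.csv).
def load_activity(folder, n_files, label):
    frames = [pd.read_csv(f"F:\\Projects\\Master\\Statistical learning\\project\\{folder}\\dataset{i}.csv",
                          skiprows=4)
              for i in range(1, n_files + 1)]
    combined = pd.concat(frames, ignore_index=True)
    combined['label'] = label  # constant activity label for every row of this activity
    return combined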
from apiWrapper import coinAPI
from datetime import datetime
from connectingPipelines import schema_validation as svt
import logging
import pandas as pd
import boto3
ACCESS_KEY = '<KEY>'
SECRET_KEY = '<KEY>'
def coinsMarket(cmkt):
df1 = pd.DataFrame()
    df2 = pd.DataFrame()
import regex as re
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import random
ITERATIONS = 10000 #determines how many game turns to simulate
BASE_DAMAGE = 3 #base attack damage for each attack
attacks = dict()
damagemodifiers = dict()
#read in the cards from the file
deckfile = "attack-deck.csv"
cardcounts = dict()
with open(deckfile) as infile:
for line in infile:
res = line.strip().split(',')
cardcounts[res[0]] = int(res[1])
#make a deck out from all cards
maindeck = []
for face in cardcounts:
for _ in (range(cardcounts[face])):
maindeck.append(face)
#shuffle the deck
random.shuffle(maindeck)
deck = maindeck.copy()
zz = 0
#For card in deck:
while zz < ITERATIONS:
damage = BASE_DAMAGE
modifiers = set()
while True:
flip = False
card = deck.pop(0)
if (card == "x2"):
damage = damage * 2
deck = maindeck.copy() #shuffle the deck
random.shuffle(deck)
elif (card == "Null"):
damage = 0
deck = maindeck.copy() #shuffle the deck
random.shuffle(deck)
else:
result = re.search(r'^([+-]\d)?([A-Z][a-z]*)?(Flip)?([A-Z][a-z]*)?', card) ##Regular expression to check for damage, modifiers, and flip
if (result.group(1)):
#print(f'damage before {damage} {result.group(1)} ')
damage = damage + int(result.group(1))
#print(f'damage after {damage}')
if (result.group(2)):
modifiers.add(result.group(2)) #First Modifier
if (result.group(3)):
flip = True
if (result.group(4)):
modifiers.add(result.group(4)) #Second Modifier
if not flip:
break
#count modifiers for the whole simulation and per each damage
for i in modifiers:
damagemodifiers.setdefault(i, 0)
damagemodifiers[i] += 1
attacks.setdefault(damage, {}).setdefault(i, 0)
attacks[damage][i] += 1
#count the frequency of each damage
attacks.setdefault(damage, {}).setdefault('count', 0)
attacks[damage]['count'] += 1
zz += 1
print("Gloomhaven Attack Modifier Simulation")
print(f'Simulating {ITERATIONS} turns...done.\n')
values = list()
#Print the probability of each damage
for damage in sorted(attacks):
    z = attacks[damage]['count']*100/ITERATIONS
values.append(z/100)
print(f'Damage: {damage} Probability: {z}%')
avg_damage = sum(dmg * attacks[dmg]['count'] for dmg in attacks) / ITERATIONS
print(f"\nAverage Damage: {avg_damage:.2f}\n")
#Print the chance of each modifier
for modifier in sorted(damagemodifiers):
    print(f'{modifier} chance: {damagemodifiers[modifier]*100/ITERATIONS}%')
#Calculate the chance of each modifier per damage
modifiers = dict()
for damage in sorted(attacks):
for modifier in sorted(damagemodifiers):
attacks[damage].setdefault(modifier, 0)
#z = attacks[damage][modifier]/attacks[damage]['count'] #Not sure which line is supposed to be graphed
z = attacks[damage][modifier]/damagemodifiers[modifier]
modifiers.setdefault(modifier, list())
modifiers[modifier].append(z)
#DataFrames for all the graphs
df = pd.DataFrame({'percentage': pd.Series(values)})
df = df.set_axis(pd.Series(sorted(attacks.keys())), axis='index')
dftwo = pd.DataFrame.from_dict(modifiers)
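# Illustrative plotting sketch (an assumption about how the truncated script might continue; it
# only uses df, dftwo and the matplotlib/seaborn imports already present above):
sns.barplot(x=df.index, y=df['percentage'])  # probability of each total damage value
plt.xlabel("damage")
plt.ylabel("probability")
plt.show()
dftwo.set_index(df.index).plot(kind='bar')  # share of each modifier, one group per damage value
plt.show()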
from common.util import ReviewUtil
import pandas as pd
from collections import Counter
import numpy as np
from scipy.stats import norm
import os
class ZYJTemporalAnalysis:
def __init__(self, input_dir: str, threshold: float = 0.9999, num_day_thres: float = 100.):
self.input_path = input_dir
self.threshold = threshold
self.num_day_thres = num_day_thres
@staticmethod
def get_risks(df, prod_id):
if df.empty or df.shape[0] < 100:
            return pd.DataFrame()
review_date = df["creationTime"].apply(lambda x: x.split(" ")[0]).tolist()
date_freqs = Counter(review_date).items()
(date, freqs) = zip(*date_freqs)
np_freqs = np.array(freqs)
m = np.mean(np_freqs)
sd = np.std(np_freqs)
r = (np_freqs - 5 * m) / sd
prob = norm(0, 1).cdf(r)
mus = [m] * len(date)
sds = [sd] * len(date)
prod_ids = [prod_id] * len(date)
analysis_df = pd.DataFrame({"date": date, "count": freqs, "prob": prob, "mu": mus, "sd": sds, "prod_id": prod_ids})
return analysis_df
@staticmethod
def is_outlier(points, thresh=3.5):
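        # Outlier test based on a modified z-score: each point's absolute deviation from the
        # mean is scaled by 0.6745 (the 0.75 quantile of the standard normal) and divided by
        # the average absolute deviation; the 0.0001 terms guard against division by zero.
        # Points whose score exceeds `thresh` are flagged as outliers.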
if len(points.shape) == 1:
points = points[:, None]
mu = np.mean(points, axis=0)
diff = np.sum((points - mu) ** 2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.mean(diff)
modified_z_score = (0.6745 * diff + 0.0001) / (med_abs_deviation + 0.0001)
return modified_z_score > thresh
@staticmethod
def get_water_army_reviews2(df):
df['creationTime'] = pd.to_datetime(df["creationTime"])
df['referenceTime'] = pd.to_datetime(df["referenceTime"])
diff = df['referenceTime'] - df['creationTime']
df['date_diff'] = diff.apply(lambda x: x.days)
df['ratingDate'] = df['creationTime'].apply(lambda x: x.date())
try:
count_iterable = Counter(df['ratingDate'])
df_counter = pd.DataFrame.from_dict(count_iterable, orient='index').reset_index()
mask = ZYJTemporalAnalysis.is_outlier(df_counter[0])
outlier_dates = set(df_counter['index'][mask])
suspicious_list = df[df['ratingDate'].isin(outlier_dates) & (df['date_diff'] < 30)]['id'].values.tolist()
if len(suspicious_list) > 20:
print(suspicious_list)
return suspicious_list
else:
return list()
except KeyError:
return list()
@staticmethod
def get_water_army_reviews(df):
num_top_date_delta = 5
df['referenceTime'] = pd.to_datetime(df["referenceTime"])
df['creationTime'] = pd.to_datetime(df["creationTime"])
diff = df['referenceTime'] - df['creationTime']
df['date_diff'] = diff.apply(lambda x: x.days)
try:
date_diff_temp = df
            count_iterable = Counter(date_diff_temp['date_diff']).most_common()  # sorted by frequency so the slice below takes the top deltas
date_deltas = [date_delta for (date_delta, count) in count_iterable]
suspicious_delta = set(date_deltas[:num_top_date_delta])
suspicious_list = df[df['date_diff'].isin(suspicious_delta)]['id'].values.tolist()
if len(suspicious_list) > 20:
return suspicious_list
else:
return list()
except KeyError:
return list()
def suspicious(self, df, prodid):
analysis_df = ZYJTemporalAnalysis.get_risks(df, prodid)
suspicious = False
if analysis_df.empty:
return suspicious, analysis_df
suspicious_dates = analysis_df.loc[analysis_df['prob'] > self.threshold]
if (analysis_df.shape[0] > self.num_day_thres) & (suspicious_dates.shape[0] > 0):
suspicious = True
return suspicious, suspicious_dates
def get_temporal_analysis(self):
files = ReviewUtil.get_all_valid_path(self.input_path)
df_total = []
for file in files:
prodid = os.path.basename(str(file)).split(".")[0]
            df_raw = pd.read_csv(file)
import random
import pandas as pd
import pytest
from evalml.preprocessing.data_splitters import BalancedClassificationSampler
@pytest.mark.parametrize("ratio,samples,percentage,seed",
[(1, 1, 0.2, 1),
(3.3, 101, 0.5, 100)])
def test_balanced_classification_init(ratio, samples, percentage, seed):
bcs = BalancedClassificationSampler(balanced_ratio=ratio, min_samples=samples, min_percentage=percentage, random_seed=seed)
assert bcs.balanced_ratio == ratio
assert bcs.min_samples == samples
assert bcs.min_percentage == percentage
assert bcs.random_seed == seed
def test_balanced_classification_errors():
with pytest.raises(ValueError, match="balanced_ratio must be"):
BalancedClassificationSampler(balanced_ratio=-1)
with pytest.raises(ValueError, match="min_sample must be"):
BalancedClassificationSampler(min_samples=0)
with pytest.raises(ValueError, match="min_percentage must be"):
BalancedClassificationSampler(min_percentage=0)
with pytest.raises(ValueError, match="min_percentage must be"):
BalancedClassificationSampler(min_percentage=0.6)
with pytest.raises(ValueError, match="min_percentage must be"):
BalancedClassificationSampler(min_percentage=-1.3)
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_balanced_simple(num_classes):
X = pd.DataFrame({"a": [i for i in range(1000)]})
y = pd.Series([i % num_classes for i in range(1000)])
bcs = BalancedClassificationSampler()
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
def test_classification_severely_imbalanced_binary_simple():
X = pd.DataFrame({"a": [i for i in range(1000)]})
    # the minority class 0 appears only 5 times
y = pd.Series([1 if i % 200 != 0 else 0 for i in range(1000)])
bcs = BalancedClassificationSampler()
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
def test_classification_severely_imbalanced_multiclass_simple():
X = pd.DataFrame({"a": [i for i in range(1000)]})
    # 10 instances of 1, 9 instances of 2
y = pd.Series([0 if i % 55 != 0 else (1 + i % 2) for i in range(1000)])
bcs = BalancedClassificationSampler()
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
@pytest.mark.parametrize("balanced_ratio", [1, 2, 3, 4, 5, 10])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_balanced_ratio(num_classes, balanced_ratio):
X = pd.DataFrame({"a": [i for i in range(1000)]})
if num_classes == 2:
y = pd.Series([0] * 750 + [1] * 250)
else:
y = pd.Series([0] * 600 + [1] * 200 + [2] * 200)
bcs = BalancedClassificationSampler(balanced_ratio=balanced_ratio)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
if balanced_ratio >= 3:
# the classes are considered balanced, do nothing
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
else:
# remove some samples
assert len(X2) == {2: (250 * (balanced_ratio + 1)), 3: (200 * (balanced_ratio + 2))}[num_classes]
assert len(y2) == len(X2)
assert y2.value_counts().values[0] == balanced_ratio * {2: 250, 3: 200}[num_classes]
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_min_samples(num_classes, min_samples):
X = pd.DataFrame({"a": [i for i in range(1000)]})
if num_classes == 2:
y = pd.Series([0] * 900 + [1] * 100)
else:
y = pd.Series([0] * 799 + [1] * 101 + [2] * 100)
bcs = BalancedClassificationSampler(balanced_ratio=1, min_samples=min_samples)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
if min_samples <= 100:
# balance 1:1 without conflicting with min_samples
assert len(X2) == {2: 200, 3: 300}[num_classes]
assert y2.value_counts().values[0] == 100
else:
# cannot balance 1:1, choosing the min_samples size for the majority class and add minority class(es)
if num_classes == 2:
assert len(X2) == min_samples + 100
assert y2.value_counts().values[0] == min_samples
else:
assert len(X2) == min_samples + 201
assert y2.value_counts().values[0] == min_samples
@pytest.mark.parametrize("min_percentage", [0.01, 0.05, 0.2, 0.3])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_min_percentage(num_classes, min_percentage):
X = pd.DataFrame({"a": [i for i in range(1000)]})
if num_classes == 2:
y = pd.Series([0] * 950 + [1] * 50)
else:
        y = pd.Series([0] * 820 + [1] * 90 + [2] * 90)
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
# df_2018 = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
# df_2016 = pd.read_csv("/mnt/nadavrap-students/STS/data/2016_2017.csv")
# df_2014 = pd.read_csv("/mnt/nadavrap-students/STS/data/2014_2015.csv")
# df_2012 = pd.read_csv("/mnt/nadavrap-students/STS/data/2012_2013.csv")
# df_2010 = pd.read_csv("/mnt/nadavrap-students/STS/data/2010_2011.csv")
#
# print (df_2018.stsrcom.unique())
# print (df_2016.stsrcom.unique())
# print (df_2014.stsrcom.unique())
# print (df_2012.stsrcom.unique())
# print (df_2010.stsrcom.unique())
# print (df_2018.stsrcHospD.unique())
# print (df_2016.stsrcHospD.unique())
# print (df_2014.stsrcHospD.unique())
# print (df_2012.stsrcHospD.unique())
# print (df_2010.stsrcHospD.unique())
# # print (df_2018.columns.tolist())
# df_union = pd.concat([df_2010, df_2012,df_2014,df_2016,df_2018], ignore_index=True)
# print (df_union)
# print (df_union['surgyear'].value_counts())
# for col in df_union.columns:
# print("Column '{}' have :: {} missing values.".format(col,df_union[col].isna().sum()))
# df_union= pd.read_csv("df_union.csv")
# cols_to_remove = []
# samples = len(df_union)
# for col in df_union.columns:
# nan_vals = df_union[col].isna().sum()
# prec_missing_vals = nan_vals / samples
# print("Column '{}' have :: {} missing values. {}%".format(col, df_union[col].isna().sum(), round(prec_missing_vals,3)))
# print (cols_to_remove)
#
# df_union.drop(cols_to_remove, axis=1, inplace=True)
# print("Number of Features : ",len(df_union.columns))
# for col in df_union.columns:
# print("Column '{}' have :: {} missing values.".format(col,df_union[col].isna().sum()))
#
# df_union.to_csv("df union after remove.csv")
# df_2018_ = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
df_all= pd.read_csv("/tmp/pycharm_project_723/df_union.csv")
print (df_all.reoperation.unique())
print (df_all.stsrcHospD.unique())
print (df_all.stsrcom.unique())
# mask = df_2018_['surgyear'] == 2018
# df_all = df_2018_[mask]
# mask_reop = df_all['reoperation'] == 1
# df_reop = df_all[mask_reop]
# df_op = df_all[~mask_reop]
def create_df_for_bins_hospid(col_mort):
df1 = df_all.groupby(['hospid', 'surgyear'])['hospid'].count().reset_index(name='total')
df2 = df_all.groupby(['hospid', 'surgyear'])['reoperation'].apply(lambda x: (x == 1).sum()).reset_index(
name='Reop')
df3 = df_all.groupby(['hospid', 'surgyear'])['reoperation'].apply(lambda x: (x == 0).sum()).reset_index(
name='FirstOperation')
df_aggr = pd.read_csv("aggregate_csv.csv")
mask_reop = df_all['reoperation'] == 1
df_reop = df_all[mask_reop]
df_op = df_all[~mask_reop]
dfmort = df_all.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_all')
dfmortf = df_op.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_first')
dfmortr = df_reop.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_reop')
df_comp = df_all.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_all')
df_compr = df_reop.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_reop')
df_compf = df_op.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d2 = pd.merge(d1, df2, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
df5 = pd.merge(df_aggr, d2, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'],
how='inner') # how='left', on=['HospID','surgyear'])
del df5["Unnamed: 0"]
d3 = pd.merge(df5, dfmort, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d4 = pd.merge(d3, dfmortf, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d5 = pd.merge(d4, dfmortr, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d6 = pd.merge(d5, df_comp, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d7 = pd.merge(d6, df_compf, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d8 = pd.merge(d7, df_compr, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
# df_sum_all_Years_total = pd.merge(d8, df_19, on='HospID', how='outer')
d8.fillna(0, inplace=True)
d8['mort_rate_All'] = (d8['Mortality_all'] / d8['total']) * 100
d8['Mortality_First_rate'] = (d8['Mortality_first'] / d8['FirstOperation']) * 100
d8['Mortality_Reop_rate'] = (d8['Mortality_reop'] / d8['Reop']) * 100
d8['Complics_rate_All'] = (d8['Complics_all'] / d8['total']) * 100
d8['Complics_First_rate'] = (d8['Complics_FirstOperation'] / d8['FirstOperation']) * 100
d8['Complics_Reop_rate'] = (d8['Complics_reop'] / d8['Reop']) * 100
d8.to_csv('hospid_year_allyears.csv')
df_PredMort_all = df_all.groupby(['hospid', 'surgyear'])['predmort'].mean().reset_index(name='PredMort_All_avg')
df_PredMort_op = df_op.groupby(['hospid', 'surgyear'])['predmort'].mean().reset_index(name='PredMort_First_avg')
df_PredMort_reop = df_reop.groupby(['hospid', 'surgyear'])['predmort'].mean().reset_index(
name='PredMort_Reoperation_avg')
df_PredComp_all = df_all.groupby(['hospid', 'surgyear'])['predmm'].mean().reset_index(name='PredComp_All_avg')
df_PredComp_op = df_op.groupby(['hospid', 'surgyear'])['predmm'].mean().reset_index(name='PredComp_First_avg')
df_PredComp_reop = df_reop.groupby(['hospid', 'surgyear'])['predmm'].mean().reset_index(
name='PredComp_Reoperation_avg')
d19 = pd.merge(d8, df_PredMort_all, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d9 = pd.merge(d19, df_PredMort_op, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d10 = pd.merge(d9, df_PredMort_reop, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d14 = pd.merge(d10, df_PredComp_all, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d11 = pd.merge(d14, df_PredComp_op, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d12 = pd.merge(d11, df_PredComp_reop, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d12.fillna(0, inplace=True)
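    # Observed/expected (O/E) ratios: each observed rate computed above is divided by the mean
    # predicted risk (predmort / predmm) of the same hospital-year group; log2 is applied below,
    # so 0 means "as expected", >0 worse than predicted, <0 better than predicted.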
d12['Mort_observe/expected_All'] = (d12['mort_rate_All'] / d12['PredMort_All_avg'])
d12['Mort_observe/expected_First'] = (d12['Mortality_First_rate'] / d12['PredMort_First_avg'])
d12['Mort_observe/expected_Reop'] = (d12['Mortality_Reop_rate'] / d12['PredMort_Reoperation_avg'])
d12[['log_All_Mort', 'log_First_Mort', 'log_Reoperation_Mort']] = np.log2(
d12[['Mort_observe/expected_All', 'Mort_observe/expected_First', 'Mort_observe/expected_Reop']].replace(0,
np.nan))
d12.fillna(0, inplace=True)
d12['Comp_observe/expected_All'] = (d12['Complics_rate_All'] / d12['PredComp_All_avg'])
d12['Comp_observe/expected_First'] = (d12['Complics_First_rate'] / d12['PredComp_First_avg'])
d12['Comp_observe/expected_Reop'] = (d12['Complics_Reop_rate'] / d12['PredComp_Reoperation_avg'])
d12[['log_All_Comp', 'log_First_Comp', 'log_Reoperation_Comp']] = np.log2(
d12[['Comp_observe/expected_All', 'Comp_observe/expected_First', 'Comp_observe/expected_Reop']].replace(0,
np.nan))
d12.fillna(0, inplace=True)
d12.to_csv("hospid_allyears_expec_hospid_stsrcHospD.csv")
print(d12.info())
print(d12.columns.tolist())
#create_df_for_bins_hospid('stsrcHospD')
def create_df_for_bins_surgid(col_mort):
df1 = df_all.groupby(['surgid', 'surgyear'])['surgid'].count().reset_index(name='total')
df2 = df_all.groupby(['surgid', 'surgyear'])['reoperation'].apply(lambda x: (x == 1).sum()).reset_index(
name='Reop')
df3 = df_all.groupby(['surgid', 'surgyear'])['reoperation'].apply(lambda x: (x == 0).sum()).reset_index(
name='FirstOperation')
df_aggr = pd.read_csv("/tmp/pycharm_project_723/aggregate_surgid_csv.csv")
mask_reop = df_all['reoperation'] == 1
df_reop = df_all[mask_reop]
df_op = df_all[~mask_reop]
dfmort = df_all.groupby(['surgid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_all')
dfmortf = df_op.groupby(['surgid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_first')
dfmortr = df_reop.groupby(['surgid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_reop')
df_comp = df_all.groupby(['surgid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_all')
df_compr = df_reop.groupby(['surgid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_reop')
df_compf = df_op.groupby(['surgid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d2 = pd.merge(d1, df2, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
df5 = pd.merge(df_aggr, d2, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'],how='inner')
# del df5["Unnamed: 0"]
d3 = pd.merge(df5, dfmort, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d4 = pd.merge(d3, dfmortf, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d5 = pd.merge(d4, dfmortr, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d6 = pd.merge(d5, df_comp, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d7 = pd.merge(d6, df_compf, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d8 = pd.merge(d7, df_compr, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
# df_sum_all_Years_total = pd.merge(d8, df_19, on='HospID', how='outer')
d8.fillna(0, inplace=True)
d8['mort_rate_All'] = (d8['Mortality_all'] / d8['total']) * 100
d8['Mortality_First_rate'] = (d8['Mortality_first'] / d8['FirstOperation']) * 100
d8['Mortality_Reop_rate'] = (d8['Mortality_reop'] / d8['Reop']) * 100
d8['Complics_rate_All'] = (d8['Complics_all'] / d8['total']) * 100
d8['Complics_First_rate'] = (d8['Complics_FirstOperation'] / d8['FirstOperation']) * 100
d8['Complics_Reop_rate'] = (d8['Complics_reop'] / d8['Reop']) * 100
d8.to_csv('surgid_year_allyears.csv')
df_PredMort_all = df_all.groupby(['surgid', 'surgyear'])['predmort'].mean().reset_index(name='PredMort_All_avg')
df_PredMort_op = df_op.groupby(['surgid', 'surgyear'])['predmort'].mean().reset_index(name='PredMort_First_avg')
df_PredMort_reop = df_reop.groupby(['surgid', 'surgyear'])['predmort'].mean().reset_index(
name='PredMort_Reoperation_avg')
df_PredComp_all = df_all.groupby(['surgid', 'surgyear'])['predmm'].mean().reset_index(name='PredComp_All_avg')
df_PredComp_op = df_op.groupby(['surgid', 'surgyear'])['predmm'].mean().reset_index(name='PredComp_First_avg')
df_PredComp_reop = df_reop.groupby(['surgid', 'surgyear'])['predmm'].mean().reset_index(
name='PredComp_Reoperation_avg')
d19 = pd.merge(d8, df_PredMort_all, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d9 = pd.merge(d19, df_PredMort_op, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d10 = pd.merge(d9, df_PredMort_reop, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d14 = pd.merge(d10, df_PredComp_all, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
    d11 = pd.merge(d14, df_PredComp_op, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 16 18:03:13 2017
@author: lfiorito
"""
import pdb
import os
import logging
from collections import Counter
from functools import reduce
import numpy as np
import pandas as pd
from sandy.formats.records import read_cont
from sandy.formats import (mf1,
mf3,
mf4,
mf5,
mf8,
mf33,
mf34,
mf35,
)
from sandy.formats.utils import (
Xs,
Lpc,
Fy,
XsCov,
EdistrCov,
LpcCov,
triu_matrix,
corr2cov,
)
from sandy.settings import SandyError
from sandy.functions import find_nearest
__author__ = "<NAME>"
__all__ = ["Endf6", "Errorr", "Gendf"]
#def split_endf(text):
# """
# Read ENDF-6 formatted file and split it into columns based on field widths:
# C1 C2 L1 L2 N1 N2 MAT MF MT
# 11 11 11 11 11 11 4 2 3.
# Store list in dataframe.
# """
# from io import StringIO
# def read_float(x):
# try:
# return float(x[0] + x[1:].replace('+', 'E+').replace('-', 'E-'))
# except:
# return x
# widths = [11,11,11,11,11,11,4,2,3]
# columns = ["C1", "C2", "L1", "L2", "N1", "N2","MAT", "MF", "MT"]
# converters = dict(zip(columns[:6],[read_float]*6))
# frame = pd.read_fwf(StringIO(text), widths=widths, names=columns, converters=converters)
# return frame.query("MAT>0 & MF>0 & MT>0")
#
#
class _BaseFile(pd.DataFrame):
"""This class is to be inherited by all classes that parse and analyze
nuclear data evaluated files in ENDF-6 or derived (ERRORR) formats.
**Index**:
- MAT : (`int`) MAT number to identify the isotope
- MF : (`int`) MF number to identify the data type
- MT : (`int`) MT number to identify the reaction
**Columns**:
- TEXT : (`string`) MAT/MF/MT section reported as a single string
Attributes
----------
labels : `list` of `str`
index labels MAT, MT and MT
Methods
-------
add_sections
Collapse two tapes into a single one
delete_sections
Delete sections from the dataframe
filter_by
Filter dataframe based on MAT, MF, MT lists
from_file
Create dataframe by reading a endf6 file
from_text
Create dataframe from endf6 text in string
Raises
------
`SandyError`
if the tape is empty
`SandyError`
if the same combination MAT/MF/MT is found more than once
"""
labels = ['MAT', 'MF', 'MT']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.empty:
raise SandyError("tape is empty")
self.index.names = self.labels
self.columns = ["TEXT"]
self.sort_index(level=self.labels, inplace=True)
if self.index.duplicated().any():
raise SandyError("found duplicate MAT/MF/MT")
@classmethod
def from_file(cls, file):
"""Create dataframe by reading a file.
Parameters
----------
file : `str`
file name
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Dataframe containing ENDF6 data grouped by MAT/MF/MT
"""
with open(file) as f:
text = f.read()
return cls.from_text(text)
@classmethod
def from_text(cls, text):
"""Create dataframe from endf6 text in string.
Parameters
----------
text : `str`
string containing the evaluated data
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Dataframe containing ENDF6 data grouped by MAT/MF/MT
"""
from io import StringIO
tape = pd.read_fwf(
StringIO(text),
widths = [66, 4, 2, 3],
names = ["TEXT", "MAT", "MF", "MT"],
converters = {"MAT" : np.int, "MF" : np.int, "MT" : np.int},
usecols = cls.labels
)
tape["TEXT"] = text.splitlines(True)
tape = tape.loc[(tape.MAT>0) & (tape.MF>0) & (tape.MT>0)]. \
groupby(cls.labels). \
apply(lambda x: "".join(x.TEXT.values)). \
to_frame()
return cls(tape)
def add_sections(self, tape):
"""Collapse two tapes into a single one.
If MAT/MF/MT index is present in both tapes, take it from the second.
Parameters
----------
tape : `sandy.formats.endf6.BaseFile` or derived instance
dataframe for ENDF-6 formatted file
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
dataframe with merged content
"""
outdf = pd.concat([pd.DataFrame(self), tape]). \
reset_index(). \
drop_duplicates(self.labels, keep='last'). \
set_index(self.labels)
return self.__class__(outdf)
def delete_sections(self, *tuples):
"""Given a sequence of tuples (MAT,MF,MT), delete the corresponding sections
from the dataframe.
Parameters
----------
tuples : sequence of `tuple`
each tuple should have the format (MAT, MF, MT)
To delete, say, a given MF independentently from the MAT and MT, assign `None`
to the MAT and MT position in the tuple.
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
dataframe without given sections
"""
queries = []
for mat,mf,mt in tuples:
conditions = []
if mat is not None:
conditions.append("MAT == {}".format(mat))
if mf is not None:
conditions.append("MF == {}".format(mf))
if mt is not None:
conditions.append("MT == {}".format(mt))
if not conditions:
continue
queries.append("not (" + " & ".join(conditions) + ")")
if not queries:
logging.warn("given MAT/MF/MT sections were not found")
return self
else:
query = " & ".join(queries)
newdf = self.query(query)
return self.__class__(newdf)
def filter_by(self, listmat=None, listmf=None, listmt=None):
"""Filter dataframe based on MAT, MF, MT lists.
Parameters
----------
listmat : `list` or `None`
list of requested MAT values (default is `None`: use all MAT)
listmf : `list` or `None`
list of requested MF values (default is `None`: use all MF)
listmt : `list` or `None`
list of requested MT values (default is `None`: use all MT)
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Copy of the original instance with filtered MAT, MF and MT sections
"""
_listmat = range(1,10000) if listmat is None else listmat
_listmf = range(1,10000) if listmf is None else listmf
_listmt = range(1,10000) if listmt is None else listmt
cond_mat = self.index.get_level_values("MAT").isin(_listmat)
cond_mf = self.index.get_level_values("MF").isin(_listmf)
cond_mt = self.index.get_level_values("MT").isin(_listmt)
df = self.loc[cond_mat & cond_mf & cond_mt]
return self.__class__(df)
@property
def mat(self):
return sorted(self.index.get_level_values("MAT").unique())
@property
def mf(self):
return sorted(self.index.get_level_values("MF").unique())
@property
def mt(self):
return sorted(self.index.get_level_values("MT").unique())
def get_file_format(self):
"""Determine ENDF-6 format type by reading flags "NLIB" and "LRP" of first MAT in file:
* `NLIB = -11 | NLIB = -12` : errorr
* `NLIB = -1` : gendf
* `LRP = 2` : pendf
* `LRP != 2` : endf6
Returns
-------
`str`
type of ENDF-6 format
"""
lines = self.TEXT.loc[self.mat[0], 1, 451].splitlines()
C, i = read_cont(lines, 0)
if C.N1 == -11 or C.N1 == -12:
ftype = "errorr"
elif C.N1 == -1:
ftype = "gendf"
else:
if C.L1 == 2:
ftype = "pendf"
else:
ftype = "endf6"
return ftype
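# Usage sketch for the classes in this module (illustrative only; the file name below is a
# hypothetical example, not something shipped with the package):
#
#   tape = Endf6.from_file("n-9237.endf")      # parse an ENDF-6 file into a MAT/MF/MT frame
#   tape.get_file_format()                     # -> "endf6", "pendf", "gendf" or "errorr"
#   xs = tape.get_xs(listmt=[2, 18])           # linearized cross sections on a union grid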
class Endf6(_BaseFile):
"""Class to contain the content of ENDF-6 files, grouped by MAT/MF/MT.
**Index**:
- MAT : (`int`) MAT number to identify the isotope
- MF : (`int`) MF number to identify the data type
- MT : (`int`) MT number to identify the reaction
**Columns**:
- TEXT : (`string`) MAT/MF/MT section reported as a single string
Methods
-------
"""
def get_nsub(self):
"""Determine ENDF-6 sub-library type by reading flag "NSUB" of first MAT in file:
* `NSUB = 10` : Incident-Neutron Data
* `NSUB = 11` : Neutron-Induced Fission Product Yields
Returns
-------
`int`
NSUB value
"""
return self.read_section(self.mat[0], 1, 451)["NSUB"]
def read_section(self, mat, mf, mt):
"""Parse MAT/MF/MT section.
"""
if mf == 1:
foo = mf1.read
elif mf == 3:
foo = mf3.read
elif mf == 4:
foo = mf4.read
elif mf == 5:
foo = mf5.read
elif mf == 8:
foo = mf8.read
elif mf == 33 or mf == 31:
foo = mf33.read
elif mf == 34:
foo = mf34.read
elif mf == 35:
foo = mf35.read
else:
raise SandyError("SANDY cannot parse section MAT{}/MF{}/MT{}".format(mat,mf,mt))
if (mat,mf,mt) not in self.index:
raise SandyError("section MAT{}/MF{}/MT{} is not in tape".format(mat,mf,mt))
return foo(self.loc[mat,mf,mt].TEXT)
def write_string(self, title=" "*66, skip_title=False, skip_fend=False):
"""Collect all rows in `Endf6` and write them into string.
Parameters
----------
title : `str`
title of the file
skip_title : `bool`
do not write the title
skip_fend : `bool`
do not write the last FEND line
Returns
-------
`str`
"""
from .records import write_cont
tape = self.copy()
string = ""
if not skip_title:
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(title, 1, 0, 0, 0)
for mat,dfmat in tape.groupby('MAT', sort=True):
for mf,dfmf in dfmat.groupby('MF', sort=True):
for mt,dfmt in dfmf.groupby('MT', sort=True):
for text in dfmt.TEXT:
string += text.encode('ascii', 'replace').decode('ascii')
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), int(mat), int(mf), 0, 99999)
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), int(mat), 0, 0, 0)
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), 0, 0, 0, 0)
if not skip_fend:
string += "{:<66}{:4}{:2}{:3}{:5}".format(*write_cont(*[0]*6), -1, 0, 0, 0)
return string
def get_xs(self, listmat=None, listmt=None):
""" Extract selected cross sections (xs).
xs are linearized on unique grid.
Missing points are linearly interpolated (use zero when out of domain).
Conditions:
- Interpolation law must be lin-lin
- No duplicate points on energy grid
"""
condition = self.index.get_level_values("MF") == 3
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
ListXs = []
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
xs = pd.Series(X["XS"], index=X["E"], name=(X["MAT"],X["MT"])).rename_axis("E").to_frame()
duplicates = [x for x, count in Counter(xs.index).items() if count > 1]
if duplicates:
raise SandyError('duplicate energy points found for MAT{}/MF{}/MT{}\n'.format(*ix) +
'\n'.join(map(str,duplicates)))
if X['INT'] != [2]:
raise SandyError('MAT{}/MF{}/MT{} interpolation scheme is not lin-lin'.format(*ix))
ListXs.append(xs)
if not ListXs:
logging.warn("requested cross sections were not found")
return pd.DataFrame()
frame = reduce(lambda left,right : pd.merge(left, right, left_index=True, right_index=True, how='outer'), ListXs).sort_index().interpolate(method='slinear', axis=0).fillna(0)
return Xs(frame)
def update_xs(self, xsFrame):
from .mf3 import write
tape = self.copy()
mf = 3
for (mat,mt),xsSeries in xsFrame.iteritems():
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
# Cut threshold xs
ethresh = sec["E"][0]
xsSeries = xsSeries.where(xsSeries.index >= ethresh).dropna()
# iNotZero = next((i for i,x in enumerate(xsSeries) if x), None)
# if iNotZero > 0: xsSeries = xsSeries.iloc[iNotZero-1:]
sec["E"] = xsSeries.index.values
sec["XS"] = xsSeries.values
# Assume all xs have only 1 interpolation region and it is linear
sec["NBT"] = [xsSeries.size]
sec["INT"] = [2]
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def get_nubar(self, listmat=None, listmt=None):
"""
Extract selected nubar.
nubar are linearized on unique grid.
Missing points are linearly interpolated (use zero when out of domain).
Conditions:
- Interpolation law must be lin-lin
- No duplicate points on energy grid
"""
condition = self.index.get_level_values("MF") == 1
tape = self[condition]
conditions = [tape.index.get_level_values("MT") == x for x in [452, 455, 456]]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
# query = "MF==1 & (MT==452 | MT==455 | MT==456)"
# if listmat is not None:
# query_mats = " | ".join(["MAT=={}".format(x) for x in listmat])
# query += " & ({})".format(query_mats)
# if listmt is not None:
# query_mts = " | ".join(["MT=={}".format(x) for x in listmt])
# query += " & ({})".format(query_mts)
# tape = self.query(query)
ListXs = []
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
xs = pd.Series(X["NUBAR"], index=X["E"], name=(X["MAT"],X["MT"])).rename_axis("E").to_frame()
duplicates = [x for x, count in Counter(xs.index).items() if count > 1]
if duplicates:
raise SandyError('duplicate energy points found for MAT{}/MF{}/MT{}\n'.format(*ix) +
'\n'.join(map(str,duplicates)))
if X['INT'] != [2]:
raise SandyError('MAT{}/MF{}/MT{} interpolation scheme is not lin-lin'.format(*ix))
ListXs.append(xs)
if not ListXs:
logging.warn("no fission neutron multiplicity was found")
return pd.DataFrame()
frame = reduce(lambda left,right : pd.merge(left, right, left_index=True, right_index=True, how='outer'), ListXs).sort_index().interpolate(method='slinear', axis=0).fillna(0)
return Xs(frame)
def update_nubar(self, xsFrame):
from .mf1 import write
tape = self.copy()
mf = 1
for (mat,mt),S in xsFrame.iteritems():
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
# Cut threshold xs
iNotZero = next((i for i,x in enumerate(S) if x), None)
if iNotZero > 0: S = S.iloc[iNotZero-1:]
sec["E"] = S.index.values
sec["NUBAR"] = S.values
# Assume all xs have only 1 interpolation region and it is linear
sec["NBT"] = [S.size]
sec["INT"] = [2]
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def update_edistr(self, edistrFrame):
from .mf5 import write
mf = 5
tape = self.copy()
for (mat,mt),S in edistrFrame.groupby(["MAT","MT"]):
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
for k,S in S.groupby(["K"]):
if sec["PDISTR"][k]["LF"] != 1: continue
ein_orig = sorted(sec["PDISTR"][k]["EIN"].keys())
for ein in S.index.get_level_values("EIN"):
edistr = S.loc[mat,mt,k,ein].values
eout = S.loc[mat,mt,k,ein].index.values
ein_found = find_nearest(ein_orig, ein)[1]
mask = np.in1d(eout, sec["PDISTR"][k]["EIN"][ein_found]["EOUT"])
edistr = edistr[mask]
eout = eout[mask]
dict_distr = {"EDISTR" : edistr,
"EOUT" : eout,
"NBT" : [len(eout)],
"INT" : [2]}
sec["PDISTR"][k]["EIN"].update({ein : dict_distr})
sec["PDISTR"][k]["NBT_EIN"] = [len(sec["PDISTR"][k]["EIN"])]
sec["PDISTR"][k]["INT_EIN"] = [2]
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def get_edistr_cov(self, listmat=None, listmt=None):
condition = self.index.get_level_values("MF") == 35
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
# query = "MF==35"
# if listmat is not None:
# query_mats = " | ".join(["MAT=={}".format(x) for x in listmat])
# query += " & ({})".format(query_mats)
# if listmt is not None:
# query_mts = " | ".join(["MT=={}".format(x) for x in listmt])
# query += " & ({})".format(query_mts)
# tape = self.query(query)
List = []; eg = set()
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
mat = X['MAT']; mt = X['MT']
for sub in X["SUB"].values():
# Ek grid is one unit longer than covariance.
Ek = np.array(sub["EK"])
Fkk = np.array(sub["FKK"])
NE = sub["NE"]
cov = triu_matrix(Fkk, NE-1)
# Normalize covariance matrix dividing by the energy bin.
dE = 1./(Ek[1:]-Ek[:-1])
cov = corr2cov(cov, dE)
# Add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
cov = pd.DataFrame(cov, index=Ek, columns=Ek)
eg |= set(cov.index.values)
List.append([mat, mt, sub["ELO"], sub["EHI"], cov])
if not List:
logging.warn("no energy distribution covariance found")
return pd.DataFrame()
frame = pd.DataFrame(List, columns=('MAT', 'MT', 'ELO', 'EHI', 'COV'))
eg = sorted(eg)
frame.COV = frame.COV.apply(lambda x:cov_interp(x, eg))
# From here, the method is identical to Errorr.get_cov()
# Except that the size of eg is equal to the size of each matrix (we include the value for 2e7)
# and that the indexes are different
MI = [(mat,mt,elo,ehi,e) for mat,mt,elo,ehi in sorted(set(zip(frame.MAT, frame.MT, frame.ELO, frame.EHI))) for e in eg]
index = pd.MultiIndex.from_tuples(MI, names=("MAT", "MT", 'ELO', 'EHI', "EOUT"))
# initialize union matrix
matrix = np.zeros((len(index),len(index)))
for i,row in frame.iterrows():
ix = index.get_loc((row.MAT,row.MT,row.ELO,row.EHI))
ix1 = index.get_loc((row.MAT,row.MT,row.ELO,row.EHI))
matrix[ix.start:ix.stop,ix1.start:ix1.stop] = row.COV
i_lower = np.tril_indices(len(index), -1)
matrix[i_lower] = matrix.T[i_lower] # make the matrix symmetric
return EdistrCov(matrix, index=index, columns=index)
def get_lpc(self, listmat=None, listmt=None, verbose=True):
condition = self.index.get_level_values("MF") == 4
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
DictLpc = {}
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
if "LPC" not in X: continue
if X["LPC"]["INT"] != [2]:
if verbose:
logging.warn("found non-linlin interpolation, skip angular distr. for MAT{}/MF{}/MT{}".format(*ix))
continue
for e,v in X["LPC"]["E"].items():
DictLpc.update({(X["MAT"], X["MT"],e) : pd.Series([1]+v["COEFF"])})
if not DictLpc:
logging.warn("no angular distribution in Legendre expansion was found")
            return pd.DataFrame()
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
import os
import datetime
from tensorflow.keras import regularizers
x = pd.read_csv('train.csv')
x_test = pd.read_csv('test.csv')
y_a = x['breed_category'].astype(int)
y_b = x['pet_category']
y_a = y_a.to_frame()
y_b = y_b.to_frame()
x = x.drop('pet_category',axis=1)
x = x.drop('breed_category',axis=1)
x = x.drop('pet_id',axis=1)
pet_id = x_test['pet_id']
x_test = x_test.drop('pet_id', axis=1)
# convert the issue_date strings into Timestamp objects
x['issue_date'] = [datetime.datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in x["issue_date"]]
x_test['issue_date'] = [datetime.datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in x_test["issue_date"]]
# extracting date from timestamp
x['issue_dates'] = [datetime.datetime.date(d) for d in x['issue_date']]
x_test['issue_dates'] = [datetime.datetime.date(d) for d in x_test['issue_date']]
x = x.drop("issue_date",axis=1)
x_test = x_test.drop("issue_date", axis=1)
x['listing_date'] = [datetime.datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in x["listing_date"]]
x['listing_hour'] = [d.hour for d in x['listing_date']]
x['listing_minute'] = [d.minute for d in x['listing_date']]
x['listing_dates'] = [datetime.datetime.date(d) for d in x['listing_date']]
x = x.drop("listing_date",axis=1)
x['Difference_dates'] = x['listing_dates'].sub(x['issue_dates'], axis=0)
x['Difference_dates'] = x['Difference_dates'] / np.timedelta64(1, 'D')
x_test['listing_date'] = [datetime.datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in x_test["listing_date"]]
x_test['listing_hour'] = [d.hour for d in x_test['listing_date']]
x_test['listing_minute'] = [d.minute for d in x_test['listing_date']]
x_test['listing_dates'] = [datetime.datetime.date(d) for d in x_test['listing_date']]
x_test = x_test.drop("listing_date", axis=1)
x_test['Difference_dates'] = x_test['listing_dates'].sub(x_test['issue_dates'], axis=0)
x_test['Difference_dates'] = x_test['Difference_dates'] / np.timedelta64(1, 'D')
x['listing_dates'] = pd.to_datetime(x['listing_dates'])
x['issue_dates'] = pd.to_datetime(x['issue_dates'])
x_test['listing_dates'] = pd.to_datetime(x_test['listing_dates'])
import pandas as pd
import datetime as datetime
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.smoothers_lowess import lowess
"""
cgmquantify package
Description:
The cgmquantify package is a comprehensive library for computing metrics from continuous glucose monitors.
Requirements:
pandas, datetime, numpy, matplotlib, statsmodels
Functions:
importdexcom(): Imports data from Dexcom continuous glucose monitor devices
interdaycv(): Computes and returns the interday coefficient of variation of glucose
interdaysd(): Computes and returns the interday standard deviation of glucose
intradaycv(): Computes and returns the intraday coefficient of variation of glucose
intradaysd(): Computes and returns the intraday standard deviation of glucose
TIR(): Computes and returns the time in range
TOR(): Computes and returns the time outside range
PIR(): Computes and returns the percent time in range
POR(): Computes and returns the percent time outside range
MGE(): Computes and returns the mean of glucose outside specified range
MGN(): Computes and returns the mean of glucose inside specified range
MAGE(): Computes and returns the mean amplitude of glucose excursions
J_index(): Computes and returns the J-index
LBGI(): Computes and returns the low blood glucose index
HBGI(): Computes and returns the high blood glucose index
ADRR(): Computes and returns the average daily risk range, an assessment of total daily glucose variations within risk space
MODD(): Computes and returns the mean of daily differences. Examines mean of value + value 24 hours before
CONGA24(): Computes and returns the continuous overall net glycemic action over 24 hours
GMI(): Computes and returns the glucose management index
eA1c(): Computes and returns the American Diabetes Association estimated HbA1c
summary(): Computes and returns glucose summary metrics, including interday mean glucose, interday median glucose, interday minimum glucose, interday maximum glucose, interday first quartile glucose, and interday third quartile glucose
plotglucosesd(): Plots glucose with specified standard deviation lines
plotglucosebounds(): Plots glucose with user-defined boundaries
plotglucosesmooth(): Plots smoothed glucose plot (with LOWESS smoothing)
"""
def importdexcom(filename):
"""
Imports data from Dexcom continuous glucose monitor devices
Args:
filename (String): path to file
Returns:
(pd.DataFrame): dataframe of data with Time, Glucose, and Day columns
"""
data = pd.read_csv(filename)
    df = pd.DataFrame()
import logging
import sys
from os import environ as env
from time import time
from gitlabdata.orchestration_utils import (
dataframe_uploader,
dataframe_enricher,
snowflake_engine_factory,
)
import pandas as pd
from sqlalchemy.engine.base import Engine
def single_query_upload(query: str, table_name: str) -> pd.DataFrame:
"""
Takes a single query and uploads to raw.snowflake
"""
snowflake_engine_sysadmin = snowflake_engine_factory(config_dict, "SYSADMIN")
connection = snowflake_engine_sysadmin.connect()
results = pd.read_sql(sql=query, con=connection)
connection.close()
snowflake_engine_sysadmin.dispose()
snowflake_engine_loader = snowflake_engine_factory(config_dict, "LOADER")
dataframe_uploader(results, snowflake_engine_loader, table_name, "snowflake")
snowflake_engine_loader.dispose()
return results
def iterative_query_upload(
dataframe: pd.DataFrame, column: str, base_query: str, table_name: str
) -> None:
"""
Takes a pandas dataframe, iterates on a given column, builds a final result set,
and uploads to raw.snowflake.
"""
snowflake_engine_sysadmin = snowflake_engine_factory(config_dict, "SYSADMIN")
connection = snowflake_engine_sysadmin.connect()
results_all = []
for index, row in dataframe.iterrows():
ref_column = row[column]
query = f"{base_query} {ref_column};"
        results = pd.read_sql(sql=query, con=connection)
import mysql.connector
import base64
from datetime import date, timedelta, timezone
import pandas as pd
import requests
from datetime import datetime
from data_management import settings as config
from urllib.parse import urlencode
import data_processing_app.models as models
from django.db.models.functions import ExtractWeek, ExtractYear
from django.db.models import Sum
import data_processing_app.toggl_data_processing as data_processing
from plotly.offline import plot
from plotly.graph_objs import Scatter
import plotly.graph_objs as go
def connect_to_toggl(api_token):
"""Connect to toggl and get response containing information to the
:param api_token: Token for you user profile, you can find the token at
Toggl.com at the end of the profile settings page
"""
string = api_token + ':api_token'
headers = {
'Authorization': 'Basic ' + base64.b64encode(string.encode('ascii')).decode("utf-8")}
url = 'https://www.toggl.com/api/v8/me'
response = requests.get(url, headers=headers)
response = response.json()
email = response['data']['email']
workspaces = [{'name': item['name'], 'id': item['id']} for item in response['data']['workspaces'] if
item['admin'] == True]
return email, workspaces, headers
def get_all_clients_and_projects(my_workspace, headers):
'''Gets all clients and projects for your workspace id'''
url = 'https://www.toggl.com/api/v8/workspaces/' + str(my_workspace) + '/clients'
clients = requests.get(url, headers=headers).json()
url = 'https://www.toggl.com/api/v8/workspaces/' + str(my_workspace) + '/projects'
projects = requests.get(url, headers=headers).json()
return clients, projects
def get_all_time_entries(headers, start_date, end_date):
'''Finds all time entries in the time frame [start_date - end_date]'''
start_date = datetime.combine(start_date, datetime.min.time())
end_date = datetime.combine(end_date, datetime.min.time())
start_date = start_date.replace(tzinfo=timezone.utc).isoformat()
end_date = end_date.replace(tzinfo=timezone.utc).isoformat()
url = 'https://api.track.toggl.com/api/v8/time_entries?'
params = {'start_date': start_date, 'end_date': end_date}
url = url + '{}'.format(urlencode(params))
time_entries = requests.get(url, headers=headers).json()
return time_entries
def data_processing(clients,projects,time_entries):
'''Join clients, projects and time entries to a data frame with all time entries
and the corresponding information to clients and projects'''
projects_filtered = [{'pid': item['id'],
'cid': item['cid'],
'project_name': item['name']} for item in projects]
clients_filtered = [{'cid': item['id'],
'client_name': item['name']} for item in clients]
projects_df = pd.DataFrame(data=projects_filtered)
clients_df = pd.DataFrame(data=clients_filtered)
time_entries_df = pd.DataFrame(data=time_entries)
# join_projects_clients = projects_df.set_index('cid').join(clients_df.set_index('cid'))
# time_entries_extended_df = time_entries_df.set_index('pid').join(join_projects_clients.set_index('pid'))
return projects_df, clients_df, time_entries_df
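# Usage sketch (editorial; the token, workspace choice and dates are placeholder assumptions):
# email, workspaces, headers = connect_to_toggl(api_token)
# clients, projects = get_all_clients_and_projects(workspaces[0]['id'], headers)
# time_entries = get_all_time_entries(headers, date(2021, 1, 1), date(2021, 2, 1))
# projects_df, clients_df, time_entries_df = data_processing(clients, projects, time_entries)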
def define_working_days_table(start_date, end_date):
"""
:return: Returns a data frame with all days in the defined time frame (start_date - end_date)
The data frame has two columns: days and type
:Days: contains all dates in the time frame
:Type: the information if the day is a
- working day (WD)
- vacation day (paid time off - PTO)
- public holiday (PH)
- weekend (WE) - saturday and sunday
"""
#retrive objects from public_holidays tables an write them in a list
public_holidays = [item.days for item in models.public_holidays.objects.all()]
vacation_days = [item.days for item in models.vacation_days.objects.all()]
all_days = []
number_of_days = end_date - start_date
for n in range(number_of_days.days):
day = start_date + timedelta(n)
all_days.append({'days': day, 'type': "WD"})
workdays_index = [0, 1, 2, 3, 4]
all_days_we = []
for item in all_days:
if date.weekday(item['days']) in workdays_index:
all_days_we.append({'days': item['days'], 'type': item['type']})
else:
all_days_we.append({'days': item['days'], 'type': "WE"})
all_days_we_ph = []
for item in all_days_we:
if item['days'] in public_holidays:
all_days_we_ph.append({'days': item['days'], 'type': "PH"})
else:
all_days_we_ph.append({'days': item['days'], 'type': item['type']})
all_days_we_ph_pto = []
for item in all_days_we_ph:
if item['days'] in vacation_days:
all_days_we_ph_pto.append({'days': item['days'], 'type': "PTO"})
else:
all_days_we_ph_pto.append({'days': item['days'], 'type': item['type']})
print(f"Number of days between start and end date: {len(all_days_we_ph_pto)}")
print(f"Number of weekend days between start and end date: {len([1 for item in all_days_we_ph_pto if item['type'] == 'WE'])}")
print(f"Number of public holidays between start and end date (minus public holidays): {len([1 for item in all_days_we_ph_pto if item['type'] == 'PH'])}")
print(f"Number of vacation days between start and end date (minus public holidays and vacation days): {len([1 for item in all_days_we_ph_pto if item['type'] == 'PTO'])}")
working_days = []
for item in all_days_we_ph_pto:
if item['type'] == "WD":
working_days.append({'days': item['days'], 'type': item['type'], 'working_hours': config.target_hours_per_day})
else:
working_days.append({'days': item['days'], 'type': item['type'], 'working_hours': 0})
working_days_df = | pd.DataFrame(data=working_days) | pandas.DataFrame |
from typing import Union, List, Optional
import numpy as np
from pandas import DataFrame as df
import matplotlib.pyplot as pl
from VTree.timeseries.Timeseries import Timeseries
from VTree.vtree.Node import Node
from VTree.timeseries.Sample import Sample
def matprint(mat, fmt="g"):
col_maxes = [max([len(("{:" + fmt + "}").format(x)) for x in col]) for col in mat.T]
for x in mat:
for i, y in enumerate(x):
print(("{:" + str(col_maxes[i]) + fmt + "}").format(y), end=" ")
print("")
class VTree:
def __init__(self, t_w: float, k: int, m: int
, time: List[np.ndarray] = None
, flux: List[np.ndarray] = None
, label: List['str'] = None
, tic_id: List[int] = None):
self.t_w = t_w
self.k = k
self.m = m
self.master_node : Node = None
if time is not None and flux is not None:
self._sample = Sample(time=time,flux=flux,label=label,tic_id=tic_id,t_w=t_w)
else:
self._sample: Sample = None
def create_training_sample(self, time: List[np.ndarray]
, flux: List[np.ndarray]
, label: List['str']
, tic_id: List[int]):
self._sample = Sample(time=time, flux=flux, label=label, tic_id=tic_id, t_w=self.t_w)
def train(self,t_s:float):
if self._sample is None:
raise ValueError("Please provide a training set before training.")
self.master_node = Node(0,self.k,self.m,self._sample)
self.master_node.visualize()
self.master_node.populate_tree(t_s)
self.master_node.create_d_matrix()
pass
def query(self,data : Timeseries,t_s):
self.master_node.reset_query_ts_count()
self.master_node.add_query_ts(t_s,data)
q_vector = self.master_node.q_vector()
similarity = self.master_node.w_d_matrix.dot(q_vector)
data = {
'obj':[],
'similarity':[]
}
for i in np.argsort(similarity)[::-1]:
data['obj'].append(self.master_node.sample[i])
data['similarity'].append(similarity[i])
return | df.from_dict(data) | pandas.DataFrame.from_dict |
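# Usage sketch (editorial, based on the class interface above; all values are placeholders):
# vtree = VTree(t_w=10.0, k=3, m=4, time=times, flux=fluxes, label=labels, tic_id=ids)
# vtree.train(t_s=0.5)
# ranking = vtree.query(new_timeseries, t_s=0.5)  # DataFrame of objects sorted by similarity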
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Bar plots global means
# %% [markdown]
# ## Div imports
#
# load and autoreload
# %%
from IPython import get_ipython
from oas_erf.constants import get_plotpath
from oas_erf.util.naming_conventions.var_info import get_fancy_var_name
from oas_erf.util.practical_functions import make_folders
from oas_erf.util.slice_average import one_val_tab
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
# %%
from oas_erf.data_info.simulation_types import get_abs_by_type
# noinspection PyBroadException
try:
_ipython = get_ipython()
_magic = _ipython.magic
_magic('load_ext autoreload')
_magic('autoreload 2')
except:
pass
# %% [markdown]
# ## Filenames
# %%
from oas_erf.util.plot.colors import get_case_col
version = ''
plt_path = get_plotpath('one_value')
def create_filename(name):
fn = f'{plt_path}_2d_{version}{name}.'
make_folders(fn)
return fn
# %% [markdown]
# ## Div settings
# %%
startyear = '0004-01'
endyear = '0008-12'
pmin = 850. # minimum pressure level
avg_over_lev = True  # average over levels
pressure_adjust = True # Can only be false if avg_over_lev false. Plots particular hybrid sigma lev
# %%
varl = ['N_AER', 'NCONC01', 'TGCLDCWP', 'CDNUMC', 'NCFT_Ghan', 'DIR_Ghan', 'LWDIR_Ghan', 'SWDIR_Ghan', 'SWCF_Ghan',
'LWCF_Ghan', 'cb_SO4_NA', 'cb_SOA_NA', 'cb_NA', 'SOA_NA', 'SO4_NA',
'ACTNL_incld', 'ACTREL_incld', 'SFisoprene', 'SFmonoterp']
varl_ex = ['FSNT', 'FSNT_DRF', 'FLNT', 'FLNT_DRF', 'FSNTCDRF']
varl = varl + varl_ex
cases = ['SECTv21_ctrl_koagD', 'SECTv21_incY', 'SECTv21_decY', 'noSECTv21_ox_ricc_dd', 'noSECTv21_ox_ricc_decY',
'noSECTv21_ox_ricc_incY']
cases_sec = [
'NF1850_SECT_ctrl',
'NF1850_aeroxid2014_SECT_ctrl',
]
cases_nsec = [
'NF1850_noSECT_def',
'NF1850_aeroxid2014_noSECT_def',
'NF1850_aeroxid2014_noSECT_ox_ricc',
'NF1850_noSECT_ox_ricc',
]
# 'noSECTv21_ox_ricc_decY','noSECTv21_ox_ricc_dd','noSECTv21_ox_ricc_incY',
# 'noSECTv21_def_decY','noSECTv21_default_dd','noSECTv21_def_incY']
# %% [markdown]
# ## Import yearly means
# %%
# varl = ['N_AER']
df2, dic_vals = one_val_tab.get_tab_yearly_mean(varl,
cases_sec + cases_nsec,
startyear,
endyear,
pmin=pmin,
pressure_adjust=pressure_adjust,
average_over_lev=avg_over_lev,
groupby='time.year', # 'time',
dims=None,
area='Global',
invert_dic=True
)
# %%
# %%
di = get_abs_by_type(dic_vals, case_types=['PI', 'PD'])
di.keys()
# %% [markdown]
# ## Concatenate data in one dataframe
# %%
ls = []
for ct in di.keys():
_di = di[ct]
for cn in _di.keys():
print(cn)
_df = _di[cn]
_df['case'] = cn
_df['case_type'] = ct
ls.append(_df.reset_index())
df_tot = pd.concat(ls)
df_tot.head()
# %% [markdown]
# ## Reorganize data
# %%
di = get_abs_by_type(dic_vals, case_types=['PI', 'PD'])
_df = pd.DataFrame(columns=['val', 'type', 'var', 'model'])
di_var = {}
for t in di.keys():
for m in di[t].keys():
di[t][m] = di[t][m].mean()
for v in varl:
_df_v = pd.DataFrame(columns=['val', 'type', 'model'])
for t in di.keys():
for m in di[t].keys():
_df = _df.append(pd.DataFrame([di[t][m][v], t, v, m], index=['val', 'type', 'var', 'model']).transpose(),
ignore_index=True)
_df_v = _df_v.append(pd.DataFrame([di[t][m][v], t, m], index=['val', 'type', 'model']).transpose(),
ignore_index=True)
_df_v['val'] = | pd.to_numeric(_df_v['val']) | pandas.to_numeric |
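# %%
# Possible visualisation of the reorganised long-format frame (editorial sketch, not part of
# the original notebook): one bar per variable, split by case type.
# sns.barplot(data=_df, x='var', y='val', hue='type')
# plt.show()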
import astropy.io.fits as fits
from astropy.coordinates import SkyCoord
from astropy import wcs
import pandas as pd
import os
import numpy as np
from skimage import filters
import time
from skimage import measure
from scipy import ndimage
import matplotlib.pyplot as plt
from threading import Thread
from multiprocessing import Pool
from time import sleep, ctime
from DensityClust.clustring_subfunc import \
kc_coord_3d, kc_coord_2d, get_xyz
"""
Multithreading is used when computing the distances and gradients.
"""
class Data:
def __init__(self, data_name):
self.data_name = data_name
self.data = None
self.wcs = None
self.size_x = 0
self.size_y = 0
self.size_z = 0
self.ND = 0
self.get_data_inf()
def get_data_inf(self):
        data = fits.getdata(self.data_name)
# self.wcs = self.get_wcs()
size_x, size_y, size_z = data.shape
self.size_x = size_x
self.size_y = size_y
self.size_z = size_z
self.data = data
self.ND = size_x * size_y * size_z
def get_wcs(self):
"""
        Get the WCS information from the FITS header.
:return:
data_wcs
"""
data_header = fits.getheader(self.data_name)
keys = data_header.keys()
key = [k for k in keys if k.endswith('4')]
[data_header.remove(k) for k in key]
try:
data_header.remove('VELREF')
except:
pass
data_wcs = wcs.WCS(data_header)
return data_wcs
class LocDenCluster:
def __init__(self, para, data_name):
"""
根据决策图得到聚类中心和聚类中心个数
:param para:
para.rhomin: Minimum density
para.deltamin: Minimum delta
para.v_min: Minimum volume
para.noise: The noise level of the data, used for data truncation calculation
para.sigma: Standard deviation of Gaussian filtering
"""
self.out = None
self.outcat = None
self.mask = None
self.gradmin = para["gradmin"]
self.rhomin = para["rhomin"]
self.deltamin = para["deltamin"]
self.v_min = para["v_min"]
self.rms = para["noise"]
self.dc = para['dc']
self.is_plot = para['is_plot']
self.Data = Data(data_name)
ND = self.Data.ND
self.Gradient = np.zeros(ND, np.float)
self.IndNearNeigh = np.zeros(ND, np.int)
self.delta = np.zeros(ND, np.float)
def summary(self):
table_title = ['rhomin', 'deltamin', 'v_min', 'gradmin', 'noise', 'dc']
para = np.array([[self.rhomin, self.deltamin, self.v_min, self.gradmin, self.rms, self.dc]])
para_pd = pd.DataFrame(para, columns=table_title)
# print(para_pd)
return para_pd
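    # Usage sketch (editorial; the parameter values below are placeholders, not recommendations):
    # para = {"gradmin": 0.01, "rhomin": 0.8, "deltamin": 4, "v_min": 27,
    #         "noise": 0.46, "dc": 0.6, "is_plot": 0}
    # ldc = LocDenCluster(para, "example_cube.fits")
    # ldc.densityCluster_3d()
    # print(ldc.summary())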
def change_pix2word(self):
"""
        Convert the detection results (in pixel units) to sky (WCS) coordinates.
:return:
outcat_wcs
['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'Peak', 'Sum', 'Volume']
-->3d
['ID', 'Peak1', 'Peak2', 'Cen1', 'Cen2', 'Size1', 'Size2', 'Peak', 'Sum', 'Volume']
-->2d
"""
outcat = self.outcat
if outcat is None:
return
else:
outcat_column = outcat.shape[1]
data_wcs = self.Data.wcs
if outcat_column == 10:
# 2d result
peak1, peak2 = data_wcs.all_pix2world(outcat['Peak1'], outcat['Peak2'], 1)
cen1, cen2 = data_wcs.all_pix2world(outcat['Cen1'], outcat['Cen2'], 1)
size1, size2 = np.array([outcat['Size1'] * 30, outcat['Size2'] * 30])
clump_Peak = np.column_stack([peak1, peak2])
clump_Cen = np.column_stack([cen1, cen2])
clustSize = np.column_stack([size1, size2])
clustPeak, clustSum, clustVolume = np.array([outcat['Peak'], outcat['Sum'], outcat['Volume']])
                id_clumps = []  # e.g. MWSIP017.558+00.150+020.17 means Galactic longitude 17.558°, Galactic latitude 0.15°, velocity 20.17 km/s
for item_l, item_b in zip(cen1, cen2):
str_l = 'MWSIP' + ('%.03f' % item_l).rjust(7, '0')
if item_b < 0:
str_b = '-' + ('%.03f' % abs(item_b)).rjust(6, '0')
else:
str_b = '+' + ('%.03f' % abs(item_b)).rjust(6, '0')
id_clumps.append(str_l + str_b)
id_clumps = np.array(id_clumps)
table_title = ['ID', 'Peak1', 'Peak2', 'Cen1', 'Cen2', 'Size1', 'Size2', 'Peak', 'Sum', 'Volume']
elif outcat_column == 13:
# 3d result
peak1, peak2, peak3 = data_wcs.all_pix2world(outcat['Peak1'], outcat['Peak2'], outcat['Peak3'], 1)
cen1, cen2, cen3 = data_wcs.all_pix2world(outcat['Cen1'], outcat['Cen2'], outcat['Cen3'], 1)
size1, size2, size3 = np.array([outcat['Size1'] * 30, outcat['Size2'] * 30, outcat['Size3'] * 0.166])
clustPeak, clustSum, clustVolume = np.array([outcat['Peak'], outcat['Sum'], outcat['Volume']])
clump_Peak = np.column_stack([peak1, peak2, peak3 / 1000])
clump_Cen = np.column_stack([cen1, cen2, cen3 / 1000])
clustSize = np.column_stack([size1, size2, size3])
                id_clumps = []  # e.g. MWISP017.558+00.150+020.17 means Galactic longitude 17.558°, Galactic latitude 0.15°, velocity 20.17 km/s
for item_l, item_b, item_v in zip(cen1, cen2, cen3 / 1000):
str_l = 'MWISP' + ('%.03f' % item_l).rjust(7, '0')
if item_b < 0:
str_b = '-' + ('%.03f' % abs(item_b)).rjust(6, '0')
else:
str_b = '+' + ('%.03f' % abs(item_b)).rjust(6, '0')
if item_v < 0:
str_v = '-' + ('%.03f' % abs(item_v)).rjust(6, '0')
else:
str_v = '+' + ('%.03f' % abs(item_v)).rjust(6, '0')
id_clumps.append(str_l + str_b + str_v)
id_clumps = np.array(id_clumps)
table_title = ['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3',
'Peak', 'Sum', 'Volume']
else:
print('outcat columns is %d' % outcat_column)
return None
outcat_wcs = np.column_stack((id_clumps, clump_Peak, clump_Cen, clustSize, clustPeak, clustSum, clustVolume))
outcat_wcs = pd.DataFrame(outcat_wcs, columns=table_title)
return outcat_wcs
def densityCluster_3d(self):
data = self.Data.data
        k1 = 1  # neighborhood size for the first pass
        k2 = np.ceil(self.deltamin).astype(np.int)  # neighborhood size for the second pass
        xx = get_xyz(data)  # xx: 3D data coordinates; the coordinate origin is 1
dim = data.ndim
size_x, size_y, size_z = data.shape
maxed = size_x + size_y + size_z
ND = size_x * size_y * size_z
# Initialize the return result: mask and out
mask = np.zeros_like(data, dtype=np.int)
out = np.zeros_like(data, dtype=np.float)
data_filter = filters.gaussian(data, self.dc)
rho = data_filter.flatten()
rho_Ind = np.argsort(-rho)
rho_sorted = rho[rho_Ind]
delta, IndNearNeigh, Gradient = np.zeros(ND, np.float), np.zeros(ND, np.int), np.zeros(ND, np.float)
delta[rho_Ind[0]] = np.sqrt(size_x ** 2 + size_y ** 2 + size_z ** 2)
        # delta records the distance,
        # IndNearNeigh records the link between two density points: index of the nearest neighbor with higher density
IndNearNeigh[rho_Ind[0]] = rho_Ind[0]
t0_ = time.time()
# calculating delta and Gradient
for ii in range(1, ND):
            # index (in rho) of the point with the ii-th largest density after sorting in descending order
            ordrho_ii = rho_Ind[ii]
            rho_ii = rho_sorted[ii]  # the ii-th largest density value
if rho_ii >= self.rms:
delta[ordrho_ii] = maxed
point_ii_xy = xx[ordrho_ii, :]
                get_value = True  # whether the larger-neighborhood search is still needed; set to False once a neighbor is found in the small neighborhood
idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k1)
for ordrho_jj, item in zip(idex, bt):
rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值
dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离
gradient = (rho_jj - rho_ii) / dist_i_j
if dist_i_j <= delta[ordrho_ii] and gradient >= 0:
delta[ordrho_ii] = dist_i_j
Gradient[ordrho_ii] = gradient
IndNearNeigh[ordrho_ii] = ordrho_jj
get_value = False
if get_value:
                    # no closer point with higher density was found in the (2 * k1 + 1)^3 neighborhood, so search a larger neighborhood
idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k2)
for ordrho_jj, item in zip(idex, bt):
rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值
dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离
gradient = (rho_jj - rho_ii) / dist_i_j
if dist_i_j <= delta[ordrho_ii] and gradient >= 0:
delta[ordrho_ii] = dist_i_j
Gradient[ordrho_ii] = gradient
IndNearNeigh[ordrho_ii] = ordrho_jj
get_value = False
if get_value:
delta[ordrho_ii] = k2 + 0.0001
Gradient[ordrho_ii] = -1
IndNearNeigh[ordrho_ii] = ND
else:
IndNearNeigh[ordrho_ii] = ND
delta_sorted = np.sort(-delta) * -1
delta[rho_Ind[0]] = delta_sorted[1]
t1_ = time.time()
print('delata, rho and Gradient are calculated, using %.2f seconds' % (t1_ - t0_))
        # determine the cluster centers from density and distance
clusterInd = -1 * np.ones(ND + 1)
clust_index = np.intersect1d(np.where(rho > self.rhomin), np.where(delta > self.deltamin))
clust_num = len(clust_index)
        # icl records the index in xx of the i-th cluster center
icl = np.zeros(clust_num, dtype=int)
n_clump = 0
for ii in range(clust_num):
i = clust_index[ii]
icl[n_clump] = i
n_clump += 1
clusterInd[i] = n_clump
        # assignation: assign every non-center point to its nearest higher-density cluster center
        # clusterInd = -1 means the point is not a cluster center and is waiting to be assigned to a cluster
        # the Gradient of a cluster-center point is set to -1
if self.is_plot == 1:
pass
for i in range(ND):
ordrho_i = rho_Ind[i]
if clusterInd[ordrho_i] == -1: # not centroid
clusterInd[ordrho_i] = clusterInd[IndNearNeigh[ordrho_i]]
else:
Gradient[ordrho_i] = -1 # 将类中心点的梯度设置为-1
clump_volume = np.zeros(n_clump)
for i in range(n_clump):
clump_volume[i] = clusterInd.tolist().count(i + 1)
        # centInd: [index of the cluster-center point in xx, index within centInd = cluster label]
centInd = []
for i, item in enumerate(clump_volume):
if item >= self.v_min:
centInd.append([icl[i], i])
centInd = np.array(centInd, np.int)
mask_grad = np.where(Gradient > self.gradmin)[0]
        # after the boundaries are set by the gradient, the minimum volume is further used to reject false clumps
n_clump = centInd.shape[0]
clump_sum, clump_volume, clump_peak = np.zeros([n_clump, 1]), np.zeros([n_clump, 1]), np.zeros([n_clump, 1])
clump_Cen, clump_size = np.zeros([n_clump, dim]), np.zeros([n_clump, dim])
clump_Peak = np.zeros([n_clump, dim], np.int)
clump_ii = 0
for i, item in enumerate(centInd):
rho_cluster_i = np.zeros(ND)
index_cluster_i = np.where(clusterInd == (item[1] + 1))[0] # centInd[i, 1] --> item[1] 表示第i个类中心的编号
index_cc = np.intersect1d(mask_grad, index_cluster_i)
rho_cluster_i[index_cluster_i] = rho[index_cluster_i]
rho_cc_mean = rho[index_cc].mean() * 0.2
index_cc_rho = np.where(rho_cluster_i > rho_cc_mean)[0]
index_cluster_rho = np.union1d(index_cc, index_cc_rho)
            cl_1_index_ = xx[index_cluster_rho, :] - 1  # -1 so the indices can be used directly on data (0-based)
            # points labelled by clusterInd are numbered from 1; unlabelled points are -1
clustNum = cl_1_index_.shape[0]
cl_i = np.zeros(data.shape, np.int)
for item_ in cl_1_index_:
cl_i[item_[2], item_[1], item_[0]] = 1
            # morphological processing
            # cl_i = morphology.closing(cl_i)  # opening/closing would distort the masks of adjacent clumps
L = ndimage.binary_fill_holes(cl_i).astype(int)
L = measure.label(L) # Labeled input image. Labels with value 0 are ignored.
STATS = measure.regionprops(L)
Ar_sum = []
for region in STATS:
coords = region.coords # 经过验证,坐标原点为0
temp = 0
for item_coord in coords:
temp += data[item_coord[0], item_coord[1], item_coord[2]]
Ar_sum.append(temp)
Ar = np.array(Ar_sum)
ind = np.where(Ar == Ar.max())[0]
L[L != ind[0] + 1] = 0
cl_i = L / (ind[0] + 1)
coords = STATS[ind[0]].coords # 最大的连通域对应的坐标
if coords.shape[0] > self.v_min:
coords = coords[:, [2, 1, 0]]
clump_i_ = np.zeros(coords.shape[0])
for j, item_coord in enumerate(coords):
clump_i_[j] = data[item_coord[2], item_coord[1], item_coord[0]]
clustsum = clump_i_.sum() + 0.0001 # 加一个0.0001 防止分母为0
clump_Cen[clump_ii, :] = np.matmul(clump_i_, coords) / clustsum
clump_volume[clump_ii, 0] = clustNum
clump_sum[clump_ii, 0] = clustsum
x_i = coords - clump_Cen[clump_ii, :]
clump_size[clump_ii, :] = 2.3548 * np.sqrt((np.matmul(clump_i_, x_i ** 2) / clustsum)
- (np.matmul(clump_i_, x_i) / clustsum) ** 2)
clump_i = data * cl_i
out = out + clump_i
mask = mask + cl_i * (clump_ii + 1)
clump_peak[clump_ii, 0] = clump_i.max()
clump_Peak[clump_ii, [2, 1, 0]] = np.argwhere(clump_i == clump_i.max())[0]
clump_ii += 1
else:
pass
clump_Peak = clump_Peak + 1
        clump_Cen = clump_Cen + 1  # Python indices start at 0; add 1 so the coordinate origin is 1
id_clumps = np.array([item + 1 for item in range(n_clump)], np.int).T
id_clumps = id_clumps.reshape([n_clump, 1])
LDC_outcat = np.column_stack(
(id_clumps, clump_Peak, clump_Cen, clump_size, clump_peak, clump_sum, clump_volume))
LDC_outcat = LDC_outcat[:clump_ii, :]
table_title = ['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'Peak',
'Sum', 'Volume']
LDC_outcat = pd.DataFrame(LDC_outcat, columns=table_title)
self.outcat = LDC_outcat
self.mask = mask
self.out = out
def get_delta(self, rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end):
# print(ND_start, ND_end)
        print('--- start ---', ND_start, 'time', ctime())
for ii in range(ND_start, ND_end, 1):
# 密度降序排序后,即密度第ii大的索引(在rho中)
ordrho_ii = rho_Ind[ii]
rho_ii = rho_sorted[ii] # 第ii大的密度值
delta_ordrho_ii, Gradient_ordrho_ii, IndNearNeigh_ordrho_ii = 0, 0, 0
if rho_ii >= self.rms:
delta_ordrho_ii = maxed
point_ii_xy = xx[ordrho_ii, :]
get_value = True # 判断是否需要在大循环中继续执行,默认需要,一旦在小循环中赋值成功,就不在大循环中运行
idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k1)
for ordrho_jj, item in zip(idex, bt):
rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值
dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离
gradient = (rho_jj - rho_ii) / dist_i_j
if dist_i_j <= delta_ordrho_ii and gradient >= 0:
delta_ordrho_ii = dist_i_j
Gradient_ordrho_ii = gradient
IndNearNeigh_ordrho_ii = ordrho_jj
get_value = False
if get_value:
# 表明,在(2 * k1 + 1) * (2 * k1 + 1) * (2 * k1 + 1)的邻域中没有找到比该点高,距离最近的点,则在更大的邻域中搜索
idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k2)
for ordrho_jj, item in zip(idex, bt):
rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值
dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离
gradient = (rho_jj - rho_ii) / dist_i_j
if dist_i_j <= delta_ordrho_ii and gradient >= 0:
delta_ordrho_ii = dist_i_j
Gradient_ordrho_ii = gradient
IndNearNeigh_ordrho_ii = ordrho_jj
get_value = False
if get_value:
delta_ordrho_ii = k2 + 0.0001
Gradient_ordrho_ii = -1
IndNearNeigh_ordrho_ii = ND
else:
IndNearNeigh_ordrho_ii = ND
# print(delta_ordrho_ii)
self.delta[ordrho_ii] = delta_ordrho_ii
self.Gradient[ordrho_ii] = Gradient_ordrho_ii
self.IndNearNeigh[ordrho_ii] = IndNearNeigh_ordrho_ii
        print('*** finished ***', ND_start, 'time', ctime())
print(self.delta.max())
print(self.Gradient.max())
print(self.IndNearNeigh.max())
def densityCluster_3d_multi(self):
data = self.Data.data
k1 = 1 # 第1次计算点的邻域大小
k2 = np.ceil(self.deltamin).astype(np.int) # 第2次计算点的邻域大小
xx = get_xyz(data) # xx: 3D data coordinates 坐标原点是 1
dim = data.ndim
size_x, size_y, size_z = data.shape
maxed = size_x + size_y + size_z
ND = size_x * size_y * size_z
# Initialize the return result: mask and out
mask = np.zeros_like(data, dtype=np.int)
out = np.zeros_like(data, dtype=np.float)
data_filter = filters.gaussian(data, self.dc)
rho = data_filter.flatten()
rho_Ind = np.argsort(-rho)
rho_sorted = rho[rho_Ind]
self.delta[rho_Ind[0]] = np.sqrt(size_x ** 2 + size_y ** 2 + size_z ** 2)
# delta 记录距离,
# IndNearNeigh 记录:两个密度点的联系 % index of nearest neighbor with higher density
self.IndNearNeigh[rho_Ind[0]] = rho_Ind[0]
t0_ = time.time()
# calculating delta and Gradient
# p = Pool(count)
# for i_count in range(count):
# ND_start = 1 + i_count*ittt
# ND_end = 1 + (i_count + 1) * ittt
# if i_count == count-1:
# ND_end = ND
# p.apply_async(self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end))
# detect_single(data_ij_name, para)
# p.apply_async(self.get_delta,
# args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 1, ND))
# self.get_delta(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 1, ND)
# p.close()
# p.join()
count = 4
ittt = int(ND / count)
ts = []
for i_count in range(count):
ND_start = 1 + i_count*ittt
ND_end = 1 + (i_count + 1) * ittt
if i_count == count-1:
ND_end = ND
t = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end))
ts.append(t)
[i.start() for i in ts]
[i.join() for i in ts]
print(self.delta.max())
print(self.Gradient.max())
print(self.IndNearNeigh.max())
# p = Pool(count)
# for data_ij_name in data_ij_name_list:
# p.apply_async(detect_single, args=(data_ij_name, para))
# # detect_single(data_ij_name, para)
# p.close()
# p.join()
# t.join()
# for ii in range(1, ND):
# # 密度降序排序后,即密度第ii大的索引(在rho中)
# ordrho_ii = rho_Ind[ii]
# rho_ii = rho_sorted[ii] # 第ii大的密度值
# t = Thread(target=self.get_delta, args=(ordrho_ii, rho_ii, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND))
# t.start()
# t.join()
# ts.append(t)
# ts = []
# count = 2
# # for i_count in range(count):
# ND_start, ND_end = 1, int(ND/2)
# t = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end))
# t.start()
# t.join()
# t1 = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, 1000000))
# t2 = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 1000000, 2000000))
# t3 = Thread(target=self.get_delta,
# args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 2000000, ND))
#
# # 启动线程运行
# t1.start()
# t2.start()
# t3.start()
#
# # 等待所有线程执行完毕
# t1.join() # join() 等待线程终止,要不然一直挂起
# t2.join()
# t3.join()
delta_sorted = np.sort(-self.delta) * -1
self.delta[rho_Ind[0]] = delta_sorted[1]
t1_ = time.time()
print('delata, rho and Gradient are calculated, using %.2f seconds' % (t1_ - t0_))
# 根据密度和距离来确定类中心
clusterInd = -1 * np.ones(ND + 1)
clust_index = np.intersect1d(np.where(rho > self.rhomin), np.where(self.delta > self.deltamin))
clust_num = len(clust_index)
# icl是用来记录第i个类中心在xx中的索引值
icl = np.zeros(clust_num, dtype=int)
n_clump = 0
for ii in range(clust_num):
i = clust_index[ii]
icl[n_clump] = i
n_clump += 1
clusterInd[i] = n_clump
# assignation 将其他非类中心分配到离它最近的类中心中去
# clusterInd = -1 表示该点不是类的中心点,属于其他点,等待被分配到某个类中去
# 类的中心点的梯度Gradient被指定为 - 1
if self.is_plot == 1:
pass
for i in range(ND):
ordrho_i = rho_Ind[i]
if clusterInd[ordrho_i] == -1: # not centroid
clusterInd[ordrho_i] = clusterInd[self.IndNearNeigh[ordrho_i]]
else:
self.Gradient[ordrho_i] = -1 # 将类中心点的梯度设置为-1
clump_volume = np.zeros(n_clump)
for i in range(n_clump):
clump_volume[i] = clusterInd.tolist().count(i + 1)
# centInd [类中心点在xx坐标下的索引值,类中心在centInd的索引值: 代表类别编号]
centInd = []
for i, item in enumerate(clump_volume):
if item >= self.v_min:
centInd.append([icl[i], i])
centInd = np.array(centInd, np.int)
mask_grad = np.where(self.Gradient > self.gradmin)[0]
# 通过梯度确定边界后,还需要进一步利用最小体积来排除假核
n_clump = centInd.shape[0]
clump_sum, clump_volume, clump_peak = np.zeros([n_clump, 1]), np.zeros([n_clump, 1]), np.zeros([n_clump, 1])
clump_Cen, clump_size = np.zeros([n_clump, dim]), np.zeros([n_clump, dim])
clump_Peak = np.zeros([n_clump, dim], np.int)
clump_ii = 0
for i, item in enumerate(centInd):
rho_cluster_i = np.zeros(ND)
index_cluster_i = np.where(clusterInd == (item[1] + 1))[0] # centInd[i, 1] --> item[1] 表示第i个类中心的编号
index_cc = np.intersect1d(mask_grad, index_cluster_i)
rho_cluster_i[index_cluster_i] = rho[index_cluster_i]
rho_cc_mean = rho[index_cc].mean() * 0.2
index_cc_rho = np.where(rho_cluster_i > rho_cc_mean)[0]
index_cluster_rho = np.union1d(index_cc, index_cc_rho)
cl_1_index_ = xx[index_cluster_rho, :] - 1 # -1 是为了在data里面用索引取值(从0开始)
# clusterInd 标记的点的编号是从1开始, 没有标记的点的编号为-1
clustNum = cl_1_index_.shape[0]
cl_i = np.zeros(data.shape, np.int)
for item_ in cl_1_index_:
cl_i[item_[2], item_[1], item_[0]] = 1
# 形态学处理
# cl_i = morphology.closing(cl_i) # 做开闭运算会对相邻两个云核的掩膜有影响
L = ndimage.binary_fill_holes(cl_i).astype(int)
L = measure.label(L) # Labeled input image. Labels with value 0 are ignored.
STATS = measure.regionprops(L)
Ar_sum = []
for region in STATS:
coords = region.coords # 经过验证,坐标原点为0
temp = 0
for item_coord in coords:
temp += data[item_coord[0], item_coord[1], item_coord[2]]
Ar_sum.append(temp)
Ar = np.array(Ar_sum)
ind = np.where(Ar == Ar.max())[0]
L[L != ind[0] + 1] = 0
cl_i = L / (ind[0] + 1)
coords = STATS[ind[0]].coords # 最大的连通域对应的坐标
if coords.shape[0] > self.v_min:
coords = coords[:, [2, 1, 0]]
clump_i_ = np.zeros(coords.shape[0])
for j, item_coord in enumerate(coords):
clump_i_[j] = data[item_coord[2], item_coord[1], item_coord[0]]
clustsum = clump_i_.sum() + 0.0001 # 加一个0.0001 防止分母为0
clump_Cen[clump_ii, :] = np.matmul(clump_i_, coords) / clustsum
clump_volume[clump_ii, 0] = clustNum
clump_sum[clump_ii, 0] = clustsum
x_i = coords - clump_Cen[clump_ii, :]
clump_size[clump_ii, :] = 2.3548 * np.sqrt((np.matmul(clump_i_, x_i ** 2) / clustsum)
- (np.matmul(clump_i_, x_i) / clustsum) ** 2)
clump_i = data * cl_i
out = out + clump_i
mask = mask + cl_i * (clump_ii + 1)
clump_peak[clump_ii, 0] = clump_i.max()
clump_Peak[clump_ii, [2, 1, 0]] = np.argwhere(clump_i == clump_i.max())[0]
clump_ii += 1
else:
pass
clump_Peak = clump_Peak + 1
clump_Cen = clump_Cen + 1 # python坐标原点是从0开始的,在这里整体加1,改为以1为坐标原点
id_clumps = np.array([item + 1 for item in range(n_clump)], np.int).T
id_clumps = id_clumps.reshape([n_clump, 1])
LDC_outcat = np.column_stack(
(id_clumps, clump_Peak, clump_Cen, clump_size, clump_peak, clump_sum, clump_volume))
LDC_outcat = LDC_outcat[:clump_ii, :]
table_title = ['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'Peak',
'Sum', 'Volume']
LDC_outcat = pd.DataFrame(LDC_outcat, columns=table_title)
self.outcat = LDC_outcat
self.mask = mask
self.out = out
def densityCluster_2d(self):
"""
        Determine the cluster centers and the number of cluster centers from the decision graph.
"""
data = self.Data.data
k = 1 # 计算点的邻域大小
k2 = np.ceil(self.deltamin).astype(np.int) # 第2次计算点的邻域大小
xx = get_xyz(data) # xx: 2D data coordinates 坐标原点是 1
dim = data.ndim
mask = np.zeros_like(data, dtype=np.int)
out = np.zeros_like(data, dtype=np.float)
data_filter = filters.gaussian(data, self.dc)
size_x, size_y = data.shape
rho = data_filter.flatten()
rho_Ind = np.argsort(-rho)
rho_sorted = rho[rho_Ind]
maxd = size_x + size_y
ND = len(rho)
# delta 记录距离, # IndNearNeigh 记录:两个密度点的联系 % index of nearest neighbor with higher density
delta, IndNearNeigh, Gradient = np.zeros(ND, np.float), np.zeros(ND, np.int), np.zeros(ND, np.float)
delta[rho_Ind[0]] = np.sqrt(size_x ** 2 + size_y ** 2)
IndNearNeigh[rho_Ind[0]] = rho_Ind[0]
t0 = time.time()
# 计算 delta, Gradient
for ii in range(1, ND):
# 密度降序排序后,即密度第ii大的索引(在rho中)
ordrho_ii = rho_Ind[ii]
rho_ii = rho_sorted[ii] # 第ii大的密度值
if rho_ii >= self.rms:
delta[ordrho_ii] = maxd
point_ii_xy = xx[ordrho_ii, :]
get_value = True # 判断是否需要在大循环中继续执行,默认需要,一旦在小循环中赋值成功,就不在大循环中运行
bt = kc_coord_2d(point_ii_xy, size_y, size_x, k)
for item in bt:
rho_jj = data_filter[item[1] - 1, item[0] - 1]
dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离
gradient = (rho_jj - rho_ii) / dist_i_j
if dist_i_j <= delta[ordrho_ii] and gradient >= 0:
delta[ordrho_ii] = dist_i_j
Gradient[ordrho_ii] = gradient
IndNearNeigh[ordrho_ii] = (item[1] - 1) * size_y + item[0] - 1
get_value = False
if get_value: # 表明在小领域中没有找到比该点高,距离最近的点,则进行更大领域的搜索
bt = kc_coord_2d(point_ii_xy, size_y, size_x, k2)
for item in bt:
rho_jj = data_filter[item[1] - 1, item[0] - 1]
dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离
gradient = (rho_jj - rho_ii) / dist_i_j
if dist_i_j <= delta[ordrho_ii] and gradient >= 0:
delta[ordrho_ii] = dist_i_j
Gradient[ordrho_ii] = gradient
IndNearNeigh[ordrho_ii] = (item[1] - 1) * size_y + item[0] - 1
get_value = False
if get_value:
delta[ordrho_ii] = k2 + 0.0001
Gradient[ordrho_ii] = -1
IndNearNeigh[ordrho_ii] = ND
else:
IndNearNeigh[ordrho_ii] = ND
delta_sorted = np.sort(-delta) * (-1)
delta[rho_Ind[0]] = delta_sorted[1]
t1 = time.time()
print('delata, rho and Gradient are calculated, using %.2f seconds' % (t1 - t0))
# 根据密度和距离来确定类中心
NCLUST = 0
clustInd = -1 * np.ones(ND + 1)
clust_index = np.intersect1d(np.where(rho > self.rhomin), np.where(delta > self.deltamin))
clust_num = clust_index.shape[0]
print(clust_num)
# icl是用来记录第i个类中心在xx中的索引值
icl = np.zeros(clust_num, dtype=int)
for ii in range(0, clust_num):
i = clust_index[ii]
icl[NCLUST] = i
NCLUST += 1
clustInd[i] = NCLUST
# assignation
# 将其他非类中心分配到离它最近的类中心中去
# clustInd = -1
# 表示该点不是类的中心点,属于其他点,等待被分配到某个类中去
# 类的中心点的梯度Gradient被指定为 - 1
if self.is_plot == 1:
plt.scatter(rho, delta, marker='.')
plt.show()
for i in range(ND):
ordrho_i = rho_Ind[i]
if clustInd[ordrho_i] == -1: # not centroid
clustInd[ordrho_i] = clustInd[IndNearNeigh[ordrho_i]]
else:
Gradient[ordrho_i] = -1 # 将类中心点的梯度设置为-1
clustVolume = np.zeros(NCLUST)
for i in range(NCLUST):
clustVolume[i] = clustInd.tolist().count(i + 1)
# % centInd [类中心点在xx坐标下的索引值,
# 类中心在centInd的索引值: 代表类别编号]
centInd = []
for i, item in enumerate(clustVolume):
if item >= self.v_min:
centInd.append([icl[i], i])
centInd = np.array(centInd, np.int)
mask_grad = np.where(Gradient > self.gradmin)[0]
# 通过梯度确定边界后,还需要进一步利用最小体积来排除假核
NCLUST = centInd.shape[0]
clustSum, clustVolume, clustPeak = np.zeros([NCLUST, 1]), np.zeros([NCLUST, 1]), np.zeros([NCLUST, 1])
clump_Cen, clustSize = np.zeros([NCLUST, dim]), np.zeros([NCLUST, dim])
clump_Peak = np.zeros([NCLUST, dim], np.int)
clump_ii = 0
for i, item in enumerate(centInd):
# centInd[i, 1] --> item[1] 表示第i个类中心的编号
rho_clust_i = np.zeros(ND)
index_clust_i = np.where(clustInd == (item[1] + 1))[0]
index_cc = np.intersect1d(mask_grad, index_clust_i)
rho_clust_i[index_clust_i] = rho[index_clust_i]
if len(index_cc) > 0:
rho_cc_mean = rho[index_cc].mean() * 0.2
else:
rho_cc_mean = self.rms
index_cc_rho = np.where(rho_clust_i > rho_cc_mean)[0]
index_clust_rho = np.union1d(index_cc, index_cc_rho)
cl_1_index_ = xx[index_clust_rho, :] - 1 # -1 是为了在data里面用索引取值(从0开始)
# clustInd 标记的点的编号是从1开始, 没有标记的点的编号为-1
cl_i = np.zeros(data.shape, np.int)
for j, item_ in enumerate(cl_1_index_):
cl_i[item_[1], item_[0]] = 1
# 形态学处理
# cl_i = morphology.closing(cl_i) # 做开闭运算会对相邻两个云核的掩膜有影响
L = ndimage.binary_fill_holes(cl_i).astype(int)
L = measure.label(L) # Labeled input image. Labels with value 0 are ignored.
STATS = measure.regionprops(L)
Ar_sum = []
for region in STATS:
coords = region.coords # 经过验证,坐标原点为0
coords = coords[:, [1, 0]]
temp = 0
for item_coord in coords:
temp += data[item_coord[1], item_coord[0]]
Ar_sum.append(temp)
Ar = np.array(Ar_sum)
ind = np.where(Ar == Ar.max())[0]
L[L != ind[0] + 1] = 0
cl_i = L / (ind[0] + 1)
coords = STATS[ind[0]].coords # 最大的连通域对应的坐标
clustNum = coords.shape[0]
if clustNum > self.v_min:
coords = coords[:, [1, 0]]
clump_i_ = np.zeros(coords.shape[0])
for j, item_coord in enumerate(coords):
clump_i_[j] = data[item_coord[1], item_coord[0]]
clustsum = sum(clump_i_) + 0.0001 # 加一个0.0001 防止分母为0
clump_Cen[clump_ii, :] = np.matmul(clump_i_, coords) / clustsum
clustVolume[clump_ii, 0] = clustNum
clustSum[clump_ii, 0] = clustsum
x_i = coords - clump_Cen[clump_ii, :]
clustSize[clump_ii, :] = 2.3548 * np.sqrt((np.matmul(clump_i_, x_i ** 2) / clustsum)
- (np.matmul(clump_i_, x_i) / clustsum) ** 2)
clump_i = data * cl_i
out = out + clump_i
mask = mask + cl_i * (clump_ii + 1)
clustPeak[clump_ii, 0] = clump_i.max()
clump_Peak[clump_ii, [1, 0]] = np.argwhere(clump_i == clump_i.max())[0]
clump_ii += 1
else:
pass
clump_Peak = clump_Peak + 1
clump_Cen = clump_Cen + 1 # python坐标原点是从0开始的,在这里整体加1,改为以1为坐标原点
id_clumps = np.array([item + 1 for item in range(NCLUST)], np.int).T
id_clumps = id_clumps.reshape([NCLUST, 1])
LDC_outcat = np.column_stack((id_clumps, clump_Peak, clump_Cen, clustSize, clustPeak, clustSum, clustVolume))
LDC_outcat = LDC_outcat[:clump_ii, :]
table_title = ['ID', 'Peak1', 'Peak2', 'Cen1', 'Cen2', 'Size1', 'Size2', 'Peak', 'Sum', 'Volume']
LDC_outcat = pd.DataFrame(LDC_outcat, columns=table_title)
self.outcat = LDC_outcat
self.mask = mask
self.out = out
def save_outcat_wcs(self, outcat_wcs_name):
"""
        Save the LDC detection catalogue converted to sky (WCS) coordinates.
:return:
"""
outcat_wcs = self.change_pix2word()
outcat_colums = outcat_wcs.shape[1]
if outcat_colums == 10:
# 2d result
table_title = ['ID', 'Peak1', 'Peak2', 'Cen1', 'Cen2', 'Size1', 'Size2', 'Peak', 'Sum', 'Volume']
dataframe = | pd.DataFrame(outcat_wcs, columns=table_title) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import mut.thermo
import mut.bayes
import mut.stats
import tqdm
constants = mut.thermo.load_constants()
# Load the prior predictive check data.
prior_data = pd.read_csv('../../data/Chure2019_DNA_prior_predictive_checks.csv')
# Load the stan model.
model = mut.bayes.StanModel('../stan/Chure2019_DNA_binding_energy.stan')
# Set up a dataframe to store the properties.
samples_dfs = []
sbc_dfs = []
# Define the thinning constant for computing the rank statistic.
thin = 5
# Iterate through each simulation
for g, d in tqdm.tqdm(prior_data.groupby('sim_idx')):
# Determine the ground truth for each parameter.
gt = {'ep_RA': d['ep_RA'].unique(),
'sigma': d['sigma'].unique()}
# Generate the data dictionary.
data_dict = {'J':1,
'N': len(d),
'idx': np.ones(len(d)).astype(int),
'R': np.ones(len(d)) * constants['RBS1027'],
'Nns': 4.6E6,
'ep_ai': constants['ep_AI'],
'n_sites': constants['n_sites'],
'Ka': constants['Ka'],
'Ki': constants['Ki'],
'c': d['IPTGuM'],
'fc': d['fc_draw']}
# Sample the model
_, samples = model.sample(data_dict=data_dict)
samples.rename(columns={'ep_RA[1]': 'ep_RA', 'sigma[1]':'sigma'},
inplace=True)
samples['sim_idx'] = g
samples_dfs.append(samples)
# Compute the properties for each parameter.
_sbc_dfs = []
for p in ['ep_RA', 'sigma']:
_df = pd.DataFrame([])
z_score = (np.mean(samples[p]) - gt[p]) / np.std(samples[p])
shrinkage = 1 - (np.var(samples[p]) / np.var(prior_data[p].unique()))
_df['z_score'] = z_score
_df['shrinkage'] = shrinkage
_df['param'] = p
_df['rank'] = np.sum(samples[p].values[::thin] < gt[p])
_df['rank_ndraws'] = len(samples[p].values[::thin])
_df['post_median'] = np.median(samples[p])
_df['post_mean'] = np.mean(samples[p])
_df['post_mode'] = samples.iloc[np.argmax(samples['lp__'].values)][p]
_df['ground_truth'] = gt[p]
_sbc_dfs.append(_df)
_sbc_dfs = pd.concat(_sbc_dfs)
_sbc_dfs['sim_idx'] = g
sbc_dfs.append(_sbc_dfs)
sbc_df = | pd.concat(sbc_dfs) | pandas.concat |
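# Editorial note (not in the original script): for a well-calibrated sampler the rank
# statistics collected above should be approximately uniform on [0, rank_ndraws].
# A quick visual check could be:
# import matplotlib.pyplot as plt
# plt.hist(sbc_df.loc[sbc_df['param'] == 'ep_RA', 'rank'], bins=20)
# plt.show()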
from bs4 import BeautifulSoup
from datetime import datetime
import os
import pandas as pd
import pickle
import re
import requests
from selenium import webdriver
import time
time_id = datetime.today().strftime('%Y%m%d')
geckodriver_path = r'C:\Users\nicol\anaconda3\Library\bin\geckodriver'
def check_files(dir_files, keyword):
dict_files = {}
for file in os.listdir(dir_files):
key = re.search('^([0-9]+)', file)
if keyword in file and key is not None:
dict_files[key.group(1)] = file
return dict_files
def last_pickle(dict_pickles):
last_date = max([date for date in dict_pickles])
last_pickle = dict_pickles[last_date]
return last_pickle
def login(username, password, entry_type):
dir_files = f'{os.getcwd()}\\Credentials'
dict_pickles = check_files(dir_files, 'credentials')
try:
selected_pickle = last_pickle(dict_pickles)
with open(f'{dir_files}\\{selected_pickle}', 'rb') as file:
credentials = pickle.load(file)
except Exception as e:
credentials = {}
if entry_type == 'login':
if (username in list(credentials.keys())) and (password == credentials[username]):
return 'User successfully logged in!'
elif username not in list(credentials.keys()):
return 'Username not yet registered.'
elif (username in list(credentials.keys())) and (password != credentials[username]):
return 'Incorrect password.'
elif entry_type == 'register':
if username in list(credentials.keys()):
return 'User already registered.'
else:
credentials[username] = password
with open(f'{dir_files}\\{time_id}_credentials_for_ganter_investment.pickle', 'wb') as file:
pickle.dump(credentials, file)
return 'User successfully registered!'
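# Example (editorial sketch): registering a user and then logging in.
# print(login('alice', 's3cret', 'register'))  # 'User successfully registered!'
# print(login('alice', 's3cret', 'login'))     # 'User successfully logged in!'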
def login_investing(path, username, password):
investing_url = 'https://www.investing.com/'
browser = webdriver.Firefox(executable_path=geckodriver_path)
browser.get(investing_url)
browser.find_element_by_css_selector('.login').click()
browser.find_element_by_css_selector('#loginFormUser_email').send_keys(username)
browser.find_element_by_css_selector('#loginForm_password').send_keys(password)
browser.find_element_by_css_selector('#signup > a:nth-child(4)').click()
return browser
def get_inmutable(browser, country):
investing_url = 'https://www.investing.com'
df_main = pd.DataFrame()
list_urls = []
browser.get(f'{investing_url}/stock-screener/?sp=country::' +
f'{country}|sector::a|industry::a|equityType::a%3Ceq_market_cap;1')
while True:
time.sleep(5)
html = browser.page_source
soup = BeautifulSoup(html, 'lxml')
soup_table = soup.find('div', {'id':'resultsContainer'})
html_table = soup_table.prettify()
urls = [elem.a.get('href') for elem
in soup_table.find_all('td')
if (elem.a and elem.a.get('href')) != None]
list_urls.extend(urls)
list_dfs = | pd.read_html(html_table) | pandas.read_html |
from __future__ import print_function
####################################################### 导入包
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from utils.utils import progress_bar
from utils.dataLoder import MyDataset, MyTest
import os
import pandas as pd
import time
import numpy as np
import cv2
from models import senet
from models import pnasnet
task_flag = "train"
from utils.utils import makeLabel,makeLabelTrain3,makeLabelVal3
### classes to be classified
Class = ("airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck")
###################################################### per-model training accuracy table #######################################################################
df = pd.DataFrame(columns=Class, index=Class)
#
df_name = "./result/result_train_acc___" + str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()
)) + ".csv"
df.to_csv(df_name)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
###################################################### model zoo ######################################################
LR = 0.001
best_acc = 0 # best test accuracy
def adjust_learning_rate(optimizer, epoch):
""" 学习率更新规则 """
lr = LR * (0.1 ** (epoch //200))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
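# Worked example of the schedule above (editorial): with LR = 0.001, epochs 0-199 use
# lr = 1e-3, epochs 200-399 use 1e-4, and epochs 400+ use 1e-5.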
## training routine
def train(net, net_name, epoch, train_loader, group):
print('\nEpoch: %d ' % epoch + group[0] + " : " + group[1] + "------" + net_name)
net.train() # 指明训练网络,dropout
train_loss = 0
correct = 0
total = 0
    ## loss function
    criterion = nn.CrossEntropyLoss()
    ## optimizer
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9,
                          weight_decay=5e-4)  # mini-batch momentum SGD with L2 regularization (weight decay)
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs, targets = inputs.to(device), targets.to(device)
adjust_learning_rate(optimizer, epoch)
        # visualization (debugging)
# print(inputs.cpu().numpy().shape)
# cv2.imshow("img", inputs.cpu().numpy()[0][0])
# cv2.waitKey(0)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return [1. * correct / total, train_loss / (batch_idx + 1)]
## evaluation routine
def test(net, net_name, epoch, test_loader, group):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
    ## loss function
criterion = nn.CrossEntropyLoss()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(test_loader),'Loss: %.3f | Acc: %.3f%% (%d/%d)'% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    ## save the best model
acc = 100.*correct/total
if acc > best_acc:
print('Saving..')
best_acc = acc
torch.save(net, "./model_pkl/" + group[0] + "_" + group[1] +"_" + net_name + "_model.pkl")
return [1. * correct / total, test_loss / (batch_idx + 1)]
###################################################### CVS - pairwise joint training ######################################################
transform_train = None
transform_test = None
def union_train(Class):
    # record all ordered class pairs
union_group = []
for i in range(len(Class)):
for j in range(len(Class)):
if i == j:
continue
union_group.append((Class[i], Class[j]))
print(union_group)
    # train one model for each pair
for group in union_group:
#print(group)
# if group != ("apple", "beetle"):
# continue
train_Acc = [[]]
train_loss = [[]]
val_Acc = [[]]
val_Loss = [[]]
cls1 = group[0]
cls2 = group[1]
index_cls1 = Class.index(cls1)
index_cls2 = Class.index(cls2)
net = None
        ### two network options
print('==> Building model..')
net_name = None
if index_cls1 < index_cls2:
net = pnasnet.PNASNetB()
net_name = "pnasnet"
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.3),
transforms.RandomVerticalFlip(p=0.3),
transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
elif index_cls1 > index_cls2:
net = senet.SENet18()
net_name = "SENet18"
transform_train = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
#transforms.RandomHorizontalFlip(p=0.2),
transforms.RandomVerticalFlip(p=0.3),
# transforms.RandomRotation(20),
transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
#transforms.RandomHorizontalFlip(p=0.3),
# transforms.RandomVerticalFlip(p=0.3),
# transforms.RandomRotation(20),
# transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
else:
raise Exception("net error")
net = net.to(device)
if device == 'cuda':
# global net
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
## make label file
src_val_dic = "./data/val"
txt_val = "./data/label_txt/" + cls1 + "_" + cls2 + "_val.txt"
src_train_dic = "./data/train"
txt_train = "./data/label_txt/" + cls1 + "_" + cls2 + "_train.txt"
makeLabelVal3(src_val_dic, txt_val, cls1, cls2)
makeLabelTrain3(src_train_dic, txt_train, cls1, cls2)
print('==> Preparing data..')
train_data = MyDataset(txt=txt_train, transform=transform_train)
train_loader = DataLoader(train_data, batch_size=32, shuffle=True) # 返回的是迭代器
test_data = MyDataset(txt=txt_val, transform=transform_test)
test_loader = DataLoader(test_data, batch_size=32)
global best_acc # best test accuracy
best_acc = 0
model_list = os.listdir("./model_pkl")
        # resume from checkpoint: skip pairs that already have a saved model
if group[0] + "_" + group[1] +"_" + net_name + "_model.pkl" in model_list:
print("----"*5)
continue
        ## start training and evaluation
global LR
LR = 0.001
for epoch in range(0, 300):
train_data = train(net, net_name, epoch, train_loader, group)
test_data = test(net, net_name, epoch, test_loader, group)
df = pd.read_csv(df_name, header=0, index_col=0)
df.loc[cls1, cls2] = "0." + str(int(best_acc))
df.to_csv(df_name)
        # free the network
del net
#################################### start training ##############################################
union_train(Class)
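# Editorial note: with the 10 classes above, the ordered-pair scheme trains
# len(Class) * (len(Class) - 1) == 90 binary models -- PNASNet-B when the first class
# index is smaller than the second, SENet-18 when it is larger.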
#################################### start round-i inference on the unlabelled set #########################################
unlabel_img = []
with open("./data/first_unlabel_img_name.txt",'r') as f:
for img in f:
img = img.strip().split(",")[0]
if "png" in img:
unlabel_img.append(img)
# record all ordered class pairs
union_group = []
for i in range(len(Class)):
for j in range(len(Class)):
if i == j:
continue
union_group.append((Class[i] + "_" + Class[j]))
# one prediction column per trained pair model
df_2_path ="./result/result_predict_unlabel.csv" #"/home/elgong/GEL/one_shot/torch3/result_predict_unlabel.csv"
df_2 = | pd.DataFrame(columns=union_group, index=unlabel_img) | pandas.DataFrame |
from pprint import pprint
from sys import stdout
import sympy as sym
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.optimize import curve_fit
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_predict
# ROMANA // ENGLISH
#1. Defining and calling functions
#2. Using the scikit-learn package
def optimise_pls_cv(X, y, n_comp, plot_components=True):
#Rulam PLS incluzand un numar variabil de componente pana la n si calculam SSE // Running the PLS including a variable number of components up to n and we calculate SSE
sse = []
component = np.arange(1, n_comp)
for i in component:
pls = PLSRegression(n_components=i)
# Cross-validare // Cross-Validation
y_cv = cross_val_predict(pls, X, y, cv=10)
sse.append(mean_squared_error(y, y_cv))
comp = 100 * (i + 1) / 40
# Procedeu pentru actualizarea statutului pe aceeasi linie // Process for updating the status on the same line of code
stdout.write("\r%d%% completat" % comp)
stdout.flush()
stdout.write("\n")
# Calculeaza si afiseaza pozitia minimului SSE // Calculate and print the minimum position of SSE
ssemin = np.argmin(sse)
print("Numar de componente sugerate: ", ssemin + 1)
stdout.write("\n")
if plot_components is True:
with plt.style.context(('ggplot')):
plt.plot(component, np.array(sse), '-v', color='blue', mfc='blue')
plt.plot(component[ssemin], np.array(sse)[ssemin], 'P', ms=10, mfc='red')
plt.xlabel('Numar de componente pentru Partial Least Squares')
plt.ylabel('SSE')
plt.title('PLS')
plt.xlim(left=-1)
plt.show()
# Defineste PLS-ul cu un numar optim de componente // Defines the PLS with an optimal number of components
pls_opt = PLSRegression(n_components=ssemin + 1)
# Fit pentru intreg setul de date // Fitting the entire dataset
pls_opt.fit(X, y)
y_c = pls_opt.predict(X)
# Cross-validare // Cross-validation
y_cv = cross_val_predict(pls_opt, X, y, cv=10)
# Calculeaza valori pentru calibrare si cross-validare // Calculate valors for calibration and cross-validation
score_c = r2_score(y, y_c)
score_cv = r2_score(y, y_cv)
# Calculeaza SSE pentru calibrare si cross validare // Calculate SSE for calibration and cross-validation
sse_c = mean_squared_error(y, y_c)
sse_cv = mean_squared_error(y, y_cv)
print('R2 calib: %5.3f' % score_c)
print('R2 CV: %5.3f' % score_cv)
print('SSE calib: %5.3f' % sse_c)
print('SSE CV: %5.3f' % sse_cv)
# Plot cu regresie si SSE // Plot with regression and SSE
rangey = max(y) - min(y)
rangex = max(y_c) - min(y_c)
# Proiecteaza o linie intre cross validare si SSE // Draws a line between cross-validation and SSE
z = np.polyfit(y, y_c, 1)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(9, 5))
ax.scatter(y_c, y, c='red', edgecolors='k')
# Plot the best fit line
ax.plot(np.polyval(z, y), y, c='blue', linewidth=1)
# Plot the ideal 1:1 line
ax.plot(y, y, color='green', linewidth=1)
plt.title('$R^{2}$ (CV): ' + str(score_cv))
plt.xlabel('PREVIZIONAT')
plt.ylabel('MASURAT')
plt.show()
return
#3. Importing a CSV file into pandas
AMD = pd.read_csv('AMD.csv')
print('------------1----------------')
pprint(AMD)
#4. Accessing data with loc and iloc
print('---------2---------')
pprint(AMD.iloc[[0, 1, 2], [0, 1, 2]])
pprint(AMD.loc[1:10, ['Open']])
AMD2 = AMD.loc[0:22,['Date','Open','Close']]
print(AMD2)
#5. Handling missing values
print('----------3-----------')
print('Number of NULL values in our CSV: ', AMD.isnull().values.sum(), '\n')
#6. Using the statsmodels package
print('----------4-----------')
target = | pd.DataFrame(AMD, columns=['High']) | pandas.DataFrame |
import pandas as pd
msg = pd.read_csv("smsspamcollection/SMSSpamCollection",
sep='\t',names=['label','text'])
# import packages
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from wordcloud import WordCloud
import matplotlib.pyplot as plt
ps=PorterStemmer()
corpus=[]
for i in range(len(msg)):
review=re.sub('[^a-zA-Z]',' ',msg['text'][i])
review=review.lower()
review=review.split()
review=[ps.stem(word) for word in review if word not in set(stopwords.words("english"))]
review= " ".join(review)
corpus.append(review)
#print(corpus)
def visualize(labels):
words= ' '
for text in msg[msg['label'] == labels]['text']:
words += text + ' '
wordcloud = WordCloud(width=600, height=400).generate(words)
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
visualize('spam')
visualize('ham')
# Bag of Words
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer(max_features=3000)
X=cv.fit_transform(corpus).toarray()
#Dummy variables for spam and ham
y= | pd.get_dummies(msg['label']) | pandas.get_dummies |
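# A minimal next step (editorial sketch, not part of the original snippet): split the
# bag-of-words features and fit a Multinomial Naive Bayes spam classifier.
# from sklearn.model_selection import train_test_split
# from sklearn.naive_bayes import MultinomialNB
# y_spam = y['spam'].values  # 1 = spam, 0 = ham
# X_train, X_test, y_train, y_test = train_test_split(X, y_spam, test_size=0.2, random_state=0)
# clf = MultinomialNB().fit(X_train, y_train)
# print('test accuracy:', clf.score(X_test, y_test))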
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 14:01:22 2019
@author: Mortis
Please first edit row 66 of the original Excel file and add the column names.
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("ticks")
sns.set( font="Times New Roman")
plt.rcParams["font.family"] = "Times New Roman"
from openpyxl import load_workbook
file_name = "20180928-39184015.xlsx"
pa_number = file_name.split('-')[1].split('.')[0]
wb = load_workbook(filename=r"./VA/{}".format(file_name),
read_only=True)
ws = wb['Export 1']
sampling_rate = 50  # adjust to the sampling rate recorded in the original file
# Read the cell values into a list of lists
data_rows = []
for row in ws:
data_cols = []
for cell in row:
data_cols.append(cell.value)
data_rows.append(data_cols)
df = pd.DataFrame(data_rows[65:])
colname=list(df.iloc[0])
df = df.iloc[1:]
df.columns = colname
mav_para = 1800
CH1_HbO2_mav = df.CH1_HbO2.ewm(span=mav_para, adjust=True).mean()
CH2_HbO2_mav = df.CH2_HbO2.ewm(span=mav_para, adjust=True).mean()
CH3_HbO2_mav = df.CH3_HbO2.ewm(span=mav_para, adjust=True).mean()
CH1_HHb_mav = df.CH1_HHb.ewm(span=mav_para, adjust=True).mean()
CH2_HHb_mav = df.CH2_HHb.ewm(span=mav_para, adjust=True).mean()
CH3_HHb_mav = df.CH3_HHb.ewm(span=mav_para, adjust=True).mean()
HbO2_mav_data=pd.concat([CH1_HbO2_mav, CH2_HbO2_mav, CH3_HbO2_mav],axis=1)
HbO2_average_a = pd.DataFrame(HbO2_mav_data.iloc[:,[0,2,4]].mean(1))  # for files other than 39184015, swap [0,2,4] and [1,3,5]
HbO2_average_a.columns=["HbO2_average_a"]
HbO2_average_b = pd.DataFrame(HbO2_mav_data.iloc[:,[1,3,5]].mean(1))  # for files other than 39184015, swap [0,2,4] and [1,3,5]
HbO2_average_b.columns=["HbO2_average_b"]
HHb_mav_data=pd.concat([CH1_HHb_mav, CH2_HHb_mav, CH3_HHb_mav],axis=1)
HHb_average_a = pd.DataFrame(HHb_mav_data.iloc[:,[0,2,4]].mean(1))  # for files other than 39184015, swap [0,2,4] and [1,3,5]
HHb_average_a.columns=["HHb_average_a"]
HHb_average_b = pd.DataFrame(HHb_mav_data.iloc[:,[1,3,5]].mean(1))  # for files other than 39184015, swap [0,2,4] and [1,3,5]
HHb_average_b.columns=["HHb_average_b"]
TSI_a = df.iloc[:,[2]].ewm(span=mav_para, adjust=True).mean()  # for files other than 39184015, swap [2] and [12]
TSI_b = df.iloc[:,[12]].ewm(span=mav_para, adjust=True).mean()
#%% Both side
sns.set_style("ticks")
#sns.set(font="Times New Roman")
plt.rcParams["font.family"] = "Times New Roman"
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(111)
minor=ax1.plot(df.loc[:,["time(ms)"]].div(1000),HbO2_average_a,alpha=1,label="$\Delta HbO_2$", color='r', linewidth=3.0)
major=ax1.plot(df.loc[:,["time(ms)"]].div(1000),HbO2_average_b,alpha=1, color='r', linewidth=2.5, linestyle = '-.')
ax1.plot(df.loc[:,["time(ms)"]].div(1000),HHb_average_a,alpha=1 ,label="$\Delta HHb$" , color='b', linewidth=3.0)
ax1.plot(df.loc[:,["time(ms)"]].div(1000),HHb_average_b,alpha=1 , color='b', linewidth=2.5, linestyle = '-.')
ax1.axvspan(0, 300, alpha=0.5, color=sns.color_palette("Paired")[0])
ax1.axvspan(301, 901, alpha=0.5, color=sns.color_palette("Paired")[1])
ax1.axvspan(901, 1501, alpha=0.5, color=sns.color_palette("Paired")[2])
ax1.axvspan(1501, 2101, alpha=0.5, color=sns.color_palette("Paired")[3])
ax1.axvspan(2101, 2701, alpha=0.5, color=sns.color_palette("Paired")[4])
ax1.axvspan(2701, 3300, alpha=0.5, color=sns.color_palette("Paired")[5])
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(["Non-cannulation Side","Cannulation Side"],loc = 'best', ncol=2)
ax1.set_xlim((0, np.max(df.loc[:,["time(ms)"]].div(1000))[0]))
#ax1.set_ylim((-8.0,4.5))
ax1.set_xlabel("Time (s)")
ax1.set_ylabel(r'$\mu$mol/L')
#leg1 = ax1.legend([minor,major],['max','min'], loc='lower right')
ax2 = ax1.twinx()
ax2.plot(df.loc[:,["time(ms)"]].div(1000),TSI_a,alpha=0.8,label=r'$TSI(\%)$',color='g', linewidth=3.0)
ax2.plot(df.loc[:,["time(ms)"]].div(1000),TSI_b,alpha=0.8,color='g', linewidth=3.0, linestyle = '-.')
ax2.set_ylabel('%',color='g')
ax2.set_ylim((35,75))
ax2.tick_params(axis='y', labelcolor='g')
#fig.suptitle("Data of patient VA Both side",size = 15)
fig.legend(loc='upper center', ncol=3,bbox_to_anchor=(0.5, -0.08), bbox_transform=ax1.transAxes)
plt.savefig("{}_NIRS_signal plot.png".format(pa_number),dpi=360)
#%% Bar
#HbO2_average_a
stage1_HbO_nc = np.mean(HbO2_average_a[301*sampling_rate:900*sampling_rate])[0]
stage2_HbO_nc = np.mean(HbO2_average_a[901*sampling_rate:1500*sampling_rate])[0]
stage3_HbO_nc = np.mean(HbO2_average_a[1501*sampling_rate:2100*sampling_rate])[0]
stage4_HbO_nc = np.mean(HbO2_average_a[2101*sampling_rate:2700*sampling_rate])[0]
stage5_HbO_nc = np.mean(HbO2_average_a[2701*sampling_rate:3300*sampling_rate])[0]
#HbO2_average_b
stage1_HbO_c = np.mean(HbO2_average_b[301*sampling_rate:900*sampling_rate])[0]
stage2_HbO_c = np.mean(HbO2_average_b[901*sampling_rate:1500*sampling_rate])[0]
stage3_HbO_c = np.mean(HbO2_average_b[1501*sampling_rate:2100*sampling_rate])[0]
stage4_HbO_c = np.mean(HbO2_average_b[2101*sampling_rate:2700*sampling_rate])[0]
stage5_HbO_c = np.mean(HbO2_average_b[2701*sampling_rate:3300*sampling_rate])[0]
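# The ten stage means above all follow one pattern (a seconds-to-samples slice of the
# smoothed signal). An equivalent, less repetitive formulation (illustrative sketch;
# it reuses the same 301-3300 s stage boundaries and indexing pattern as the lines above):
stage_windows = [(301, 900), (901, 1500), (1501, 2100), (2101, 2700), (2701, 3300)]
stage_means_nc = [np.mean(HbO2_average_a[s * sampling_rate:e * sampling_rate])[0]
                  for s, e in stage_windows]
stage_means_c = [np.mean(HbO2_average_b[s * sampling_rate:e * sampling_rate])[0]
                 for s, e in stage_windows]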
#%% Merge
df_HbO2_plot_data = | pd.concat([HbO2_average_a,HbO2_average_b],axis=1) | pandas.concat |
from datetime import datetime
from typing import List
import pendulum
import pandas as pd
from backend.app.exceptions import UnprocessableEntityException
from backend.app.utils import check_is_future_date
from backend.enums import Enums
from requests import get
class ExtractorManager:
_date_initial: datetime
_date_final: datetime
_url: str
_station: str
_files: str
_period: list = []
def __init__(self, date_initial: datetime, date_final: datetime, url: str, station: str, files: str):
self.set_date_initial(date_initial)
self.set_date_final(date_final)
self.set_url(url)
self.set_station(station)
self.set_files(files)
def extract(self):
self.set_period()
all_data = []
for day in self._period:
data = get(self.create_url(day))
if data.status_code != 200:
raise UnprocessableEntityException('Estação inválida')
if data.status_code == 200 and self._files == 'Varios Arquivos':
self.save_many_files([data.json()])
else:
all_data.append(data.json())
if self._files == '1 Arquivo':
self.save_one_file(all_data)
self.reset_dates()
@staticmethod
def convert_data(all_data: list):
data_list = []
for data in all_data:
data = data['observations']
for line in data:
new_dict = {}
for key in line:
if type(line[key]) == dict:
for new_key in line[key]:
new_dict[new_key] = line[key][new_key]
else:
new_dict[key] = line[key]
data_list.append(new_dict)
return data_list
def save_one_file(self, all_data: List[dict]):
data_list = self.convert_data(all_data)
df = | pd.DataFrame(data_list) | pandas.DataFrame |
import pandas as pd
import seaborn as sns
import numpy as np
#Importing processed csv file
def load_and_process(path_to_csv_file):
datak = (pd.read_csv(path_to_csv_file)
.drop(columns =['CF','CA','SCF','SCA','Unnamed: 2'])
)
return datak
# Create the North division: drop all teams not in the division, then drop the team name and index columns and add a Division label column.
def North_Div1(datak):
datak1 = (datak.drop(datak[datak.Team.isin([ "Arizona Coyotes", "Buffalo Sabres", "Boston Bruins", "Carolina Hurricanes", "Columbus Blue Jackets",
"Chicago Blackhawks", "Colorado Avalanche", "Dallas Stars", "Detroit Red Wings", "Florida Panthers",
"Los Angeles Kings", "Minnesota Wild", "Nashville Predators", "Pittsburgh Penguins", "San Jose Sharks", "Tampa Bay Lightning",
"St Louis Blues", "Vegas Golden Knights", "New Jersey Devils", "New York Islanders", "New York Rangers", "Philadelphia Flyers",
"Washington Capitals", "Anaheim Ducks"])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak1['Division']='North'
return datak1
# Create the East division: drop all teams not in the division, then drop the team name and index columns and add a Division label column.
def East_Div1(datak):
datak2 = (datak.drop(datak[datak.Team.isin(["Arizona Coyotes", "Carolina Hurricanes", "Columbus Blue Jackets", "Calgary Flames", "Chicago Blackhawks", "Colorado Avalanche",
"Dallas Stars", "Detroit Red Wings", "Florida Panthers", "Los Angeles Kings", "Minnesota Wild", "Nashville Predators", "San Jose Sharks",
"Tampa Bay Lightning", "St Louis Blues", "Vegas Golden Knights", "Edmonton Oilers", "Montreal Canadiens","Ottawa Senators",
"Toronto Maple Leafs", "Winnipeg Jets", "Anaheim Ducks", "Vancouver Canucks",])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak2['Division']='East'
return datak2
# Create the Central division: drop all teams not in the division, then drop the team name and index columns and add a Division label column.
def Cent_Div1(datak):
datak3 = (datak.drop(datak[datak.Team.isin(["Arizona Coyotes", "Buffalo Sabres", "Boston Bruins", "Calgary Flames","Colorado Avalanche"
,"Los Angeles Kings", "Minnesota Wild","Pittsburgh Penguins", "San Jose Sharks", "St Louis Blues", "Vegas Golden Knights",
"Edmonton Oilers", "Montreal Canadiens", "New Jersey Devils", "New York Islanders",
"New York Rangers", "Ottawa Senators", "Philadelphia Flyers", "Toronto Maple Leafs",
"Winnipeg Jets", "Washington Capitals", "Anaheim Ducks", "Vancouver Canucks"])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak3['Division']='Central'
return datak3
# Create the West division: drop all teams not in the division, then drop the team name and index columns and add a Division label column.
def West_Div1(datak):
datak4 = (datak.drop(datak[datak.Team.isin(["Buffalo Sabres", "Boston Bruins", "Carolina Hurricanes",
"Columbus Blue Jackets", "Calgary Flames", "Chicago Blackhawks", "Dallas Stars", "Detroit Red Wings", "Florida Panthers",
"Nashville Predators", "Pittsburgh Penguins","Tampa Bay Lightning","Edmonton Oilers", "Montreal Canadiens",
"New Jersey Devils", "New York Islanders", "New York Rangers", "Ottawa Senators",
"Philadelphia Flyers", "Toronto Maple Leafs", "Winnipeg Jets", "Washington Capitals","Vancouver Canucks"])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak4['Division']='West'
return datak4
# Create all four divisions and concatenate them into a single frame for comparison.
def all_divisions_describe(datak):
datak1 = (datak.drop(datak[datak.Team.isin([ "Arizona Coyotes", "Buffalo Sabres", "Boston Bruins", "Carolina Hurricanes", "Columbus Blue Jackets",
"Chicago Blackhawks", "Colorado Avalanche", "Dallas Stars", "Detroit Red Wings", "Florida Panthers",
"Los Angeles Kings", "Minnesota Wild", "Nashville Predators", "Pittsburgh Penguins", "San Jose Sharks", "Tampa Bay Lightning",
"St Louis Blues", "Vegas Golden Knights", "New Jersey Devils", "New York Islanders", "New York Rangers", "Philadelphia Flyers",
"Washington Capitals", "Anaheim Ducks"])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak1['Division']='North'
datak2 = (datak.drop(datak[datak.Team.isin(["Arizona Coyotes", "Carolina Hurricanes", "Columbus Blue Jackets", "Calgary Flames", "Chicago Blackhawks", "Colorado Avalanche",
"Dallas Stars", "Detroit Red Wings", "Florida Panthers", "Los Angeles Kings", "Minnesota Wild", "Nashville Predators", "San Jose Sharks",
"Tampa Bay Lightning", "St Louis Blues", "Vegas Golden Knights", "Edmonton Oilers", "Montreal Canadiens","Ottawa Senators",
"Toronto Maple Leafs", "Winnipeg Jets", "Anaheim Ducks", "Vancouver Canucks",])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak2['Division']='East'
datak3 = (datak.drop(datak[datak.Team.isin(["Arizona Coyotes", "Buffalo Sabres", "Boston Bruins", "Calgary Flames","Colorado Avalanche"
,"Los Angeles Kings", "Minnesota Wild","Pittsburgh Penguins", "San Jose Sharks", "St Louis Blues", "Vegas Golden Knights",
"Edmonton Oilers", "Montreal Canadiens", "New Jersey Devils", "New York Islanders",
"New York Rangers", "Ottawa Senators", "Philadelphia Flyers", "Toronto Maple Leafs",
"Winnipeg Jets", "Washington Capitals", "Anaheim Ducks", "Vancouver Canucks"])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak3['Division']='Central'
datak4 = (datak.drop(datak[datak.Team.isin(["Buffalo Sabres", "Boston Bruins", "Carolina Hurricanes",
"Columbus Blue Jackets", "Calgary Flames", "Chicago Blackhawks", "Dallas Stars", "Detroit Red Wings", "Florida Panthers",
"Nashville Predators", "Pittsburgh Penguins","Tampa Bay Lightning","Edmonton Oilers", "Montreal Canadiens",
"New Jersey Devils", "New York Islanders", "New York Rangers", "Ottawa Senators",
"Philadelphia Flyers", "Toronto Maple Leafs", "Winnipeg Jets", "Washington Capitals","Vancouver Canucks"])].index)
.reset_index()
.drop(columns = ['Team', 'index'])
)
datak4['Division']='West'
all_div = ( | pd.concat([datak1, datak2, datak3, datak4], axis=0) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
import pathlib
import sqlite3
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from pycytominer import feature_select
from pycytominer.cyto_utils import infer_cp_features
from utils.single_cell_utils import process_sites, normalize_sc
sys.path.append("../0.generate-profiles")
from scripts.profile_util import load_config
# In[2]:
pd.np.random.seed(1234)
# In[3]:
# Set constants
batch = "2020_07_02_Batch8"
plate = "218360"
cell_line_column = "Metadata_clone_number"
cell_lines = ["Clone A", "Clone E", "WT parental"]
feature_filter = ["Object", "Location", "Count", "Parent"]
test_split_prop = 0.15
scaler_method = "standard"
seed = 123
feature_select_opts = [
"variance_threshold",
"drop_na_columns",
"blacklist",
"drop_outliers",
]
corr_threshold = 0.8
na_cutoff = 0
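# Minimal sketch of how the constants above are typically wired into pycytominer's
# feature_select once a normalized profile DataFrame exists. The helper name and its
# `profile_df` argument are placeholders, not part of this notebook.
def run_feature_selection(profile_df):
    return feature_select(
        profiles=profile_df,
        operation=feature_select_opts,
        na_cutoff=na_cutoff,
        corr_threshold=corr_threshold,
    )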
# In[4]:
# Load locations of single cell files
config = pathlib.Path("../0.generate-profiles/profile_config.yaml")
pipeline, single_cell_files = load_config(config, append_sql_prefix=False, local=True)
# In[5]:
workspace_dir = pipeline["workspace_dir"]
batch_dir = pathlib.Path(workspace_dir, "backend", batch)
metadata_dir = pathlib.Path("../0.generate-profiles", "metadata", batch)
barcode_plate_map_file = pathlib.Path(metadata_dir, "barcode_platemap.csv")
barcode_plate_map_df = | pd.read_csv(barcode_plate_map_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
| assert_series_equal(p.ix[item, :, col], p[item][col]) | pandas.util.testing.assert_series_equal |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import reduce
import math
import numpy as np
import pandas as pd
from ctypes import *
# from .talib_series import LINEARREG_SLOPE
from easyquant.easydealutils.easymongo import MongoIo
import datetime
try:
import talib
except ImportError:
print('PLEASE install TALIB to call these methods')
import os
# lib = cdll.LoadLibrary("%s/%s" % (os.path.abspath("."), "talib_ext.so"))
lib = cdll.LoadLibrary("/usr/share/talib/%s" % ("talib_ext.so"))
"""
Series 类
这个是下面以DataFrame为输入的基础函数
return pd.Series format
"""
__STOCK_INFOS = pd.DataFrame()
__REALTIME_DATAS = {}
def __INITDATAS(dateStr = None):
mongo = MongoIo()
global __STOCK_INFOS, __REALTIME_DATAS
if len(__STOCK_INFOS) == 0:
__STOCK_INFOS = mongo.get_stock_info()
# STOCK_INFOS =
if dateStr is None:
dateObj = datetime.datetime.now()
else:
# datetime.datetime.strptime(st, "%Y-%m-%d %H:%M:%S"))
dateObj = datetime.datetime.strptime(dateStr, "%Y-%m-%d")
weekDay = dateObj.weekday()
if weekDay > 4:
dateObj = dateObj - datetime.timedelta(weekDay - 4)
dateStr = dateObj.strftime('%Y-%m-%d')
if dateStr not in __REALTIME_DATAS.keys():
__REALTIME_DATAS[dateStr] = mongo.get_realtime(dateStr=dateStr)
return dateStr
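# Illustrative sketch (added for clarity; not part of the original module): the
# weekend-rollback rule above maps Saturday/Sunday dates onto the preceding Friday
# before realtime data is looked up. The helper below isolates just that rule.
def _demo_weekend_rollback(dateStr):
    dateObj = datetime.datetime.strptime(dateStr, "%Y-%m-%d")
    weekDay = dateObj.weekday()  # Mon=0 ... Sun=6
    if weekDay > 4:
        dateObj = dateObj - datetime.timedelta(days=weekDay - 4)
    return dateObj.strftime('%Y-%m-%d')
# e.g. _demo_weekend_rollback("2018-07-01") -> "2018-06-29" (a Sunday rolled back to Friday)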
def __STOCK_INFO(code):
__INITDATAS()
return __STOCK_INFOS.query("code=='%s'" % code)
def __REALTIME_DATA(code, dateStr):
global __REALTIME_DATAS
dateStr = __INITDATAS(dateStr)
try:
return __REALTIME_DATAS[dateStr].query("code=='%s'" % code)
except Exception as e:
# print("__REALTIME_DATA", code, dateStr, e)
return pd.DataFrame()
def EMA(Series, N):
# return pd.Series.ewm(Series, span=N, min_periods=N - 1, adjust=True).mean()
Series = Series.fillna(0)
res = talib.EMA(Series.values, N)
return pd.Series(res, index=Series.index)
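# Illustrative usage sketch (added for clarity; not part of the original module):
# the Series-based helpers above take a price column and hand back a pd.Series on
# the same index. The ewm line mirrors the commented-out pandas alternative above;
# small numerical differences versus talib are expected.
def _demo_ema():
    closes = pd.Series(np.linspace(10.0, 20.0, 60))
    fast = EMA(closes, 12)                            # talib-backed helper above
    approx = closes.ewm(span=12, adjust=True).mean()  # pandas-only approximation
    return pd.concat([fast.rename('talib_ema'), approx.rename('pandas_ewm')], axis=1)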
def EXPMA(Series, N):
# return pd.Series.ewm(Series, span=N, min_periods=N - 1, adjust=True).mean()
Series = Series.fillna(0)
res = talib.EMA(Series.values, N)
return | pd.Series(res, index=Series.index) | pandas.Series |
"""
This script generates the results required for the experiments.
"""
import pandas as pd
import numpy as np
import scipy.stats
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import psycopg2 as ps
import squarify
import textblob as tb
import nltk
from collections import Counter
from joblib import dump
from sklearn import tree
from sklearn.metrics import *
from sklearn.model_selection import ShuffleSplit, GridSearchCV, train_test_split
from sklearn.tree import plot_tree
def compute_requirements_stats():
df = pd.read_excel(r'data/DataLast/dataset4.xls')
print(df.shape)
counter = Counter(df.iloc[:, 1])
print('Projects and requirements: ', counter)
print('# Projects: ', len(counter))
quit()  # early exit: only the project/requirement counts above are reported; the word-count code below never runs
df2 = df.loc[(df['File'] == '0000 - cctns.xml')]['Requirement_text']
# print(df2)
word_count = 0
for req in df2.iteritems():
blob = tb.TextBlob(req[1])
# print(blob.words)
word_count += len(blob.words)
print(word_count)
def compute_smell_prevalence():
df = pd.read_excel(r'data/DataLast/dataset1kv1.xlsx')
countLine = 0
projects_name_list = list()
my_values = []
my_labels = []
numberOfTotalSmells = []
number_of_no_clean_word = []
numberOfSubjectiveSmell = []
numberOfAmbigAdjAdvSmell = []
numberOfLoopholeSmell = []
numberOfOpenendedSmell = []
numberOfSuperlativeSmell = []
numberOfComparativeSmell = []
numberOfNegativeSmell = []
numberOfPronounsSmell = []
numberOfNUncertainSmell = []
numberOfPolysemySmells = []
for index, row in df.iterrows():
smell_number = 0
subjectiveNum = 0
AmbigAdjAdvNum = 0
LoopholeNum = 0
OpenendedNum = 0
SuperlativeNum = 0
ComparativeNum = 0
NegativeNum = 0
PronounsNum = 0
UncertainNum = 0
PolysemyNum = 0
# Modify project name:
if row['File'] == '2007-ertms.xml':
projects_name_list.append('ERTMS/ETCS')
elif row['File'] == '0000 - cctns.xml':
projects_name_list.append('CCTNS')
elif row['File'] == '2007-eirene_fun_7-2.xml':
projects_name_list.append('EIRENE')
elif row['File'] == '2008 - keepass.xml':
projects_name_list.append('KeePass')
elif row['File'] == '0000 - gamma j.xml':
projects_name_list.append('Gamma-J')
elif row['File'] == 'NEW - 2008 - peering.xml':
projects_name_list.append('Peering')
else:
projects_name_list.append('not_set')
countLine = countLine + 1
my_values.append(len(row['Requirement_text'].split(" ")))
my_labels.append('R' + str(countLine))
if row['Subjective_lang.'] != '-':
subjectiveNum = len(row['Subjective_lang.'].split("*"))
else:
subjectiveNum = 0
smell_number += subjectiveNum
numberOfSubjectiveSmell.append(subjectiveNum)
if row['Ambiguous_adv._adj.'] != '-':
AmbigAdjAdvNum = len(row['Ambiguous_adv._adj.'].split("*"))
else:
AmbigAdjAdvNum = 0
smell_number += AmbigAdjAdvNum
numberOfAmbigAdjAdvSmell.append(AmbigAdjAdvNum)
if row['Loophole'] != '-':
LoopholeNum = len(row['Loophole'].split("*"))
else:
LoopholeNum = 0
smell_number += LoopholeNum
numberOfLoopholeSmell.append(LoopholeNum)
if row['Nonverifiable_term'] != '-':
OpenendedNum = len(row['Nonverifiable_term'].split("*"))
else:
OpenendedNum = 0
smell_number += OpenendedNum
numberOfOpenendedSmell.append(OpenendedNum)
if row['Superlative'] != '-':
SuperlativeNum = len(row['Superlative'].split("*"))
else:
SuperlativeNum = 0
smell_number += SuperlativeNum
numberOfSuperlativeSmell.append(SuperlativeNum)
if row['Comparative'] != '-':
ComparativeNum = len(row['Comparative'].split("*"))
else:
ComparativeNum = 0
smell_number += ComparativeNum
numberOfComparativeSmell.append(ComparativeNum)
if row['Negative'] != '-':
NegativeNum = len(row['Negative'].split("*"))
else:
NegativeNum = 0
smell_number += NegativeNum
numberOfNegativeSmell.append(NegativeNum)
if row['Vague_pron.'] != '-':
PronounsNum = len(row['Vague_pron.'].split("*"))
else:
PronounsNum = 0
smell_number += PronounsNum
numberOfPronounsSmell.append(PronounsNum)
if row['Uncertain_verb'] != '-':
UncertainNum = len(row['Uncertain_verb'].split("*"))
else:
UncertainNum = 0
smell_number += UncertainNum
numberOfNUncertainSmell.append(UncertainNum)
if row['Polysemy'] != '-':
PolysemyNum = len(set(row['Polysemy'].split("*")))
else:
PolysemyNum = 0
smell_number += PolysemyNum
numberOfPolysemySmells.append(PolysemyNum)
blob = tb.TextBlob(row['Requirement_text'])
all_words = len(blob.words)
number_of_no_clean_word.append(all_words - smell_number)
numberOfTotalSmells.append(smell_number)
print('numberOfTotalSmells', numberOfTotalSmells)
print('numberOfSubjectiveSmell', numberOfSubjectiveSmell)
print('numberOfAmbigAdjAdvSmell', numberOfAmbigAdjAdvSmell)
print('numberOfLoopholeSmell', numberOfLoopholeSmell)
print('numberOfOpenendedSmell', numberOfOpenendedSmell)
print('numberOfSuperlativeSmell', numberOfSuperlativeSmell)
print('numberOfComparativeSmell', numberOfComparativeSmell)
print('numberOfNegativeSmell', numberOfNegativeSmell)
print('numberOfPronounsSmell', numberOfPronounsSmell)
print('numberOfNUncertainSmell', numberOfNUncertainSmell)
print('numberOfPolysemySmells', numberOfPolysemySmells)
df2 = pd.DataFrame()
df2['ReqId'] = my_labels
df2['ReqTxt'] = df['Requirement_text']
df2['Project'] = projects_name_list
df2['Words'] = my_values
df2['SmellyWords'] = numberOfTotalSmells
df2['CleanWords'] = number_of_no_clean_word
df2['Subjective'] = numberOfSubjectiveSmell
df2['Ambiguous'] = numberOfAmbigAdjAdvSmell
df2['NonVerifiable'] = numberOfOpenendedSmell
df2['Superlative'] = numberOfSuperlativeSmell
df2['Comparative'] = numberOfComparativeSmell
df2['Negative'] = numberOfNegativeSmell
df2['VaguePron.'] = numberOfPronounsSmell
df2['UncertainVerb'] = numberOfNUncertainSmell
df2['Polysemy'] = numberOfPolysemySmells
df2.to_excel(r'data/DataLast/dataset1kv1_smell_frequency.xlsx')
"""
data = [numberOfSubjectiveSmell, numberOfAmbigAdjAdvSmell,
numberOfOpenendedSmell, numberOfSuperlativeSmell, numberOfComparativeSmell, numberOfNegativeSmell,
numberOfPronounsSmell, numberOfNUncertainSmell, numberOfPolysemySmells]
# Create a figure instance
fig = plt.figure(1, figsize=(15, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data)
ax.set_xticklabels(['Subjective', 'Ambig Adj./Adv.',
'Non-verifiable', 'Superlative', 'Comparative',
'Negative ', 'Vague pronoun.', 'Uncertain verb', 'Polysemy'], fontsize=10)
plt.show()
"""
df2.drop(columns=['Words', 'SmellyWords', 'CleanWords'], inplace=True)
df3 = | pd.melt(df2, id_vars=['ReqId', 'ReqTxt', 'Project', ], var_name='Type', value_name='Number') | pandas.melt |
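# Illustrative sketch (added for clarity; the plot call and output path are assumptions,
# not part of the original script): with the frame melted to long format (one row per
# requirement and smell type), per-type prevalence can be drawn directly, much like the
# commented-out boxplot block above.
def _demo_plot_smell_prevalence(df_long):
    plt.figure(figsize=(12, 5))
    sns.boxplot(x='Type', y='Number', data=df_long)
    plt.xticks(rotation=45, ha='right')
    plt.tight_layout()
    plt.savefig(r'data/DataLast/smell_prevalence_boxplot.png', dpi=150)
    plt.close()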
import numpy as np
import pandas as pd
from matplotlib import *
# .........................Series.......................#
x1 = np.array([1, 2, 3, 4])
s = pd.Series(x1, index=[1, 2, 3, 4])
print(s)
# .......................DataFrame......................#
x2 = np.array([1, 2, 3, 4, 5, 6])
s = pd.DataFrame(x2)
print(s)
x3 = np.array([['Alex', 10], ['Nishit', 21], ['Aman', 22]])
s = pd.DataFrame(x3, columns=['Name', 'Age'])
print(s)
data = {'Name': ['Tom', 'Jack', 'Steve', 'Ricky'], 'Age': [28, 34, 29, 42]}
df = pd.DataFrame(data, index=['rank1', 'rank2', 'rank3', 'rank4'])
print(df)
data = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4, 'c': 5}]
df = pd.DataFrame(data)
print(df)
d = {'one': pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two': pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
print(df)
# ....Adding New column......#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3])}
df = pd.DataFrame(data)
print(df)
df['three'] = pd.Series([1, 2], index=[1, 2])
print(df)
# ......Deleting a column......#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3]),
'three': pd.Series([1, 1], index=[1, 2])
}
df = pd.DataFrame(data)
print(df)
del df['one']
print(df)
df.pop('two')
print(df)
# ......Selecting a particular Row............#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3]),
'three': pd.Series([1, 1], index=[1, 2])
}
df = pd.DataFrame(data)
print(df.loc[2])
print(df[1:4])
# .........Addition of Row.................#
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['a', 'b'])
df = df.append(df2)
print(df.head())
# ........Deleting a Row..................#
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['a', 'b'])
df = df.append(df2)
# Drop rows with label 0
df = df.drop(0)
print(df)
# ..........................Functions.....................................#
d = {'Name': pd.Series(['Tom', 'James', 'Ricky', 'Vin', 'Steve', 'Smith', 'Jack']),
'Age': pd.Series([25, 26, 25, 23, 30, 29, 23]),
'Rating': pd.Series([4.23, 3.24, 3.98, 2.56, 3.20, 4.6, 3.8])}
df = pd.DataFrame(d)
print("The transpose of the data series is:")
print(df.T)
print(df.shape)
print(df.size)
print(df.values)
# .........................Statistics.......................................#
d = {'Name': pd.Series(['Tom', 'James', 'Ricky', 'Vin', 'Steve', 'Smith', 'Jack',
'Lee', 'David', 'Gasper', 'Betina', 'Andres']),
'Age': pd.Series([25, 26, 25, 23, 30, 29, 23, 34, 40, 30, 51, 46]),
'Rating': pd.Series([4.23, 3.24, 3.98, 2.56, 3.20, 4.6, 3.8, 3.78, 2.98, 4.80, 4.10, 3.65])
}
df = pd.DataFrame(d)
print(df.sum())
d = {'Name': pd.Series(['Tom', 'James', 'Ricky', 'Vin', 'Steve', 'Smith', 'Jack',
'Lee', 'David', 'Gasper', 'Betina', 'Andres']),
'Age': pd.Series([25, 26, 25, 23, 30, 29, 23, 34, 40, 30, 51, 46]),
'Rating': pd.Series([4.23, 3.24, 3.98, 2.56, 3.20, 4.6, 3.8, 3.78, 2.98, 4.80, 4.10, 3.65])
}
df = pd.DataFrame(d)
print(df.describe(include='all'))
# .......................Sorting..........................................#
# Using the sort_index() method, by passing the axis arguments and the order of sorting,
# DataFrame can be sorted. By default, sorting is done on row labels in ascending order.
unsorted_df = pd.DataFrame(np.random.randn(10, 2), index=[1, 4, 6, 2, 3, 5, 9, 8, 0, 7], columns=['col2', 'col1'])
sorted_df = unsorted_df.sort_index()
print(sorted_df)
sorted_df = unsorted_df.sort_index(ascending=False)
print(sorted_df)
# By passing the axis argument with a value 0 or 1,
# the sorting can be done on the column labels. By default, axis=0, sort by row.
# Let us consider the following example to understand the same.
unsorted_df = pd.DataFrame(np.random.randn(10, 2), index=[1, 4, 6, 2, 3, 5, 9, 8, 0, 7], columns=['col2', 'col1'])
sorted_df = unsorted_df.sort_index(axis=1)
print(sorted_df)
unsorted_df = | pd.DataFrame({'col1': [2, 1, 1, 1], 'col2': [1, 3, 2, 4]}) | pandas.DataFrame |
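# Illustrative continuation (an assumption -- the original tutorial breaks off here):
# sort_values orders rows by column contents rather than by the index labels.
sorted_by_col1 = unsorted_df.sort_values(by='col1')
print(sorted_by_col1)
sorted_by_both = unsorted_df.sort_values(by=['col1', 'col2'])
print(sorted_by_both)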
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
import datetime
from bs4 import BeautifulSoup as bs
import pandas as pd
import os
import json
import requests
import io
url11='https://www.boxofficemojo.com/weekend/by-year/2019/?area=AU'
url12='https://www.boxofficemojo.com/weekend/by-year/2020/?area=AU'
url21='https://www.boxofficemojo.com/weekend/by-year/2019/?area=DE'
url22='https://www.boxofficemojo.com/weekend/by-year/2020/?area=DE'
url31='https://www.boxofficemojo.com/weekend/by-year/2019/?area=JP'
url32='https://www.boxofficemojo.com/weekend/by-year/2020/?area=JP'
url41='https://www.boxofficemojo.com/weekend/by-year/2019/'
url42='https://www.boxofficemojo.com/weekend/by-year/2020/'
#Australia
dates=[]
dfs1=pd.read_html(url11)
dfs2=pd.read_html(url12)
df11=pd.DataFrame()
df12=pd.DataFrame()
df21=pd.DataFrame()
df22=pd.DataFrame()
total1=pd.DataFrame()
df110=dfs1[0]['Overall Gross'][29::-1]
df12=dfs1[0]['Dates'][29::-1]
df210=dfs2[0]['Overall Gross'][:0:-1]  # '$' and ',' are stripped element-wise in the loop below
df22=dfs2[0]['Dates'][:0:-1]
k = []
for i in df110:
k.append(int((i.replace('$','').replace(',',''))))
df11['Overall Gross']=k
k = []
for i in df210:
k.append(int((i.replace('$','').replace(',',''))))
df21['Overall Gross']=k
for i in range(0,42):
dates.append((datetime.datetime.strptime('2019-06-06','%Y-%m-%d')+datetime.timedelta(days=7*i)).date())
dates.append('2020-03-28')
dates.append('2020-06-04')
total1['Dates']=dates
total1['Overall Gross']=pd.concat([df11,df21],ignore_index=True)
print(total1)
total1.to_csv(r'C:/Users/USER/Desktop/資訊/鄭恆安/csv/Australia.csv',encoding='big5',index=False)
#Germany
dates=[]
dfs1=pd.read_html(url21)
dfs2=pd.read_html(url22)
df11=pd.DataFrame()
df12=pd.DataFrame()
df21= | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import geopandas as gpd
import pygeos
from tqdm import tqdm
from rasterstats import zonal_stats
import pyproj
import warnings
warnings.filterwarnings("ignore")
from multiprocessing import Pool,cpu_count
def reproject(geometries):
#Find crs of current df and arbitrary point(lat,lon) for new crs
current_crs="epsg:4326"
#The commented out crs does not work in all cases
#current_crs = [*network.edges.crs.values()]
#current_crs = str(current_crs[0])
geometry = geometries[0]
lat = pygeos.get_y(pygeos.centroid(geometry))
lon = pygeos.get_x(pygeos.centroid(geometry))
# formula below based on :https://gis.stackexchange.com/a/190209/80697
approximate_crs = "epsg:3035"# + str(int(32700-np.round((45+lat)/90,0)*100+np.round((183+lon)/6,0)))
#from pygeos/issues/95
coords = pygeos.get_coordinates(geometries)
transformer=pyproj.Transformer.from_crs(current_crs, approximate_crs,always_xy=True)
new_coords = transformer.transform(coords[:, 0], coords[:, 1])
result = pygeos.set_coordinates(geometries, np.array(new_coords).T)
return result
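# Illustrative usage sketch (added for clarity; not part of the original script):
# reproject a couple of WGS84 lon/lat points to the hard-coded EPSG:3035 grid used above.
def _demo_reproject():
    pts = pygeos.points([[4.89, 52.37], [13.40, 52.52]])  # Amsterdam, Berlin (lon, lat)
    return reproject(pts)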
def return_all(x):
return x
def intersect_country(country):
data_path = r'/scistor/ivm/data_catalogue/open_street_map/' #os.path.join('C:\\','Data')
if not os.path.exists(os.path.join(data_path,'EU_flooded_road_networks','{}-edges.feather'.format(country))):
flood_maps_path = os.path.join(data_path,'floodMaps_Europe_2018_mergedshifted')
flood_maps = [os.path.join(flood_maps_path,x) for x in os.listdir(flood_maps_path)]
aoi_maps_path = os.path.join(data_path,'floodMaps_Europe_2018_AreaofInfluence_shifted')
aoi_maps = [os.path.join(aoi_maps_path,x) for x in os.listdir(aoi_maps_path)]
road_network = os.path.join(data_path,'road_networks','{}-edges.feather'.format(country))
road_df = | pd.read_feather(road_network) | pandas.read_feather |
import os
import numpy as np
import pandas as pd
import pytest
from conceptnet5.uri import is_term
from conceptnet5.vectors import get_vector
from conceptnet5.vectors.transforms import (
l1_normalize_columns,
l2_normalize_rows,
make_big_frame,
make_small_frame,
shrink_and_sort,
standardize_row_labels,
)
from conceptnet5.vectors.query import VectorSpaceWrapper
@pytest.fixture
def simple_frame():
data = [
[4, 4, 4],
[1, 1, 1],
[1, 2, 10],
[3, 3, 4],
[2, 3, 4],
[2, 3, 5],
[7, 2, 7],
[3, 8, 2],
]
index = [
'island',
'Island',
'cat',
'figure',
'figure skating',
'figure skater',
'thing',
'17',
]
return pd.DataFrame(data=data, index=index)
@pytest.fixture
def multi_ling_frame():
data = [[8, 10, 3], [4, 5, 6], [4, 4, 5], [10, 6, 12], [10, 7, 11], [20, 20, 7]]
index = [
'/c/pl/kombinacja',
'/c/en/ski_jumping',
'/c/en/nordic_combined',
'/c/en/present',
'/c/en/gift',
'/c/en/quiz',
]
return pd.DataFrame(data=data, index=index)
def test_get_vector(simple_frame):
assert get_vector(simple_frame, '/c/en/cat').equals(
get_vector(simple_frame, 'cat', 'en')
)
def test_standardize_row_labels(simple_frame):
vec1 = simple_frame.loc['island']
vec2 = simple_frame.loc['Island']
vec3 = simple_frame.loc['thing']
standardized_vectors = standardize_row_labels(simple_frame)
# Check if all labels are terms
assert all(is_term(label) for label in standardized_vectors.index)
# Check if all terms standardized to the same concept are merged
assert standardized_vectors.index.is_unique
assert '/c/en/Island' not in standardized_vectors.index
assert '/c/en/island' in standardized_vectors.index
assert '/c/en/thing' in standardized_vectors.index
assert standardized_vectors.loc['/c/en/island'].equals( | pd.Series([3.0, 3.0, 3.0]) | pandas.Series |
import pytest
import numpy as np
import pandas as pd
from systrade.trading.brokers import PaperBroker
T_START = pd.to_datetime('2019/07/10-09:30:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
T_END = pd.to_datetime('2019/07/10-10:00:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
TIMEINDEX = pd.date_range(start=T_START,end=T_END,freq='1min')
DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX)) ,
'tick1':np.arange(len(TIMEINDEX)-1,-1,-1)},
index=TIMEINDEX)
# DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX))},
# index=TIMEINDEX)
class TestPaperBroker:
def test_init(self):
testseries = pd.Series(np.arange(10))
with pytest.raises(TypeError):
broker = PaperBroker(testseries)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,slippage_time=1.0)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,transaction_cost=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,transaction_cost=-0.5)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,spread_pct=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=-0.5)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=200)
def test_next_extant_time(self):
broker = PaperBroker(DATA_DF)
t_get = pd.to_datetime('2019/07/10-09:35:05:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t_out = broker.next_extant_time(t_get)
t_expect = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
assert t_out==t_expect
t_get = pd.to_datetime('2019/07/10-11:35:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
t_out = broker.next_extant_time(t_get)
def test_get_timeindex_subset(self):
broker = PaperBroker(DATA_DF)
t0 = pd.to_datetime('2019/07/10-09:29:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
t0 = pd.to_datetime('2019/07/10-09:34:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-11:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(t0,1)
t1 = | pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f') | pandas.to_datetime |
import os
import sys
import time
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import seaborn as sns
from src.utils import time_my_func
from src.obtain import run_on_bash, get_file_info
from src.obtain import connect_to_db, load_file_to_db, print_table_names
from src.scrub import import_filter_df, get_target_df
from src.scrub import drop_zv, drop_nzv, drop_missings, remove_zv_missings
from src.scrub import make_dummies, create_dummified_df
from src.scrub import compress_numeric, clip_categorical
from src.scrub import backup_df
path_raw = "data/raw/gravity_contact_20180406.csv"
path_clean = "data/raw/clean_contact.csv"
path_clean_db = "data/interim/clean.db"
# --- Declare Helper Objects ---
dict_replace_1 = {}
replace_spaces = lambda i: "_".join([x.lower().strip() for x in i.split()])
def get_x_from_y():
"""
"""
pass
# --- Declare Data Processing Functions ---
@time_my_func
def engineer_features(df):
"""
"""
print("Scrubbing Cell Description")
num_items_cellDescr = df['CELL_DESCRIPTION'].map(lambda i: len(str(i).split("|")))
indexes_to_drop = \
(num_items_cellDescr
.where(lambda i: i != 9)
.dropna()
.index
.tolist())
df.drop(indexes_to_drop, inplace=True)
dict_replace_cellDescr = {
k:v for k, v in zip(
df['CELL_DESCRIPTION'].drop_duplicates().values,
df['CELL_DESCRIPTION'].drop_duplicates().map(lambda i: Series(i.split("|")).to_dict()).values
)
}
df_cellDescr = DataFrame(df['CELL_DESCRIPTION'].map(lambda i: dict_replace_cellDescr.get(i, None)).tolist())
df.drop('CELL_DESCRIPTION', axis=1, inplace=True)
cols_df_cellDescr = {
0: 'CAMPAIGN_BRAND_CDS',
1: 'CAMPAIGN_STATUS_CDS',
2: 'CAMPAIGN_TYPE_CDS',
3: 'CAMPAIGN_CONTENT_1_CDS',
4: 'CAMPAIGN_CONTENT_2_CDS',
5: 'CAMPAIGN_CONTENT_3_CDS'
}
df_cellDescr.drop(range(6, 9), axis=1, inplace=True)
df_cellDescr.rename(columns=cols_df_cellDescr, inplace=True)
print("Creating Campaign Brand")
if 'CAMPAIGN_BRAND' in df.columns:
df.drop('CAMPAIGN_BRAND', axis=1, inplace=True)
else:
pass
df.loc[:, 'CAMPAIGN_BRAND'] = df_cellDescr['CAMPAIGN_BRAND_CDS'].values
print("Creating Campaign Status")
df.loc[:, 'CAMPAIGN_STATUS'] = df_cellDescr['CAMPAIGN_STATUS_CDS'].values
print("Creating Campaign Type")
dict_replace_CampaignType = {
"00": "Welcome_Email",
"01": "Email_w_ItemRaffle",
"02": "Event_Email_wo_Item",
"03": "Event_Email_w_Item",
"04": "Email_w_Pack",
"05": "Email_w_eVoucher",
"06": "Email_wo_Incentive",
"07": "SMS_w_eVoucher",
"08": "SMS_Info",
"09": "SMS_w_REG_Code",
"10": "Postal_Mail",
"11": "Pack_Mail",
"12": "Unknown",
"13": "Postal_Mail_w_eVoucher",
"14": "Postal_Mail_w_item",
"15": "Postal_Mail_w_REG_Code",
"16": "Email_w_Everything"
}
df.loc[:, 'CAMPAIGN_TYPE'] = \
(df_cellDescr['CAMPAIGN_TYPE_CDS']
.fillna('_missing_')
.map(lambda i: str(i).zfill(2))
.replace(dict_replace_CampaignType)
.pipe(clip_categorical, COVERAGE=0.99)
.values
)
print("Creating Campaign Content")
dict_replace_campaign_content = {
'Other': 'Other',
'day_00': 'day_00',
'ipsos': 'ipsos',
'ipsos_panel': 'ipsos',
'iqos_national': 'iqos_national',
'mgm_march_transition': 'mgm',
'mgm_spring_last_march_push': 'mgm',
'ob01_better2018_care': 'ob01_betterCare',
'ob01_betterstories_2018_care': 'ob01_betterCare',
'personicx_main_accessoires': 'personicx',
'pr_amplification_newsarticle': 'pr_amplification',
'valentines_day_2018': 'valentines_day',
'valentines_day_white_mail_evoucher': 'valentines_day',
'valentinesday_pack_mail': 'valentines_day'
}
df.loc[:, 'CAMPAIGN_CONTENT'] = \
(df_cellDescr['CAMPAIGN_CONTENT_1_CDS']
.map(lambda i: i.strip().lower())
.pipe(clip_categorical, COVERAGE=0.88)
.replace(dict_replace_campaign_content)
.values
)
del df_cellDescr
print("Scrubbing Channel")
df.loc[:, 'CHANNEL'] = \
(df['CHANNEL']
.map(replace_spaces)
.pipe(clip_categorical)
.values
)
df.drop(['CONTACT_HISTORY_ID'], axis=1, inplace=True)
return df
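# Illustrative worked example (the field values below are invented for clarity and are
# not taken from the real data): how one pipe-delimited CELL_DESCRIPTION is decomposed
# by the logic above -- rows must split into exactly 9 parts, and the first six
# positions feed the campaign brand/status/type/content columns.
def _demo_cell_description_split():
    raw = 'IQOS|ACTIVE|06|ipsos|wave_1|email|x|y|z'
    parts = Series(raw.split('|')).to_dict()
    return {
        'CAMPAIGN_BRAND_CDS': parts[0],
        'CAMPAIGN_STATUS_CDS': parts[1],
        'CAMPAIGN_TYPE_CDS': parts[2],      # '06' maps to 'Email_wo_Incentive' above
        'CAMPAIGN_CONTENT_1_CDS': parts[3],
        'CAMPAIGN_CONTENT_2_CDS': parts[4],
        'CAMPAIGN_CONTENT_3_CDS': parts[5],
    }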
def aggregate_df(df, df_y, cols_flags):
"""
"""
ckey = df.CONSUMER_KEY.sample(1).iloc[0]
try:
_, conversion_measure, date_survey = df_y.query("CONSUMER_KEY == {}".format(ckey)).values[0]
dfrp = df.query("CONSUMER_KEY == {}".format(ckey))
dfrp = dfrp[dfrp.SELECTION_DATE <= date_survey]
s1 = dfrp[cols_flags].mean()
if len(s1) > 1:
pass
else:
s1 = Series(0, index=cols_flags)
weekend_responses = \
(df
.SELECTION_DATE
.dt.strftime("%a")
.value_counts()
.reindex(['Sat', 'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri'])
)
s2 = Series({
'num_contacts': dfrp.shape[0],
'num_months_active_contacts': dfrp.SELECTION_DATE.dt.strftime("%b_%Y").nunique(),
'num_days_bw_lastContact_survey': (date_survey - dfrp.SELECTION_DATE.max())/np.timedelta64(1, 'D'),
'perc_contacts_weekend': weekend_responses.loc[['Sat', 'Sun']].sum()/weekend_responses.sum(),
'y': conversion_measure
})
return | pd.concat([s1, s2]) | pandas.concat |
"""
@author: <NAME>
"""
from gensim.models import KeyedVectors
import xml.etree.ElementTree as ET
import os
import re
import numpy as np
import pandas as pd
from argparse import ArgumentParser
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
parser = ArgumentParser()
parser.add_argument("--dataDir", type=str, default="./data/Train/Source", help="directory containing raw data")
parser.add_argument("--outDir", type=str, default="./datasets", help="directory to store dataset with features")
parser.add_argument('--test', action='store_true', help="indicator for test data")
parser.add_argument('--kl', type=int, default=2, help="number of words forming left context of focus word")
parser.add_argument('--kr', type=int, default=2, help="number of words forming right context of focus word")
class PSDDatasetGenerator:
"""Class to generate datasets from raw data for Preposition Sense Disambiguation"""
def __init__(self, data_dir):
print("Loading word2vec model...")
self.model = KeyedVectors.load_word2vec_format("./GoogleNews-vectors-negative300.bin", binary=True)
print("Loading TPP mappings...")
tpp_mappings_df = | pd.read_csv("./data/Tratz Variations/mappings_for_tpp_data", sep="\t", header=None, names=["id", "prep_sense"]) | pandas.read_csv |
# Copyright 2019 <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Plot Service will make use of appropriately decorated functions in this module.
import datetime
import logging
import re
import time
from collections import namedtuple
from enum import auto
from numbers import Real
from dateutil import tz
import cachetools.func
import numpy as np
import pandas as pd
from pandas import Series
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, USMemorialDay, USLaborDay, USThanksgivingDay, \
nearest_workday
from gs_quant.api.gs.assets import GsIdType
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.fields import Fields
from gs_quant.datetime.gscalendar import GsCalendar
from gs_quant.datetime.point import relative_days_add
from gs_quant.errors import MqTypeError, MqValueError
from gs_quant.markets.securities import *
from gs_quant.markets.securities import Asset, AssetIdentifier, SecurityMaster
from gs_quant.target.common import AssetClass, FieldFilterMap, AssetType, Currency
from gs_quant.timeseries.helper import log_return, plot_measure
GENERIC_DATE = Union[datetime.date, str]
TD_ONE = datetime.timedelta(days=1)
_logger = logging.getLogger(__name__)
MeasureDependency: namedtuple = namedtuple("MeasureDependency", ["id_provider", "query_type"])
# TODO: get NERC Calendar from SecDB
class NercCalendar(AbstractHolidayCalendar):
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
def _to_fx_strikes(strikes):
out = []
for strike in strikes:
if strike == 50:
out.append('ATMS')
elif strike < 50:
out.append(f'{round(strike)}DC')
else:
out.append(f'{round(abs(100 - strike))}DP')
return out
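# Illustrative mapping (added for clarity): _to_fx_strikes([25, 50, 75]) returns
# ['25DC', 'ATMS', '25DP'] -- strikes below 50 are quoted as delta calls, 50 as
# at-the-money spot, and strikes above 50 as the mirrored delta puts.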
class SkewReference(Enum):
DELTA = 'delta'
NORMALIZED = 'normalized'
SPOT = 'spot'
FORWARD = 'forward'
class VolReference(Enum):
DELTA_CALL = 'delta_call'
DELTA_PUT = 'delta_put'
DELTA_NEUTRAL = 'delta_neutral'
NORMALIZED = 'normalized'
SPOT = 'spot'
FORWARD = 'forward'
class VolSmileReference(Enum):
SPOT = 'spot'
FORWARD = 'forward'
class EdrDataReference(Enum):
DELTA_CALL = 'delta_call'
DELTA_PUT = 'delta_put'
FORWARD = 'forward'
class ForeCastHorizon(Enum):
THREE_MONTH = '3m'
SIX_MONTH = '6m'
ONE_YEAR = '1y'
EOY1 = 'EOY1'
EOY2 = 'EOY2'
EOY3 = 'EOY3'
EOY4 = 'EOY4'
class BenchmarkType(Enum):
LIBOR = 'LIBOR'
EURIBOR = 'EURIBOR'
STIBOR = 'STIBOR'
OIS = 'OIS'
class RatesConversionType(Enum):
DEFAULT_BENCHMARK_RATE = auto()
INFLATION_BENCHMARK_RATE = auto()
CROSS_CURRENCY_BASIS = auto()
CURRENCY_TO_DEFAULT_RATE_BENCHMARK = {
'USD': 'USD-LIBOR-BBA',
'EUR': 'EUR-EURIBOR-Telerate',
'GBP': 'GBP-LIBOR-BBA',
'JPY': 'JPY-LIBOR-BBA'
}
CURRENCY_TO_INFLATION_RATE_BENCHMARK = {
'GBP': 'CPI-UKRPI',
'EUR': 'CPI-CPXTEMU'
}
CROSS_TO_CROSS_CURRENCY_BASIS = {
'JPYUSD': 'USD-3m/JPY-3m',
'USDJPY': 'USD-3m/JPY-3m',
'USDEUR': 'EUR-3m/USD-3m',
'EURUSD': 'EUR-3m/USD-3m',
'USDGBP': 'GBP-3m/USD-3m',
'GBPUSD': 'GBP-3m/USD-3m'
}
def cross_stored_direction_for_fx_vol(asset_id: str) -> str:
result_id = asset_id
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
if asset.asset_class is AssetClass.FX:
bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is not None:
legit_usd_cross = str.startswith(bbid, "USD") and not str.endswith(bbid, ("EUR", "GBP", "NZD", "AUD"))
legit_eur_cross = str.startswith(bbid, "EUR")
legit_jpy_cross = str.endswith(bbid, "JPY") and not str.startswith(bbid, ("KRW", "IDR", "CLP", "COP"))
odd_cross = bbid in ("EURUSD", "GBPUSD", "NZDUSD", "AUDUSD", "JPYKRW", "JPYIDR", "JPYCLP", "JPYCOP")
if not legit_usd_cross and not legit_eur_cross and not legit_jpy_cross and not odd_cross:
cross = bbid[3:] + bbid[:3]
cross_asset = SecurityMaster.get_asset(cross, AssetIdentifier.BLOOMBERG_ID)
result_id = cross_asset.get_marquee_id()
except TypeError:
result_id = asset_id
return result_id
def cross_to_usd_based_cross(asset_id: str) -> str:
result_id = asset_id
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
if asset.asset_class is AssetClass.FX:
bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is not None and not str.startswith(bbid, "USD"):
cross = bbid[3:] + bbid[:3]
cross_asset = SecurityMaster.get_asset(cross, AssetIdentifier.BLOOMBERG_ID)
result_id = cross_asset.get_marquee_id()
except TypeError:
result_id = asset_id
return result_id
def currency_to_default_benchmark_rate(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
except TypeError:
result = asset_id
return result
def currency_to_inflation_benchmark_rate(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.INFLATION_BENCHMARK_RATE)
except TypeError:
result = asset_id
return result
def cross_to_basis(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.CROSS_CURRENCY_BASIS)
except TypeError:
result = asset_id
return result
def convert_asset_for_rates_data_set(from_asset: Asset, c_type: RatesConversionType) -> str:
try:
bbid = from_asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is None:
return from_asset.get_marquee_id()
if c_type is RatesConversionType.DEFAULT_BENCHMARK_RATE:
to_asset = CURRENCY_TO_DEFAULT_RATE_BENCHMARK[bbid]
elif c_type is RatesConversionType.INFLATION_BENCHMARK_RATE:
to_asset = CURRENCY_TO_INFLATION_RATE_BENCHMARK[bbid]
else:
to_asset = CROSS_TO_CROSS_CURRENCY_BASIS[bbid]
return GsAssetApi.map_identifiers(GsIdType.mdapi, GsIdType.id, [to_asset])[to_asset]
except KeyError:
logging.info(f'Unsupported currency or cross ${bbid}')
raise from_asset.get_marquee_id()
def _get_custom_bd(exchange):
from pandas.tseries.offsets import CustomBusinessDay
calendar = GsCalendar.get(exchange).business_day_calendar()
return CustomBusinessDay(calendar=calendar)
@log_return(_logger, 'trying pricing dates')
def _range_from_pricing_date(exchange, pricing_date: Optional[GENERIC_DATE] = None):
if isinstance(pricing_date, datetime.date):
return pricing_date, pricing_date
today = pd.Timestamp.today().normalize()
if pricing_date is None:
t1 = today - _get_custom_bd(exchange)
return t1, t1
assert isinstance(pricing_date, str)
matcher = re.fullmatch('(\\d+)b', pricing_date)
if matcher:
start = end = today - _get_custom_bd(exchange) * int(matcher.group(1))
else:
end = today - datetime.timedelta(days=relative_days_add(pricing_date, True))
start = end - _get_custom_bd(exchange)
return start, end
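# Illustrative behaviour (added for clarity): with pricing_date=None the function returns
# the previous business day on the exchange calendar as both start and end; a string such
# as '2b' steps back that many business days; any other relative string, e.g. '1m', yields
# a one-business-day window ending roughly that far in the past.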
def _to_offset(tenor: str) -> pd.DateOffset:
import re
matcher = re.fullmatch('(\\d+)([dwmy])', tenor)
if not matcher:
raise ValueError('invalid tenor ' + tenor)
ab = matcher.group(2)
if ab == 'd':
name = 'days'
elif ab == 'w':
name = 'weeks'
elif ab == 'm':
name = 'months'
else:
assert ab == 'y'
name = 'years'
kwarg = {name: int(matcher.group(1))}
return pd.DateOffset(**kwarg)
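# Illustrative examples (added for clarity): _to_offset('3m') -> pd.DateOffset(months=3),
# _to_offset('2w') -> pd.DateOffset(weeks=2); anything outside the d/w/m/y pattern,
# e.g. '10b', raises ValueError('invalid tenor 10b').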
def _market_data_timed(q):
start = time.perf_counter()
df = GsDataApi.get_market_data(q)
_logger.debug('market data query ran in %.3f ms', (time.perf_counter() - start) * 1000)
return df
@plot_measure((AssetClass.FX, AssetClass.Equity), None, [MeasureDependency(
id_provider=cross_stored_direction_for_fx_vol, query_type=QueryType.IMPLIED_VOLATILITY)])
def skew(asset: Asset, tenor: str, strike_reference: SkewReference, distance: Real, *, location: str = 'NYC',
source: str = None, real_time: bool = False) -> Series:
"""
Difference in implied volatility of equidistant out-of-the-money put and call options.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level (for equities)
:param distance: distance from at-the-money option
:param location: location at which a price fixing has been taken (for FX assets)
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: skew curve
"""
if real_time:
raise MqValueError('real-time skew not supported')
if strike_reference in (SkewReference.DELTA, None):
b = 50
elif strike_reference == SkewReference.NORMALIZED:
b = 0
else:
b = 100
kwargs = {}
if strike_reference in (SkewReference.DELTA, None):
# using delta call strikes so X DP is represented as (100 - X) DC
q_strikes = [100 - distance, distance, b]
else:
q_strikes = [b - distance, b + distance, b]
asset_id = asset.get_marquee_id()
if asset.asset_class == AssetClass.FX:
asset_id = cross_stored_direction_for_fx_vol(asset_id)
q_strikes = _to_fx_strikes(q_strikes)
kwargs['location'] = location
column = 'deltaStrike' # should use SkewReference.DELTA for FX
else:
assert asset.asset_class == AssetClass.Equity
if not strike_reference:
raise MqTypeError('strike reference required for equities')
if strike_reference != SkewReference.NORMALIZED:
q_strikes = [x / 100 for x in q_strikes]
kwargs['strikeReference'] = strike_reference.value
column = 'relativeStrike'
kwargs[column] = q_strikes
_logger.debug('where tenor=%s and %s', tenor, kwargs)
where = FieldFilterMap(tenor=tenor, **kwargs)
q = GsDataApi.build_market_data_query([asset_id], QueryType.IMPLIED_VOLATILITY, where=where, source=source)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
curves = {k: v for k, v in df.groupby(column)}
if len(curves) < 3:
raise MqValueError('skew not available for given inputs')
series = [curves[qs]['impliedVolatility'] for qs in q_strikes]
return (series[0] - series[1]) / series[2]
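# Worked example of the calculation above (added for clarity): for an equity asset with
# strike_reference=SkewReference.DELTA and distance=25, q_strikes resolves to
# [0.75, 0.25, 0.5], so the returned series is
#   (implied_vol(25-delta put) - implied_vol(25-delta call)) / implied_vol(50-delta),
# i.e. the put/call vol spread normalised by the at-the-money level.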
@plot_measure((AssetClass.Equity, AssetClass.Commod, AssetClass.FX,), None,
[MeasureDependency(id_provider=cross_stored_direction_for_fx_vol,
query_type=QueryType.IMPLIED_VOLATILITY)])
def implied_volatility(asset: Asset, tenor: str, strike_reference: VolReference, relative_strike: Real = None, *,
source: str = None, real_time: bool = False) -> Series:
"""
Volatility of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied volatility curve
"""
if relative_strike is None and strike_reference != VolReference.DELTA_NEUTRAL:
raise MqValueError('Relative strike must be provided if your strike reference is not delta_neutral')
if asset.asset_class == AssetClass.FX:
if strike_reference == VolReference.DELTA_NEUTRAL:
delta_strike = 'DN'
elif strike_reference == VolReference.DELTA_CALL:
delta_strike = f'{relative_strike}DC'
elif strike_reference == VolReference.DELTA_PUT:
delta_strike = f'{relative_strike}DP'
elif strike_reference == VolReference.FORWARD:
if relative_strike == 100:
delta_strike = 'ATMF'
else:
raise MqValueError('Relative strike must be 100 for Forward strike reference')
elif strike_reference == VolReference.SPOT:
if relative_strike == 100:
delta_strike = 'ATMS'
else:
raise MqValueError('Relative strike must be 100 for Spot strike reference')
else:
raise MqValueError('strikeReference: ' + strike_reference.value + ' not supported for FX')
asset_id = cross_stored_direction_for_fx_vol(asset.get_marquee_id())
_logger.debug('where tenor=%s, deltaStrike=%s, location=NYC', tenor, delta_strike)
q = GsDataApi.build_market_data_query(
[asset_id],
QueryType.IMPLIED_VOLATILITY,
where=FieldFilterMap(tenor=tenor, deltaStrike=delta_strike, location='NYC'),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
else:
if strike_reference == VolReference.DELTA_NEUTRAL:
raise NotImplementedError('delta_neutral strike reference is not supported for equities.')
if strike_reference == VolReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike if strike_reference == VolReference.NORMALIZED else relative_strike / 100
ref_string = "delta" if strike_reference in (VolReference.DELTA_CALL,
VolReference.DELTA_PUT) else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, ref_string, relative_strike)
where = FieldFilterMap(tenor=tenor, strikeReference=ref_string, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.IMPLIED_VOLATILITY,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['impliedVolatility']
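# Illustrative mapping (added for clarity): for FX assets the (strike_reference,
# relative_strike) pair is translated into the deltaStrike string queried above, e.g.
#   (VolReference.DELTA_CALL, 25) -> '25DC', (VolReference.DELTA_PUT, 25) -> '25DP',
#   (VolReference.DELTA_NEUTRAL, None) -> 'DN',
#   (VolReference.FORWARD, 100) -> 'ATMF', (VolReference.SPOT, 100) -> 'ATMS'.
# For equities, delta puts are mirrored (25 DP -> relativeStrike 0.75) and strikes are
# expressed as fractions unless the reference is NORMALIZED.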
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.IMPLIED_CORRELATION])
def implied_correlation(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Correlation of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied correlation curve
"""
if real_time:
raise NotImplementedError('realtime implied_correlation not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.IMPLIED_CORRELATION, where=where, source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['impliedCorrelation']
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.AVERAGE_IMPLIED_VOLATILITY])
def average_implied_volatility(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Historic weighted average implied volatility for the underlying assets of an equity index.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: average implied volatility curve
"""
if real_time:
raise NotImplementedError('realtime average_implied_volatility not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.AVERAGE_IMPLIED_VOLATILITY,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['averageImpliedVolatility']
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.AVERAGE_IMPLIED_VARIANCE])
def average_implied_variance(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Historic weighted average implied variance for the underlying assets of an equity index.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: average implied variance curve
"""
if real_time:
raise NotImplementedError('realtime average_implied_variance not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.AVERAGE_IMPLIED_VARIANCE, where=where, source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['averageImpliedVariance']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,), [QueryType.SWAP_RATE])
def swap_rate(asset: Asset, tenor: str, benchmark_type: BenchmarkType = None, floating_index: str = None,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day Fixed-Floating interest rate swap (IRS) curves across major currencies.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param benchmark_type: benchmark type e.g. LIBOR
:param floating_index: floating index rate
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: swap rate curve
"""
if real_time:
raise NotImplementedError('realtime swap_rate not implemented')
currency = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
currency = Currency(currency)
# default benchmark types
if benchmark_type is None:
if currency == Currency.EUR:
benchmark_type = BenchmarkType.EURIBOR
elif currency == Currency.SEK:
benchmark_type = BenchmarkType.STIBOR
else:
benchmark_type = BenchmarkType.LIBOR
over_nights = [BenchmarkType.OIS]
# default floating index
if floating_index is None:
if benchmark_type in over_nights:
floating_index = '1d'
else:
if currency in [Currency.USD]:
floating_index = '3m'
elif currency in [Currency.GBP, Currency.EUR, Currency.CHF, Currency.SEK]:
floating_index = '6m'
mdapi_divider = " " if benchmark_type in over_nights else "-"
mdapi_floating_index = BenchmarkType.OIS.value if benchmark_type is BenchmarkType.OIS else floating_index
mdapi = currency.value + mdapi_divider + mdapi_floating_index
rate_mqid = GsAssetApi.map_identifiers(GsIdType.mdapi, GsIdType.id, [mdapi])[mdapi]
_logger.debug('where tenor=%s, floatingIndex=%s', tenor, floating_index)
q = GsDataApi.build_market_data_query(
[rate_mqid],
QueryType.SWAP_RATE,
where=FieldFilterMap(tenor=tenor),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['swapRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.SWAPTION_VOL)])
def swaption_vol(asset: Asset, expiration_tenor: str, termination_tenor: str, relative_strike: float,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for swaption vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: swaption implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime swaption_vol not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, tenor=%s, strike=%s', expiration_tenor, termination_tenor, relative_strike)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.SWAPTION_VOL,
where=FieldFilterMap(expiry=expiration_tenor, tenor=termination_tenor, strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['swaptionVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.ATM_FWD_RATE)])
def swaption_atm_fwd_rate(asset: Asset, expiration_tenor: str, termination_tenor: str, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day at-the-money forward rate for swaption vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: swaption at-the-money forward rate curve
"""
if real_time:
raise NotImplementedError('realtime swaption_atm_fwd_rate not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, tenor=%s', expiration_tenor, termination_tenor)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, tenor=termination_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['atmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.MIDCURVE_VOL)])
def midcurve_vol(asset: Asset, expiration_tenor: str, forward_tenor: str, termination_tenor: str,
relative_strike: float,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for midcurve vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param forward_tenor: relative date representation of swap's start date after option expiry e.g. 2y
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: midcurve implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime midcurve_vol not implemented')
_logger.debug('where expiry=%s, forwardTenor=%s, tenor=%s, strike=%s', expiration_tenor, forward_tenor,
termination_tenor, relative_strike)
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.MIDCURVE_VOL,
where=FieldFilterMap(expiry=expiration_tenor, forwardTenor=forward_tenor, tenor=termination_tenor,
strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['midcurveVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.MIDCURVE_ATM_FWD_RATE)])
def midcurve_atm_fwd_rate(asset: Asset, expiration_tenor: str, forward_tenor: str, termination_tenor: str,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day at-the-money forward rate for midcurve vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param forward_tenor: relative date representation of swap's start date after option expiry e.g. 2y
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: midcurve atm forward rate curve
"""
if real_time:
raise NotImplementedError('realtime midcurve_atm_fwd_rate not implemented')
q = GsDataApi.build_market_data_query(
[convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)],
QueryType.MIDCURVE_ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, forwardTenor=forward_tenor, tenor=termination_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['midcurveAtmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate, query_type=QueryType.CAP_FLOOR_VOL)])
def cap_floor_vol(asset: Asset, expiration_tenor: str, relative_strike: float, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for cap and floor vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: cap and floor implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime cap_floor_vol not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, strike=%s', expiration_tenor, relative_strike)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.CAP_FLOOR_VOL,
where=FieldFilterMap(expiry=expiration_tenor, strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['capFloorVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.CAP_FLOOR_ATM_FWD_RATE)])
def cap_floor_atm_fwd_rate(asset: Asset, expiration_tenor: str, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day at-the-money forward rate for cap and floor matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: cap and floor atm forward rate curve
"""
if real_time:
raise NotImplementedError('realtime cap_floor_atm_fwd_rate not implemented')
q = GsDataApi.build_market_data_query(
[convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)],
QueryType.CAP_FLOOR_ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['capFloorAtmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.SPREAD_OPTION_VOL)])
def spread_option_vol(asset: Asset, expiration_tenor: str, long_tenor: str, short_tenor: str, relative_strike: float,
*, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day implied normal volatility for spread option vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param long_tenor: relative date representation of the instrument's tenor date e.g. 1y
:param short_tenor: relative date representation of the instrument's tenor date e.g. 1y
:param relative_strike: strike level relative to at the money e.g. 10
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: spread option implied normal volatility curve
"""
if real_time:
raise NotImplementedError('realtime spread_option_vol not implemented')
rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
_logger.debug('where expiry=%s, longTenor=%s, shortTenor=%s, strike=%s', expiration_tenor, long_tenor, short_tenor,
relative_strike)
q = GsDataApi.build_market_data_query(
[rate_benchmark_mqid],
QueryType.SPREAD_OPTION_VOL,
where=FieldFilterMap(expiry=expiration_tenor, longTenor=long_tenor, shortTenor=short_tenor,
strike=relative_strike),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['spreadOptionVol']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_default_benchmark_rate,
query_type=QueryType.SPREAD_OPTION_ATM_FWD_RATE)])
def spread_option_atm_fwd_rate(asset: Asset, expiration_tenor: str, long_tenor: str, short_tenor: str,
*, source: str = None, real_time: bool = False) -> Series:
"""
    GS end-of-day at-the-money forward rate for spread option vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
    :param long_tenor: relative date representation of the tenor of the longer-dated underlying rate in the spread e.g. 10y
    :param short_tenor: relative date representation of the tenor of the shorter-dated underlying rate in the spread e.g. 2y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: spread option at-the-money forward rate curve
"""
if real_time:
raise NotImplementedError('realtime spread_option_atm_fwd_rate not implemented')
q = GsDataApi.build_market_data_query(
[convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)],
QueryType.SPREAD_OPTION_ATM_FWD_RATE,
where=FieldFilterMap(expiry=expiration_tenor, longTenor=long_tenor, shortTenor=short_tenor, strike=0),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['spreadOptionAtmFwdRate']
@plot_measure((AssetClass.Cash,), (AssetType.Currency,),
[MeasureDependency(id_provider=currency_to_inflation_benchmark_rate,
query_type=QueryType.INFLATION_SWAP_RATE)])
def zc_inflation_swap_rate(asset: Asset, termination_tenor: str, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day zero coupon inflation swap break-even rate.
:param asset: asset object loaded from security master
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: zero coupon inflation swap break-even rate curve
"""
if real_time:
raise NotImplementedError('realtime zc_inflation_swap_rate not implemented')
infl_rate_benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.INFLATION_BENCHMARK_RATE)
_logger.debug('where tenor=%s', termination_tenor)
q = GsDataApi.build_market_data_query(
[infl_rate_benchmark_mqid],
QueryType.INFLATION_SWAP_RATE,
where=FieldFilterMap(tenor=termination_tenor),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['inflationSwapRate']
@plot_measure((AssetClass.FX,), (AssetType.Cross,),
[MeasureDependency(id_provider=cross_to_basis, query_type=QueryType.BASIS)])
def basis(asset: Asset, termination_tenor: str, *, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day cross-currency basis swap spread.
:param asset: asset object loaded from security master
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: cross-currency basis swap spread curve
"""
if real_time:
raise NotImplementedError('realtime basis not implemented')
basis_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.CROSS_CURRENCY_BASIS)
_logger.debug('where tenor=%s', termination_tenor)
q = GsDataApi.build_market_data_query(
[basis_mqid],
QueryType.BASIS,
where=FieldFilterMap(tenor=termination_tenor),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['basis']
@plot_measure((AssetClass.FX,), (AssetType.Cross,), [MeasureDependency(
id_provider=cross_to_usd_based_cross, query_type=QueryType.FORECAST)])
def forecast(asset: Asset, forecast_horizon: str, *, source: str = None, real_time: bool = False) -> Series:
"""
GS end-of-day FX forecasts made by Global Investment Research (GIR) macro analysts.
:param asset: asset object loaded from security master
:param forecast_horizon: relative period of time to forecast e.g. 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: FX forecast curve
"""
if real_time:
raise NotImplementedError('realtime forecast not implemented')
cross_mqid = asset.get_marquee_id()
usd_based_cross_mqid = cross_to_usd_based_cross(cross_mqid)
horizon = '12m' if forecast_horizon == '1y' else forecast_horizon
q = GsDataApi.build_market_data_query(
[usd_based_cross_mqid],
QueryType.FORECAST,
where=FieldFilterMap(relativePeriod=horizon),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
series = Series() if df.empty else df['forecast']
if cross_mqid != usd_based_cross_mqid:
series = 1 / series
return series
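# Sketch of the inversion step above (illustrative numbers only): forecasts are
# stored against the USD-based cross, so quotes for the reciprocal cross are
# obtained by taking 1 / forecast.
def _example_forecast_inversion() -> Series:
    usd_based = Series([1.10, 1.12])  # e.g. forecasts for the USD-based cross
    return 1 / usd_based              # corresponding forecasts for the inverse cross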
@plot_measure((AssetClass.Equity, AssetClass.Commod), None, [QueryType.IMPLIED_VOLATILITY])
def vol_term(asset: Asset, strike_reference: SkewReference, relative_strike: Real,
pricing_date: Optional[GENERIC_DATE] = None, *, source: str = None, real_time: bool = False) -> pd.Series:
"""
Volatility term structure. Uses most recent date available if pricing_date is not provided.
:param asset: asset object loaded from security master
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param pricing_date: YYYY-MM-DD or relative days before today e.g. 1d, 1m, 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: volatility term structure
"""
if real_time:
        raise NotImplementedError('realtime vol_term not implemented')  # TODO
if strike_reference != SkewReference.NORMALIZED:
relative_strike /= 100
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
_logger.debug('where strikeReference=%s, relativeStrike=%s', strike_reference.value, relative_strike)
where = FieldFilterMap(strikeReference=strike_reference.value, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.IMPLIED_VOLATILITY, where=where,
source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
cbd = _get_custom_bd(asset.exchange)
df = df.assign(expirationDate=df.index + df['tenor'].map(_to_offset) + cbd - cbd)
df = df.set_index('expirationDate')
df.sort_index(inplace=True)
df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
return df['impliedVolatility'] if not df.empty else pd.Series()
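# Sketch (pandas only, illustrative dates) of the "+ cbd - cbd" idiom used in
# vol_term above and fwd_term below: adding then subtracting a CustomBusinessDay
# leaves business days unchanged but rolls a weekend/holiday date back to the
# previous business day on the calendar.
def _example_roll_to_business_day():
    from pandas.tseries.offsets import CustomBusinessDay
    cbd = CustomBusinessDay()  # plain weekday calendar here; _get_custom_bd presumably wires in the exchange calendar
    on_bd = pd.Timestamp('2019-08-07') + cbd - cbd   # Wednesday -> unchanged
    off_bd = pd.Timestamp('2019-08-10') + cbd - cbd  # Saturday -> Friday 2019-08-09
    return on_bd, off_bd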
@plot_measure((AssetClass.Equity,), None, [QueryType.IMPLIED_VOLATILITY])
def vol_smile(asset: Asset, tenor: str, strike_reference: VolSmileReference,
pricing_date: Optional[GENERIC_DATE] = None,
*, source: str = None, real_time: bool = False) -> Series:
"""
Volatility smile of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param pricing_date: YYYY-MM-DD or relative days before today e.g. 1d, 1m, 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied volatility smile
"""
if real_time:
raise NotImplementedError('realtime vol_smile not implemented')
mqid = asset.get_marquee_id()
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
q = GsDataApi.build_market_data_query(
[mqid],
QueryType.IMPLIED_VOLATILITY,
where=FieldFilterMap(tenor=tenor, strikeReference=strike_reference.value),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
            return Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
vols = df['impliedVolatility'].values
strikes = df['relativeStrike'].values
return Series(vols, index=strikes)
@plot_measure((AssetClass.Equity, AssetClass.Commod), None, [QueryType.FORWARD])
def fwd_term(asset: Asset, pricing_date: Optional[GENERIC_DATE] = None, *, source: str = None,
real_time: bool = False) -> pd.Series:
"""
Forward term structure. Uses most recent date available if pricing_date is not provided.
:param asset: asset object loaded from security master
:param pricing_date: YYYY-MM-DD or relative days before today e.g. 1d, 1m, 1y
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: forward term structure
"""
if real_time:
raise NotImplementedError('realtime forward term not implemented') # TODO
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
where = FieldFilterMap(strikeReference='forward', relativeStrike=1)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.FORWARD, where=where, source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
cbd = _get_custom_bd(asset.exchange)
df.loc[:, 'expirationDate'] = df.index + df['tenor'].map(_to_offset) + cbd - cbd
df = df.set_index('expirationDate')
df.sort_index(inplace=True)
df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
return df['forward'] if not df.empty else pd.Series()
@cachetools.func.ttl_cache() # fine as long as availability is not different between users
def _var_swap_tenors(asset: Asset):
from gs_quant.session import GsSession
aid = asset.get_marquee_id()
body = GsSession.current._get(f"/data/markets/{aid}/availability")
for r in body['data']:
if r['dataField'] == Fields.VAR_SWAP.value:
for f in r['filteredFields']:
if f['field'] == Fields.TENOR.value:
return f['values']
raise MqValueError("var swap is not available for " + aid)
def _tenor_to_month(relative_date: str) -> int:
matcher = re.fullmatch('([1-9]\\d*)([my])', relative_date)
if matcher:
mag = int(matcher.group(1))
return mag if matcher.group(2) == 'm' else mag * 12
raise MqValueError('invalid input: relative date must be in months or years')
def _month_to_tenor(months: int) -> str:
return f'{months//12}y' if months % 12 == 0 else f'{months}m'
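# Worked sketch of the tenor arithmetic used by var_term and var_swap below: a
# '1y' swap starting forward in '6m' spans 18 months, i.e. it is bracketed by
# the '6m' and '18m' spot tenors.
def _example_forward_tenor_arithmetic():
    y = _tenor_to_month('6m')            # 6
    z = _tenor_to_month('1y') + y        # 18
    return _month_to_tenor(y), _month_to_tenor(z)   # ('6m', '18m')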
@plot_measure((AssetClass.Equity, AssetClass.Commod), None, [QueryType.VAR_SWAP])
def var_term(asset: Asset, pricing_date: Optional[str] = None, forward_start_date: Optional[str] = None,
*, source: str = None, real_time: bool = False) -> pd.Series:
"""
Variance swap term structure. Uses most recent date available if pricing_date is not provided.
:param asset: asset object loaded from security master
:param pricing_date: relative days before today e.g. 3d, 2m, 1y
:param forward_start_date: forward start date e.g. 2m, 1y; defaults to none
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: variance swap term structure
"""
if not (pricing_date is None or isinstance(pricing_date, str)):
raise MqTypeError('pricing_date should be a relative date')
start, end = _range_from_pricing_date(asset.exchange, pricing_date)
with DataContext(start, end):
if forward_start_date:
tenors = _var_swap_tenors(asset)
sub_frames = []
for t in tenors:
diff = _tenor_to_month(t) - _tenor_to_month(forward_start_date)
if diff < 1:
continue
t1 = _month_to_tenor(diff)
c = var_swap(asset, t1, forward_start_date, source=source, real_time=real_time).to_frame()
if not c.empty:
c['tenor'] = t1
sub_frames.append(c)
df = pd.concat(sub_frames)
else:
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.VAR_SWAP,
source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
latest = df.index.max()
_logger.info('selected pricing date %s', latest)
df = df.loc[latest]
cbd = _get_custom_bd(asset.exchange)
df.loc[:, Fields.EXPIRATION_DATE.value] = df.index + df[Fields.TENOR.value].map(_to_offset) + cbd - cbd
df = df.set_index(Fields.EXPIRATION_DATE.value)
df.sort_index(inplace=True)
df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
return df[Fields.VAR_SWAP.value] if not df.empty else pd.Series()
@plot_measure((AssetClass.Equity, AssetClass.Commod,), None, [QueryType.VAR_SWAP])
def var_swap(asset: Asset, tenor: str, forward_start_date: Optional[str] = None,
*, source: str = None, real_time: bool = False) -> Series:
"""
Strike such that the price of an uncapped variance swap on the underlying index is zero at inception. If
forward start date is provided, then the result is a forward starting variance swap.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param forward_start_date: forward start date e.g. 2m, 1y; defaults to none
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
    :return: variance swap strike curve
"""
if forward_start_date is None:
_logger.debug('where tenor=%s', tenor)
where = FieldFilterMap(tenor=tenor)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.VAR_SWAP,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df[Fields.VAR_SWAP.value]
else:
if not isinstance(forward_start_date, str):
raise MqTypeError('forward_start_date must be a relative date')
x = _tenor_to_month(tenor)
y = _tenor_to_month(forward_start_date)
z = x + y
yt = _month_to_tenor(y)
zt = _month_to_tenor(z)
tenors = _var_swap_tenors(asset)
if yt not in tenors or zt not in tenors:
            return Series()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 21:18:35 2019
@author: sarah
"""
import os
import numpy as np
import jsonpickle as pickle
import jsonpickle.ext.numpy as jsonpickle_numpy
import json
from misc import *
import scipy as sc
import scipy.optimize  # curve_fit is accessed as sc.optimize below; scipy does not import submodules implicitly
import matplotlib.pylab as plt
import scipy.special as scs
import seaborn as sns
from scipy import stats
import sklearn.linear_model as lm
import statsmodels.api as sm
import pandas as pd
#plt.style.use('seaborn-whitegrid')
#sns.set_style("whitegrid", {"axes.edgecolor": "0.15"})#, "axes.spines.top": "False", "axes.spines.right": "False"})
sns.set_style("ticks")
save_figs = False
folder = "data"
fit_functions = [sigmoid, exponential]
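# The curve fits below assume sigmoid(t, a, b, t0, c) and exponential(t, b, t0, c)
# signatures from misc (star-imported above); in the stored fit parameters,
# index 2 is the switch trial t0 in both cases. A signature-compatible stand-in
# (hypothetical, NOT the definition in misc) would look like:
def _sketch_sigmoid(t, a, b, t0, c):
    # amplitude a, steepness b, switch trial t0, baseline offset c
    return a / (1. + np.exp(-b * (np.asarray(t) - t0))) + c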
def analyze_run(fname, save=False):
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
worlds = pickle.decode(data)
w = worlds[0]
T = w.T
trials = w.trials
t_trials = trials - 100
Rho = w.environment.Rho
repetitions = len(worlds)
results_won = np.zeros((repetitions,3))
results_chosen = np.zeros((repetitions,3))
results_stayed = np.zeros(repetitions)
results_context = np.zeros((repetitions,2))
results_c_param = np.zeros((repetitions,4))
results_c_param_type = np.zeros(repetitions, dtype=int)
results_p_param = np.zeros((repetitions,4))
results_p_param_type = np.zeros(repetitions, dtype=int)
entropy_c = np.zeros(repetitions)
entropy_p = np.zeros(repetitions)
entropy_l = np.zeros(repetitions)
times = np.arange(0.+1,trials+1,1.)
best_options = np.amax(np.argmax(Rho, axis=1), axis=1)-1
for i in range(repetitions):
#print(i)
w = worlds[i]
results_won[i,0] = (w.rewards[trials//4:trials//2] >0).sum()/(trials//4*(T-1))
results_won[i,1] = (w.rewards[3*trials//4:] >0).sum()/(trials//4*(T-1))
stayed = np.array([((w.actions[i,0] - w.actions[i+1,0])==0) for i in range(trials-1)])
results_stayed[i] = stayed.sum()/(trials * (T-1))
results_chosen[i,0] = np.array([w.actions[i,j] == best_options[i] for i in range(trials-100) for j in range(T-1)]).sum()/((trials-100)*(T-1))
results_chosen[i,1] = np.array([w.actions[i,j] == best_options[i] for i in range(trials-100,trials-100+15) for j in range(T-1)]).sum()/((15)*(T-1))
results_chosen[i,2] = np.array([w.actions[i,j] == best_options[i] for i in range(trials-100+15,trials) for j in range(T-1)]).sum()/((85)*(T-1))
results_context[i,0] = np.array([w.agent.posterior_context[i,j,0] for i in range(trials//2) for j in range(T-1)]).sum()/(trials//2*(T-1))
results_context[i,1] = np.array([w.agent.posterior_context[i,j,0] for i in range(trials//2,trials) for j in range(T-1)]).sum()/(trials//2*(T-1))
if T > 2:
stayed = np.array([((w.actions[i,j] - w.actions[i,j+1])==0) for i in range(trials-1) for j in range(T-2)])
results_stayed[i] += stayed.sum()/(trials * (T-1))
posterior_context = w.agent.posterior_context[:,1,:]
entropy_c[i] = -(posterior_context * ln(posterior_context)).sum(axis=1).mean()
posterior_pol = (w.agent.posterior_policies[:,0,0:]*w.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
entropy_p[i] = -(posterior_pol * ln(posterior_pol)).sum(axis=1).mean()
likelihood = (w.agent.likelihood[:,0,0:,:]*w.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
likelihood /= likelihood.sum(axis=1)[:,np.newaxis]
entropy_l[i] = -(likelihood * ln(likelihood)).sum(axis=1).mean()
try:
posterior_context = w.agent.posterior_context[:,1,1]#bn.move_mean(w.agent.posterior_context[:,1,1],10,1)
results_c_param[i], pcov = sc.optimize.curve_fit(sigmoid, times, posterior_context, p0=[1.,1.,t_trials,0.])#, sigma=[0.5]*200)#*40+[2]*100+[0.5]*50)
results_c_param_type[i] = 0
except RuntimeError:
try:
results_c_param[i,1:], pcov = sc.optimize.curve_fit(exponential, times, posterior_context, p0=[1.,t_trials,0.])#, sigma=[0.5]*40+[2]*100+[0.5]*50)
results_c_param[i,0] = 1.
results_c_param_type[i] = 1
except RuntimeError:
results_c_param[i] = np.nan
try:
posterior_pol = (w.agent.posterior_policies[:,0,1]*w.agent.posterior_context[:,0]).sum(axis=1)[10:]
results_p_param[i], pcov = sc.optimize.curve_fit(sigmoid, times[10:], posterior_pol, p0=[1.,1.,t_trials,0.])#, sigma=[0.5]*40+[2]*100+[0.5]*50)
results_p_param_type[i] = 0
except RuntimeError:
try:
results_p_param[i,1:], pcov = sc.optimize.curve_fit(exponential, times[10:], posterior_pol, p0=[1.,t_trials,0.])#, sigma=[0.5]*40+[2]*100+[0.5]*50)
results_p_param[i,0] = 1.
results_p_param_type[i] = 1
except RuntimeError:
results_p_param[i] = np.nan
if results_c_param[i,0] < 0.1 or results_c_param[i,1] < 0.0 or results_c_param[i,2] < 15 or results_c_param[i,2] > trials:
results_c_param[i] = [0,0,trials+1,0]
if results_p_param[i,0] < 0.1 or results_p_param[i,1] < 0.0 or results_p_param[i,2] < 15 or results_p_param[i,2] > trials:
results_p_param[i] = [0,0,trials+1,0]
if save:
results = [results_won, results_chosen, results_context, \
results_c_param, results_c_param_type, entropy_c, \
results_p_param, results_p_param_type, entropy_p, entropy_l]
analysis_name = fname[:-5] + "_ananlysis.json"
jsonpickle_numpy.register_handlers()
pickled = pickle.encode(results)
with open(analysis_name, 'w') as outfile:
json.dump(pickled, outfile)
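# Usage sketch (hypothetical run file, following the naming convention used in
# calculate_analyses below): fit switch-time curves for every repetition in a
# run file and write the "<run>_ananlysis.json" summary next to it.
def _example_analyze_one_run():
    analyze_run(os.path.join(folder, "h1_t99_p90_train100.json"), save=True)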
def analyze_check(fname, reference_name, check=False, naive=False, save=False):
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
worlds = pickle.decode(data)
w = worlds[0]
T = w.T
trials = w.trials
if check and naive:
trials = trials//2
Rho = w.environment.Rho
with open(reference_name, 'r') as infile:
data = json.load(infile)
results_won_t, results_chosen_t, results_context_t, \
results_c_param_t, results_c_param_type_t, entropy_c_t, \
results_p_param_t, results_p_param_type_t, entropy_p_t, entropy_l_t = pickle.decode(data)
repetitions = len(worlds)
print(repetitions)
results_won = np.zeros((repetitions,3))
results_chosen = np.zeros((repetitions))
results_stayed = np.zeros(repetitions)
results_context = np.zeros((repetitions))
results_c_param = np.zeros((repetitions,4))
results_c_param_type = np.zeros(repetitions, dtype=int)
results_p_param = np.zeros((repetitions,4))
results_p_param_type = np.zeros(repetitions, dtype=int)
entropy_c = np.zeros(repetitions)
entropy_p = np.zeros(repetitions)
entropy_l = np.zeros(repetitions)
times = np.arange(0.+1,trials+1,1.)
best_options = np.amax(np.argmax(Rho, axis=1), axis=1)-1
for i in range(repetitions):
#print(i)
w = worlds[i]
results_won[i,0] = (w.rewards[:] >0).sum()/(trials*(T-1))
results_won[i,1] = (w.rewards[:] >0).sum()/(trials*(T-1))
stayed = np.array([((w.actions[i,0] - w.actions[i+1,0])==0) for i in range(trials-1)])
results_stayed[i] = stayed.sum()/(trials * (T-1))
results_chosen[i] = np.array([w.actions[i,j] == best_options[i] for i in range(trials) for j in range(T-1)]).sum()/(trials*(T-1))
results_context[i] = np.array([w.agent.posterior_context[i,j,0] for i in range(trials) for j in range(T-1)]).sum()/(trials*(T-1))
if T > 2:
stayed = np.array([((w.actions[i,j] - w.actions[i,j+1])==0) for i in range(trials-1) for j in range(T-2)])
results_stayed[i] += stayed.sum()/(trials * (T-1))
posterior_context = w.agent.posterior_context[:,1,:]
entropy_c[i] = -(posterior_context * ln(posterior_context)).sum(axis=1).mean()
posterior_pol = (w.agent.posterior_policies[:,0,0:]*w.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
entropy_p[i] = -(posterior_pol * ln(posterior_pol)).sum(axis=1).mean()
likelihood = (w.agent.likelihood[:,0,0:,:]*w.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
likelihood /= likelihood.sum(axis=1)[:,np.newaxis]
entropy_l[i] = -(likelihood * ln(likelihood)).sum(axis=1).mean()
threshold_c = fit_functions[results_c_param_type_t[i]](0, *results_c_param_t[i,results_c_param_type_t[i]:])
switch_time = results_c_param_t[i,2]
if threshold_c < 0.5 and switch_time <=200:
posterior_context = w.agent.posterior_context[:,1,1]#bn.move_mean(w.agent.posterior_context[:,1,1],10,1)
if threshold_c < 0.001:
threshold_c = 0.001
time = np.where(posterior_context[:trials]<=threshold_c)[0]
if len(time)>0:
results_c_param[i,2] = time[0]
else:
results_c_param[i,2] = 101
else:
results_c_param[i,2] = np.nan
threshold_p = fit_functions[results_p_param_type_t[i]](0, *results_p_param_t[i,results_p_param_type_t[i]:])
switch_time = results_p_param_t[i,2]
if threshold_p < 0.5 and switch_time <=200:
posterior_pol = (w.agent.posterior_policies[:,0,1]*w.agent.posterior_context[:,0]).sum(axis=1)
if threshold_p < 0.001:
threshold_p = 0.001
time = np.where(posterior_pol[:trials]<=threshold_p)[0]
if len(time)>0:
results_p_param[i,2] = time[0]
else:
results_p_param[i,2] = 101
else:
results_p_param[i,2] = np.nan
if save:
results = [results_won, results_chosen, results_context, \
results_c_param, results_c_param_type, entropy_c, \
results_p_param, results_p_param_type, entropy_p, entropy_l]
analysis_name = fname[:-5] + "_ananlysis_check.json"
jsonpickle_numpy.register_handlers()
pickled = pickle.encode(results)
with open(analysis_name, 'w') as outfile:
json.dump(pickled, outfile)
def plot_beliefs(fname, plot_context=True, plot_policies=True, plot_actions=True, plot_prior_pol=True, fit_params=None, save_num=-1):
if fit_params is not None:
results_c_param, results_c_param_type, results_p_param, results_p_param_type = fit_params
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
worlds = pickle.decode(data)
w = worlds[0]
T = w.T
trials = w.trials
Rho = w.environment.Rho
repetitions = len(worlds)
times = np.arange(0.+1,trials+1,1.)
arm_cols = ['#007ecdff','#0000b7ff']
for i in range(repetitions):
print(i)
w = worlds[i]
if plot_policies:
plt.figure(figsize=(10,5))
for k in range(1,w.agent.nh):
plt.plot(w.environment.Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=3)
for t in range(w.agent.T-1):
plt.plot((w.agent.posterior_policies[:,t,1]* w.agent.posterior_context[:,t]).sum(axis=1), ".", label="action", color='darkorange')
if fit_params is not None:
print(results_p_param[i])
fct = fit_functions[results_p_param_type[i]]
param = results_p_param[i,results_p_param_type[i]:]
plt.plot(fct(times, *param))
plt.ylim([-0.1,1.1])
lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel("trial", fontsize=20)
plt.ylabel("reward probabilities", fontsize=20)
ax = plt.gca().twinx()
ax.set_ylim([-0.1,1.1])
ax.set_yticks([0,1])
ax.set_yticklabels(["$a_{1}$","$a_{2}$"],fontsize=18)
ax.yaxis.set_ticks_position('right')
if save_num == i:
plt.savefig(os.path.join(folder,fname[:-5]+"_run"+str(i)+"_context.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,fname[:-5]+"_run"+str(i)+"_context.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
# if plot_actions:
# plt.figure(figsize=(10,5))
# for k in range(1,w.agent.nh):
# plt.plot(w.environment.Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=3)
# for t in range(w.agent.T-1):
# plt.plot((w.actions[:,t]-1), ".", label="action", color='darkorange')
# plt.ylim([-0.1,1.1])
# lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
# plt.yticks(fontsize=18)
# plt.xticks(fontsize=18)
# plt.xlabel("trial", fontsize=20)
# plt.ylabel("reward probabilities", fontsize=20)
# ax = plt.gca().twinx()
# ax.set_ylim([-0.1,1.1])
# ax.set_yticks([0,1])
# ax.set_yticklabels(["$a_{1}$","$a_{2}$"],fontsize=18)
# ax.yaxis.set_ticks_position('right')
# plt.show()
#
# if plot_prior_pol:
# plt.figure(figsize=(10,5))
# for k in range(1,w.agent.nh):
# plt.plot(w.environment.Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=3)
# prior_policies = np.exp(scs.digamma(w.agent.posterior_dirichlet_pol) - scs.digamma(w.agent.posterior_dirichlet_pol.sum(axis=1))[:,np.newaxis,:])
# prior_policies /= prior_policies.sum(axis=1)[:,np.newaxis,:]
# plt.plot((prior_policies[:,2]), ".", label="action 2", color='darkorange')
# plt.plot((prior_policies[:,1]), ".", label="action 1", color='red')
# plt.ylim([-0.1,1.1])
# lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
# plt.yticks(fontsize=18)
# plt.xticks(fontsize=18)
# plt.xlabel("trial", fontsize=20)
# plt.ylabel("reward probabilities", fontsize=20)
# ax = plt.gca().twinx()
# ax.set_ylim([-0.1,1.1])
# ax.set_yticks([0,1])
# ax.set_yticklabels(["$a_{1}$","$a_{2}$"],fontsize=18)
# ax.yaxis.set_ticks_position('right')
# plt.show()
if plot_context:
plt.figure(figsize=(10,5))
for k in range(1,w.agent.nh):
plt.plot(w.environment.Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=3)
for t in range(1,w.agent.T):
plt.plot(w.agent.posterior_context[:,t,1], ".", label="context", color='deeppink')
if fit_params is not None:
print(results_c_param[i])
fct = fit_functions[results_c_param_type[i]]
param = results_c_param[i,results_c_param_type[i]:]
plt.plot(fct(times, *param))
plt.ylim([-0.1,1.1])
lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel("trial", fontsize=20)
plt.ylabel("reward probabilities", fontsize=20)
ax = plt.gca().twinx()
ax.set_ylim([-0.1,1.1])
ax.set_yticks([0,1])
ax.set_yticklabels(["$c_{1}$","$c_{2}$"],fontsize=18)
ax.yaxis.set_ticks_position('right')
if save_num == i:
plt.savefig(os.path.join(folder,fname[:-5]+"_run"+str(i)+"_action.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,fname[:-5]+"_run"+str(i)+"_action.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
def save_run_plots(fnames, save_nums, tendencies, prefix="", check=False, plot_context=True, plot_policies=True, plot_prior=True, plot_likelihood=True, plot_actions=True, fit_params=None):
w_runs = []
w_checks = []
for i,f in enumerate(fnames):
jsonpickle_numpy.register_handlers()
fname = os.path.join(folder, f)
with open(fname, 'r') as infile:
data = json.load(infile)
worlds = pickle.decode(data)
w_runs.append(worlds[save_nums[i]])
if check:
check_name = 'check_'+f
fname = os.path.join(folder, check_name)
with open(fname, 'r') as infile:
data = json.load(infile)
worlds = pickle.decode(data)
w_checks.append(worlds[save_nums[i]])
if check:
name_prefix = 'check_'
else:
name_prefix = ''
arm_cols = ['#007ecdff','#0000b7ff']
action_cols = ['#cc6600']#['#993d00', '#ffa366']#['#993d00', '#ff6600', '#ffa366']
context_cols = ['#ff1493']#['#99004d', '#ff66b3']#['#99004d', '#ff0080', '#ff66b3']
for i in range(len(w_runs)):
w = w_runs[i]
trials = w.trials
Rho = w.environment.Rho
times = np.arange(0.+1,trials+1,1.)
actions = w.actions[:,0]
post_pol = (w.agent.posterior_policies[:,0,:,:]* w.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
prior_pol = np.exp(scs.digamma(w.agent.posterior_dirichlet_pol) - scs.digamma(w.agent.posterior_dirichlet_pol.sum(axis=1))[:,np.newaxis,:])
prior_pol /= prior_pol.sum(axis=1)[:,np.newaxis,:]
marginal_prior = (prior_pol[:,:,:] * w.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
likelihood = (w.agent.likelihood[:,0,:,:]* w.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
likelihood /= likelihood.sum(axis=1)[:,np.newaxis]
posterior_context = w.agent.posterior_context[:,1,1]
if check:
w_check = w_checks[i]
check_trials = w_check.trials
Rho = np.append(Rho, w_check.environment.Rho, axis=0)
times = np.arange(1,times[-1]+check_trials+1,1.)
actions = np.append(actions, w_check.actions[:,0])
post_pol_check = (w_check.agent.posterior_policies[:,0,:,:]* w_check.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
post_pol = np.append(post_pol, post_pol_check, axis=0)
prior_pol = np.exp(scs.digamma(w_check.agent.posterior_dirichlet_pol) - scs.digamma(w_check.agent.posterior_dirichlet_pol.sum(axis=1))[:,np.newaxis,:])
prior_pol /= prior_pol.sum(axis=1)[:,np.newaxis,:]
marginal_prior_check = (prior_pol[:,:,:] * w_check.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
marginal_prior = np.append(marginal_prior, marginal_prior_check, axis=0)
likelihood_check = (w_check.agent.likelihood[:,0,:,:]* w_check.agent.posterior_context[:,0,np.newaxis,:]).sum(axis=-1)
likelihood_check /= likelihood_check.sum(axis=1)[:,np.newaxis]
likelihood = np.append(likelihood, likelihood_check, axis=0)
posterior_context = np.append(posterior_context, w_check.agent.posterior_context[:,1,1])
with sns.axes_style("ticks"):
if plot_actions:
plt.figure(figsize=(10,5))
for k in range(1,w.agent.nh):
plt.plot(times, Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=4, alpha=0.5)
plt.plot(actions, ".", label="action", color=action_cols[0], ms=10)
plt.ylim([-0.01,1.01])
plt.xlim([0,times[-1]])
plt.yticks([0.0,0.5,1.0])
lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel("trial", fontsize=20)
plt.ylabel("probability", fontsize=20)
ax = plt.gca().twinx()
ax.set_ylim([-0.01,1.01])
ax.set_yticks([0,1])
ax.set_yticklabels(["$a_{1}$","$a_{2}$"],fontsize=18)
ax.yaxis.set_ticks_position('right')
ax.set_ylabel("action", fontsize=22, rotation=270)
plt.title("chosen actions", fontsize=22)
plt.savefig(os.path.join(folder,name_prefix+prefix+"_actions.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,name_prefix+prefix+"_actions.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
if plot_policies:
plt.figure(figsize=(10,5))
for k in range(1,w.agent.nh):
plt.plot(times, Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=4, alpha=0.5)
plt.plot(times, post_pol[:,1], ".", label="h="+tendencies[i], color=action_cols[0], ms=10)
if fit_params is not None:
results_c_param, results_c_param_type, results_p_param, results_p_param_type = fit_params[i]
fct = fit_functions[results_p_param_type]
param = results_p_param[results_p_param_type:]
plt.plot(fct(times, *param), color=action_cols[0], linewidth=3)
print("action switch time", round(results_p_param[2]))
plt.ylim([-0.01,1.01])
plt.xlim([0,times[-1]])
plt.yticks([0.0,0.5,1.0])
lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel("trial", fontsize=20)
plt.ylabel("probability", fontsize=20)
ax = plt.gca().twinx()
ax.set_ylim([-0.01,1.01])
ax.set_yticks([0,1])
ax.set_yticklabels(["$a_{1}$","$a_{2}$"],fontsize=18)
ax.yaxis.set_ticks_position('right')
ax.set_ylabel("action", fontsize=22, rotation=270)
plt.title("posterior over actions", fontsize=22)
plt.savefig(os.path.join(folder,name_prefix+prefix+"_posterior_actions.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,name_prefix+prefix+"_posterior_actions.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
if plot_prior:
plt.figure(figsize=(10,5))
for k in range(1,w.agent.nh):
plt.plot(times, Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=4, alpha=0.5)
plt.plot(times, marginal_prior[:,1], ".", label="h="+tendencies[i], color=action_cols[0], ms=10)
# if fit_params is not None:
# results_c_param, results_c_param_type, results_p_param, results_p_param_type = fit_params[i]
# fct = fit_functions[results_p_param_type]
# param = results_p_param[results_p_param_type:]
# plt.plot(fct(times, *param), color=action_cols[i])
plt.ylim([-0.01,1.01])
plt.xlim([0,times[-1]])
plt.yticks([0.0,0.5,1.0])
lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel("trial", fontsize=20)
plt.ylabel("probability", fontsize=20)
ax = plt.gca().twinx()
ax.set_ylim([-0.01,1.01])
ax.set_yticks([0,1])
ax.set_yticklabels(["$a_{1}$","$a_{2}$"],fontsize=18)
ax.yaxis.set_ticks_position('right')
ax.set_ylabel("action", fontsize=22, rotation=270)
plt.title("prior over actions", fontsize=22)
plt.savefig(os.path.join(folder,name_prefix+prefix+"_prior_actions.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,name_prefix+prefix+"_prior_actions.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
if plot_likelihood:
plt.figure(figsize=(10,5))
for k in range(1,w.agent.nh):
plt.plot(times, Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=4, alpha=0.5)
plt.plot(times, likelihood[:,1], ".", label="h="+tendencies[i], color=action_cols[0], ms=10)
# if fit_params is not None:
# results_c_param, results_c_param_type, results_p_param, results_p_param_type = fit_params[i]
# fct = fit_functions[results_p_param_type]
# param = results_p_param[results_p_param_type:]
# plt.plot(fct(times, *param), color=action_cols[i])
plt.ylim([-0.01,1.01])
plt.xlim([0,times[-1]])
plt.yticks([0.0,0.5,1.0])
lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel("trial", fontsize=20)
plt.ylabel("probability", fontsize=20)
ax = plt.gca().twinx()
ax.set_ylim([-0.01,1.01])
ax.set_yticks([0,1])
ax.set_yticklabels(["$a_{1}$","$a_{2}$"],fontsize=18)
ax.yaxis.set_ticks_position('right')
ax.set_ylabel("action", fontsize=22, rotation=270)
plt.title("likelihood over actions", fontsize=22)
plt.savefig(os.path.join(folder,name_prefix+prefix+"_likelihood_actions.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,name_prefix+prefix+"_likelihood_actions.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
if plot_context:
plt.figure(figsize=(10,5))
for k in range(1,w.agent.nh):
plt.plot(times, Rho[:,k,k], label="lever "+str(k), c=arm_cols[k-1], linewidth=4, alpha=0.5)
plt.plot(times, posterior_context, ".", label="h="+tendencies[i], color=context_cols[0], ms=10)
if fit_params is not None:
results_c_param, results_c_param_type, results_p_param, results_p_param_type = fit_params[i]
fct = fit_functions[results_c_param_type]
param = results_c_param[results_c_param_type:]
plt.plot(fct(times, *param), color=context_cols[0], linewidth=3)
print("context switch time", round(results_c_param[2]))
plt.ylim([-0.01,1.01])
plt.xlim([0,times[-1]])
plt.yticks([0.0,0.5,1.0])
lgd = plt.legend(fontsize=16, bbox_to_anchor=(1.04,1), loc="upper left", ncol=1) #bbox_to_anchor=(0, 1.02, 1, 0.2), mode="expand"
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel("trial", fontsize=20)
plt.ylabel("probability", fontsize=20)
ax = plt.gca().twinx()
ax.set_ylim([-0.01,1.01])
ax.set_yticks([0,1])
ax.set_yticklabels(["$c_{1}$","$c_{2}$"],fontsize=18)
ax.yaxis.set_ticks_position('right')
ax.set_ylabel("context", fontsize=22, rotation=270)
plt.title("posterior over contexts", fontsize=22)
plt.savefig(os.path.join(folder,name_prefix+prefix+"_posterior_context.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,name_prefix+prefix+"_posterior_context.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
def plot_run(check=False, naive=False):
h = 1
p = 99
rew_p = 1
prefix = "deval"#'sudden'
if naive:
test = "test_"
post = "_check"
elif check:
test = "check_"
post = "_check"
else:
test = "test_"
post = ""
run_name = test+prefix+"_h"+str(h)+"_t"+str(p)+"_r"+str(rew_p)+"_p90.json"
analysis_name = run_name[:-5] + "_ananlysis"+post+".json"
fname = os.path.join(folder, run_name)
if analysis_name in os.listdir(folder):
ana_fname = os.path.join(folder, analysis_name)
jsonpickle_numpy.register_handlers()
with open(ana_fname, 'r') as infile:
data = json.load(infile)
results = pickle.decode(data)
results_won, results_chosen, results_context, \
results_c_param, results_c_param_type, \
results_p_param, results_p_param_type = results
fit_params = [results_c_param, results_c_param_type, \
results_p_param, results_p_param_type ]
print(np.nanmedian(results_chosen[:]))
print(np.nanmedian(results_c_param[:,2]))
else:
fit_params = None
plot_beliefs(fname, plot_context=True, plot_policies=True, plot_actions=False, plot_prior_pol=True, fit_params=fit_params)
def calculate_analyses(tendencies, p_values=None, reward_probs=None, trainings=None, check=False, naive=False, deval=False, recalc=False):
if naive:
test = ""
post = "_check"
elif check:
test = "check_"
post = "_check"
elif deval:
test = "deval_"
post = ""
else:
test = ""
post = ""
if trainings is None:
trainings = [100]
if reward_probs is None:
reward_probs = [90]
if p_values is None:
p_values = [99]
for i,h in enumerate(tendencies):
for j,p in enumerate(p_values):
for m, r in enumerate(reward_probs):
for k,t in enumerate(trainings):
print(h, p, r, t)
run_name = test+"h"+str(h)+"_t"+str(p)+"_p"+str(r)+"_train"+str(t)+".json"
analysis_name = run_name[:-5] + "_ananlysis"+post+".json"
if analysis_name in os.listdir(folder):
time_diff = os.path.getmtime(os.path.join(folder, analysis_name)) - os.path.getmtime(os.path.join(folder, run_name))
if time_diff <= 0:
new = True
else:
new = False
if analysis_name not in os.listdir(folder) or recalc or new:
fname = os.path.join(folder, run_name)
if check:
run_name_training = "h"+str(h)+"_t"+str(p)+"_p"+str(r)+"_train"+str(t)+".json"
analysis_name_training = run_name_training[:-5] + "_ananlysis.json"
training_analysis = os.path.join(folder, analysis_name_training)
analyze_check(fname, training_analysis, check=check, naive=naive, save=True)
else:
analyze_run(fname, save=True)
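# Usage sketch mirroring the call pattern in plot_analyses below: ensure the
# per-run "_ananlysis" files exist (recomputing only when missing or stale),
# then load them into the aggregated result arrays.
def _example_analysis_pipeline():
    tendencies = [1, 10, 100]
    calculate_analyses(tendencies, [99], reward_probs=[90], recalc=False)
    return load_analyses(tendencies, [99], reward_probs=[90])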
def load_analyses(tendencies, p_values, reward_probs=None, trainings=None, check=False, naive=False, deval=False):
if naive:
test = ""
post = "_check"
elif check:
test = "check_"
post = "_check"
elif deval:
test = "deval_"
post = ""
else:
test = ""
post = ""
if trainings is None:
trainings = [100]
if reward_probs is None:
reward_probs = [90]
run_name = test+"h"+str(tendencies[0])+"_t"+str(p_values[0]) \
+"_p"+str(reward_probs[0])+"_train"+str(trainings[0])+".json"
jsonpickle_numpy.register_handlers()
fname = os.path.join(folder, run_name)
with open(fname, 'r') as infile:
data = json.load(infile)
worlds = pickle.decode(data)
repetitions = len(worlds)
results_c = np.zeros((len(tendencies),len(p_values),len(reward_probs),len(trainings),repetitions ,4))
results_p = np.zeros((len(tendencies),len(p_values),len(reward_probs),len(trainings),repetitions,4))
entropy_c = np.zeros((len(tendencies),len(p_values),len(reward_probs),len(trainings),repetitions))
entropy_p = np.zeros((len(tendencies),len(p_values),len(reward_probs),len(trainings),repetitions))
entropy_l = np.zeros((len(tendencies),len(p_values),len(reward_probs),len(trainings),repetitions))
if check:
chosen = np.zeros((len(tendencies),len(p_values),len(reward_probs),len(trainings),repetitions))
else:
chosen = np.zeros((len(tendencies),len(p_values),len(reward_probs),len(trainings),repetitions,3))
for i,h in enumerate(tendencies):
for j,p in enumerate(p_values):
for m,r in enumerate(reward_probs):
for k,t in enumerate(trainings):
print(h, p, r, t)
run_name = test+"h"+str(h)+"_t"+str(p)+"_p"+str(r)+"_train"+str(t)+".json"
analysis_name = run_name[:-5]+"_ananlysis"+post+".json"
fname = os.path.join(folder, analysis_name)
jsonpickle_numpy.register_handlers()
try:
with open(fname, 'r') as infile:
data = json.load(infile)
except json.JSONDecodeError:
print("corrupt file ... doing recalculation")
calculate_analyses([h], [p], reward_probs=[r], check=check, naive=naive, recalc=True)
with open(fname, 'r') as infile:
data = json.load(infile)
results = pickle.decode(data)
results_won, results_chosen, results_context, \
results_c_param, results_c_param_type, entropy_c_r, \
results_p_param, results_p_param_type, entropy_p_r, entropy_l_r = results
results_c[i,j,m,k] = results_c_param
results_p[i,j,m,k] = results_p_param
chosen[i,j,m,k] = results_chosen
entropy_c[i,j,m,k] = entropy_c_r
entropy_p[i,j,m,k] = entropy_p_r
entropy_l[i,j,m,k] = entropy_l_r
data = 0
results = 0
results_won, results_chosen, results_context, \
results_c_param, results_c_param_type, entropy_c_r, \
results_p_param, results_p_param_type, entropy_p_r, entropy_l_r = [0]*10
return results_c, results_p, entropy_c, entropy_p, entropy_l, chosen
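# Shape sketch for the arrays returned above: indexing is
# [tendency, transition_prob, reward_prob, training, repetition(, fit_param)],
# and fit-parameter index 2 holds the fitted switch trial (see analyze_run).
def _example_median_context_switch(results_c):
    # median context-switch trial per habitual tendency, for the first
    # transition prob, the third reward prob and the first training length
    return np.nanmedian(results_c[:, 0, 2, 0, :, 2], axis=-1)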
def load_checks(tendencies, reward_priors, p_values, prefixes, check=False, naive=False):
if naive:
test = "test_"
post = "_check"
elif check:
test = "check_"
post = "_check"
else:
test = "test_"
post = ""
measures = np.zeros((len(tendencies),len(reward_priors),len(p_values), len(prefixes),10))
results_c = np.zeros((len(tendencies),len(reward_priors),len(p_values), len(prefixes),100,4))
results_p = np.zeros((len(tendencies),len(reward_priors),len(p_values), len(prefixes),100,4))
entropy_c = np.zeros((len(tendencies),len(reward_priors),len(p_values), len(prefixes),100))
entropy_p = np.zeros((len(tendencies),len(reward_priors),len(p_values), len(prefixes),100))
chosen = np.zeros((len(tendencies),len(reward_priors),len(p_values), len(prefixes),100))
for i,h in enumerate(tendencies):
for k,rew_p in enumerate(reward_priors):
for j,p in enumerate(p_values):
for l,prefix in enumerate(prefixes):
#print(h,rew_p,p,prefix)
run_name = test+prefix+"_h"+str(h)+"_t"+str(p)+"_r"+str(rew_p)+".json"
analysis_name = run_name[:-5]+"_ananlysis"+post+".json"
fname = os.path.join(folder, analysis_name)
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
results = pickle.decode(data)
results_won, results_chosen, results_context, \
results_c_param, results_c_param_type, \
results_p_param, results_p_param_type = results
results_c[i,k,j,l] = results_c_param
results_p[i,k,j,l] = results_p_param
chosen[i,k,j,l] = results_chosen
mask_c = results_c_param[:,0] > 0
mask_p = results_p_param[:,0] > 0
for n in range(100):
if mask_c[n]:
fct = fit_functions[results_c_param_type[n]]
param = results_c_param[n,results_c_param_type[n]:]
entropy_c[i,k,j,l,n] = -(fct([0,199],*param)*ln(fct([0,199],*param))).sum()/2. \
- ((1.-fct([0,199],*param))*ln(1.-fct([0,199],*param))).sum()/2.
if mask_p[n]:
fct = fit_functions[results_p_param_type[n]]
param = results_p_param[n,results_p_param_type[n]:]
entropy_p[i,k,j,l,n] = -(fct([0,199],*param)*ln(fct([0,199],*param))).sum()/2. \
- ((1.-fct([0,199],*param))*ln(1.-fct([0,199],*param))).sum()/2.
measures[i,k,j,l] = [results_won.sum(axis=1).mean()/2.,
results_won[:,0].mean(), results_won[:,1].mean(),
results_chosen.mean(),
np.nanmedian(results_c_param[:,1],axis=0),
np.nanmedian(results_c_param[:,2],axis=0),
entropy_c[i,k,j,l].mean(),
np.nanmedian(results_p_param[:,1],axis=0),
np.nanmedian(results_p_param[:,2],axis=0),
entropy_p[i,k,j,l].mean()]
return measures, results_c, results_p, entropy_c, entropy_p, chosen
def plot_analyses(print_regression_results=False):
tendencies = [1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]
tendency_names = (1./np.array(tendencies)).astype(str)
t_ind_short = [0, 9, 18]
#t_short = [tendencies[i] for i in t_ind_short]
t_names_short = [""] * len(tendencies)
for i in t_ind_short:
t_names_short[i] = tendency_names[i]
transition_probs = [99]#[100,99,98,97,96,95,94]#,93,92,91,90]#
reward_probs = [100,95,90,85,80,75,70,65,60]
calculate_analyses(tendencies, transition_probs, reward_probs=reward_probs, recalc=False)
results_c, results_p, entropy_c, entropy_p, entropy_l, results_chosen = load_analyses(tendencies, transition_probs, reward_probs)
prefix = ""
reward = 2
r = reward_probs[reward]
test = ""
numbers = []
chosen_tendencies = []
fnames = []
fit_params = []
h = 0
chosen_tendencies.append(h)
tendency = tendencies[h]
p = transition_probs[0]
run_name = test+prefix+"h"+str(tendency)+"_t"+str(p)+"_p"+str(r)+"_train100.json"
fnames.append(run_name)
action_times = results_p[h,0,reward,0,:,2]
median = np.nanmedian(action_times)
number = np.nanargmin(np.abs(action_times-median))
numbers.append(number)
fit_params.append([results_c[h,0,reward,0,number,:], 0, \
results_p[h,0,reward,0,number,:], 0 ])
save_run_plots([fnames[-1]], [numbers[-1]], [tendency_names[h]], prefix="strong", check=False, fit_params=[fit_params[-1]])
h = -1
chosen_tendencies.append(h)
tendency = tendencies[h]
p = transition_probs[0]
run_name = test+prefix+"h"+str(tendency)+"_t"+str(p)+"_p"+str(r)+"_train100.json"
fnames.append(run_name)
action_times = results_p[h,0,reward,0,:,2]
median = np.nanmedian(action_times)
number = np.nanargmin(np.abs(action_times-median))
numbers.append(number)
fit_params.append([results_c[h,0,reward,0,number,:], 0, \
results_p[h,0,reward,0,number,:], 0 ])
#save_run_plots(fnames, numbers, check=False, fit_params=fit_params)
save_run_plots([fnames[-1]], [numbers[-1]], [tendency_names[h]], prefix="weak", check=False, fit_params=[fit_params[-1]])
fname = os.path.join(folder, "best_and_average.json")
jsonpickle_numpy.register_handlers()
pickled = pickle.encode([numbers, chosen_tendencies])
with open(fname, 'w') as outfile:
json.dump(pickled, outfile)
plt.figure(figsize=(10,5))
plot_c = (results_c[:,0,2,0,:,2]-100.).T
num_tend = plot_c.shape[1]
num_runs = plot_c.shape[0]
plot_c_data = plot_c.T.reshape(plot_c.size)
labels = np.tile(np.arange(num_tend), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': plot_c_data, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color='#ff1493', estimator=np.nanmedian, label="context", linewidth=3)
plot_p = np.array([results_p[i,0,2,0,:,2]-100. for i in range(len(tendencies))]).T
plot_p_data = plot_p.T.reshape(plot_p.size)
data = pd.DataFrame({'chosen': plot_p_data, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color='#cc6600', estimator=np.nanmedian, label="action", linewidth=3)
#plt.xticks(range(len(prefixes)), prefixes)
plt.ylim([0.,20.])
plt.yticks([0,5,10,15,20])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendency $h$", fontsize=20)
plt.ylabel("trial after switch / habit strength", fontsize=20)
plt.title("action and context infer times", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"context_action_infer_times.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"context_action_infer_times.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
print(np.nanmedian(plot_c, axis=0))
# regression if habitual tendency delays context inference
y = np.nanmedian(plot_c, axis=0)#plot_p.flatten(order='F')
names = list(1./np.array(tendencies))
X = np.array([[1]*len(names), names])
reg = sm.OLS(y,X.T)
results = reg.fit()
if print_regression_results:
print("context")
print(results.summary())
# regression if habitual tendency delays action adaptation
y = np.nanmedian(plot_p, axis=0)#plot_p.flatten(order='F')
names = list(1./np.array(tendencies))
X = np.array([[1]*len(names), names])
reg = sm.OLS(y,X.T)
results = reg.fit()
if print_regression_results:
print("actions")
print(results.summary())
plt.figure(figsize=(10,5))
e_c = entropy_c[:,0,2,0,:]
e_c_data = e_c.reshape(e_c.size)
e_c_data = pd.DataFrame({'chosen': e_c_data, 'tendencies': labels})
e_p = entropy_p[:,0,2,0,:]
e_p_data = e_p.reshape(e_p.size)
e_p_data = pd.DataFrame({'chosen': e_p_data, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=e_c_data, color='#ff1493', ci = 95, estimator=np.nanmedian, label='context')
sns.lineplot(x='tendencies', y='chosen', data=e_p_data, color='#cc6600', ci = 95, estimator=np.nanmedian, label='action')
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendency $h$", fontsize=20)
plt.ylabel("entropy", fontsize=20)
plt.title("entropies of the posteriors", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"entropies_in_sudden_condition.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"entropies_in_sudden_condition.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
plt.figure(figsize=(10,5))
chosen_0 = results_chosen[:,0,2,0,:,0]
chosen_data = chosen_0.reshape(chosen_0.size)
chosen_data = pd.DataFrame({'chosen': chosen_data, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=chosen_data, color='blue', ci = 95, estimator=np.nanmedian)#, condition="alpha_init=1")
#plt.xticks(range(len(prefixes)), prefixes)
plt.ylim([0.,1.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendency $h$", fontsize=20)
plt.ylabel("percent", fontsize=20)
plt.title("proportion of optimal action chosen trials 1-100", fontsize=20)
plt.savefig(os.path.join(folder,"optimal_action_chosen_before_switch.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"optimal_action_chosen_before_switch.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
# regression if habitual tendency increases rewarding behavior in context 1
y = np.nanmedian(chosen_0, axis=1)#plot_p.flatten(order='F')
names = list(1./np.array(tendencies))
X = np.array([[1]*len(names), names])
reg = sm.OLS(y,X.T)
results = reg.fit()
if print_regression_results:
print(results.summary())
plt.figure(figsize=(10,5))
chosen_1 = results_chosen[:,0,2,0,:,1]
chosen_data = chosen_1.reshape(chosen_1.size)
chosen_data = pd.DataFrame({'chosen': chosen_data, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=chosen_data, color='blue', ci = 95, estimator=np.nanmedian)#, condition="alpha_init=1")
#plt.xticks(range(len(prefixes)), prefixes)
plt.ylim([0.,1.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendency $h$", fontsize=20)
plt.ylabel("percent", fontsize=20)
plt.title("proportion of optimal action chosen trials 101-115", fontsize=20)
plt.savefig(os.path.join(folder,"optimal_action_chosen_after_switch.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"optimal_action_chosen_after_switch.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
# regression if habitual tendency decreases rewarding behavior directly after switch
    y = np.nanmedian(chosen_1, axis=1)
names = list(1./np.array(tendencies))
X = np.array([[1]*len(names), names])
reg = sm.OLS(y,X.T)
results = reg.fit()
if print_regression_results:
print(results.summary())
plt.figure(figsize=(10,5))
chosen_2 = results_chosen[:,0,2,0,:,2]
chosen_data = chosen_2.reshape(chosen_2.size)
chosen_data = pd.DataFrame({'chosen': chosen_data, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=chosen_data, color='blue', ci = 95, estimator=np.nanmedian)#, condition="alpha_init=1")
#plt.xticks(range(len(prefixes)), prefixes)
plt.ylim([0.,1.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendency $h$", fontsize=20)
plt.ylabel("percent", fontsize=20)
plt.title("proportion of optimal action chosen trials 116-200", fontsize=20)
plt.savefig(os.path.join(folder,"optimal_action_chosen_after_switch2.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"optimal_action_chosen_after_switch2.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
# regression if habitual tendency increases rewarding behavior in context 2
    y = np.nanmedian(chosen_2, axis=1)
names = list(1./np.array(tendencies))
X = np.array([[1]*len(names), names])
reg = sm.OLS(y,X.T)
results = reg.fit()
if print_regression_results:
print(results.summary())
noises = [0,2,4]
colors = ['purple','red','orange']
plt.figure(figsize=(10,5))
chosen = results_chosen[:,:,:,0,:,0]
for k,j in enumerate(noises):
plot_c = chosen[:,0,j,:]
chosen_0 = plot_c.reshape(plot_c.size)
labels = np.tile(np.arange(num_tend), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color=colors[k], estimator=np.nanmedian, label=r'$\nu$='+str(reward_probs[j]/100))
plt.ylim([0.,1.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendencies", fontsize=20)
plt.ylabel("percent", fontsize=20)
plt.title("proportion of optimal action chosen trials 1-100", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"stochastic_optimal_action_chosen_before_switch_some.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"stochastic_optimal_action_chosen_before_switch_some.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
plt.figure(figsize=(10,5))
chosen = results_chosen[:,:,:,0,:,1]
for k,j in enumerate(noises):
plot_c = chosen[:,0,j,:]
chosen_0 = plot_c.reshape(plot_c.size)
labels = np.tile(np.arange(num_tend), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color=colors[k], estimator=np.nanmedian, label=r'$\nu$='+str(reward_probs[j]/100))
plt.ylim([0.,1.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendencies", fontsize=20)
plt.ylabel("percent", fontsize=20)
plt.title("proportion of optimal action chosen trials 101-115", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"stochastic_optimal_action_chosen_after_switch_some.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"stochastic_optimal_action_chosen_after_switch_some.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
plt.figure(figsize=(10,5))
chosen = results_chosen[:,:,:,0,:,2]
for k,j in enumerate(noises):
plot_c = chosen[:,0,j,:]
chosen_0 = plot_c.reshape(plot_c.size)
labels = np.tile(np.arange(num_tend), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color=colors[k], estimator=np.nanmedian, label=r'$\nu$='+str(reward_probs[j]/100))
plt.ylim([0.,1.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendencies", fontsize=20)
plt.ylabel("percent", fontsize=20)
plt.title("proportion of optimal action chosen trials 116-200", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"stochastic_optimal_action_chosen_after_switch2_some.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"stochastic_optimal_action_chosen_after_switch2_some.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
plt.figure(figsize=(10,5))
for k,j in enumerate(noises):
plot_c = results_c[:,0,j,0,:,2]-100
chosen_0 = plot_c.T.reshape(plot_c.size)
labels = np.tile(np.arange(num_tend), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color=colors[k], estimator=np.nanmedian, label=r'$\nu$='+str(reward_probs[j]/100))
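        # Per noise level: regress context inference time (trials after the switch at trial 100) on the inverse habitual tendency 1/h.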
y = plot_c.flatten(order='F')
names = list(1./np.array(tendencies))
X = np.array([names*num_runs]).T
mask = ~np.isnan(y)
reg = lm.LinearRegression().fit(X[mask,:],y[mask])
print("influence of habitual tendency on context inference", reward_probs[j], "beta & r^2", reg.coef_, reg.score(X[mask,:],y[mask]))
plt.ylim([0.,100.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendencies", fontsize=20)
plt.ylabel("trial", fontsize=20)
plt.title("context infer times", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"stochastic_context_infer_times_some.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"stochastic_context_infer_times_some.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
plt.figure(figsize=(10,5))
for k,j in enumerate(noises):
plot_p = results_p[:,0,j,0,:,2]-100
        chosen_0 = plot_p.T.reshape(plot_p.size)
labels = np.tile(np.arange(num_tend), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color=colors[k], estimator=np.nanmedian, label=r'$\nu$='+str(reward_probs[j]/100))
y = plot_p.flatten(order='F')
names = list(1./np.array(tendencies))
X = np.array([names*num_runs]).T
mask = ~np.isnan(y)
reg = lm.LinearRegression().fit(X[mask,:],y[mask])
print("influence of habitual tendency on action inference", reward_probs[j], "beta & r^2", reg.coef_, reg.score(X[mask,:],y[mask]))
plt.xticks(range(len(reward_probs)), reward_probs)
plt.ylim([0.,100.])
plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(tendencies)), t_names_short, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("habitual tendencies", fontsize=20)
plt.ylabel("trial", fontsize=20)
plt.title("action infer times", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"stochastic_action_infer_times_some.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"stochastic_action_infer_times_some.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
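    # Difference between action and context inference times as a function of reward probability, for three selected habitual tendencies.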
styles = ['-', '--', ':']
plot_c = []
plt.figure(figsize=(10,5))
num_probs = len(reward_probs)
for k,h in enumerate([0,num_probs,-1]):
plot_c.append((results_p[h,0,:,0,:,2]-results_c[h,0,:,0,:,2]).T)
chosen_0 = plot_c[-1].T.reshape(plot_c[-1].size)
labels = np.tile(np.arange(num_probs), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
ax = sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, estimator=np.nanmedian, label="h="+str(tendency_names[h]), linewidth=3)
ax.lines[-1].set_linestyle(styles[k])
plt.ylim([0.,100.])
#plt.xlim([len(tendencies)-1,0])
plt.xticks(range(len(reward_probs)), reward_probs, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("reward probability", fontsize=20)
plt.ylabel("action - context inference", fontsize=20)
plt.title("difference between inference times", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.show()
plot_c = np.array([np.nanmedian(plot_c[i], axis=0) for i in range(len(plot_c))])
y = plot_c.flatten()
names = (0.5-np.array(reward_probs)/100.)*(-2)
stab = np.array(list(names) * 3)
#tend = np.array([0.01]*len(reward_probs) + [0.1]*len(reward_probs) + [1.0]*len(reward_probs))
#cross = stab*tend
X = np.array([[1]*len(stab), stab])
reg = sm.OLS(y,X.T)
results = reg.fit()
if print_regression_results:
print(results.summary())
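    # Context inference times as a function of reward probability, for the same three habitual tendencies.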
styles = ['-', '--', ':']
plot_c = []
plt.figure(figsize=(10,5))
for k,h in enumerate([0,num_probs,-1]):
        plot_c.append((results_c[h,0,:,0,:,2]-100).T)
chosen_0 = plot_c[-1].T.reshape(plot_c[-1].size)
labels = np.tile(np.arange(num_probs), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
ax = sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color='#ff1493', estimator=np.nanmedian, label="h="+str(tendency_names[h]), linewidth=3)
ax.lines[-1].set_linestyle(styles[k])
plt.ylim([0.,100.])
plt.xlim([0,num_probs-1])
plt.xticks(range(len(reward_probs)), reward_probs, fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("reward probability", fontsize=20)
plt.ylabel("context inference", fontsize=20)
plt.title("context infer times", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.show()
plot_c = np.array([np.nanmedian(plot_c[i], axis=0) for i in range(len(plot_c))])
y = plot_c.flatten()
names = (0.5-np.array(reward_probs)/100.)*(-2)
stab = np.array(list(names) * 3)
#tend = np.array([0.01]*len(reward_probs) + [0.1]*len(reward_probs) + [1.0]*len(reward_probs))
#cross = stab*tend
X = np.array([[1]*len(stab), stab])
reg = sm.OLS(y,X.T)
results = reg.fit()
if print_regression_results:
print(results.summary())
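    # Action inference times (relative to trial 100), shown as habit strength H, against stochasticity 1-nu for the same three habitual tendencies.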
styles = ['-', '--', ':']
plot_p = []
plt.figure(figsize=(10,5))
for k,h in enumerate([0,num_probs,-1]):
# plot_c = np.array([results_c[h,0,i,0,:,2]-100 for i in range(len(reward_probs))]).T
# sns.lineplot(plot_c, ci = 95, color='#ff1493', estimator=np.nanmedian, condition="context, h="+str(tendency_names[h]), linestyle=styles[k])
plot_p.append((results_p[h,0,:,0,:,2]-100).T)
chosen_0 = plot_p[-1].T.reshape(plot_p[-1].size)
labels = np.tile(np.arange(num_probs), (num_runs, 1)).reshape(-1, order='f')
data = pd.DataFrame({'chosen': chosen_0, 'tendencies': labels})
ax = sns.lineplot(x='tendencies', y='chosen', data=data, ci = 95, color='#cc6600', estimator=np.nanmedian, label="h="+str(tendency_names[h]), linestyle=styles[k], linewidth=3)
ax.lines[-1].set_linestyle(styles[k])
plt.ylim([0.,100.])
plt.xlim([0,num_probs-1])
x = np.array(reward_probs)
x = 100-x
plt.xticks(range(len(reward_probs)), x/100., fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel(r'stochasticity $1-\nu$', fontsize=20)
plt.ylabel(r'habit strength $H$', fontsize=20)
plt.title("habit strength", fontsize=20)
lgd = plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", fontsize=20)
plt.savefig(os.path.join(folder,"stochastic_habit_strength.svg"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(os.path.join(folder,"stochastic_habit_strength.png"), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
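    # Fit an exponential to each group of 10 runs (up to the curve's maximum) and compare the fitted
    # parameters across the three tendencies with a one-way ANOVA.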
    fit_results = np.zeros((len(plot_p),num_runs//10,3))  # one row of fits per group of 10 runs
for i in range(len(plot_p)):
for j in range(num_runs//10):
y = np.nanmedian(plot_p[i][j*10:(j+1)*10], axis=0)
first = y.argmax() + 1
x = np.array(reward_probs)
x = 150-x
fit_results[i,j], pcov = sc.optimize.curve_fit(exponential, \
x[:first], y[:first], p0=[1.,20.,10.], \
bounds=[[0, 0, 0],[np.inf, np.inf, np.inf]])
# plt.figure()
# plt.plot(x, np.nanmedian(plot_p[i][j*10:(j+1)*10], axis=0),'x')
# print(fit_results[i,j])
# plt.plot(x, exponential(x,*fit_results[i,j]))
# plt.ylim([0,101])
# plt.show()
print(np.nanmean(fit_results, axis=1))
for i in range(3):
print(sc.stats.f_oneway(*fit_results[:,:,i]))
# plot_p = np.array([np.nanmedian(plot_p[i], axis=0) for i in range(len(plot_p))])
# y = plot_p.flatten()
#
# stab = np.array(list(range(len(reward_probs))) * 3)
# tend = np.array([0.01]*len(reward_probs) + [0.1]*len(reward_probs) + [1.0]*len(reward_probs))
# cross = stab*tend
# X = np.array([[1]*len(stab), stab, tend, cross])
# reg = sm.OLS(y,X.T)
# results = reg.fit()
# print(results.summary())
#return results_c, results_p, entropy_c, entropy_p, results_chosen
def plot_analyses_training():
tendencies = [1,10,100]
tendency_names = (1./np.array(tendencies)).astype(str)
transition_probs = [99]#[100,99,98,97,96,95,94]
trainings = [56, 100, 177, 316, 562, 1000, 1778]#, 3162, 5623, 10000]
training_names = [1.75, 2., 2.25, 2.5, 2.72, 3., 3.25, 3.5, 3.75, 4.0]
tr_ind_short = [1,5]#,9]
tr_names_short = [""] * len(trainings)
for i in tr_ind_short:
tr_names_short[i] = trainings[i]
calculate_analyses(tendencies, transition_probs, trainings=trainings, recalc=False)
results_c, results_p, entropy_c, entropy_p, entropy_l, results_chosen = load_analyses(tendencies, transition_probs, trainings=trainings)
styles = ['-', '--', ':']
plot_p = []
plt.figure(figsize=(10,5))
for h in range(len(tendencies)):
plot_p.append(np.array([results_p[h,0,0,i,:,2]-trainings[i] for i in range(len(trainings))]).T)
num_train = plot_p[-1].shape[1]
num_runs = plot_p[-1].shape[0]
chosen_0 = plot_p[-1].T.reshape(plot_p[-1].size)
labels = np.tile(np.arange(num_train), (num_runs, 1)).reshape(-1, order='f')
data = | pd.DataFrame({'chosen': chosen_0, 'tendencies': labels}) | pandas.DataFrame |
import shutil
import os
import pytest
import pandas as pd
import numpy as np
from rumi.processing import postprocess
from rumi.io import config
from rumi.io import utilities
from rumi.io import constant
from rumi.io import loaders
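# Test doubles for rumi.io lookups: get_parameter is monkeypatched into rumi.io.loaders in the tests
# below, and get_config_value pins the scenario to S1_Ref.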
def get_parameter(name, **kwargs):
if name == "ModelPeriod":
return pd.DataFrame({"StartYear": [2021],
"EndYear": [2022]})
elif name == "Seasons":
return utilities.make_dataframe("""Season,StartMonth,StartDate
SUMMER,4,1
MONSOON,6,1
AUTUMN,9,1
WINTER,11,1
SPRING,2,1""")
elif name == "DayTypes":
return utilities.make_dataframe("""DayType,Weight
A,0.4
B,0.6""")
elif name == "DaySlices":
return utilities.make_dataframe("""DaySlice,StartHour
EARLY,6
MORN,9
MID,12
AFTERNOON,15
EVENING,18
NIGHT,22"""
)
elif name == "ModelGeography":
return "INDIA"
elif name == "SubGeography1":
return 'ER,WR,NR,SR,NER'.split(",")
elif name == "SubGeography2":
return {"ER": 'BR,JH,OD,WB'.split(","),
"WR": 'CG,GJ,MP,MH,GA,UT'.split(","),
"NR": 'DL,HR,HP,JK,PB,RJ,UP,UK'.split(","),
"SR": 'AP,KA,KL,TN,TS'.split(","),
"NER": 'AS,NE'.split(",")}
def get_config_value(configname):
if configname == "scenario":
return "S1_Ref"
else:
return config.get_config_value(configname)
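# Fixture that builds a minimal Supply/Demand output tree with empty CSV placeholders for scenario S1_Ref and removes it after the test.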
@pytest.fixture()
def dummy_output():
path = "test_output_supply"
scenario = "S1_Ref"
os.mkdir(path)
os.mkdir(os.path.join(path, scenario))
os.mkdir(os.path.join(path, scenario, "Supply"))
os.mkdir(os.path.join(path, scenario, "Supply", "Output"))
os.mkdir(os.path.join(path, scenario, "Demand"))
os.mkdir(os.path.join(path, scenario, "Demand", "Output"))
os.mkdir(os.path.join(path, scenario, "Supply", "Output", "Run-Outputs"))
files = ["EndUseDemandMetByDom",
"EndUseDemandMetByImp",
"ECTInputDom",
"ECTInputImp"]
files = [".".join([f, "csv"]) for f in files]
for f in files:
with open(os.path.join(path, scenario, "Supply", "Output", "Run-Outputs", f), "w") as fd:
fd.write("\n")
demandpath = os.path.join(path, scenario, "Demand", "Output")
with open(os.path.join(demandpath, "EndUseDemandEnergy.csv"), "w") as f:
f.write("\n")
yield path
shutil.rmtree(path)
def test_season_wise(monkeypatch):
monkeypatch.setattr(loaders, 'get_parameter', get_parameter)
tcols = list(constant.TIME_SLICES)
gcols = list(constant.GEOGRAPHIES)
entity = "EnergyConvTech"
df1 = utilities.base_dataframe_all(geocols=gcols[:2],
timecols=tcols,
colname="value",
val=1.0).reset_index()
df1[entity] = "DF1"
df2 = utilities.base_dataframe_all(geocols=gcols[:2],
timecols=tcols[:2],
colname="value",
val=1.0).reset_index()
df2[entity] = "DF2"
df3 = utilities.base_dataframe_all(geocols=gcols[:2],
timecols=tcols,
colname="value",
val=1.0).reset_index()
df3[entity] = "DF3"
df3['DayNo'] = 1
df4 = utilities.base_dataframe_all(geocols=gcols[:2],
timecols=tcols,
colname="value",
val=1.0).reset_index()
df4[entity] = "DF4"
df4['DayNo'] = np.nan
df5 = utilities.base_dataframe_all(geocols=gcols[:2],
timecols=tcols,
colname="value",
val=1.0).reset_index()
df5[entity] = "DF5"
df5['DayNo'] = 1
df = | pd.concat([df1, df2, df3, df4, df5]) | pandas.concat |
import pandas as pd
import seaborn as sns
import os
import numpy as np
from scipy import signal as sig_lib
import sklearn.decomposition as decomposition
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
class data_analyzer(object):
def __init__(self,data,config, name, psd_scaler=None,pca_transformer=None):
self.frags = data['frags']
self.segments = data['segments']
self.config = config
self.__name__ = name
self.path = 'data/features'
self.feature_set = None
self.psd_pca_scaler = psd_scaler
self.psd_pca_transformer = pca_transformer
def analyse_missing_observation(self):
sensors = set()
observations = set()
nan_columns = list()
missed_groups = list()
for_df = list()
datset_size = len(self.frags) if self.config.dataset_size == 'max' else self.config.dataset_size
cnt = 0
for item in self.frags:
if cnt > datset_size:
break
# extract the segment name
name = int(item.split('.')[-2].split('/')[-1])
at_least_one_missed = 0
frag = pd.read_csv(item)
missed_group = list()
missed_percents = list()
# Run the segment columns
for col in frag.columns:
# calculate how many values are missing in percentages from this column
missed_percents.append(frag[col].isnull().sum() / len(frag))
#check if we are missing at least one sample
if pd.isnull(frag[col]).all() == True:
at_least_one_missed = 1
nan_columns.append(col)
missed_group.append(col)
if len(missed_group) > 0:
missed_groups.append(missed_group)
sensors.add(len(frag.columns))
observations.add(len(frag))
for_df.append([name, at_least_one_missed] + missed_percents)
cnt += 1
self.for_df = pd.DataFrame(for_df, columns=['segment_id', 'has_missed_sensors', 'missed_percent_sensor1',
'missed_percent_sensor2', 'missed_percent_sensor3', 'missed_percent_sensor4',
'missed_percent_sensor5', 'missed_percent_sensor6', 'missed_percent_sensor7',
'missed_percent_sensor8', 'missed_percent_sensor9', 'missed_percent_sensor10'])
self.merged = pd.merge(self.segments, self.for_df)
if self.config.with_missing_sensor_dist_analysis:
self.analyse_missing_sensor_distribution(nan_columns,missed_groups)
def analyse_missing_sensor_distribution(self,nan_columns,missed_groups):
absent_sensors = dict()
for item in nan_columns:
if item in absent_sensors:
absent_sensors[item] += 1
else:
absent_sensors[item] = 0
absent_df = pd.DataFrame(absent_sensors.items(), columns=['Sensor', 'Missed sensors'])
# fig = px.bar(
# absent_df,
# x="Sensor",
# y='Missed sensors',
# width=800,
# height=500,
# title='Number of missed sensors in training dataset'
# )
# fig.show()
absent_groups = dict()
for item in missed_groups:
if str(item) in absent_groups:
absent_groups[str(item)] += 1
else:
absent_groups[str(item)] = 0
def analyse_segements_sensors_correlation(self):
if self.config.pairplot_sensors_correaltion is True:
indices = np.random.randint(len(self.train_frags), size=self.config.pairplot_number_of_sensors)
for index in indices:
item = self.train_frags[index]
name = int(item.split('.')[-2].split('/')[-1])
frag = pd.read_csv(item)
sns_plot = sns.pairplot(frag)
if not os.path.exists('outputs/pair_plots'):
os.makedirs('outputs/pair_plots')
sns_plot.savefig(f"outputs/pair_plots/segment_{name}.png")
def build_features_signal(self, signal, ts, sensor_id):
X = pd.DataFrame()
X.loc[ts, f'{sensor_id}_sum'] = signal.sum()
X.loc[ts, f'{sensor_id}_mean'] = signal.mean()
X.loc[ts, f'{sensor_id}_std'] = signal.std()
X.loc[ts, f'{sensor_id}_var'] = signal.var()
X.loc[ts, f'{sensor_id}_max'] = signal.max()
X.loc[ts, f'{sensor_id}_min'] = signal.min()
X.loc[ts, f'{sensor_id}_skew'] = signal.skew()
X.loc[ts, f'{sensor_id}_mad'] = signal.mad()
X.loc[ts, f'{sensor_id}_kurtosis'] = signal.kurtosis()
X.loc[ts, f'{sensor_id}_quantile99'] = np.quantile(signal, 0.99)
X.loc[ts, f'{sensor_id}_quantile95'] = np.quantile(signal, 0.95)
X.loc[ts, f'{sensor_id}_quantile85'] = np.quantile(signal, 0.85)
X.loc[ts, f'{sensor_id}_quantile75'] = np.quantile(signal, 0.75)
X.loc[ts, f'{sensor_id}_quantile55'] = np.quantile(signal, 0.55)
X.loc[ts, f'{sensor_id}_quantile45'] = np.quantile(signal, 0.45)
X.loc[ts, f'{sensor_id}_quantile25'] = np.quantile(signal, 0.25)
X.loc[ts, f'{sensor_id}_quantile15'] = np.quantile(signal, 0.15)
X.loc[ts, f'{sensor_id}_quantile05'] = np.quantile(signal, 0.05)
X.loc[ts, f'{sensor_id}_quantile01'] = np.quantile(signal, 0.01)
fs = 100 # sampling frequency
freq, psd = sig_lib.welch(signal, fs=fs)
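        # Welch power spectral density of the raw signal; the per-sensor PSDs are concatenated and PCA-reduced later in extract_data_features.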
if signal.isna().sum() > 1000: ##########
X.loc[ts, f'{sensor_id}_A_pow'] = np.nan
X.loc[ts, f'{sensor_id}_A_num'] = np.nan
X.loc[ts, f'{sensor_id}_BH_pow'] = np.nan
X.loc[ts, f'{sensor_id}_BH_num'] = np.nan
X.loc[ts, f'{sensor_id}_BL_pow'] = np.nan
X.loc[ts, f'{sensor_id}_BL_num'] = np.nan
X.loc[ts, f'{sensor_id}_C_pow'] = np.nan
X.loc[ts, f'{sensor_id}_C_num'] = np.nan
X.loc[ts, f'{sensor_id}_D_pow'] = np.nan
X.loc[ts, f'{sensor_id}_D_num'] = np.nan
X.loc[ts, f'{sensor_id}_fft_real_mean'] = np.nan
X.loc[ts, f'{sensor_id}_fft_real_std'] = np.nan
X.loc[ts, f'{sensor_id}_fft_real_max'] = np.nan
X.loc[ts, f'{sensor_id}_fft_real_min'] = np.nan
else:
# STFT(Short Time Fourier Transform) Specifications
n = 256 # FFT segment size
max_f = 20 # ~20Hz
delta_f = fs / n # 0.39Hz
# delta_t = n / fs / 2 # 1.28s
f = np.fft.fft(signal)
f_real = np.real(f)
f, t, Z = sig_lib.stft(signal.fillna(0), fs=fs, window='hann', nperseg=n)
# f = f[:round(max_f / delta_f) + 1]
Z = np.abs(Z[:round(max_f / delta_f) + 1]).T # ~max_f, row:time,col:freq
th = Z.mean() * 1 ##########
Z_pow = Z.copy()
Z_pow[Z < th] = 0
Z_num = Z_pow.copy()
Z_num[Z >= th] = 1
Z_pow_sum = Z_pow.sum(axis=0)
Z_num_sum = Z_num.sum(axis=0)
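            # Band features from the thresholded STFT: A 10-20 Hz, BH 5-8 Hz, BL 1.5-2.5 Hz, C 0.6-1.2 Hz, D 2-4 Hz
            # (_pow sums the above-threshold power over time, _num counts the above-threshold time-frequency bins).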
X.loc[ts, f'{sensor_id}_A_pow'] = Z_pow_sum[round(10 / delta_f):].sum()
X.loc[ts, f'{sensor_id}_A_num'] = Z_num_sum[round(10 / delta_f):].sum()
X.loc[ts, f'{sensor_id}_BH_pow'] = Z_pow_sum[round(5 / delta_f):round(8 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_BH_num'] = Z_num_sum[round(5 / delta_f):round(8 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_BL_pow'] = Z_pow_sum[round(1.5 / delta_f):round(2.5 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_BL_num'] = Z_num_sum[round(1.5 / delta_f):round(2.5 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_C_pow'] = Z_pow_sum[round(0.6 / delta_f):round(1.2 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_C_num'] = Z_num_sum[round(0.6 / delta_f):round(1.2 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_D_pow'] = Z_pow_sum[round(2 / delta_f):round(4 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_D_num'] = Z_num_sum[round(2 / delta_f):round(4 / delta_f)].sum()
X.loc[ts, f'{sensor_id}_fft_real_mean'] = f_real.mean()
X.loc[ts, f'{sensor_id}_fft_real_std'] = f_real.std()
X.loc[ts, f'{sensor_id}_fft_real_max'] = f_real.max()
X.loc[ts, f'{sensor_id}_fft_real_min'] = f_real.min()
return X, psd
def calculate_statistics(self):
self.feature_set = list()
j = 0
print('Starting statistics calculation')
signal_record_dict = {}
'''
Initialize the sensor statistics
'''
for sensor_number in range(0, 10):
# iterate over all sensor's signals
sensor_id = f'sensor_{sensor_number + 1}'
signal_record_dict[sensor_id] = np.array([])
for seg in self.merged.segment_id:
# read signals from csv
signals = pd.read_csv(f'data/dataset/{self.__name__}/{seg}.csv')
train_row = []
if j % 500 == 0:
print(j)
for sensor_number in range(0, 10):
# iterate over all sensor's signals
sensor_id = f'sensor_{sensor_number + 1}'
if signals[sensor_id].isnull().values.any():
# check if there are NaN values
if signals[sensor_id].isnull().sum() != len(signals[sensor_id]):
                        signal_record_dict[sensor_id] = np.concatenate([signal_record_dict[sensor_id], signals[sensor_id].dropna().values], axis=0)
else:
signal_record_dict[sensor_id] = np.concatenate([signal_record_dict[sensor_id], signals[sensor_id].values], axis=0)
j = j + 1
self.statistic = {'mean': {}, 'std': {}}
for sensor_number in range(0, 10):
# iterate over all sensor's signals
sensor_id = f'sensor_{sensor_number + 1}'
self.statistic['mean'][sensor_id] = np.mean(signal_record_dict[sensor_id])
self.statistic['std'][sensor_id] = np.std(signal_record_dict[sensor_id])
    def fill_missing_values(self, signal, sensor_id):
        if signal.isnull().values.any():
            if signal.isnull().sum() != len(signal):
                # partially missing: fill the gaps by linear interpolation (as the original comment intended)
                signal = signal.interpolate(method='linear', limit_direction='both')
            else:
                # fully missing: draw samples from a normal distribution with this sensor's global mean/std
                mean = self.statistic['mean'][sensor_id]
                std = self.statistic['std'][sensor_id]
                signal = pd.Series(std * np.random.normal(size=len(signal)) + mean, index=signal.index)
        # return the (possibly filled) signal in every case
        return signal
def extract_data_features(self):
self.feature_set = list()
psd_samples = list()
j = 0
for seg in self.merged.segment_id:
# read signals from csv
signals = pd.read_csv(f'data/dataset/{self.__name__}/{seg}.csv')
train_row = []
if j % 500 == 0:
print(j)
psd_signal = np.array([])
features = []
for sensor_number in range(0, 10):
#iterate over all sensor's signals
sensor_id = f'sensor_{sensor_number + 1}'
element,psd = self.build_features_signal(signals[sensor_id], seg, sensor_id)
features.append(element)
psd_signal = np.concatenate((psd_signal, psd), axis=-1)
features = pd.concat(features,axis=1)
psd_samples.append(psd_signal)
self.feature_set.append(features)
j += 1
'''
        Dimension reduction of the PSD features
'''
psd_samples = np.nan_to_num(np.array(psd_samples))
if self.__name__ == 'test':
psd_pca_scaled = self.psd_pca_scaler.transform(psd_samples)
psd_pca_scaled = np.nan_to_num(psd_pca_scaled)
psd_after_dimension_reduction = self.psd_pca_transformer.transform(psd_pca_scaled)
else:
psd_pca_scaled = self.psd_pca_scaler.fit(psd_samples).transform(psd_samples)
psd_pca_scaled = np.nan_to_num(psd_pca_scaled)
psd_after_dimension_reduction = self.psd_pca_transformer.fit_transform(psd_pca_scaled)
for psd_index in range(psd_after_dimension_reduction.shape[0]):
for pca_feature_index in range(psd_after_dimension_reduction.shape[1]):
self.feature_set[psd_index].loc[self.feature_set[psd_index].index.values[0],f'psd_{pca_feature_index}'] = psd_after_dimension_reduction[psd_index][pca_feature_index]
self.feature_set = pd.concat(self.feature_set)
self.feature_set = self.feature_set.reset_index()
self.feature_set = self.feature_set.rename(columns={'index': 'segment_id'})
self.feature_set = pd.merge(self.feature_set, self.merged, on='segment_id')
if not os.path.exists(self.path):
os.makedirs(self.path)
self.feature_set.to_csv(f'{self.path}/{self.config.feature_version}_fft_and_psd_stft_{self.__name__}_with_redudant_feat.csv')
def divide_input_output(self,drop_cols=None):
if self.__name__ == 'train':
X = self.feature_set.drop(['segment_id', 'time_to_eruption'], axis=1)
y = self.feature_set['time_to_eruption']
return X, y, drop_cols
elif self.__name__ == 'test':
X = self.feature_set.drop(['segment_id', 'time_to_eruption'], axis=1)
return X, self.feature_set['segment_id']
else:
raise Exception('Invalid class name please use train or test or define a new name!')
def remove_redudant_features(self, drop_cols=None):
if drop_cols is None:
if self.feature_set is None:
                raise Exception('Invalid command, please extract or load features in order to remove redundant ones!')
drop_cols = []
# Remove uncorrelated columns
for col in self.feature_set.columns:
if col == 'segment_id':
continue
if abs(self.feature_set[col].corr(self.feature_set['time_to_eruption'])) < self.config.uncorrelated_cols_threshold:
drop_cols.append(col)
not_to_drop_cols = []
for col1 in self.feature_set.columns:
for col2 in self.feature_set.columns:
if col1 == col2:
continue
if col1 == 'segment_id' or col2 == 'segment_id':
continue
if col1 == 'time_to_eruption' or col2 == 'time_to_eruption':
continue
if abs(self.feature_set[col1].corr(self.feature_set[col2])) > self.config.correlated_cols_threshold:
if col2 not in drop_cols and col1 not in not_to_drop_cols:
drop_cols.append(col2)
not_to_drop_cols.append(col1)
# Drop corresponding columns
self.feature_set = self.feature_set.drop(drop_cols, axis=1)
self.feature_set.to_csv(f'{self.path}/{self.config.feature_version}_fft_and_psd_stft_{self.__name__}_without_redudant_feat.csv')
return self.divide_input_output(drop_cols)
def load_data_features_before_removing_features(self):
if not os.path.exists(self.path):
raise Exception('Missing features! please extract them first')
self.feature_set = pd.read_csv(f'{self.path}/{self.config.feature_version}_fft_and_psd_stft_{self.__name__}_with_redudant_feat.csv')
return self.divide_input_output()
def load_data_features_after_removing_features(self):
if not os.path.exists(self.path):
raise Exception('Missing features! please extract them first')
self.feature_set = | pd.read_csv(f'{self.path}/{self.config.feature_version}_fft_and_psd_stft_{self.__name__}_without_redudant_feat.csv') | pandas.read_csv |
import pathlib
import sys
parser_dir = pathlib.Path(__file__).parent.parent / 'solidity-universal-parser' / 'parser'
sys.path.append(str(parser_dir))
from python3.SolidityLexer import SolidityLexer
from python3.SolidityParser import SolidityParser
from typing import Iterable
from antlr4 import InputStream, CommonTokenStream
from comment_visitor import CommentVisitor
import pandas as pd
from tqdm import tqdm
import itertools
from multiprocessing import Pool
def chunk_gen(df: pd.DataFrame, chunk_size: int) -> Iterable:
i = 0
while i < df.shape[0]:
yield df.iloc[i:i + chunk_size]
i += chunk_size
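# Runs func over the iterable in a process pool (imap_unordered) and flattens the per-chunk result lists into a single list.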
def applyParallel(iterable, total, func, chunksize=1):
with Pool() as p:
res_list = []
with tqdm(total=total, desc="Extracting", leave=False) as pbar:
for res in p.imap_unordered(func=func, iterable=iterable, chunksize=chunksize):
pbar.update()
res_list.append(res)
return list(itertools.chain.from_iterable(res_list))
def parse_data_worker(iterrows):
index, row = iterrows
extract = []
try:
code = row['source_code']
input_stream = InputStream(code)
lexer = SolidityLexer(input_stream)
lexer.removeErrorListeners() # remove default error listener
stream = CommonTokenStream(lexer)
parser = SolidityParser(stream)
parser.removeErrorListeners() # remove default error listener
visitor = CommentVisitor()
tree = parser.sourceUnit()
extract = visitor.visit(tree)
for entry in extract:
entry['contract_name'] = row['contract_name']
entry['file_path'] = row['file_path']
entry['contract_address'] = row['contract_address']
entry['language'] = row['language']
entry['compiler_version'] = row['compiler_version']
entry['license_type'] = row['license_type']
entry['swarm_source'] = row['swarm_source']
except RecursionError as re:
print(re)
pass
return extract
def parse_data(dir_path: str, chunk_size: int) -> pd.DataFrame:
df = | pd.read_parquet(dir_path) | pandas.read_parquet |
import csv
import numpy as np
import pandas as pd
from copy import deepcopy
from enum import IntEnum
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize': (18, 6)})
sns.set_theme()
FPS = 35
class GameStage(IntEnum):
EXPLORATION = 0
COMBAT = 1
MENU = 2
CONSOLE = 3
SCOREBOARD = 4
legendMap = {GameStage.EXPLORATION: ["blue", "Exploration"],
GameStage.COMBAT: ["red", "Combat"],
GameStage.MENU: ["gold", "Menu"],
GameStage.CONSOLE: ["lime", "Console"],
GameStage.SCOREBOARD: ["cyan", "Scoreboard"]}
def mean(nums):
if nums: return round(sum(nums) / len(nums), 2)
else: return '-'
def ratio_negatives(nums):
if nums: return round(len([num for num in nums if num < 0]) * 100 / len(nums), 2)
else: return '-'
class DatasetAnalyzer():
def __init__(self, pathCSV, smooth=None, windowLength=None):
self.pathCSV = pathCSV
self.pathOutput = '/'.join(pathCSV.split('/')[:-1])
self.windowLength = windowLength
self.smooth = smooth
self.dfInput = pd.DataFrame()
self.videoData = {}
self.kbpsOverall = 0
self.avgStageBitrates = {}
self.bitrateVariances = {}
self.read_CSV()
def read_CSV(self):
self.dfInput = pd.read_csv(self.pathCSV)
self.dfInput['bitrateKbps'] = self.dfInput['size'].rolling(window=FPS).sum() * 8 // 1000
self.dfInput['accumulatedKbits'] = self.dfInput['size'].cumsum() * 8 // 1000
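        # Per-frame sizes (bytes) are converted to kbit and summed over a rolling window of FPS frames (1 s at 35 fps) to get kbps.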
if self.smooth:
# ONLY FOR AN EXPERIMENT WITHOUT SYNTHETIC SCREENS
self.dfInput['gamestageEMA'] = self.dfInput['gamestage'].ewm(span=self.windowLength, adjust=False).mean()
for i in range(self.dfInput.shape[0]):
self.dfInput.loc[i, 'gamestageEMA'] = 1 if self.dfInput.loc[i, 'gamestageEMA'] > 0.5 else 0
self.dfInput['bitrateKbpsEMA'] = self.dfInput['bitrateKbps'].ewm(span=self.windowLength, adjust=False).mean()
self.kbpsOverall = round(self.dfInput['bitrateKbpsEMA'].mean(), 2)
self.fileName = "%s_smooth_w%i" % (self.pathCSV.split('.')[1].split('.')[0].split('/')[-1], self.windowLength)
columnStage = 'gamestageEMA'
else:
self.kbpsOverall = round(self.dfInput['bitrateKbps'].mean(), 2)
self.fileName = self.pathCSV.split('.')[1].split('.')[0].split('/')[-1]
columnStage = 'gamestage'
for index, row in self.dfInput.iterrows():
if index == 0:
firstStage = self.dfInput.loc[0, columnStage]
secfirstStage = self.dfInput.loc[0, 'time']
tmpRow = deepcopy(self.dfInput.iloc[0])
continue
if row[columnStage] != firstStage:
self.videoData[round(row['time'], 6)] = tmpRow[columnStage]
# Update
secfirstStage = row['time']
firstStage = row[columnStage]
tmpRow = deepcopy(row)
        # Save the final game stage explicitly; the loop above only records a stage once a different stage follows it.
self.videoData[round(self.dfInput.iloc[-1]['time'], 6)] = self.dfInput.iloc[-1][columnStage]
def render_table(self, data, col_width=3.5, row_height=0.6, font_size=12, header_color='#40466e', edge_color='w',
row_colors=['#f1f1f2', 'w'], bbox=[0, 0, 1, 1], header_columns=0, ax=None, **kwargs):
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in mpl_table._cells.items():
cell.set_edgecolor(edge_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
return ax.get_figure(), ax
def get_overall_stats(self):
count_total = self.dfInput.shape[0]
if self.smooth: columnStage = 'gamestageEMA'
else: columnStage = 'gamestage'
df_exploration = self.dfInput[(self.dfInput[columnStage] == GameStage.EXPLORATION)]
df_combat = self.dfInput[(self.dfInput[columnStage] == GameStage.COMBAT)]
df_menu = self.dfInput[(self.dfInput[columnStage] == GameStage.MENU)]
df_console = self.dfInput[(self.dfInput[columnStage] == GameStage.CONSOLE)]
df_scoreboard = self.dfInput[(self.dfInput[columnStage] == GameStage.SCOREBOARD)]
min_exploration = df_exploration['size'].min()
max_exploration = df_exploration['size'].max()
cv_exploration = round((df_exploration['size'].std() / df_exploration['size'].mean()), 2)
mean_exploration = round(df_exploration['size'].mean(), 2)
count_exploration = df_exploration.shape[0]
dom_exploration = round(count_exploration * 100 / count_total, 2)
min_combat = df_combat['size'].min()
max_combat = df_combat['size'].max()
cv_combat = round((df_combat['size'].std() / df_combat['size'].mean()), 2)
mean_combat = round(df_combat['size'].mean(), 2)
count_combat = df_combat.shape[0]
dom_combat = round(count_combat * 100 / count_total , 2)
min_menu = df_menu['size'].min()
max_menu = df_menu['size'].max()
cv_menu = round((df_menu['size'].std() / df_menu['size'].mean()), 2)
mean_menu = round(df_menu['size'].mean(), 2)
count_menu = df_menu.shape[0]
dom_menu = round(count_menu * 100 / count_total , 2)
min_console = df_console['size'].min()
max_console = df_console['size'].max()
cv_console = round((df_console['size'].std() / df_console['size'].mean()), 2)
mean_console = round(df_console['size'].mean(), 2)
count_console = df_console.shape[0]
dom_console = round(count_console * 100 / count_total , 2)
min_scoreboard = df_scoreboard['size'].min()
max_scoreboard = df_scoreboard['size'].max()
cv_scoreboard = round((df_scoreboard['size'].std() / df_scoreboard['size'].mean()), 2)
mean_scoreboard = round(df_scoreboard['size'].mean(), 2)
count_scoreboard = df_scoreboard.shape[0]
dom_scoreboard = round(count_scoreboard * 100 / count_total , 2)
data = {"Game Stage": ["Exploration", "Combat", "Menu", "Console", "Scoreboard"],
"Min (Bytes)": [min_exploration, min_combat, min_menu, min_console, min_scoreboard],
"Max (Bytes)": [max_exploration, max_combat, max_menu, max_console, max_scoreboard],
"Mean (Bytes)": [mean_exploration,mean_combat, mean_menu, mean_console, mean_scoreboard],
"Coeff. Variation": [cv_exploration, cv_combat, cv_menu, cv_console, cv_scoreboard],
"Fraction (%)": [dom_exploration, dom_combat, dom_menu, dom_console, dom_scoreboard]}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_overall_stats.png" % (self.pathOutput, self.fileName))
plt.clf()
def plot_bitrate_time(self):
# In order to avoid using same legend multiple times
usedLegends = []
if self.smooth:
fig = sns.lineplot(data=self.dfInput, x="time", y="bitrateKbpsEMA", color='black', linewidth=1)
else:
fig = sns.lineplot(data=self.dfInput, x="time", y="bitrateKbps", color='black', linewidth=1)
formerSec = 0
for sec, stage in self.videoData.items():
color, label = legendMap[stage]
if stage in usedLegends:
label = None
else:
usedLegends.append(stage)
plt.axvspan(formerSec, sec, facecolor=color, alpha=0.15, label=label)
formerSec = sec
plt.legend(facecolor='white', framealpha=1, loc="upper right")
plt.xlabel("Time [s]")
plt.ylabel("Bitrate [kbps]")
plt.margins(x=0)
fig.figure.savefig("%s/%s.png" % (self.pathOutput, self.fileName), bbox_inches = "tight")
plt.clf()
def plot_avg_bitrate_per_stage(self, plotVariationsBetweenStages=True):
if self.smooth: columnStage = 'gamestageEMA'
else: columnStage = 'gamestage'
self.avgStageBitrates = {0.0: 0}
for index, row in self.dfInput.iterrows():
if index == 0:
firstRow = deepcopy(row)
continue
if row[columnStage] != firstRow[columnStage]:
timeDiff = row['time'] - firstRow['time']
accBitrateStage = int((row['accumulatedKbits'] - firstRow['accumulatedKbits']) // timeDiff)
self.avgStageBitrates[round(firstRow['time'] + timeDiff/2.0, 3)] = (accBitrateStage, firstRow[columnStage])
firstRow = deepcopy(row)
tmpRow = deepcopy(row)
# for the last game stage
if tmpRow['time'] != firstRow['time']:
timeDiff = tmpRow['time'] - firstRow['time']
accBitrateStage = int((tmpRow['accumulatedKbits'] - firstRow['accumulatedKbits']) // timeDiff)
self.avgStageBitrates[round(firstRow['time'] + timeDiff/2.0, 3)] = (accBitrateStage, tmpRow[columnStage])
del self.avgStageBitrates[0.0]
fig = plt.figure()
ax = fig.add_subplot(111, facecolor='white')
# In order to avoid using same legend multiple times
usedLegends = []
for sec, (kbps, stage) in self.avgStageBitrates.items():
color, label = legendMap[stage]
if stage in usedLegends:
label = None
else:
usedLegends.append(stage)
ax.scatter(x=sec, y=kbps, c=color, label=label)
ax.grid(b=True, which='major', color='black', linestyle='-', linewidth=0.5, alpha=0.1)
plt.margins(x=0.01, y=0.01)
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Bitrate [kbps]")
fig.canvas.draw()
plt.savefig("%s/%s_avgBperStage.png" % (self.pathOutput, self.fileName), bbox_inches = "tight")
plt.clf()
if plotVariationsBetweenStages:
""" Variation of Average Bitrate: B(n) - B(n-1) """
fig = plt.figure()
ax = fig.add_subplot(111, facecolor='white')
usedLegends.clear()
formerKbps = 0
for sec, (kbps, stage) in self.avgStageBitrates.items():
if not formerKbps:
formerKbps = list(self.avgStageBitrates.values())[0][0]
continue
color, label = legendMap[stage]
if stage in usedLegends:
label = None
else:
usedLegends.append(stage)
ax.scatter(x=sec, y=kbps-formerKbps, c=color, label=label)
formerKbps = kbps
ax.grid(b=True, which='major', color='black', linestyle='-', linewidth=0.5, alpha=0.1)
plt.margins(x=0.01, y=0.01)
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Bitrate [kbps]")
fig.canvas.draw()
plt.savefig("%s/%s_varB.png" % (self.pathOutput, self.fileName), bbox_inches = "tight")
plt.clf()
def get_stats_avg_bitrate_var_exp_combat(self):
""" Variance of average bitrates from exploration to combat or vice-versa (Between an average kbps of former stage – average kbps of latter stage) """
tuples_kbps_stage = list(self.avgStageBitrates.values())
exp_combat_bandwidth_0_200, combat_exp_bandwidth_0_200 = [], []
exp_combat_bandwidth_200_300, combat_exp_bandwidth_200_300 = [], []
exp_combat_bandwidth_300_350, combat_exp_bandwidth_300_350 = [], []
exp_combat_bandwidth_350_400, combat_exp_bandwidth_350_400 = [], []
exp_combat_bandwidth_400_450, combat_exp_bandwidth_400_450 = [], []
exp_combat_bandwidth_450_500, combat_exp_bandwidth_450_500 = [], []
exp_combat_bandwidth_500_inf, combat_exp_bandwidth_500_inf = [], []
for i in range(len(tuples_kbps_stage)-1):
baselineKbps = tuples_kbps_stage[i][0]
varKbps = round((tuples_kbps_stage[i+1][0] - baselineKbps) * 100 / baselineKbps, 2)
# Transition from Exploration to Combat
if tuples_kbps_stage[i][1] == GameStage.EXPLORATION and tuples_kbps_stage[i+1][1] == GameStage.COMBAT:
if baselineKbps < 200: exp_combat_bandwidth_0_200.append(varKbps)
elif baselineKbps < 300: exp_combat_bandwidth_200_300.append(varKbps)
elif baselineKbps < 350: exp_combat_bandwidth_300_350.append(varKbps)
elif baselineKbps < 400: exp_combat_bandwidth_350_400.append(varKbps)
elif baselineKbps < 450: exp_combat_bandwidth_400_450.append(varKbps)
elif baselineKbps < 500: exp_combat_bandwidth_450_500.append(varKbps)
else: exp_combat_bandwidth_500_inf.append(varKbps)
# Transition from Combat to Exploration
elif tuples_kbps_stage[i][1] == GameStage.COMBAT and tuples_kbps_stage[i+1][1] == GameStage.EXPLORATION:
if baselineKbps < 200: combat_exp_bandwidth_0_200.append(varKbps)
elif baselineKbps < 300: combat_exp_bandwidth_200_300.append(varKbps)
elif baselineKbps < 350: combat_exp_bandwidth_300_350.append(varKbps)
elif baselineKbps < 400: combat_exp_bandwidth_350_400.append(varKbps)
elif baselineKbps < 450: combat_exp_bandwidth_400_450.append(varKbps)
elif baselineKbps < 500: combat_exp_bandwidth_450_500.append(varKbps)
else: combat_exp_bandwidth_500_inf.append(varKbps)
data = {"Exploration->Combat": ["BW < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Number of Transitions": [len(exp_combat_bandwidth_0_200), len(exp_combat_bandwidth_200_300), len(exp_combat_bandwidth_300_350), len(exp_combat_bandwidth_350_400),
len(exp_combat_bandwidth_400_450), len(exp_combat_bandwidth_450_500), len(exp_combat_bandwidth_500_inf)],
"Negative Variance (%)": [ratio_negatives(exp_combat_bandwidth_0_200), ratio_negatives(exp_combat_bandwidth_200_300), ratio_negatives(exp_combat_bandwidth_300_350),
ratio_negatives(exp_combat_bandwidth_350_400), ratio_negatives(exp_combat_bandwidth_400_450), ratio_negatives(exp_combat_bandwidth_450_500), ratio_negatives(exp_combat_bandwidth_500_inf)],
"Min. Variance (%)": [min(exp_combat_bandwidth_0_200, default='-'), min(exp_combat_bandwidth_200_300, default='-'), min(exp_combat_bandwidth_300_350, default='-'),
min(exp_combat_bandwidth_350_400, default='-'), min(exp_combat_bandwidth_400_450, default='-'), min(exp_combat_bandwidth_450_500, default='-'), min(exp_combat_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(exp_combat_bandwidth_0_200, default='-'), max(exp_combat_bandwidth_200_300, default='-'), max(exp_combat_bandwidth_300_350, default='-'),
max(exp_combat_bandwidth_350_400, default='-'), max(exp_combat_bandwidth_400_450, default='-'), max(exp_combat_bandwidth_450_500, default='-'), max(exp_combat_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(exp_combat_bandwidth_0_200), mean(exp_combat_bandwidth_200_300), mean(exp_combat_bandwidth_300_350), mean(exp_combat_bandwidth_350_400), mean(exp_combat_bandwidth_400_450), mean(exp_combat_bandwidth_450_500), mean(exp_combat_bandwidth_500_inf)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_stats_varAvgKbps_expToCombat.png" % (self.pathOutput, self.fileName))
plt.clf()
data = {"Combat->Exploration": ["BW < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Number of Transitions": [len(combat_exp_bandwidth_0_200), len(combat_exp_bandwidth_200_300), len(combat_exp_bandwidth_300_350), len(combat_exp_bandwidth_350_400),
len(combat_exp_bandwidth_400_450), len(combat_exp_bandwidth_450_500), len(combat_exp_bandwidth_500_inf)],
"Negative Variance (%)": [ratio_negatives(combat_exp_bandwidth_0_200), ratio_negatives(combat_exp_bandwidth_200_300), ratio_negatives(combat_exp_bandwidth_300_350),
ratio_negatives(combat_exp_bandwidth_350_400), ratio_negatives(combat_exp_bandwidth_400_450), ratio_negatives(combat_exp_bandwidth_450_500), ratio_negatives(combat_exp_bandwidth_500_inf)],
"Min. Variance (%)": [min(combat_exp_bandwidth_0_200, default='-'), min(combat_exp_bandwidth_200_300, default='-'), min(combat_exp_bandwidth_300_350, default='-'),
min(combat_exp_bandwidth_350_400, default='-'), min(combat_exp_bandwidth_400_450, default='-'), min(combat_exp_bandwidth_450_500, default='-'), min(combat_exp_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(combat_exp_bandwidth_0_200, default='-'), max(combat_exp_bandwidth_200_300, default='-'), max(combat_exp_bandwidth_300_350, default='-'),
max(combat_exp_bandwidth_350_400, default='-'), max(combat_exp_bandwidth_400_450, default='-'), max(combat_exp_bandwidth_450_500, default='-'), max(combat_exp_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(combat_exp_bandwidth_0_200), mean(combat_exp_bandwidth_200_300), mean(combat_exp_bandwidth_300_350), mean(combat_exp_bandwidth_350_400), mean(combat_exp_bandwidth_400_450), mean(combat_exp_bandwidth_450_500), mean(combat_exp_bandwidth_500_inf)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_stats_varAvgKbps_combatToExp.png" % (self.pathOutput, self.fileName))
plt.clf()
def plot_bitrate_var_on_stage_changes(self, frameRange):
""" Bitrate Variance between 'frameRange' frames before a change and 'frameRange' frames after a change (maximum)"""
if self.smooth:
columnStage = 'gamestageEMA'
columnBitrate = 'bitrateKbpsEMA'
else:
columnStage = 'gamestage'
columnBitrate = 'bitrateKbps'
# There is no calculated bitrate value before 35th frame.
tmpRow = deepcopy(self.dfInput.iloc[FPS-1])
totalRows = self.dfInput.shape[0]
for index, row in self.dfInput.iterrows():
# There is no calculated bitrate value before 35th frame.
if index < FPS: continue
# Game stage change
if row[columnStage] != tmpRow[columnStage]:
# Lowerbound and upperbound must be at the same distance from the index of change
counterBackward = counterForward = 0
while counterBackward < frameRange and \
index-(counterBackward+1) >= FPS-1 and \
self.dfInput.iloc[index-(counterBackward+1)][columnStage] == tmpRow[columnStage]:
counterBackward += 1
while counterForward < frameRange and \
index+(counterForward+1) < totalRows and \
self.dfInput.iloc[index+(counterForward+1)][columnStage] == row[columnStage]:
counterForward += 1
# if counterBackward is not zero (to avoid cases like: 0001000)
if counterBackward == 0 or counterForward == 0:
baselineKbps = self.dfInput.iloc[index-1][columnBitrate]
varKbps = int((self.dfInput.iloc[index][columnBitrate] - baselineKbps) * 100 / baselineKbps)
else:
bound = counterBackward if counterBackward < counterForward else counterForward
baselineKbps = self.dfInput.iloc[index-bound][columnBitrate]
varKbps = int((self.dfInput.iloc[index+bound][columnBitrate] - baselineKbps) * 100 / baselineKbps)
self.bitrateVariances[(round(row['time'], 3), baselineKbps)] = (varKbps, tmpRow[columnStage], row[columnStage])
tmpRow = deepcopy(row)
# In order to avoid using same legend multiple times
usedLegends = []
fig = plt.figure()
ax = fig.add_subplot(111, facecolor='white')
for (sec, baselineKbps), (varKbps, formerStage, latterStage) in self.bitrateVariances.items():
color, label = legendMap[latterStage]
if latterStage in usedLegends:
label = None
else:
usedLegends.append(latterStage)
ax.scatter(x=sec, y=varKbps, c=color, label=label)
ax.grid(b=True, which='major', color='black', linestyle='-', linewidth=0.5, alpha=0.1)
plt.margins(x=0.01, y=0.01)
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Bitrate [kbps]")
fig.canvas.draw()
plt.savefig("%s/%s_kbps_var_range%i.png" % (self.pathOutput, self.fileName, frameRange), bbox_inches = "tight")
plt.clf()
def get_stats_bitrate_var_on_stage_changes(self, frameRange):
""" Bitrate Variances between 'frameRange' frames before change and 'frameRange' frames after change (maximum)"""
exp_combat_bandwidth_0_200, combat_exp_bandwidth_0_200 = [], []
exp_combat_bandwidth_200_300, combat_exp_bandwidth_200_300 = [], []
exp_combat_bandwidth_300_350, combat_exp_bandwidth_300_350 = [], []
exp_combat_bandwidth_350_400, combat_exp_bandwidth_350_400 = [], []
exp_combat_bandwidth_400_450, combat_exp_bandwidth_400_450 = [], []
exp_combat_bandwidth_450_500, combat_exp_bandwidth_450_500 = [], []
exp_combat_bandwidth_500_inf, combat_exp_bandwidth_500_inf = [], []
for (sec, baselineKbps), (varKbps, formerStage, latterStage) in self.bitrateVariances.items():
# Transition from Exploration to Combat
if formerStage == GameStage.EXPLORATION and latterStage == GameStage.COMBAT:
if baselineKbps < 200: exp_combat_bandwidth_0_200.append(varKbps)
elif baselineKbps < 300: exp_combat_bandwidth_200_300.append(varKbps)
elif baselineKbps < 350: exp_combat_bandwidth_300_350.append(varKbps)
elif baselineKbps < 400: exp_combat_bandwidth_350_400.append(varKbps)
elif baselineKbps < 450: exp_combat_bandwidth_400_450.append(varKbps)
elif baselineKbps < 500: exp_combat_bandwidth_450_500.append(varKbps)
else: exp_combat_bandwidth_500_inf.append(varKbps)
# Transition from Combat to Exploration
if formerStage == GameStage.COMBAT and latterStage == GameStage.EXPLORATION:
if baselineKbps < 200: combat_exp_bandwidth_0_200.append(varKbps)
elif baselineKbps < 300: combat_exp_bandwidth_200_300.append(varKbps)
elif baselineKbps < 350: combat_exp_bandwidth_300_350.append(varKbps)
elif baselineKbps < 400: combat_exp_bandwidth_350_400.append(varKbps)
elif baselineKbps < 450: combat_exp_bandwidth_400_450.append(varKbps)
elif baselineKbps < 500: combat_exp_bandwidth_450_500.append(varKbps)
else: combat_exp_bandwidth_500_inf.append(varKbps)
data = {"Exploration->Combat": ["BW < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Number of Transitions": [len(exp_combat_bandwidth_0_200), len(exp_combat_bandwidth_200_300), len(exp_combat_bandwidth_300_350), len(exp_combat_bandwidth_350_400),
len(exp_combat_bandwidth_400_450), len(exp_combat_bandwidth_450_500), len(exp_combat_bandwidth_500_inf)],
"Negative Variance (%)": [ratio_negatives(exp_combat_bandwidth_0_200), ratio_negatives(exp_combat_bandwidth_200_300), ratio_negatives(exp_combat_bandwidth_300_350),
ratio_negatives(exp_combat_bandwidth_350_400), ratio_negatives(exp_combat_bandwidth_400_450), ratio_negatives(exp_combat_bandwidth_450_500), ratio_negatives(exp_combat_bandwidth_500_inf)],
"Min. Variance (%)": [min(exp_combat_bandwidth_0_200, default='-'), min(exp_combat_bandwidth_200_300, default='-'), min(exp_combat_bandwidth_300_350, default='-'),
min(exp_combat_bandwidth_350_400, default='-'), min(exp_combat_bandwidth_400_450, default='-'), min(exp_combat_bandwidth_450_500, default='-'), min(exp_combat_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(exp_combat_bandwidth_0_200, default='-'), max(exp_combat_bandwidth_200_300, default='-'), max(exp_combat_bandwidth_300_350, default='-'),
max(exp_combat_bandwidth_350_400, default='-'), max(exp_combat_bandwidth_400_450, default='-'), max(exp_combat_bandwidth_450_500, default='-'), max(exp_combat_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(exp_combat_bandwidth_0_200), mean(exp_combat_bandwidth_200_300), mean(exp_combat_bandwidth_300_350), mean(exp_combat_bandwidth_350_400), mean(exp_combat_bandwidth_400_450), mean(exp_combat_bandwidth_450_500), mean(exp_combat_bandwidth_500_inf)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_kbps_var_range%i_exp_combat.png" % (self.pathOutput, self.fileName, frameRange))
plt.clf()
data = {"Combat->Exploration": ["BW < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Number of Transitions": [len(combat_exp_bandwidth_0_200), len(combat_exp_bandwidth_200_300), len(combat_exp_bandwidth_300_350), len(combat_exp_bandwidth_350_400),
len(combat_exp_bandwidth_400_450), len(combat_exp_bandwidth_450_500), len(combat_exp_bandwidth_500_inf)],
"Negative Variance (%)": [ratio_negatives(combat_exp_bandwidth_0_200), ratio_negatives(combat_exp_bandwidth_200_300), ratio_negatives(combat_exp_bandwidth_300_350),
ratio_negatives(combat_exp_bandwidth_350_400), ratio_negatives(combat_exp_bandwidth_400_450), ratio_negatives(combat_exp_bandwidth_450_500), ratio_negatives(combat_exp_bandwidth_500_inf)],
"Min. Variance (%)": [min(combat_exp_bandwidth_0_200, default='-'), min(combat_exp_bandwidth_200_300, default='-'), min(combat_exp_bandwidth_300_350, default='-'),
min(combat_exp_bandwidth_350_400, default='-'), min(combat_exp_bandwidth_400_450, default='-'), min(combat_exp_bandwidth_450_500, default='-'), min(combat_exp_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(combat_exp_bandwidth_0_200, default='-'), max(combat_exp_bandwidth_200_300, default='-'), max(combat_exp_bandwidth_300_350, default='-'),
max(combat_exp_bandwidth_350_400, default='-'), max(combat_exp_bandwidth_400_450, default='-'), max(combat_exp_bandwidth_450_500, default='-'), max(combat_exp_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(combat_exp_bandwidth_0_200), mean(combat_exp_bandwidth_200_300), mean(combat_exp_bandwidth_300_350), mean(combat_exp_bandwidth_350_400), mean(combat_exp_bandwidth_400_450), mean(combat_exp_bandwidth_450_500), mean(combat_exp_bandwidth_500_inf)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_kbps_var_range%i_combat_exp.png" % (self.pathOutput, self.fileName, frameRange))
plt.clf()
def get_transition_matrix_exp_combat(self):
if self.smooth: columnStage = 'gamestageEMA'
else: columnStage = 'gamestage'
countTransitionExpToCombat = countTransitionCombatToExp = countTransitionExpToExp = countTransitionCombatToCombat = 0
for i in range(self.dfInput.shape[0] - 1):
formerStage, latterStage = self.dfInput.loc[i, columnStage], self.dfInput.loc[i+1, columnStage]
if formerStage == GameStage.EXPLORATION and latterStage == GameStage.COMBAT: countTransitionExpToCombat += 1
elif formerStage == GameStage.COMBAT and latterStage == GameStage.EXPLORATION: countTransitionCombatToExp += 1
elif formerStage == GameStage.EXPLORATION and latterStage == GameStage.EXPLORATION: countTransitionExpToExp += 1
elif formerStage == GameStage.COMBAT and latterStage == GameStage.COMBAT: countTransitionCombatToCombat += 1
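        # Normalise the transition counts per row to obtain empirical Exploration/Combat transition probabilities.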
transitionExpToExp = round(countTransitionExpToExp/(countTransitionExpToCombat+countTransitionExpToExp), 2)
transitionExpToCombat = round(countTransitionExpToCombat/(countTransitionExpToCombat+countTransitionExpToExp), 2)
transitionCombatToCombat = round(countTransitionCombatToCombat/(countTransitionCombatToCombat+countTransitionCombatToExp), 2)
transitionCombatToExp = round(countTransitionCombatToExp/(countTransitionCombatToCombat+countTransitionCombatToExp), 2)
data = {"Game Stage": ["Exploration", "Combat"],
"Exploration": [transitionExpToExp, transitionCombatToExp],
"Combat": [transitionExpToCombat, transitionCombatToCombat]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_transitionMatrix.png" % (self.pathOutput, self.fileName))
plt.clf()
def get_stats_bitrate_on_exp_combat(self):
""" For each phase, what are the min-max-avg bitrate? How is the distribution on different bandwidths? """
if self.smooth:
columnStage = 'gamestageEMA'
columnBitrate = 'bitrateKbpsEMA'
else:
columnStage = 'gamestage'
columnBitrate = 'bitrateKbps'
# Trim the first 34 rows, since those are nan in bitrate
self.dfInput.drop(range(FPS-1), inplace=True)
self.dfInput.reset_index(drop=True, inplace=True)
df_exploration = self.dfInput[(self.dfInput[columnStage] == GameStage.EXPLORATION)]
df_combat = self.dfInput[(self.dfInput[columnStage] == GameStage.COMBAT)]
df_exploration_0_100 = df_exploration[(df_exploration[columnBitrate] < 100)]
df_exploration_100_200 = df_exploration[(df_exploration[columnBitrate] >= 100) & (df_exploration[columnBitrate] < 200)]
df_exploration_200_300 = df_exploration[(df_exploration[columnBitrate] >= 200) & (df_exploration[columnBitrate] < 300)]
df_exploration_300_400 = df_exploration[(df_exploration[columnBitrate] >= 300) & (df_exploration[columnBitrate] < 400)]
df_exploration_400_500 = df_exploration[(df_exploration[columnBitrate] >= 400) & (df_exploration[columnBitrate] < 500)]
df_exploration_500_inf = df_exploration[(df_exploration[columnBitrate] > 500)]
df_combat_0_100 = df_combat[(df_combat[columnBitrate] < 100)]
df_combat_100_200 = df_combat[(df_combat[columnBitrate] >= 100) & (df_combat[columnBitrate] < 200)]
df_combat_200_300 = df_combat[(df_combat[columnBitrate] >= 200) & (df_combat[columnBitrate] < 300)]
df_combat_300_400 = df_combat[(df_combat[columnBitrate] >= 300) & (df_combat[columnBitrate] < 400)]
df_combat_400_500 = df_combat[(df_combat[columnBitrate] >= 400) & (df_combat[columnBitrate] < 500)]
df_combat_500_inf = df_combat[(df_combat[columnBitrate] > 500)]
data = [["< 100", df_exploration_0_100.shape[0], df_combat_0_100.shape[0]],
["100-200",df_exploration_100_200.shape[0], df_combat_100_200.shape[0]],
["200-300",df_exploration_200_300.shape[0], df_combat_200_300.shape[0]],
["300-400",df_exploration_300_400.shape[0], df_combat_300_400.shape[0]],
["400-500",df_exploration_400_500.shape[0], df_combat_400_500.shape[0]],
["> 500", df_exploration_500_inf.shape[0], df_combat_500_inf.shape[0]]
]
dfOutput = pd.DataFrame(data, columns=["Kbps", "Exploration", "Combat"])
ax = dfOutput.plot(x="Kbps", y=["Exploration", "Combat"], kind="bar", figsize=(10,6), rot=0)
for p in ax.patches:
ax.annotate(str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.015), fontsize=8)
plt.ylabel("Number of Frames")
plt.xlabel("Bitrate [kbps]")
plt.savefig("%s/%s_countHistogram.png" % (self.pathOutput, self.fileName), bbox_inches = "tight")
plt.clf()
data = {"Game Stage": ["Exploration", "Combat"],
"Min. Bitrate": [round(df_exploration[columnBitrate].min(), 2), round(df_combat[columnBitrate].min(), 2)],
"Max. Bitrate": [round(df_exploration[columnBitrate].max(), 2), round(df_combat[columnBitrate].max(), 2)],
"Avg. Bitrate": [round(df_exploration[columnBitrate].mean(), 2), round(df_combat[columnBitrate].mean(), 2)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_bitrateStats.png" % (self.pathOutput, self.fileName))
plt.clf()
def get_stats_bitrate_var_exp_combat(self):
""" What are the min-avg-max variation on bitrate when you transition from E->C - C->E - E->E - C->C (on different baseline bitrates) """
if self.smooth:
columnStage = 'gamestageEMA'
columnBitrate = 'bitrateKbpsEMA'
else:
columnStage = 'gamestage'
columnBitrate = 'bitrateKbps'
bitrateVariance_exp_combat = {}
bitrateVariance_combat_exp = {}
bitrateVariance_exp_exp = {}
bitrateVariance_combat_combat = {}
for i in range(self.dfInput.shape[0]-1):
formerStage, latterStage = self.dfInput.loc[i, columnStage], self.dfInput.loc[i+1, columnStage]
baseBitrate = self.dfInput.loc[i, columnBitrate]
variance = round((self.dfInput.loc[i+1, columnBitrate] - baseBitrate) * 100 / baseBitrate, 2)
if formerStage == GameStage.EXPLORATION and latterStage == GameStage.COMBAT:
if baseBitrate not in bitrateVariance_exp_combat:
bitrateVariance_exp_combat[baseBitrate] = variance
else:
bitrateVariance_exp_combat[baseBitrate] = round((bitrateVariance_exp_combat[baseBitrate] + variance) / 2, 2)
elif formerStage == GameStage.COMBAT and latterStage == GameStage.EXPLORATION:
if baseBitrate not in bitrateVariance_combat_exp:
bitrateVariance_combat_exp[baseBitrate] = variance
else:
bitrateVariance_combat_exp[baseBitrate] = round((bitrateVariance_combat_exp[baseBitrate] + variance) / 2, 2)
elif formerStage == GameStage.EXPLORATION and latterStage == GameStage.EXPLORATION:
if baseBitrate not in bitrateVariance_exp_exp:
bitrateVariance_exp_exp[baseBitrate] = variance
else:
bitrateVariance_exp_exp[baseBitrate] = round((bitrateVariance_exp_exp[baseBitrate] + variance) / 2, 2)
elif formerStage == GameStage.COMBAT and latterStage == GameStage.COMBAT:
if baseBitrate not in bitrateVariance_combat_combat:
bitrateVariance_combat_combat[baseBitrate] = variance
else:
bitrateVariance_combat_combat[baseBitrate] = round((bitrateVariance_combat_combat[baseBitrate] + variance) / 2, 2)
exp_combat_bandwidth_0_200 = []
exp_combat_bandwidth_200_300 = []
exp_combat_bandwidth_300_350 = []
exp_combat_bandwidth_350_400 = []
exp_combat_bandwidth_400_450 = []
exp_combat_bandwidth_450_500 = []
exp_combat_bandwidth_500_inf = []
combat_exp_bandwidth_0_200 = []
combat_exp_bandwidth_200_300 = []
combat_exp_bandwidth_300_350 = []
combat_exp_bandwidth_350_400 = []
combat_exp_bandwidth_400_450 = []
combat_exp_bandwidth_450_500 = []
combat_exp_bandwidth_500_inf = []
exp_exp_bandwidth_0_200 = []
exp_exp_bandwidth_200_300 = []
exp_exp_bandwidth_300_350 = []
exp_exp_bandwidth_350_400 = []
exp_exp_bandwidth_400_450 = []
exp_exp_bandwidth_450_500 = []
exp_exp_bandwidth_500_inf = []
combat_combat_bandwidth_0_200 = []
combat_combat_bandwidth_200_300 = []
combat_combat_bandwidth_300_350 = []
combat_combat_bandwidth_350_400 = []
combat_combat_bandwidth_400_450 = []
combat_combat_bandwidth_450_500 = []
combat_combat_bandwidth_500_inf = []
for (b, var) in bitrateVariance_exp_combat.items():
if b < 200:
exp_combat_bandwidth_0_200.append(var)
elif b < 300:
exp_combat_bandwidth_200_300.append(var)
elif b < 350:
exp_combat_bandwidth_300_350.append(var)
elif b < 400:
exp_combat_bandwidth_350_400.append(var)
elif b < 450:
exp_combat_bandwidth_400_450.append(var)
elif b < 500:
exp_combat_bandwidth_450_500.append(var)
else:
exp_combat_bandwidth_500_inf.append(var)
for (b, var) in bitrateVariance_combat_exp.items():
if b < 200:
combat_exp_bandwidth_0_200.append(var)
elif b < 300:
combat_exp_bandwidth_200_300.append(var)
elif b < 350:
combat_exp_bandwidth_300_350.append(var)
elif b < 400:
combat_exp_bandwidth_350_400.append(var)
elif b < 450:
combat_exp_bandwidth_400_450.append(var)
elif b < 500:
combat_exp_bandwidth_450_500.append(var)
else:
combat_exp_bandwidth_500_inf.append(var)
for (b, var) in bitrateVariance_exp_exp.items():
if b < 200:
exp_exp_bandwidth_0_200.append(var)
elif b < 300:
exp_exp_bandwidth_200_300.append(var)
elif b < 350:
exp_exp_bandwidth_300_350.append(var)
elif b < 400:
exp_exp_bandwidth_350_400.append(var)
elif b < 450:
exp_exp_bandwidth_400_450.append(var)
elif b < 500:
exp_exp_bandwidth_450_500.append(var)
else:
exp_exp_bandwidth_500_inf.append(var)
for (b, var) in bitrateVariance_combat_combat.items():
if b < 200:
combat_combat_bandwidth_0_200.append(var)
elif b < 300:
combat_combat_bandwidth_200_300.append(var)
elif b < 350:
combat_combat_bandwidth_300_350.append(var)
elif b < 400:
combat_combat_bandwidth_350_400.append(var)
elif b < 450:
combat_combat_bandwidth_400_450.append(var)
elif b < 500:
combat_combat_bandwidth_450_500.append(var)
else:
combat_combat_bandwidth_500_inf.append(var)
data = {"Exploration->Combat": ["BW 0 < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Min. Variance (%)": [min(exp_combat_bandwidth_0_200, default='-'), min(exp_combat_bandwidth_200_300, default='-'), min(exp_combat_bandwidth_300_350, default='-'),
min(exp_combat_bandwidth_350_400, default='-'), min(exp_combat_bandwidth_400_450, default='-'), min(exp_combat_bandwidth_450_500, default='-'), min(exp_combat_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(exp_combat_bandwidth_0_200, default='-'), max(exp_combat_bandwidth_200_300, default='-'), max(exp_combat_bandwidth_300_350, default='-'),
max(exp_combat_bandwidth_350_400, default='-'), max(exp_combat_bandwidth_400_450, default='-'), max(exp_combat_bandwidth_450_500, default='-'), max(exp_combat_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(exp_combat_bandwidth_0_200), mean(exp_combat_bandwidth_200_300), mean(exp_combat_bandwidth_300_350),
mean(exp_combat_bandwidth_350_400), mean(exp_combat_bandwidth_400_450), mean(exp_combat_bandwidth_450_500), mean(exp_combat_bandwidth_500_inf)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_bitrateVar_exp_combat.png" % (self.pathOutput, self.fileName))
plt.clf()
data = {"Combat->Exploration": ["BW 0 < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Min. Variance (%)": [min(combat_exp_bandwidth_0_200, default='-'), min(combat_exp_bandwidth_200_300, default='-'), min(combat_exp_bandwidth_300_350, default='-'),
min(combat_exp_bandwidth_350_400, default='-'), min(combat_exp_bandwidth_400_450, default='-'), min(combat_exp_bandwidth_450_500, default='-'), min(combat_exp_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(combat_exp_bandwidth_0_200, default='-'), max(combat_exp_bandwidth_200_300, default='-'), max(combat_exp_bandwidth_300_350, default='-'),
max(combat_exp_bandwidth_350_400, default='-'), max(combat_exp_bandwidth_400_450, default='-'), max(combat_exp_bandwidth_450_500, default='-'), max(combat_exp_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(combat_exp_bandwidth_0_200), mean(combat_exp_bandwidth_200_300), mean(combat_exp_bandwidth_300_350),
mean(combat_exp_bandwidth_350_400), mean(combat_exp_bandwidth_400_450), mean(combat_exp_bandwidth_450_500), mean(combat_exp_bandwidth_500_inf)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_bitrateVar_combat_exp.png" % (self.pathOutput, self.fileName))
plt.clf()
data = {"Exploration->Exploration": ["BW 0 < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Min. Variance (%)": [min(exp_exp_bandwidth_0_200, default='-'), min(exp_exp_bandwidth_200_300, default='-'), min(exp_exp_bandwidth_300_350, default='-'),
min(exp_exp_bandwidth_350_400, default='-'), min(exp_exp_bandwidth_400_450, default='-'), min(exp_exp_bandwidth_450_500, default='-'), min(exp_exp_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(exp_exp_bandwidth_0_200, default='-'), max(exp_exp_bandwidth_200_300, default='-'), max(exp_exp_bandwidth_300_350, default='-'),
max(exp_exp_bandwidth_350_400, default='-'), max(exp_exp_bandwidth_400_450, default='-'), max(exp_exp_bandwidth_450_500, default='-'), max(exp_exp_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(exp_exp_bandwidth_0_200), mean(exp_exp_bandwidth_200_300), mean(exp_exp_bandwidth_300_350),
mean(exp_exp_bandwidth_350_400), mean(exp_exp_bandwidth_400_450), mean(exp_exp_bandwidth_450_500), mean(exp_exp_bandwidth_500_inf)]
}
dfOutput = pd.DataFrame(data)
fig,ax = self.render_table(dfOutput)
fig.savefig("%s/%s_bitrateVar_exp_exp.png" % (self.pathOutput, self.fileName))
plt.clf()
data = {"Combat->Combat": ["BW 0 < 200 kbps", "BW 200-300 kbps", "BW 300-350 kbps", "BW 350-400 kbps", "BW 400-450 kbps", "BW 450-500 kbps", "BW > 500 kbps"],
"Min. Variance (%)": [min(combat_combat_bandwidth_0_200, default='-'), min(combat_combat_bandwidth_200_300, default='-'), min(combat_combat_bandwidth_300_350, default='-'),
min(combat_combat_bandwidth_350_400, default='-'), min(combat_combat_bandwidth_400_450, default='-'), min(combat_combat_bandwidth_450_500, default='-'), min(combat_combat_bandwidth_500_inf, default='-')],
"Max. Variance (%)": [max(combat_combat_bandwidth_0_200, default='-'), max(combat_combat_bandwidth_200_300, default='-'), max(combat_combat_bandwidth_300_350, default='-'),
max(combat_combat_bandwidth_350_400, default='-'), max(combat_combat_bandwidth_400_450, default='-'), max(combat_combat_bandwidth_450_500, default='-'), max(combat_combat_bandwidth_500_inf, default='-')],
"Avg. Variance (%)": [mean(combat_combat_bandwidth_0_200), mean(combat_combat_bandwidth_200_300), mean(combat_combat_bandwidth_300_350),
mean(combat_combat_bandwidth_350_400), mean(combat_combat_bandwidth_400_450), mean(combat_combat_bandwidth_450_500), mean(combat_combat_bandwidth_500_inf)]
}
        dfOutput = pd.DataFrame(data)
import airflow
import numpy as np
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.trigger_rule import TriggerRule
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from dags.analytics.logger.logger import get_logger
from dags.analytics.main import fetch_housing_data, load_housing_data
from dags.common.CombinedAttributesAdder import CombinedAttributesAdder
args = {
'owner': 'Airflow',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id='mnist_workflow',
default_args=args,
schedule_interval=None,
)
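# Note: despite the 'mnist_workflow' dag_id, the tasks below implement the
# California-housing regression workflow (fetch, prepare, split, train).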
logger = get_logger("MAIN")
# [START datascience_workflow]
def start_workflow(ds, **kwargs):
return 'START'
step_start = PythonOperator(
task_id='start_workflow',
provide_context=True, # ds
python_callable=start_workflow,
dag=dag,
)
# Task 1
def task_1(output_path):
logger.info("COLLECTING AND SAVING DATA")
fetch_housing_data()
housing = load_housing_data()
housing_df = pd.DataFrame(housing)
housing_df.to_csv(output_path)
return "TASK_LOAD_DATA"
step_1 = PythonOperator(
task_id='step_1',
python_callable=task_1,
op_kwargs={'output_path': 'data/housing_data.csv'
},
dag=dag,
)
def task_2(input_path, output_path):
logger.info("""# Preparation steps""")
housing = pd.read_csv(input_path)
median = housing["total_bedrooms"].median()
housing["total_bedrooms"].fillna(median, inplace=True)
imputer = SimpleImputer(strategy="median")
housing_num = housing.drop("ocean_proximity", axis=1)
imputer.fit(housing_num)
X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns,
index=housing_num.index)
    attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
    # transform() returns a NumPy array, so keep it in its own variable;
    # housing_num (the numeric columns) was already computed above
    housing_extra_attribs = attr_adder.transform(housing_tr.values)
logger.info("""# Transformation Pipelines""")
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", OneHotEncoder(), cat_attribs),
])
    housing_prepared = full_pipeline.fit_transform(housing)
    # fit_transform() returns a NumPy array, so wrap it in a DataFrame before saving
    pd.DataFrame(housing_prepared).to_csv(output_path, index=False)
return "TASK_DATA_PREPARATION"
step_2 = PythonOperator(
task_id='step_2',
python_callable=task_2,
    op_kwargs={'input_path': 'data/housing_data.csv',
               'output_path': 'data/housing_data_prepared.csv',
               },
dag=dag,
)
# SPLIT DATA INTO TRAINING AND TESTING SETS
# SAVE TO CSV FROM PANDAS
def task_3(input_path_prepared_data, output_path_train, output_path_test):
global strat_train_set, strat_test_set
housing = pd.read_csv(input_path_prepared_data)
housing["income_cat"] = pd.cut(housing["median_income"],
bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
labels=[1, 2, 3, 4, 5])
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
for set_ in (strat_train_set, strat_test_set):
set_.drop("income_cat", axis=1, inplace=True)
strat_train_set.to_csv(output_path_train)
strat_test_set.to_csv(output_path_test)
return "TASK_TESTING_SETS"
step_3 = PythonOperator(
task_id='step_3',
python_callable=task_3,
    op_kwargs={'input_path_prepared_data': 'data/housing_data_prepared.csv',
'output_path_train': 'sets/train_sets.csv',
'output_path_test': 'sets/test_sets.csv',
},
dag=dag,
)
def task_4(input_path_train, input_path_test, output_path_predictions, output_path_performance):
logger.info("""# Training and Evaluating on the Training Set""")
    strat_train_set = pd.read_csv(input_path_train)
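# A possible continuation (editor's assumption, not from the original script):
# task_4 would fit a RandomForestRegressor on the prepared training set, score it
# with mean_squared_error on the test set, and the tasks would then be chained:
#   step_start >> step_1 >> step_2 >> step_3 >> step_4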
import os
import json
import pandas as pd
from argparse import ArgumentParser
# Reading Watson Assistant log files in .json format, each log event is a JSON record.
# Return a list of Watson Assistant log events.
def readLogs(inputPath, conversation_id_key='response.context.conversation_id', custom_field_names_comma_separated=None):
"""Reads all log event .json files in `inputPath` and its subdirectories."""
if(os.path.isdir(inputPath)):
data = pd.DataFrame()
print('Processing input directory {}'.format(inputPath))
for root, dirs, files in os.walk(inputPath):
dirs.sort()
files.sort()
for file in files:
if file.endswith('.json'):
logFile = os.path.join(root, file)
fileData = readLogsFromFile(logFile, conversation_id_key, custom_field_names_comma_separated)
if fileData is not None and len(fileData) > 0:
data = data.append(fileData)
return data
else:
return readLogsFromFile(inputPath, conversation_id_key, custom_field_names_comma_separated)
def readLogsFromFile(filename, conversation_id_key='response.context.conversation_id', custom_field_names_comma_separated=None):
"""Reads all log events from JSON file `filename`."""
print('Processing input file {}'.format(filename))
with open(filename) as json_file:
data = json.load(json_file)
if data is not None and len(data) > 0:
#If using `getAllLogs.py` you just get the array, the raw Watson API produces a field called 'logs' which contains the array
if 'logs' in data:
data = data['logs']
return extractConversationData(data, conversation_id_key, custom_field_names_comma_separated)
else:
return None
#------------------------------------------------------------------------
# deep_get reads an arbitrarily-nested key sequence from a dictionary.
# getCustomFields' `key_list` turns "request.context.somevariable" into ["request","context","somevariable"]
# The combination allows extraction of arbitrary key-value sequences from the log event.
def deep_get(dct, keys, default=None):
for key in keys:
try:
dct = dct[key]
except KeyError:
return default
except TypeError:
return default
return dct
def getFieldShortName(field_name):
""" Simplifies `field_name` in the exported dataframe by removing Watson Assistant prefixes """
return field_name.replace('request.','').replace('response.','').replace('context.system.','').replace('context.','')
# Caches information about custom fields so they do not need to be re-calculated on every log event.
# Example dictionary format is `{'request.response.XYZ': {'name': 'XYZ', 'key_list': ['request', 'response', 'XYZ']}}``
def getCustomFields(custom_field_names):
customFields = {}
for field_original_name in custom_field_names:
field_short_name = getFieldShortName(field_original_name)
field_keys_list = field_original_name.split(".")
customFields[field_original_name] = {'name':field_short_name, 'key_list':field_keys_list}
return customFields
##------------------------------------------------------------------------
def logToRecord(log, customFields):
record = {}
record['conversation_id'] = log['response']['context']['conversation_id']
#Location of dialog_turn_counter varies by WA version
if 'dialog_turn_counter' in log['response']['context']['system']:
record['dialog_turn_counter'] = log['response']['context']['system']['dialog_turn_counter']
else:
record['dialog_turn_counter'] = log['request']['context']['system']['dialog_turn_counter']
record['request_timestamp'] = log['request_timestamp']
record['response_timestamp'] = log['response_timestamp']
if 'text' in log['request']['input']:
record['input.text'] = log['request']['input']['text']
if 'text' in log['response']['output']:
record['output.text'] = ' '.join(filter(None,log['response']['output']['text'])).replace('\r','').replace('\n','')
if 'intents' in log['response'] and (len(log['response']['intents']) > 0):
record['intent'] = log['response']['intents'][0]['intent']
record['intent_confidence'] = log['response']['intents'][0]['confidence']
if 'entities' in log['response'] and len(log['response']['entities']) > 0:
record['entities'] = tuple ( log['response']['entities'] )
if 'nodes_visited' in log['response']['output']:
record['nodes_visited'] = tuple (log['response']['output']['nodes_visited'])
elif 'debug' in log['response']['output'] and 'nodes_visited' in log['response']['output']['debug']:
record['nodes_visited'] = tuple (log['response']['output']['debug']['nodes_visited'])
else:
record['nodes_visited'] = ()
if 'branch_exited_reason' in log['response']['context']['system']:
record['branch_exited_reason'] = log['response']['context']['system']['branch_exited_reason']
for field in customFields:
key, value = customFields[field]['name'], customFields[field]['key_list']
record[key] = deep_get(log, value)
return record
def extractConversationData(logs, conversation_id_key='response.context.conversation_id', custom_field_names_comma_separated=None):
# Parse custom field names from argument list
if custom_field_names_comma_separated is None:
custom_field_names = []
else:
custom_field_names = custom_field_names_comma_separated.split(',')
# Determine conversation primary key and make sure we extract it from log records
if conversation_id_key == 'response.context.conversation_id':
primarySortField = 'conversation_id'
else:
primarySortField = getFieldShortName(conversation_id_key)
if conversation_id_key not in custom_field_names:
custom_field_names.insert(0, conversation_id_key)
customFields = getCustomFields(custom_field_names)
# Summarize each Watson Assistant log event into a more workable conversational record
conversation_records_list = [logToRecord(log, customFields) for log in logs]
df = pd.DataFrame(conversation_records_list)
#converting date fields.
df['request_timestamp'] = pd.to_datetime(df['request_timestamp'])
    df['response_timestamp'] = pd.to_datetime(df['response_timestamp'])
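# Minimal usage sketch (editor's assumption; the paths and the custom field name
# below are illustrative, not part of the original module):
#   df = readLogs('./logs', custom_field_names_comma_separated='response.context.my_var')
#   df.sort_values(['conversation_id', 'dialog_turn_counter']).to_csv('logs.csv', index=False)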
# -*- coding: utf-8 -*-
# Generate stacking features for the second layer using the tuned models
import best_models as bm
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
import getpass
from sklearn.metrics import roc_auc_score
import time
time_begin = time.time()
SEED=36
# ===============data==================
DATA_DIR = '../../data/data_4/'
print('Reading data...')
print('Location:', DATA_DIR)
train_df = pd.read_csv(DATA_DIR + 'train_preprocessed1.csv', encoding='gbk')
test_df = pd.read_csv(DATA_DIR + 'test_preprocessed1.csv', encoding='gbk')
if getpass.getuser() == 'stone':
train_df = train_df[:20]
# ==============END data==================
kf = KFold(n_splits=5, shuffle=True, random_state=SEED)
def get_oof(model, x_train, y_train, x_test, model_name):
oof_train = np.zeros((x_train.shape[0],))
oof_test = np.zeros((x_test.shape[0],))
oof_test_skf = np.empty((5, x_test.shape[0]))
for i,(train_index, test_index) in enumerate(kf.split(x_train)):
kf_x_train = x_train[train_index]
kf_y_train = y_train[train_index]
kf_x_test = x_train[test_index]
        print(model_name, 'training... data shapes: {}, {}'.format(kf_x_train.shape, kf_y_train.shape))
model.fit(kf_x_train, kf_y_train)
oof_train[test_index] = model.predict_proba(kf_x_test)[:,1]
oof_test_skf[i,:] = model.predict_proba(x_test)[:,1]
oof_test[:] = oof_test_skf.mean(axis=0)
oof_train = oof_train.reshape(-1, 1)
oof_test = oof_test.reshape(-1, 1)
print('{}-CV roc_auc_score: {}'.format(model_name, roc_auc_score(y_train, oof_train)))
return oof_train, oof_test
# Initialize each tuned model and load its corresponding data
xgb_model = bm.get_tuned_xgb()
x_train_xgb, y_train_xgb, x_test_xgb = bm.get_data(train_df=train_df,
test_df=test_df,
DATA_DIR=DATA_DIR,
model_name='xgb')
rf_model = bm.get_tuned_rf()
x_train_rf, y_train_rf, x_test_rf = bm.get_data(train_df=train_df,
test_df=test_df,
DATA_DIR=DATA_DIR,
model_name='rf')
# Produce the out-of-fold predictions on the training set and the averaged predictions on the test set
xgb_oof_train, xgb_oof_test = get_oof(xgb_model,
x_train=x_train_xgb,
y_train=y_train_xgb,
x_test=x_test_xgb,
model_name='xgb')
rf_oof_train, rf_oof_test = get_oof(rf_model,
x_train=x_train_rf,
y_train=y_train_rf,
x_test=x_test_rf,
model_name='rf')
# Build the new training and test sets: each model's out-of-fold prediction columns placed side by side
z_train = np.concatenate((xgb_oof_train,
rf_oof_train,), axis=1)
z_test = np.concatenate((xgb_oof_test,
rf_oof_test,), axis=1)
print("\nz_train:{}, z_test:{}".format(z_train.shape, z_test.shape))
# Save the new training and test sets
print("\nSaving data:")
print('Location:', DATA_DIR)
z_train_pd = pd.DataFrame(z_train, columns=['XGB', 'RF'])
z_test_pd = pd.DataFrame(z_test, columns=['XGB', 'RF'])
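# A possible next step (editor's assumption, not part of the original script):
# fit a simple meta-learner on the stacked features, e.g.
#   from sklearn.linear_model import LogisticRegression
#   meta = LogisticRegression()
#   meta.fit(z_train, y_train_xgb)
#   y_pred = meta.predict_proba(z_test)[:, 1]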
import pandas as pd
import os
import json
import numpy as np
from datetime import datetime, date
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
from flask import Flask, jsonify, render_template, redirect, url_for, request
from flask_sqlalchemy import SQLAlchemy
# might be able to remove
import pymysql
# additional packages might be able to remove
from sqlalchemy import Table
from sqlalchemy import Column, Integer, Text
import csv
# project 2 config file
from config import pw
# gp config file
from db_config import pwd
# prediction apps
from FirstPrediction import FirstPredict, recipe_info, FinalPredict
from secondPredict import SecondPredict
df = pd.read_csv('wine_pairings_v7.csv', index_col = 'wine')
app = Flask(__name__, template_folder="templates")
app.config["SQLALCHEMY_DATABASE_URI"] = (
os.environ.get("JAWSDB_URL", "")
or f"mysql+pymysql://root:{pwd}@127.0.0.1:3306/wine_db"
)
db = SQLAlchemy(app)
session = db.session
Base = automap_base()
Base.prepare(db.engine, reflect=True)
wine_data = Base.classes.wine_data
wine_map_data = Base.classes.world_wine_data
wine_blurbs = Base.classes.wine_blurbs
@app.route("/")
def index():
result = render_template("index.html")
return result
@app.route("/grape_guide", methods = ["GET","POST"])
def wine():
wine_prediction = "blank"
if request.method == "POST":
wine_prediction = request.form["wine-selection"]
print(wine_prediction)
result = render_template("grape_guide.html", wine_selection = wine_prediction)
return result
@app.route("/wine_data/<wine>")
def get_wine_data(wine):
qry = (
session.query("* from wine_data;")
.statement
)
df = pd.read_sql_query(qry, db.engine).drop(columns = "ID")
df = df.loc[df[wine]> 0]
data = {
"Wine_Name": pd.DataFrame(df[wine]).columns.values.tolist(),
"Attribute_Labels": np.array(pd.DataFrame(df["Attributes"]).values).flatten().tolist(),
"Attribute_Values": np.array(pd.DataFrame(df[wine]).values).flatten().tolist()
}
return jsonify(data)
@app.route("/wine_blurb/<wine>")
def get_wine_blurb(wine):
qry = session.query("*").filter(wine_blurbs.wine == wine).statement
df = pd.read_sql_query(qry, db.engine).drop(columns="ID")
data = {
"Wine": np.array(pd.DataFrame(df["wine"]).values)
.flatten()
.tolist(),
"Blurb": np.array(pd.DataFrame(df["blurb"]).values)
.flatten()
.tolist()
}
return jsonify(data)
@app.route("/sandbox")
def sandbox():
result = render_template("sandbox.html")
return result
@app.route("/wine_map")
def wine_map():
result = render_template("wine_map.html")
return result
@app.route("/wine_map_data")
def get_wine_map_data():
qry = (
session.query("* from world_wine_data").statement
)
df = pd.read_sql_query(qry, db.engine).drop(columns = "ID")
data = {
"Country": np.array(pd.DataFrame(df["Country"]).values).flatten().tolist(),
"Wine_Production": np.array(pd.DataFrame(df["Wine_Production"]).values).flatten().tolist(),
"CODES": np.array(pd.DataFrame(df["CODES"]).values).flatten().tolist(),
"Largest_Vineyards": np.array(pd.DataFrame(df["Largest_Vineyards"]).values).flatten().tolist(),
"Exports_Values": np.array(pd.DataFrame(df["Exports_Values"]).values).flatten().tolist(),
"Exports": np.array(pd.DataFrame(df["Exports"]).values).flatten().tolist(),
"Imports_Values": np.array(pd.DataFrame(df["Imports_Values"]).values).flatten().tolist(),
"Imports": np.array( | pd.DataFrame(df["Imports"]) | pandas.DataFrame |
import requests, zipfile, io, os, re
import pandas as pd
import numpy as np
import geopandas, astral
import time
from astral.sun import sun
from tabulate import tabulate
METEO_FOLDER = r"C:/Users/48604/Documents/semestr5/PAG/pag2/Meteo/"
ZAPIS_ZIP = METEO_FOLDER + r"Meteo_"
url = "https://dane.imgw.pl/datastore/getfiledown/Arch/Telemetria/Meteo/2015/Meteo_2015-07.zip"
#!
#change: METEO_FOLDER, url
def get_data(url, pth):
file = requests.get(url)
zip = zipfile.ZipFile(io.BytesIO(file.content))
#download zip from IMGW archive
url_end = url[-4:]
#later checking if file ends with .zip or .ZIP
pattern = "Meteo_(.*?)" + url_end
substring = re.search(pattern, url).group(1)
#pattern matching in order to name new dir properly
path = pth + substring + "/"
#path to dir with data from specified period
if os.path.isdir(path) == 0:
os.mkdir(path)
zip.extractall(path)
#creating dir if it doesnt exist and unpacking data
return path
path_data = get_data(url, ZAPIS_ZIP)
path_parametry = METEO_FOLDER + "kody_parametr.csv"
path_effacility = METEO_FOLDER + "effacility.geojson"
path_powiaty = METEO_FOLDER + "powiaty/powiaty.shp"
path_wojewodztwa = METEO_FOLDER + "woj/woj.shp"
def read_parametry(path_parametr):
parametr = pd.read_csv(path_parametr, sep=';', index_col=False, encoding='cp1250')
#separator=';' - by default ','
#index_col=False - store all data as columns not indexes
return parametr
#function to read parameters from the path_parametr file
def read_data(path_data):
fields = ["KodSH", "ParametrSH", "Date", "Wartosc"]
data = {}
#column names; empty dictionary for data from separate csv files in folder
for filename in os.listdir(path_data):
#for every file in folder
dataset_name = pd.read_csv(path_data + filename, sep=';', header=None, names=fields, index_col=False, low_memory=False, dtype={'KodSH': int, 'Wartosc': str}, parse_dates=['Date'])
#applying value
#separator=';' - by default ','
#no header by default
#names=fields - column names
#index_col=False - store all data as columns not indexes
#low_memory=false - way to get rid of different datatypes in columns warning
dataset_name["Wartosc"] = dataset_name["Wartosc"].str.replace(',','.').astype('float64')
#replace ',' with '.' and convert string to float
dataset_name["Date"] = dataset_name["Date"].dt.tz_localize("Europe/Warsaw")
#setting "Data" column to datetime64[ns, Europe/Warsaw] from datetime64[ns]
data[filename] = dataset_name
return data
#function to read data from the path_data file
def read_effacility(path_effacility):
path = open(path_effacility)
effacility = geopandas.read_file(path)
#read geojson
effacility["geometry"] = effacility["geometry"].to_crs(epsg=4258)
x = effacility["geometry"].x
y = effacility["geometry"].y
data = {"KodSH" : effacility["name"], "City" : effacility["name1"], "Lon" : x, "Lat" : y}
effacility = pd.DataFrame(data)
effacility["KodSH"] = effacility["KodSH"].astype('float64')
#store KodSH as number not string
return effacility
def f_init_mean(data):
init_mean = {}
for key in data:
init_mean[key] = data[key].groupby(["KodSH", data[key]["Date"].dt.date])["Wartosc"].mean()
init_mean[key] = init_mean[key].to_frame()
init_mean[key].drop(columns = ["Wartosc"], inplace=True)
return init_mean
def f_sun_info(init_mean, effacility):
sun_info = {}
for key in init_mean:
init_mean[key] = init_mean[key].reset_index("Date")
#Date as a non index value
#init_mean[key] = init_mean[key].drop(["24h"], axis=1)
sun_info[key] = pd.merge(init_mean[key], effacility, on = "KodSH", how = "left")
astral_info = {}
for key in sun_info:
shp = sun_info[key].shape[0]
Dawn = list(range(shp))
Dusk = list(range(shp))
for k in sun_info[key].index:
City = astral.LocationInfo(sun_info[key]["City"][k],"Poland", "Europe/Warsaw", sun_info[key]["Lat"][k], sun_info[key]["Lon"][k])
Dawn[k] = (sun(City.observer, date=sun_info[key]["Date"][k], tzinfo=City.timezone))["dawn"]
Dusk[k] = (sun(City.observer, date=sun_info[key]["Date"][k], tzinfo=City.timezone))["dusk"]
data = {"KodSH" : sun_info[key]["KodSH"], "Dawn" : Dawn ,"Dusk" : Dusk}
astral_info[key] = pd.DataFrame(data)
sun_info[key] = pd.merge(sun_info[key], astral_info[key], left_index=True, right_index=True)
sun_info[key].drop(["KodSH_y"], axis=1, inplace=True)
sun_info[key].rename(columns = {"KodSH_x" : "KodSH", "Date" : "Date"}, inplace=True)
sun_info[key]["Date"] = pd.to_datetime(sun_info[key]["Date"]).dt.tz_localize("Europe/Warsaw")
return sun_info
def f_day_night(data, sun_info):
day_night = {}
for key in data:
date_time = data[key]["Date"]
#save old datetime
data[key]["Date"] = data[key]["Date"].dt.date
#trim Date of time, which is necessary to merge(unwanted conv from datetime64 to object)
data[key]["Date"] = pd.to_datetime(data[key]["Date"]).dt.tz_localize("Europe/Warsaw")
#conversion from object to datetime64
day_night[key] = pd.merge(data[key], sun_info[key], on=["KodSH", "Date"], how="inner")
#merging data with info about dusk and dawn
data[key].drop(["Date"], axis=1, inplace=True)
data[key].insert(2, "Date", date_time)
day_night[key].drop(["Date"], axis=1, inplace=True)
day_night[key].insert(2, "Date", date_time)
#bringing back proper "Date" VALUE
day_night[key]["day/night"] = np.where((day_night[key]["Date"] >= day_night[key]["Dawn"]) & (day_night[key]["Date"] < day_night[key]["Dusk"]), 1, 0)
        #add a column that determines whether it is day (1) or night (0)
return day_night
def f_analysis_basic(sun_info, day_night):
analysis_basic = {}
mean = {}
mean_day = {}
mean_night = {}
median = {}
median_day = {}
median_night = {}
for key in day_night:
mean[key] = day_night[key].groupby(["KodSH", day_night[key]["Date"].dt.date, day_night[key]["day/night"]], dropna=False)["Wartosc"].mean()
mean[key].to_frame
mean[key] = mean[key].reset_index()
#mean group by
median[key] = day_night[key].groupby(["KodSH", day_night[key]["Date"].dt.date, day_night[key]["day/night"]], dropna=False)["Wartosc"].median()
median[key].to_frame
median[key] = median[key].reset_index()
#median geoup by
mean_day[key] = mean[key][mean[key]["day/night"] != 0]
mean_night[key] = mean[key][mean[key]["day/night"] != 1]
median_day[key] = median[key][median[key]["day/night"] != 0]
median_night[key] = median[key][median[key]["day/night"] != 1]
#selecting values for different time of day(loss of nan data)
mean_day[key] = sun_info[key].merge(mean_day[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
mean_night[key] = sun_info[key].merge(mean_night[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
median_day[key] = sun_info[key].merge(median_day[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
median_night[key] = sun_info[key].merge(median_night[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
#bring nan data back
mean_day[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
mean_night[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
median_day[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
median_night[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
mean_day[key].rename(columns = {"Wartosc" : "Mean_day"}, inplace=True)
mean_night[key].rename(columns = {"Wartosc" : "Mean_night"}, inplace=True)
median_day[key].rename(columns = {"Wartosc" : "Median_day"}, inplace=True)
median_night[key].rename(columns = {"Wartosc" : "Median_night"}, inplace=True)
#basic dataframe maintenance
mean_day[key] = pd.concat([mean_day[key], mean_night[key]["Mean_night"], median_day[key]["Median_day"], median_night[key]["Median_night"]], axis=1)
analysis_basic[key] = mean_day[key]
return analysis_basic
def f_analysis_trim(sun_info, day_night):
analysis_trim = {}
return analysis_trim
def f_display_analysis(analysis_basic):
hdrs = ["KodSH", "Date", "City", "Lon", "Lat", "Mean value day", "Mean value night", "Median value day", "Median value night"]
for key in analysis_basic:
table = tabulate(analysis_basic[key], headers = hdrs, tablefmt = 'psql')
result = open("analysis_basic_" + key[:15] + ".txt", "w")
result.write(table)
result.close()
def read_powiaty(path_powiaty):
powiaty = geopandas.read_file(path_powiaty)
powiaty["geometry"] = powiaty["geometry"].to_crs(epsg=4258)
data = {"Powiat" : powiaty["name"], "geometry" : powiaty["geometry"]}
powiaty = geopandas.GeoDataFrame(data)
return powiaty
def read_wojewodztwa(path_wojewodztwa):
wojewodztwa = geopandas.read_file(path_wojewodztwa)
wojewodztwa["geometry"] = wojewodztwa["geometry"].to_crs(epsg=4258)
data = {"Wojewodztwo" : wojewodztwa["name"], "geometry" : wojewodztwa["geometry"]}
wojewodztwa = geopandas.GeoDataFrame(data)
return wojewodztwa
def f_merge_stacje_powiaty(effacility, powiaty):
stacje_powiaty = effacility
stacje_powiaty = geopandas.GeoDataFrame(stacje_powiaty, crs="EPSG:4258", geometry=geopandas.points_from_xy(stacje_powiaty["Lon"], stacje_powiaty["Lat"]))
stacje_powiaty = stacje_powiaty.sjoin(powiaty, how="inner", predicate="within")
stacje_powiaty.drop(["geometry"], axis=1, inplace=True)
data = {"KodSH" : stacje_powiaty["KodSH"], "Powiat" : stacje_powiaty["Powiat"]}
stacje_powiaty = pd.DataFrame(data)
return stacje_powiaty
def f_merge_stacje_wojewodztwa(effacility, wojewodztwa):
stacje_woj = effacility
stacje_woj = geopandas.GeoDataFrame(stacje_woj, crs="EPSG:4258", geometry=geopandas.points_from_xy(stacje_woj["Lon"], stacje_woj["Lat"]))
stacje_woj = stacje_woj.sjoin(wojewodztwa, how="inner", predicate="within")
stacje_woj.drop(["geometry"], axis=1, inplace=True)
data = {"KodSH" : stacje_woj["KodSH"], "Wojewodztwo" : stacje_woj["Wojewodztwo"]}
stacje_woj = pd.DataFrame(data)
return stacje_woj
def f_which_powiat(analysis_basic, stacje_powiaty):
which_powiat = analysis_basic
for key in which_powiat:
which_powiat[key] = pd.merge(which_powiat[key], stacje_powiaty, on=["KodSH"], how="left", right_index=False)
return which_powiat
def f_analysis_basic_powiat(analysis_basic, which_powiat):
analysis_basic_powiat = {}
for key in analysis_basic:
analysis_basic_powiat[key] = analysis_basic[key].groupby(["Date", "Powiat"])[["Mean_day", "Mean_night", "Median_day", "Median_night"]].mean()
analysis_basic_powiat[key] = analysis_basic_powiat[key].reset_index()
return analysis_basic_powiat
def f_which_woj(analysis_basic, stacje_woj):
which_woj = analysis_basic
for key in which_woj:
        which_woj[key] = pd.merge(which_woj[key], stacje_woj, on=["KodSH"], how="left", right_index=False)
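# A possible driver for the functions above (editor's assumption; the original
# script presumably calls them in roughly this order):
#   parametry = read_parametry(path_parametry)
#   data = read_data(path_data)
#   effacility = read_effacility(path_effacility)
#   sun_info = f_sun_info(f_init_mean(data), effacility)
#   day_night = f_day_night(data, sun_info)
#   analysis_basic = f_analysis_basic(sun_info, day_night)
#   f_display_analysis(analysis_basic)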
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ..calculator import Calculator
from ..utils import Chart
from .cycletime import CycleTimeCalculator
logger = logging.getLogger(__name__)
class AgeingWIPChartCalculator(Calculator):
"""Draw an ageing WIP chart"""
def run(self, today=None):
# short circuit relatively expensive calculation if it won't be used
if not self.settings["ageing_wip_chart"]:
return None
cycle_data = self.get_result(CycleTimeCalculator)
cycle_names = [s["name"] for s in self.settings["cycle"]]
start_column = self.settings["committed_column"]
end_column = self.settings["final_column"]
done_column = self.settings["done_column"]
if start_column not in cycle_names:
logger.error("Committed column %s does not exist", start_column)
return None
if end_column not in cycle_names:
logger.error("Final column %s does not exist", end_column)
return None
if done_column not in cycle_names:
logger.error("Done column %s does not exist", done_column)
return None
# add condition to allow testing
today = pd.Timestamp.now().date() if today is None else today
# remove items that are done
ageing_wip_data = cycle_data[pd.isnull(cycle_data[done_column])].copy()
# calculate current status and age for each item
def extract_status(row):
last_valid = row.last_valid_index()
if last_valid is None:
return np.NaN
return last_valid
def extract_age(row):
if start_column not in row:
return np.NaN
if pd.isna(row[start_column]):
# for all stages except backlog stage, get the first date and
# use it to compute age
dates = sorted(filter(pd.notna, row[start_column:end_column]))
if len(dates) > 0:
started = dates[0]
else:
return np.NaN
else:
started = row[start_column]
            if pd.isnull(started):
                return np.NaN
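            # Presumably (editor's assumption) the age in days is then computed as
            # (today - started.date()).days, and extract_status / extract_age are
            # applied row-wise to build the ageing WIP table.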
# coding: utf8
import os
from multiprocessing.pool import ThreadPool
from os import path
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from clinica.pipelines.machine_learning import base
class KFoldCV(base.MLValidation):
def validate(self, y):
if self._validation_params["splits_indices"] is None:
skf = StratifiedKFold(
n_splits=self._validation_params["n_folds"], shuffle=True
)
self._validation_params["splits_indices"] = list(
skf.split(np.zeros(len(y)), y)
)
async_pool = ThreadPool(self._validation_params["n_threads"])
async_result = {}
for i in range(self._validation_params["n_folds"]):
train_index, test_index = self._validation_params["splits_indices"][i]
async_result[i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
async_pool.close()
async_pool.join()
for i in range(self._validation_params["n_folds"]):
self._validation_results.append(async_result[i].get())
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(
self._validation_results
)
return self._classifier, self._best_params, self._validation_results
def save_results(self, output_dir):
if self._validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
subjects_folds = []
results_folds = []
container_dir = path.join(output_dir, "folds")
if not path.exists(container_dir):
os.makedirs(container_dir)
for i in range(len(self._validation_results)):
subjects_df = pd.DataFrame(
{
"y": self._validation_results[i]["y"],
"y_hat": self._validation_results[i]["y_hat"],
"y_index": self._validation_results[i]["y_index"],
}
)
subjects_df.to_csv(
path.join(container_dir, "subjects_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
subjects_folds.append(subjects_df)
# fmt: off
results_df = pd.DataFrame(
{
"balanced_accuracy": self._validation_results[i]["evaluation"]["balanced_accuracy"],
"auc": self._validation_results[i]["auc"],
"accuracy": self._validation_results[i]["evaluation"]["accuracy"],
"sensitivity": self._validation_results[i]["evaluation"]["sensitivity"],
"specificity": self._validation_results[i]["evaluation"]["specificity"],
"ppv": self._validation_results[i]["evaluation"]["ppv"],
"npv": self._validation_results[i]["evaluation"]["npv"],
"train_balanced_accuracy": self._validation_results[i]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._validation_results[i]["evaluation_train"]["accuracy"],
"train_sensitivity": self._validation_results[i]["evaluation_train"]["sensitivity"],
"train_specificity": self._validation_results[i]["evaluation_train"]["specificity"],
"train_ppv": self._validation_results[i]["evaluation_train"]["ppv"],
"train_npv": self._validation_results[i]["evaluation_train"]["npv"],
},
index=["i"],
)
# fmt: on
results_df.to_csv(
path.join(container_dir, "results_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
results_folds.append(results_df)
all_subjects = pd.concat(subjects_folds)
all_subjects.to_csv(
path.join(output_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results = pd.concat(results_folds)
all_results.to_csv(
path.join(output_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results = pd.DataFrame(
all_results.apply(np.nanmean).to_dict(),
columns=all_results.columns,
index=[
0,
],
)
mean_results.to_csv(
path.join(output_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
print("Mean results of the classification:")
print(
"Balanced accuracy: %s"
% (mean_results["balanced_accuracy"].to_string(index=False))
)
print("specificity: %s" % (mean_results["specificity"].to_string(index=False)))
print("sensitivity: %s" % (mean_results["sensitivity"].to_string(index=False)))
print("auc: %s" % (mean_results["auc"].to_string(index=False)))
@staticmethod
def get_default_parameters():
parameters_dict = {
"n_folds": 10,
"n_threads": 15,
"splits_indices": None,
"inner_cv": True,
}
return parameters_dict
class RepeatedKFoldCV(base.MLValidation):
def validate(self, y):
if self._validation_params["splits_indices"] is None:
self._validation_params["splits_indices"] = []
for i in range(self._validation_params["n_iterations"]):
skf = StratifiedKFold(
n_splits=self._validation_params["n_folds"], shuffle=True
)
self._validation_params["splits_indices"].append(
list(skf.split(np.zeros(len(y)), y))
)
async_pool = ThreadPool(self._validation_params["n_threads"])
async_result = {}
for i in range(self._validation_params["n_iterations"]):
train_index, test_index = self._validation_params["splits_indices"][i]
async_result[i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
for r in range(self._validation_params["n_iterations"]):
async_result[r] = {}
self._validation_results.append([])
for i in range(self._validation_params["n_folds"]):
train_index, test_index = self._validation_params["splits_indices"][r][
i
]
async_result[r][i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
async_pool.close()
async_pool.join()
for r in range(self._validation_params["n_iterations"]):
for i in range(self._validation_params["n_folds"]):
self._validation_results[r].append(async_result[r][i].get())
# TODO Find a better way to estimate best parameter
flat_results = [result for fold in self._validation_results for result in fold]
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(
flat_results
)
return self._classifier, self._best_params, self._validation_results
def save_results(self, output_dir):
if self._validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
all_results_list = []
all_subjects_list = []
for iteration in range(len(self._validation_results)):
iteration_dir = path.join(output_dir, "iteration-" + str(iteration))
if not path.exists(iteration_dir):
os.makedirs(iteration_dir)
iteration_subjects_list = []
iteration_results_list = []
folds_dir = path.join(iteration_dir, "folds")
if not path.exists(folds_dir):
os.makedirs(folds_dir)
for i in range(len(self._validation_results[iteration])):
subjects_df = pd.DataFrame(
{
"y": self._validation_results[iteration][i]["y"],
"y_hat": self._validation_results[iteration][i]["y_hat"],
"y_index": self._validation_results[iteration][i]["y_index"],
}
)
subjects_df.to_csv(
path.join(folds_dir, "subjects_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
iteration_subjects_list.append(subjects_df)
# fmt: off
results_df = pd.DataFrame(
{
"balanced_accuracy": self._validation_results[iteration][i]["evaluation"]["balanced_accuracy"],
"auc": self._validation_results[iteration][i]["auc"],
"accuracy": self._validation_results[iteration][i]["evaluation"]["accuracy"],
"sensitivity": self._validation_results[iteration][i]["evaluation"]["sensitivity"],
"specificity": self._validation_results[iteration][i]["evaluation"]["specificity"],
"ppv": self._validation_results[iteration][i]["evaluation"]["ppv"],
"npv": self._validation_results[iteration][i]["evaluation"]["npv"],
"train_balanced_accuracy": self._validation_results[iteration][i]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._validation_results[iteration][i]["evaluation_train"]["accuracy"],
"train_sensitivity": self._validation_results[iteration][i]["evaluation_train"]["sensitivity"],
"train_specificity": self._validation_results[iteration][i]["evaluation_train"]["specificity"],
"train_ppv": self._validation_results[iteration][i]["evaluation_train"]["ppv"],
"train_npv": self._validation_results[iteration][i]["evaluation_train"]["npv"],
},
index=["i"],
)
# fmt: on
results_df.to_csv(
path.join(folds_dir, "results_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
iteration_results_list.append(results_df)
iteration_subjects_df = pd.concat(iteration_subjects_list)
iteration_subjects_df.to_csv(
path.join(iteration_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_subjects_list.append(iteration_subjects_df)
iteration_results_df = pd.concat(iteration_results_list)
iteration_results_df.to_csv(
path.join(iteration_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results_df = pd.DataFrame(
iteration_results_df.apply(np.nanmean).to_dict(),
columns=iteration_results_df.columns,
index=[
0,
],
)
mean_results_df.to_csv(
path.join(iteration_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results_list.append(mean_results_df)
all_subjects_df = pd.concat(all_subjects_list)
all_subjects_df.to_csv(
path.join(output_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results_df = pd.concat(all_results_list)
all_results_df.to_csv(
path.join(output_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results_df = pd.DataFrame(
all_results_df.apply(np.nanmean).to_dict(),
columns=all_results_df.columns,
index=[
0,
],
)
mean_results_df.to_csv(
path.join(output_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
print("Mean results of the classification:")
print(
"Balanced accuracy: %s"
% (mean_results_df["balanced_accuracy"].to_string(index=False))
)
print(
"specificity: %s" % (mean_results_df["specificity"].to_string(index=False))
)
print(
"sensitivity: %s" % (mean_results_df["sensitivity"].to_string(index=False))
)
print("auc: %s" % (mean_results_df["auc"].to_string(index=False)))
@staticmethod
def get_default_parameters():
parameters_dict = {
"n_iterations": 100,
"n_folds": 10,
"n_threads": 15,
"splits_indices": None,
"inner_cv": True,
}
return parameters_dict
class RepeatedHoldOut(base.MLValidation):
def validate(self, y):
if self._validation_params["splits_indices"] is None:
splits = StratifiedShuffleSplit(
n_splits=self._validation_params["n_iterations"],
test_size=self._validation_params["test_size"],
)
self._validation_params["splits_indices"] = list(
splits.split(np.zeros(len(y)), y)
)
async_pool = ThreadPool(self._validation_params["n_threads"])
async_result = {}
for i in range(self._validation_params["n_iterations"]):
train_index, test_index = self._validation_params["splits_indices"][i]
if self._validation_params["inner_cv"]:
async_result[i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
else:
async_result[i] = async_pool.apply_async(
self._ml_algorithm.evaluate_no_cv, (train_index, test_index)
)
async_pool.close()
async_pool.join()
for i in range(self._validation_params["n_iterations"]):
self._validation_results.append(async_result[i].get())
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(
self._validation_results
)
return self._classifier, self._best_params, self._validation_results
def save_results(self, output_dir):
if self._validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
all_results_list = []
all_train_subjects_list = []
all_test_subjects_list = []
for iteration in range(len(self._validation_results)):
iteration_dir = path.join(output_dir, "iteration-" + str(iteration))
if not path.exists(iteration_dir):
os.makedirs(iteration_dir)
iteration_train_subjects_df = pd.DataFrame(
{
"iteration": iteration,
"y": self._validation_results[iteration]["y_train"],
"y_hat": self._validation_results[iteration]["y_hat_train"],
"subject_index": self._validation_results[iteration]["x_index"],
}
)
iteration_train_subjects_df.to_csv(
path.join(iteration_dir, "train_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_train_subjects_list.append(iteration_train_subjects_df)
iteration_test_subjects_df = pd.DataFrame(
{
"iteration": iteration,
"y": self._validation_results[iteration]["y"],
"y_hat": self._validation_results[iteration]["y_hat"],
"subject_index": self._validation_results[iteration]["y_index"],
}
)
iteration_test_subjects_df.to_csv(
path.join(iteration_dir, "test_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_test_subjects_list.append(iteration_test_subjects_df)
# fmt: off
iteration_results_df = pd.DataFrame(
{
"balanced_accuracy": self._validation_results[iteration]["evaluation"]["balanced_accuracy"],
"auc": self._validation_results[iteration]["auc"],
"accuracy": self._validation_results[iteration]["evaluation"]["accuracy"],
"sensitivity": self._validation_results[iteration]["evaluation"]["sensitivity"],
"specificity": self._validation_results[iteration]["evaluation"]["specificity"],
"ppv": self._validation_results[iteration]["evaluation"]["ppv"],
"npv": self._validation_results[iteration]["evaluation"]["npv"],
"train_balanced_accuracy": self._validation_results[iteration]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._validation_results[iteration]["evaluation_train"]["accuracy"],
"train_sensitivity": self._validation_results[iteration]["evaluation_train"]["sensitivity"],
"train_specificity": self._validation_results[iteration]["evaluation_train"]["specificity"],
"train_ppv": self._validation_results[iteration]["evaluation_train"]["ppv"],
"train_npv": self._validation_results[iteration]["evaluation_train"]["npv"],
},
index=["i"],
)
# fmt: on
iteration_results_df.to_csv(
path.join(iteration_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
# mean_results_df = pd.DataFrame(iteration_results_df.apply(np.nanmean).to_dict(),
# columns=iteration_results_df.columns, index=[0, ])
# mean_results_df.to_csv(path.join(iteration_dir, 'mean_results.tsv'),
# index=False, sep='\t', encoding='utf-8')
all_results_list.append(iteration_results_df)
all_train_subjects_df = pd.concat(all_train_subjects_list)
all_train_subjects_df.to_csv(
path.join(output_dir, "train_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_test_subjects_df = pd.concat(all_test_subjects_list)
all_test_subjects_df.to_csv(
path.join(output_dir, "test_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
        all_results_df = pd.concat(all_results_list)
## https://projects.datacamp.com/projects/20
## Dr. Semmelweis and the discovery of handwashing
## Task 1
# importing modules
import pandas as pd
import os as os
import matplotlib.pyplot as plt
# Read datasets/yearly_deaths_by_clinic.csv into yearly
fullpathyearly = os.path.abspath(os.path.join('dc_20_dr_semmelweis','datasets', 'yearly_deaths_by_clinic.csv'))
yearly = pd.read_csv(fullpathyearly)
# Print out yearly
print(yearly)
## Task 2
# Calculate proportion of deaths per no. births
yearly["proportion_deaths"] = yearly["deaths"]/yearly["births"]
# Extract clinic 1 data into yearly1 and clinic 2 data into yearly2
yearly1 = yearly[yearly["clinic"] == "clinic 1"]
yearly2 = yearly[yearly["clinic"] == "clinic 2"]
# Print out yearly1
print(yearly1)
## Task 3
# Plot yearly proportion of deaths at the two clinics
ax = yearly1.plot(x="year", y="proportion_deaths", label="Clinic 1")
yearly2.plot(x="year", y="proportion_deaths", label="Clinic 2", ax = ax)
ax.set_ylabel("Proportion deaths")
# Show the plot
plt.show()
## Task 4
# Read datasets/monthly_deaths.csv into monthly
fullpathmonthly = os.path.abspath(os.path.join('dc_20_dr_semmelweis','datasets', 'monthly_deaths.csv'))
monthly = pd.read_csv(fullpathmonthly, parse_dates=["date"])
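## A likely continuation (editor's assumption, mirroring Tasks 2-3 for the monthly data):
# monthly["proportion_deaths"] = monthly["deaths"] / monthly["births"]
# ax = monthly.plot(x="date", y="proportion_deaths")
# ax.set_ylabel("Proportion deaths")
# plt.show()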
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import pandas as pd
import numpy as np
import networkx as nx
from absl import logging as lg
from gensim.models.doc2vec import Doc2Vec
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import manhattan_distances
from scipy.spatial import distance_matrix as d_matrix
from yacos.essential import IO
class Similarity:
"""Static class to measuse the similarity between programs."""
__version__ = '1.0.0'
__d2v_model_llvm_seq = None
__d2v_model_syntax_seq = None
__d2v_model_syntax_token_kind = None
__d2v_model_syntax_token_kind_variable = None
__d2v_dir = 'yacos/doc2vec'
@staticmethod
def __populate_data(training_benchmarks,
training_directory,
test_benchmarks,
test_directory):
"""Create test and training data.
Parameters
----------
training_benchmarks : list
training_directory : str
tests_benchmark : list
test_directory : str
Returns
-------
training_data : pandas.DataFrame
test_data : pandas.DataFrame
"""
training_data = {}
for training_benchmark in training_benchmarks:
index = training_benchmark.find('.')
suite_name = training_benchmark[:index]
bench_name = training_benchmark[index+1:]
benchmark_dir = os.path.join(training_directory,
suite_name)
data = IO.load_yaml_or_fail('{}/{}.yaml'.format(benchmark_dir,
bench_name))
if data:
training_data[training_benchmark] = data
if not training_data:
lg.error('Training features do not exist.')
sys.exit(1)
test_data = {}
for test_benchmark in test_benchmarks:
index = test_benchmark.find('.')
suite_name = test_benchmark[:index]
bench_name = test_benchmark[index+1:]
benchmark_dir = os.path.join(test_directory,
suite_name)
data = IO.load_yaml_or_fail('{}/{}.yaml'.format(benchmark_dir,
bench_name))
if data:
test_data[test_benchmark] = data
if not test_data:
            lg.error('Test features do not exist.')
sys.exit(1)
training_data = pd.DataFrame.from_dict(training_data, orient='index')
test_data = pd.DataFrame.from_dict(test_data, orient='index')
return training_data, test_data
@staticmethod
def __get_root(g):
"""Find the root node.
Parameters
----------
g : networkx
"""
root = None
for node in g.nodes(data=True):
if 'root' in node[1]:
root = node[0]
break
else:
lg.warning('Root node not found (using node 0 as root).')
return 0
return root
@staticmethod
def __node_match_strong(g1_node, g2_node):
return g1_node == g2_node
@staticmethod
def __node_match_weak(g1_node, g2_node):
g1_attribute = g1_node['attr'] if 'attr' in g1_node else 'not found'
g2_attribute = g2_node['attr'] if 'attr' in g2_node else 'not found'
return g1_attribute == g2_attribute
@staticmethod
def __edge_match(g1_edge, g2_edge):
return g1_edge == g2_edge
@staticmethod
def __load_doc2vec_model_syntax_seq():
"""Load a doc2vec model."""
MODEL = 'd2v_syntax_seq.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
Similarity.__d2v_model_syntax_seq = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def __load_doc2vec_model_syntax_token_kind():
"""Load a doc2vec model."""
MODEL = 'd2v_syntax_token_kind.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
Similarity.__d2v_model_syntax_token_kind = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def __load_doc2vec_model_syntax_token_kind_variable():
"""Load a doc2vec model."""
MODEL = 'd2v_syntax_token_kind_variable.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
        Similarity.__d2v_model_syntax_token_kind_variable = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def __load_doc2vec_model_llvm_seq():
"""Load a doc2vec model."""
MODEL = 'd2v_llvm_seq.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
        Similarity.__d2v_model_llvm_seq = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def euclidean_distance_from_data(training_data,
test_data):
"""Euclidean distance.
Parameters
----------
training_data : dict
test_data : dict
Returns
-------
training_data : list (rows)
test_data : list (rows)
distance : dict
"""
training_data = pd.DataFrame.from_dict(training_data, orient='index')
        test_data = pd.DataFrame.from_dict(test_data, orient='index')
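        # NOTE: the method body is truncated at this point in the source. The
        # lines below are a minimal sketch (an assumption, not necessarily the
        # original YaCoS implementation) of one way to produce the documented
        # return values using scipy's cdist.
        from scipy.spatial.distance import cdist

        dist_matrix = cdist(test_data.values, training_data.values, 'euclidean')
        distance = {
            test_bench: dict(zip(training_data.index, dist_matrix[row_idx]))
            for row_idx, test_bench in enumerate(test_data.index)
        }
        return list(training_data.index), list(test_data.index), distance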
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as mtick
import plotly.graph_objects as go
import plotly.express as px
import publico as func
pd.options.mode.chained_assignment = None # default='warn'
from dateutil import parser
def MediaFileRede(res_select, interval_time=5):
res_select.drop_duplicates(subset=None, keep="first", inplace=True)
# # cria campos
# res_select['Timer2'] = 0
# res_select['Media2'] = 0.0
# velo_total = 0.0
# count=0
# timer_atual = 0.0
# timer_ant = 0.0
# elapset_atual= 0.0
# elapset_cumulativo = 0.0
# count_timer=interval_time
# for index, row in res_select.iterrows():
# timer_atual = row['Tempo']
# if (timer_ant!=0.0):
# elapset_atual = float(row['Tempo']) - float(timer_ant)
# # print(abs(elapset_atual))
# elapset_cumulativo+=float(elapset_atual)
# if ((elapset_cumulativo >= interval_time)):
# # print('Chegou')
# # break
# media_velo = velo_total / count
# res_select.at[index,"Media2"] = media_velo
# res_select.at[index,"Timer2"] = count_timer
# elapset_cumulativo=0.0
# timer_ant = 0.0
# velo_total=0.0
# media_velo=0.0
# count=0
# count_timer+=interval_time
# if (timer_atual != timer_ant):
# timer_ant = timer_atual
# velo_total = velo_total + row['Download']
# count+=1
# remove zeros
# res_select = res_select[(res_select['Timer2']!=0) & (res_select['Timer2']<=280) & (res_select['Media2']<300) ]
return res_select
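# The commented-out block above averages the download speed over fixed time
# windows by iterating row-by-row. The helper below is a vectorized sketch of
# that idea (an assumption about the intended behaviour, not a drop-in
# replacement for MediaFileRede):
def MediaFileRedeVetorizada(res_select, interval_time=5):
    df = res_select.drop_duplicates(keep="first").copy()
    # bucket each sample into the window that ends at a multiple of interval_time
    df['Timer2'] = (df['Tempo'] // interval_time + 1) * interval_time
    df['Media2'] = df.groupby('Timer2')['Download'].transform('mean')
    return df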
EXP30="30"
EXP50="50"
EXP70="70"
print("Loading Dataframe...")
# BASELINE 30 ***************************************************
baseline_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_GERAL.csv")
baseline_30['Download'] = baseline_30['Download'].astype(float)
baseline_30['Upload'] = baseline_30['Upload'].astype(float)
baseline_30['Tempo'] = baseline_30['Tempo'].astype(float)
baseline_30['Source'] = "BASELINE"
baseline_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
baseline_30_select = baseline_30[['Download', 'Source', 'Tempo', 'Carros']]
baseline_30_select = MediaFileRede(baseline_30_select)
# *************************************************************************
# BASELINE 50 ***************************************************
baseline_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_GERAL.csv")
baseline_50['Download'] = baseline_50['Download'].astype(float)
baseline_50['Upload'] = baseline_50['Upload'].astype(float)
baseline_50['Tempo'] = baseline_50['Tempo'].astype(float)
baseline_50['Source'] = "BASELINE"
baseline_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
baseline_50_select = baseline_50[['Download', 'Source', 'Tempo', 'Carros']]
baseline_50_select = MediaFileRede(baseline_50_select)
# *************************************************************************
# BASELINE 70 ***************************************************
baseline_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_GERAL.csv")
baseline_70['Download'] = baseline_70['Download'].astype(float)
baseline_70['Upload'] = baseline_70['Upload'].astype(float)
baseline_70['Tempo'] = baseline_70['Tempo'].astype(float)
baseline_70['Source'] = "BASELINE"
baseline_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
baseline_70_select = baseline_70[['Download', 'Source', 'Tempo', 'Carros']]
baseline_70_select = MediaFileRede(baseline_70_select)
# *************************************************************************
baseline = res = pd.concat([baseline_30_select,baseline_50_select,baseline_70_select], sort=False)
#
#
#
# ONETO2 30 ***************************************************
oneTo2_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_BASELINE_1TO2.csv")
oneTo2_30['Download'] = oneTo2_30['Download'].astype(float)
oneTo2_30['Upload'] = oneTo2_30['Upload'].astype(float)
oneTo2_30['Tempo'] = oneTo2_30['Tempo'].astype(float)
oneTo2_30['Source'] = "1to2"
oneTo2_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
oneTo2_30_select = oneTo2_30[['Download', 'Source', 'Tempo', 'Carros']]
oneTo2_30_select = MediaFileRede(oneTo2_30_select)
# *************************************************************************
# ONETO2 50 ***************************************************
oneTo2_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_BASELINE_1TO2.csv")
oneTo2_50['Download'] = oneTo2_50['Download'].astype(float)
oneTo2_50['Upload'] = oneTo2_50['Upload'].astype(float)
oneTo2_50['Tempo'] = oneTo2_50['Tempo'].astype(float)
oneTo2_50['Source'] = "1to2"
oneTo2_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
oneTo2_50_select = oneTo2_50[['Download', 'Source', 'Tempo', 'Carros']]
oneTo2_50_select = MediaFileRede(oneTo2_50_select)
# *************************************************************************
# 1TO2 70 ***************************************************
oneTo2_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_BASELINE_1TO2.csv")
oneTo2_70['Download'] = oneTo2_70['Download'].astype(float)
oneTo2_70['Upload'] = oneTo2_70['Upload'].astype(float)
oneTo2_70['Tempo'] = oneTo2_70['Tempo'].astype(float)
oneTo2_70['Source'] = "1to2"
oneTo2_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
oneTo2_70_select = oneTo2_70[['Download', 'Source', 'Tempo', 'Carros']]
oneTo2_70_select = MediaFileRede(oneTo2_70_select)
# *************************************************************************
oneTo2 = res = pd.concat([oneTo2_30_select,oneTo2_50_select,oneTo2_70_select], sort=False)
#
#
#
# RANDOM 30 ***************************************************
random_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_BASELINE_RANDOM.csv")
random_30['Download'] = random_30['Download'].astype(float)
random_30['Upload'] = random_30['Upload'].astype(float)
random_30['Tempo'] = random_30['Tempo'].astype(float)
random_30['Source'] = "Rand"
random_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
random_30_select = random_30[['Download', 'Source', 'Tempo', 'Carros']]
random_30_select = MediaFileRede(random_30_select)
# *************************************************************************
# RANDOM 50 ***************************************************
random_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_BASELINE_RANDOM.csv")
random_50['Download'] = random_50['Download'].astype(float)
random_50['Upload'] = random_50['Upload'].astype(float)
random_50['Tempo'] = random_50['Tempo'].astype(float)
random_50['Source'] = "Rand"
random_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
random_50_select = random_50[['Download', 'Source', 'Tempo', 'Carros']]
random_50_select = MediaFileRede(random_50_select)
# *************************************************************************
# RANDOM 70 ***************************************************
random_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_BASELINE_RANDOM.csv")
random_70['Download'] = random_70['Download'].astype(float)
random_70['Upload'] = random_70['Upload'].astype(float)
random_70['Tempo'] = random_70['Tempo'].astype(float)
random_70['Source'] = "Rand"
random_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
random_70_select = random_70[['Download', 'Source', 'Tempo', 'Carros']]
random_70_select = MediaFileRede(random_70_select)
# *************************************************************************
random = res = pd.concat([random_30_select,random_50_select,random_70_select], sort=False)
#
#
#
# LIMITE 30 ***************************************************
limite_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_BASELINE_THRESHOLD.csv")
limite_30['Download'] = limite_30['Download'].astype(float)
limite_30['Upload'] = limite_30['Upload'].astype(float)
limite_30['Tempo'] = limite_30['Tempo'].astype(float)
limite_30['Source'] = "Lim"
limite_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
limite_30_select = limite_30[['Download', 'Source', 'Tempo', 'Carros']]
limite_30_select = MediaFileRede(limite_30_select)
# *************************************************************************
# LIMITE 50 ***************************************************
limite_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_BASELINE_THRESHOLD.csv")
limite_50['Download'] = limite_50['Download'].astype(float)
limite_50['Upload'] = limite_50['Upload'].astype(float)
limite_50['Tempo'] = limite_50['Tempo'].astype(float)
limite_50['Source'] = "Lim"
limite_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
limite_50_select = limite_50[['Download', 'Source', 'Tempo', 'Carros']]
limite_50_select = MediaFileRede(limite_50_select)
# *************************************************************************
# LIMITE 70 ***************************************************
limite_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_BASELINE_THRESHOLD.csv")
limite_70['Download'] = limite_70['Download'].astype(float)
limite_70['Upload'] = limite_70['Upload'].astype(float)
limite_70['Tempo'] = limite_70['Tempo'].astype(float)
limite_70['Source'] = "Lim"
limite_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
limite_70_select = limite_70[['Download', 'Source', 'Tempo', 'Carros']]
limite_70_select = MediaFileRede(limite_70_select)
# *************************************************************************
limite = res = pd.concat([limite_30_select,limite_50_select,limite_70_select], sort=False)
#
#
#
# DBSCAN 30 ***************************************************
dbscan_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_DBSCAN.csv")
dbscan_30['Download'] = dbscan_30['Download'].astype(float)
dbscan_30['Upload'] = dbscan_30['Upload'].astype(float)
dbscan_30['Tempo'] = dbscan_30['Tempo'].astype(float)
dbscan_30['Source'] = "DBSCAN"
dbscan_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
dbscan_30_select = dbscan_30[['Download', 'Source', 'Tempo', 'Carros']]
dbscan_30_select = MediaFileRede(dbscan_30_select)
# *************************************************************************
# DBSCAN 50 ***************************************************
dbscan_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_DBSCAN.csv")
dbscan_50['Download'] = dbscan_50['Download'].astype(float)
dbscan_50['Upload'] = dbscan_50['Upload'].astype(float)
dbscan_50['Tempo'] = dbscan_50['Tempo'].astype(float)
dbscan_50['Source'] = "DBSCAN"
dbscan_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
dbscan_50_select = dbscan_50[['Download', 'Source', 'Tempo', 'Carros']]
dbscan_50_select = MediaFileRede(dbscan_50_select)
# *************************************************************************
# DBSCAN 70 ***************************************************
dbscan_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_DBSCAN.csv")
dbscan_70['Download'] = dbscan_70['Download'].astype(float)
dbscan_70['Upload'] = dbscan_70['Upload'].astype(float)
dbscan_70['Tempo'] = dbscan_70['Tempo'].astype(float)
dbscan_70['Source'] = "DBSCAN"
dbscan_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
dbscan_70_select = dbscan_70[['Download', 'Source', 'Tempo', 'Carros']]
dbscan_70_select = MediaFileRede(dbscan_70_select)
# *************************************************************************
dbscan = res = pd.concat([dbscan_30_select,dbscan_50_select,dbscan_70_select], sort=False)
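# The five loading blocks above repeat the same read/cast/label/select steps.
# A compact sketch of that pattern (the helper name is ours; the file names and
# labels are the ones used above):
def carrega_rede(arquivo, source, carros):
    df = pd.read_csv(arquivo)
    for col in ['Download', 'Upload', 'Tempo']:
        df[col] = df[col].astype(float)
    df['Source'] = source
    df['Carros'] = carros
    return MediaFileRede(df[['Download', 'Source', 'Tempo', 'Carros']])
# Example (equivalent to the explicit DBSCAN blocks above):
# dbscan = pd.concat([carrega_rede(f"../repositorio/{c}/REDE_DBSCAN.csv", "DBSCAN", int(c))
#                     for c in (EXP30, EXP50, EXP70)], sort=False)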
import heapq
import json
import math
import os
import re
import pandas
from IPython.display import display
import vocabularize as voc
# pre-process the query to get it the same kind of entity as the synopsis
def clean_query(porter, query):
pp_query = voc.info_pre_processing_f(query)
query = voc.stemSentence(pp_query, porter).split(" ")
return query
# execute a simple query and get results as list
def conjunctive_query(porter):
# enter the query
query_string = input("Search keys - conjunctive query : ")
# pre-process the query
query = clean_query(porter, query_string)
all_docs = list()
# load into Python dicts the necessary information from JSON files
j_codes = json.loads(open('vocabulary.json').read())
j_index = json.loads(open('inverted_index.json').read())
# for each word, get the docs in which it is present and intersect them to get the ones in which all of them are
for word in query:
code = j_codes[word]
documents = set(j_index[str(code)])
all_docs.append(documents)
docs = set.intersection(*all_docs)
    # get a list of doc indices
docs = [int(el.split("_")[1]) for el in docs]
return docs
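# Illustrative helper (an assumption: the `porter` object is nltk's
# PorterStemmer, which is what vocabularize.stemSentence expects):
def example_conjunctive_search():
    from nltk.stem import PorterStemmer
    return conjunctive_query(PorterStemmer())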
# compute the cosine similarity
def q_cosine_similarity(query):
    # save documents' ids in a min-heap data structure
    # (implemented with heapq by pushing similarities multiplied by -1)
cos_sim_doc = list()
heapq.heapify(cos_sim_doc)
# found docs with each word, then intersect
all_docs = list()
query_codes = list()
# load JSON files to get access to data stored
j_codes = json.loads(open('vocabulary.json').read())
j_inv_index = json.loads(open('inverted_index.json').read())
j_complex_index = json.loads(open('tfIdf_complex_index.json').read())
j_docs_short = json.loads(open('docs_short.json').read())
# get the docs in which each word in query is present
for word in query:
code = j_codes[word]
query_codes.append(code)
all_docs.append(set(j_inv_index[str(code)]))
docs = set.intersection(*all_docs)
# numerator --> sum of the tfIdf of the words in the query & in the document
# since we got the docs from the search we know that the query words are in the docs
for doc in docs:
        numerator, denominator = 0, 0
doc_id = doc.split("_")[1]
synopsis = j_docs_short[doc_id][2]
for code in query_codes:
code = str(code)
word_related_doc = dict(j_complex_index[code])
tfIdf_q = word_related_doc[doc]
numerator += tfIdf_q
debug = list()
for word in synopsis.split(" "):
s_code = str(j_codes[word])
debug.append(s_code)
word_list = dict(j_complex_index[s_code])
tfIdf_d = word_list[doc]
            denominator += tfIdf_d ** 2
        # for each doc compute cos similarity
        cos_sim = numerator / (len(query_codes) * math.sqrt(denominator))
heapq.heappush(cos_sim_doc, (round(cos_sim, 5)*(-1), doc))
return cos_sim_doc
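# For reference, because every query term above is given weight 1, the score is
#   cos(q, d) = sum_{t in q} tfidf(t, d) / (|q| * sqrt(sum_{t in d} tfidf(t, d)^2))
# where |q| is the number of query terms. A tiny self-contained check of that
# arithmetic on made-up tf-idf weights:
def _cosine_toy_check():
    doc_weights = {"t1": 0.4, "t2": 0.3, "t3": 0.2}   # tf-idf of the doc's terms
    query_terms = ["t1", "t2"]                        # both present in the doc
    numerator = sum(doc_weights[t] for t in query_terms)
    denominator = len(query_terms) * math.sqrt(sum(w ** 2 for w in doc_weights.values()))
    return numerator / denominator                    # ~0.65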
def cosine_similarity_rank(porter, K):
query_string = input("Search keys - conjunctive query : ")
# found docs with each word, then intersect
query = clean_query(porter, query_string)
cos_sim_doc = q_cosine_similarity(query)
print(cos_sim_doc)
result = list()
if len(cos_sim_doc) > K:
for k in range(K):
result.append(heapq.heappop(cos_sim_doc))
else:
for k in range(len(cos_sim_doc)):
result.append(heapq.heappop(cos_sim_doc))
return [(el[1], el[0]*(-1)) for el in result]
def custom_rank(porter, K):
query_string = input("Search keys - custom query : ")
# found docs with each word, then intersect
query = clean_query(porter, query_string)
cos_sim_doc = q_cosine_similarity(query)
j_docs_short = json.loads(open('docs_short.json').read())
values = j_docs_short.values()
max_members = max([int(v[0]) for v in values])
max_popularity = max([int(v[1]) for v in values])
h_rank = list()
heapq.heapify(h_rank)
    for neg_sim, doc in cos_sim_doc:
        sim = -neg_sim  # the heap stores negated similarities; restore the positive value
        doc_num = doc.split("_")[1]
        mem, pop = int(j_docs_short[doc_num][0]), int(j_docs_short[doc_num][1])
        measure = sim * 0.5 + (1 - pop/max_popularity) * 0.25 + (mem/max_members) * 0.25
heapq.heappush(h_rank, (measure*(-1), doc))
result = list()
if len(h_rank) > K:
for k in range(K):
q = heapq.heappop(h_rank)
result.append((q[1], round(q[0], 5)))
else:
for k in range(len(h_rank)):
q = heapq.heappop(h_rank)
result.append((q[1], round(q[0], 5)))
return [(el[0], el[1]*(-1)) for el in result]
def print_tables(docs, path, type):
with open("url_of_interest.txt", encoding="utf-8") as u:
urls = u.readlines()
table = list()
for i in docs:
if type == "tfIdf":
d, similarity = i[0].split("_")[1], i[1]
headers = ['animeTitle', 'animeDescription', 'Url', "Similarity"]
else:
d = i
headers = ['animeTitle', 'animeDescription', 'Url']
p = str(int(int(d) / 50))
sub_file = f"data/page_{p}/animes_{p}/anime_{d}.tsv"
file = os.path.join(path, sub_file)
with open(file, encoding="utf-8") as fp:
t = fp.readlines()
title = t[1].split("\t")[0]
synopsis = t[1].split("\t")[10]
synopsis = re.sub("(.{60})", "\\1\n", synopsis, 0, re.DOTALL)
url = urls[int(d)]
if type == "tfIdf":
table.append([title, synopsis, url, similarity])
else:
table.append([title, synopsis, url])
    df = pandas.DataFrame(table, columns=headers)
from typing import Union, Tuple, Sequence, Any
import collections.abc
import glob
import logging
import os
import numpy as np
import pandas as pd
from numpy.random import RandomState
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from .label_behavior import LabelBehavior
logger = logging.getLogger(__name__)
"""
Module which contains functionality for generating experiments
"""
class ClassicExperiment:
"""
Defines a classic experiment, which consists of: 1) a specification of the clean data 2) a specification of the
modified (triggered) data, and 3) a specification of the split of triggered/clean data for training/testing
the model
"""
def __init__(self, data_root_dir: str, trigger_label_xform: LabelBehavior, stratify_split: bool = True) -> None:
"""
Initializes a Classic experiment object
:param data_root_dir: the root directory under which all data lives under. The expected directory structure
for any dataset is as follows:
root_dir
|- clean_data
|- modification_1
|- modification_2
|- ...
This is needed so that the proper relative path can be computed from the root directory.
Additionally, it is required that filenames correspond across the different subfolders under
root_dir. Practically, this means
:param trigger_label_xform: a LabelBehavior object specifying how triggered data is changed
:param stratify_split: if True, then data is split such that each class has the same number of samples in
the produced experiment
"""
self.data_root_dir = data_root_dir
self.stratify_split = stratify_split
self.trigger_label_xform = trigger_label_xform
def create_experiment(self, clean_data_csv: str, experiment_data_folder: str,
mod_filename_filter: str = '*', split_clean_trigger: bool = False,
trigger_frac: float = 0.2, triggered_classes: Union[str, Sequence[Any]] = 'all',
random_state_obj: RandomState = RandomState(1234)) \
-> Union[Tuple, pd.DataFrame]:
"""
Creates an "experiment," which is a dataframe defining the data that should be used, and whether that data is
triggered or not, and the true & actual label associated with that data point.
TODO:
[] - Have ability to accept multiple mod_data_folders such that we can sample from them all at a specified
probability to have different triggers
:param clean_data_csv: path to file which contains a CSV specification of the clean data. The CSV file is
expected to have the following columns: [file, label]
:param experiment_data_folder: the folder which contains the data to mix with for the experiment.
:param mod_filename_filter: a string filter for determining which files in the folder to consider, if only a
a subset is to be considered for sampling
:param split_clean_trigger: if True, then we return a list of DataFrames, where the triggered & non-triggered
data are combined into one DataFrame, if False, we concatenate the triggered and non-triggered data
into one DataFrame
:param trigger_frac: the fraction of data which which should be triggered
:param triggered_classes: either the string 'all', or a Sequence of labels which are to be triggered. If
this parameter is 'all', then all classes will be triggered in the created experiment. Otherwise,
only the classes in the list will be triggered at the percentage requested in the trigger_frac
argument of the create_experiment function.
:param random_state_obj: random state object
:return: a dataframe of the data which consists of the experiment. The DataFrame has the following columns:
file, true_label, train_label, triggered
file - the file path of the data
true_label - the actual label of the data
train_label - the label of the data the model should be trained on.
This will be equal to true_label *if* triggered==False
triggered - a boolean value indicating whether this particular sample has a Trigger or not
"""
logger.info("Creating experiment from clean_data:%s modified_data:%s" %
(clean_data_csv, experiment_data_folder))
# get absolute paths to avoid ambiguities when generating output paths
experiment_data_folder = os.path.abspath(experiment_data_folder)
clean_df = pd.read_csv(clean_data_csv)
clean_df['filename_only'] = clean_df['file'].map(os.path.basename)
if isinstance(triggered_classes, str) and triggered_classes == 'all':
num_trigger = int(len(clean_df) * trigger_frac)
else:
if isinstance(triggered_classes, collections.abc.Sequence):
num_total_in_triggered_classes = 0
for c in triggered_classes:
num_total_in_triggered_classes += len(clean_df[clean_df['label'] == c])
num_trigger = int(num_total_in_triggered_classes*trigger_frac)
else:
msg = "triggered_classes must either be 'all' or a list of labels to trigger"
logger.error(msg)
raise ValueError(msg)
# find list of files in the mod data folder that match the input filter & the trigger_classes specification
mod_flist = glob.glob(os.path.join(experiment_data_folder, mod_filename_filter))
mod_flist.sort()
if isinstance(triggered_classes, str):
# we need the if/elif b/c a str is also a collections.abc.Sequence
pass
elif isinstance(triggered_classes, collections.abc.Sequence):
# get only the filenames associated with each label of interest
mod_flist_fname_only = [os.path.basename(x) for x in mod_flist]
mod_flist = []
for c in triggered_classes:
class_clean_files = set(clean_df[clean_df['label'] == c]['filename_only'])
intersected_fname_only = class_clean_files.intersection(mod_flist_fname_only)
intersected_fname_with_path = [os.path.join(experiment_data_folder, x) for x in intersected_fname_only]
mod_flist.extend(intersected_fname_with_path)
if not self.stratify_split:
mod_flist_subset = random_state_obj.choice(mod_flist, num_trigger, replace=False)
logger.info("Created unstratified dataset from %s for including in experiment" % (experiment_data_folder,))
else:
# get overlap between files which exist in the directory and files which were converted
# and pick stratification based on the original label
orig_flist = set(clean_df['filename_only'])
mod_flist_fname_only = set([os.path.basename(x) for x in mod_flist])
common_flist = list(orig_flist.intersection(mod_flist_fname_only))
df_subset_to_stratify = clean_df[clean_df['filename_only'].isin(common_flist)]
# get the trigger fraction percentage based on class-label stratification
if trigger_frac > 0:
try:
num_trigger = min(len(df_subset_to_stratify)-1, num_trigger)
num_classes = len(df_subset_to_stratify['label'].unique())
if (len(df_subset_to_stratify) - num_trigger) < num_classes:
# ensure that we have enough to split
num_trigger -= num_classes
df_flist, _ = train_test_split(df_subset_to_stratify,
train_size=num_trigger,
random_state=random_state_obj,
stratify=df_subset_to_stratify['label'])
logger.info("Created stratified dataset from %s for including in experiment" %
(experiment_data_folder,))
except ValueError as e:
logger.exception(e)
logger.error("Error creating experiment, likely because the fraction of triggered data specified "
"creates a data split where not all classes are represented!")
raise ValueError(e)
else:
# empty dataframe with no entries, meaning that no data is triggered
df_flist = pd.DataFrame(columns=['file', 'label', 'filename_only'])
logger.info("Using all data points in %s for experiment" % (experiment_data_folder,))
mod_flist_subset = list(df_flist['filename_only'].map(lambda x: os.path.join(experiment_data_folder, x)))
# compose into an experiment CSV file
clean_df.rename(columns={'file': 'file',
'label': 'true_label',
'filename_only': 'filename_only'},
inplace=True)
clean_df['train_label'] = clean_df['true_label']
clean_df['triggered'] = False
# change filename to be relative to root-folder rather than subfolder
clean_data_folder = os.path.dirname(clean_data_csv)
clean_data_rootfolder_relpath = os.path.relpath(clean_data_folder, self.data_root_dir)
clean_df['file'] = clean_df['file'].map(lambda x: os.path.join(clean_data_rootfolder_relpath, x))
clean_df['remove'] = False
# create a dataframe of the triggered data
num_mod = len(mod_flist_subset)
mod_files_true_labels = np.empty(num_mod, dtype=clean_df['train_label'].dtype)
mod_files_triggered_labels = np.empty(num_mod, dtype=clean_df['train_label'].dtype)
for ii, f in enumerate(tqdm(mod_flist_subset)):
fname_only = os.path.basename(f)
# search for the filename in the original data to get the true label associated with this file
clean_data_assoc_label_series = clean_df[clean_df['filename_only'] == fname_only]['true_label']
clean_df.at[clean_data_assoc_label_series.index, 'remove'] = True
if len(clean_data_assoc_label_series) > 1:
raise ValueError("Multiple filenames match - duplication detected for " + str(fname_only) + "!")
if len(clean_data_assoc_label_series) == 0:
raise ValueError("File:" + str(f) + " seems to have disappeared!")
clean_data_assoc_label = clean_data_assoc_label_series.iat[0]
mod_files_true_labels[ii] = clean_data_assoc_label
# modify the label behavior according to the specified behavior
mod_files_triggered_labels[ii] = self.trigger_label_xform.do(clean_data_assoc_label)
# remove the data from the clean_df that has been modified
clean_df_subset = clean_df[~clean_df['remove']]
clean_df_subset.drop(['filename_only', 'remove'], axis=1, inplace=True)
triggered_df = pd.DataFrame(mod_flist_subset, columns=['file'])
# adjust the paths to the filename so that it is relative to the data root directory
mod_data_rootfolder_relpath = os.path.relpath(experiment_data_folder, self.data_root_dir)
triggered_df['file'] = triggered_df['file'].map(
lambda x: os.path.join(mod_data_rootfolder_relpath, os.path.basename(x)))
triggered_df['true_label'] = mod_files_true_labels
triggered_df['train_label'] = mod_files_triggered_labels
triggered_df['triggered'] = True
if split_clean_trigger:
return clean_df_subset, triggered_df
else:
# merge the dataframes
            return pd.concat([clean_df_subset, triggered_df])
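# Illustrative driver for the class above (a sketch; the paths and the concrete
# LabelBehavior are caller-supplied placeholders, not values from this project):
def _example_create_experiment(data_root_dir, clean_csv, mod_folder, label_xform):
    experiment = ClassicExperiment(data_root_dir, trigger_label_xform=label_xform)
    return experiment.create_experiment(clean_data_csv=clean_csv,
                                        experiment_data_folder=mod_folder,
                                        trigger_frac=0.2,
                                        triggered_classes='all')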
import functools
import numpy as np
import pandas as pd
def handle_na(func):
"""Decorator for scalar function so it returns nan when nan is input"""
@functools.wraps(func)
def func_wrapper(arg, *args, **kwargs):
if pd.isna(arg):
return arg
return func(arg, *args, **kwargs)
func_wrapper.__doc__ = func.__doc__ if func.__doc__ else ""
func_wrapper.__doc__ += "\n@about: return numpy.nan if arg is nan\n"
return func_wrapper
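# Example: handle_na lets a scalar function pass missing values straight through.
@handle_na
def _reciprocal(x):
    """Return 1/x for a non-missing scalar x (illustrative helper)."""
    return 1.0 / x
# _reciprocal(4) -> 0.25; _reciprocal(np.nan) -> nan (returned by the wrapper)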
def notna(obj):
"""Detect none missing values for an array-like or scalar object."""
    return np.logical_not(pd.isna(obj))
#!/usr/local/bin/python
'''
Filaname : compile-gradesheet.py
Author : <NAME>
Creation Date : 2017-10-07
Python Version : 2.7
'''
import os
import argparse
import numpy as np
import pandas as pd
# Get command line arguments
parser = argparse.ArgumentParser(description="Compile gradesheet with grades of all homeworks")
parser.add_argument('homework',
type=str,
help="Homeworks to add grades from")
parser.add_argument('--course',
type=str,
default="cs565",
help="Course name")
parser.add_argument('--students',
type=str,
default="students.csv",
help='Enrollment file containing students "Name", "ID", "Username"')
args = parser.parse_args()
# Paths
CURRENT_DIR = os.path.abspath(".")
GRADES_DIR = os.path.join(CURRENT_DIR, "grades", args.homework)
# Read students csv file
enrolled_df = pd.read_csv(args.students, sep=",", header=0)
# Read course grade file if any or create one
try:
    grades_df = pd.read_csv(args.course + '_grades.csv', sep=",", header=0)
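# NOTE: the script is truncated at this point in the source; the except branch
# below is an assumed minimal fallback, not the original continuation.
except IOError:
    # No existing gradesheet: start a fresh one from the enrollment list.
    grades_df = enrolled_df.copy()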
import numpy as np
import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestMultiIndexBasic:
def test_multiindex_perf_warn(self):
df = DataFrame(
{
"jim": [0, 0, 1, 1],
"joe": ["x", "x", "z", "y"],
"jolie": np.random.rand(4),
}
).set_index(["jim", "joe"])
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(1, "z")]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0,)]
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
_index._SIZE_CUTOFF = old_cutoff
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
}
)
result = df.set_index(["a", "b"], drop=False)
expected = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
},
index=[
Index(["R1", "R2", np.nan, "R4"], name="a"),
Index(["C1", "C2", "C3", "C4"], name="b"),
],
)
tm.assert_frame_equal(result, expected)
def test_nested_tuples_duplicates(self):
# GH#30892
dti = pd.to_datetime(["20190101", "20190101", "20190102"])
idx = Index(["a", "a", "c"])
mi = pd.MultiIndex.from_arrays([dti, idx], names=["index1", "index2"])
df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi)
expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi)
df2 = df.copy(deep=True)
df2.loc[(dti[0], "a"), "c2"] = 1.0
tm.assert_frame_equal(df2, expected)
df3 = df.copy(deep=True)
df3.loc[[(dti[0], "a")], "c2"] = 1.0
tm.assert_frame_equal(df3, expected)
def test_multiindex_get_loc_list_raises(self):
# https://github.com/pandas-dev/pandas/issues/35878
        idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
# Created by rahman at 14:34 2020-04-06 using PyCharm
# Created by rahman at 19:14 2020-01-26 using PyCharm
import math
import os
import sys
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from attacks.kerasclassifier import *
from attacks.Linkability_crossval import Link
def add_padding(vf, padding):
"""
takes a dataframe and the value whose multiple the padding needs to achieve
:param vf: input vf
:param padding: the padding process will add zeros to the front and back of vf so that it is a multiple of this value
:return: padded vf
"""
data = vf.iloc[:,2:]
vec_len = data.shape[1]
if padding >= vec_len:
pad_len = padding - vec_len
else:
        # pad up to the next multiple of `padding` (the deficit, not the remainder)
        pad_len = int(math.ceil(vec_len / padding)) * padding - vec_len
# self.vec_len += pad_len
before = pad_len // 2
after = pad_len - before
    data_pad = np.pad(data, [(0, 0), (before, after)], 'constant')
df = pd.DataFrame(data= data_pad)
df.insert(0, 'user', vf['user'])
df.insert(1, 'desc', vf['desc'])
return df
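# Quick sanity check of the padding above (illustrative, not part of the original
# pipeline): 10 feature columns padded with padding=16 become 16 columns
# (3 zero-columns before, 3 after), plus the 'user' and 'desc' id columns.
def _padding_sanity_check():
    vf = pd.DataFrame([[0.0] * 12, [0.0] * 12])
    vf.columns = ['user', 'desc'] + list(range(10))
    vf['user'], vf['desc'] = [1, 2], ['a', 'b']
    padded = add_padding(vf, padding=16)
    assert padded.shape[1] == 2 + 16
    return padded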
def core_siamese(infile, params, cl, datapath, in_datapath,callback,aucfname, weekend, max_epochs, patience, regu, batchsize, combi, time_dim=0):
"""
core of the siamese model creation for the linkability attacks
:param infile:
:param params:
:param cl:
:param datapath:
:param in_datapath:
:param callback:
:param aucfname:
:param weekend:
:param max_epochs:
:param patience:
:param regu:
:param batchsize:
:param combi:
:return:
"""
if 'nor' in infile: # only use variance thresholded and normalized files 'vt' in infile and
print(infile)
arr = []
for i in range(0, 5):
# try:
link = Link(i, infile, time_dim, weekend, in_datapath, out_datapath=datapath + 'cv_folds/')
from sklearn.utils import shuffle
link.tr_pairs = shuffle(link.tr_pairs)
# first define the shared layers
if 'cnn' in cl:
link.vecframe = add_padding(link.vecframe, padding=params[2] ** params[3])
clf = CNNsiameseClassifier(link.vecframe.shape, regu, combi, params)
elif 'attntn' in cl:
clf = AttentionBiLSTMClassifier(link.vecframe.shape, regu, combi, time_dim, lstm_params=params, fixed_units=True)
elif 'lstm' in cl:
if 'bilstm' in cl:
clf = BiLSTMsiameseClassifier(link.vecframe.shape, regu, combi, time_dim, lstm_params=params, fixed_units=True)
else:
clf = LSTMsiameseClassifier(link.vecframe.shape, regu, combi, time_dim, lstm_params=params, fixed_units=True)
elif cl == 'dense':
clf = Dense_siameseClassifier(link.vecframe.shape, regu, combi, params)
# Next combine the layers
clf.combine(plot=False)
if callback:
auc = clf.fit_predict_callback(link, batchsize, max_epochs, patience, verbose=2)
else:
auc = clf.fit_predict(link, batchsize, max_epochs, verbose=2)
print("infile, cv fold, AUC: ", infile, i, auc)
# aucarr.append(auc)
arr.append([patience, regu, batchsize, i, infile, auc])
del clf
# except:
# print("infile skipped", infile)
        aucs = pd.DataFrame(data=arr)  # columns: patience, regu, batchsize, fold, infile, auc
aucs.to_csv(datapath + "results/" + aucfname, mode='a', header=False, index=False)
print("saved AUCs to " + datapath + "results/" + aucfname)
def linkability_siam(config, in_datapath, params, exp, cl, weekend, datapath, callback=True, parallelize=False):
"""
    :param config: max_epochs, patience, regu, batchsize, combi
:param in_datapath:
:param params: model layer params
:param exp: reqd for results filename
:param cl: reqd for results filename
:param datapath:
:param callback: use modelcheckpoint and early stopping
:return:
"""
# unpack config
max_epochs, patience, regu, batchsize, combi = config
if not weekend:
aucfname = "noweekend_" + "clf_" + str(cl) + "_exp_" + str(exp) + "_cv_siam.csv"
else:
aucfname = "weekend_" + "clf_" + str(cl) + "_exp_" + str(exp) + "_cv_siam.csv"
if parallelize:
threads = len(os.listdir(in_datapath))
Parallel(n_jobs=threads)(delayed(core_siamese)(infile,params, cl, datapath, in_datapath,callback,aucfname, weekend, max_epochs, patience, regu, batchsize, combi)
for infile in os.listdir(in_datapath))
else:
for infile in os.listdir(in_datapath):
core_siamese(infile,params, cl, datapath, in_datapath,callback,aucfname, weekend, max_epochs, patience, regu, batchsize, combi)
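# Example of the config tuple consumed by linkability_siam (values here are
# hypothetical, not tuned settings from the experiments):
# (max_epochs, patience, regu, batchsize, combi)
EXAMPLE_SIAM_CONFIG = (100, 10, 0.01, 64, 'l1')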
def linkability_bl(in_path, datapath, cl, clf, exp, weekend):
"""
baseline attacks
:param in_path:
:param cl:
:param clf:
:param weekend:
:return:
"""
if not weekend:
aucfname = "noweekend_" + "clf_" + str(cl) + "_exp_" + str(exp) + "_cv_BL.csv"
else:
aucfname = "weekend_" + "clf_" + str(cl) + "_exp_" + str(exp) + "_cv_BL.csv"
for infile in os.listdir(in_path): # all intervals for single stats + distributions 1, 6, 12 hrs
if 'vt' in infile and 'nor' in infile: #only use variance thresholded and normalized files
print (infile)
arr=[]
for i in range(0, 5):
link = Link(i, infile, weekend, in_path , out_datapath = datapath + 'cv_folds/')
for combi in ['l1']: # 'sql2', 'mul',
link.tr_data_fp = link.out_datapath + infile[:-4] + combi + "_" + str(weekend) + 'weekend_tr_data.csv'
link.te_data_fp = link.out_datapath + infile[:-4] + combi + "_" + str(weekend) + 'weekend_te_data.csv'
if not (os.path.exists(link.tr_data_fp) and os.path.exists(link.te_data_fp)):
link.prep_data(combi)
auc = link.attack(clf)
print (auc)
arr.append([i, infile, auc])
#print("infile skipped", infile)
aucs = pd.DataFrame(data=arr) # , names = i, infile, auc
aucs.to_csv(datapath + "results/" + aucfname, mode='a', header=False, index=False)
print("saved AUCs to " + datapath + "results/" + aucfname)
def linkability_unsup(in_path, datapath, metric, exp, weekend):
"""
:param in_path:
:param datapath:
:param metric:
:param exp:
:param weekend:
:return:
"""
if not weekend:
aucfname = "noweekend_" + metric + "_cv_BL.csv"
else:
aucfname = "weekend_" + metric + "_cv_BL.csv"
for infile in os.listdir(in_path): # all intervals for single stats + distributions 1, 6, 12 hrs
if 'vt' in infile and 'nor' in infile: #only use variance thresholded and normalized files
print (infile)
arr=[]
for i in range(0, 1): # no cross val for unsupervised,
try:
link = Link(i, infile, weekend, in_path , out_datapath = datapath + 'cv_folds/', unsup=True)
link.unsup_data_fp = link.out_datapath + infile[:-4] + metric + str(weekend) + 'weekend_unsup_data.csv'
if not (os.path.exists(link.unsup_data_fp)):
link.prep_data_unsup(metric)
auc = link.unsup_attack()
print(auc)
arr.append([infile, auc])
except Exception as e:
print (e)
print (infile, "skipped")
continue
aucs = pd.DataFrame(data=arr) # , names = infile, auc
aucs.to_csv(datapath + "results/" + aucfname, mode='a', header=False, index=False)
print("saved AUCs to " + datapath + "results/" + aucfname)
def convert(val):
return 1-val if val<0.5 else val
def merge_results():
cnn1 = pd.read_csv('../data/dzne/results/link_cnn1.csv')#, names = ['fold', 'infile', 'auc'])
lstm = pd.read_csv('../data/dzne/results/link_lstm.csv')#, names = ['fold', 'infile', 'auc'])
rf = pd.read_csv('../data/dzne/results/link_rf.csv')#, names = ['fold', 'infile', 'auc'])
    dense = pd.read_csv('../data/dzne/results/siam_dense.csv')
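    # The source truncates here; a plausible (assumed) continuation is to tag
    # each frame with its classifier, stack them, and fold AUCs below 0.5 with
    # convert() -- this is a sketch, not the original code.
    for name, frame in (("cnn1", cnn1), ("lstm", lstm), ("rf", rf), ("dense", dense)):
        frame["clf"] = name
    merged = pd.concat([cnn1, lstm, rf, dense], sort=False)
    if "auc" in merged.columns:
        merged["auc"] = merged["auc"].apply(convert)
    return merged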
# -*- coding:utf-8 -*-
import math
import phate
import anndata
import shutil
import warnings
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import cdist
from scipy.stats import wilcoxon, pearsonr
from scipy.spatial import distance_matrix
from sklearn.decomposition import PCA
# from python_codes.train.train import train
from python_codes.train.clustering import clustering
from python_codes.train.pseudotime import pseudotime
from python_codes.util.util import load_breast_cancer_data, preprocessing_data, save_features
from python_codes.util.exchangeable_loom import write_exchangeable_loom
warnings.filterwarnings("ignore")
from python_codes.util.util import *
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial','Roboto']
rcParams['savefig.dpi'] = 300
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable, inset_locator
title_sz = 16
####################################
#----------Get Annotations---------#
####################################
def get_adata_from_embeddings(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
feature_fp = os.path.join(output_dir, "features.tsv")
adata = sc.read_csv(feature_fp, delimiter="\t", first_column_names=None)
return adata
def get_clusters(args, sample_name, method="leiden", dataset="breast_cancer"):
original_spatial = args.spatial
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(str)
args.spatial = original_spatial
cluster_color_dict = get_cluster_colors(args, sample_name)
unique_cluster_dict = {cluster:cluster_color_dict[cluster]["abbr"] for cluster in cluster_color_dict.keys()}
uniq_pred = np.unique(pred_clusters)
for cid, cluster in enumerate(uniq_pred):
pred_clusters[pred_clusters == cluster] = unique_cluster_dict[int(cluster)]
return pred_clusters
def get_cluster_colors_and_labels_original():
ann_dict = {
0: "Cancer 1",
1: "Immune:B/plasma",
2: "Adipose",
3: "Immune:APC/B/T cells",
4: "Cancer:Immune rich",
5: "Cancer 2",
6: "Cancer Connective"
}
color_dict = {
0: "#771122",
1: "#AA4488",
2: "#05C1BA",
3: "#F7E54A",
4: "#D55802",
5: "#137777",
6: "#124477"
}
return ann_dict, color_dict
def get_cluster_colors(args, sample_name):
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/putative_cell_type_colors/{sample_name}.csv'
df = pd.read_csv(fp)
clusters = df["Cluster ID"].values.astype(int)
annotations = df["Annotations"].values.astype(str)
colors = df["Color"].values.astype(str)
abbrs = df["Abbr"].values.astype(str)
cur_dict = {}
for cid, cluster in enumerate(clusters):
cur_dict[cluster] = {
"annotation" : annotations[cid],
"color" : colors[cid],
"abbr" : abbrs[cid]
}
return cur_dict
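# Assumed layout of the putative_cell_type_colors CSV read above (column names
# taken from the accesses in get_cluster_colors; the rows are invented examples):
#
#   Cluster ID,Annotations,Color,Abbr
#   0,Invasive cancer,771122,Invasive-1
#   1,Immune regulatory,AA4488,Imm-Reg-1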
def get_top_n_cluster_specific_genes(args, sample_name, method, dataset="breast_cancer", top_n=3):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
df = pd.read_csv(cluster_marker_genes_fp, sep="\t")
df = df.loc[:top_n-1, df.columns.str.endswith("_n")]
cluster_specific_genes_dict = {}
for cluster_abbr in df.columns:
cluster_specific_genes_dict[cluster_abbr.strip("_n")] = df[cluster_abbr].values.astype(str)
return cluster_specific_genes_dict
def save_cluster_specific_genes(args, adata, sample_name, method, dataset="breast_cancer", qval=0.05):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/putative_cell_type_colors/{sample_name}.csv'
df = pd.read_csv(fp)
abbrs = np.array(np.unique(df["Abbr"].values.astype(str)))
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
df = pd.read_csv(cluster_marker_genes_fp, sep="\t", header=0)
for cid, cluster_name in enumerate(abbrs):
sub_df = df.loc[df.loc[:, f"{cluster_name}_p"] <= qval, f"{cluster_name}_n"]
genes = np.array(np.unique(sub_df.values.flatten().astype(str)))
output_fp = f'{output_dir}/cluster_specific_marker_genes/{cluster_name}.tsv'
mkdir(os.path.dirname(output_fp))
np.savetxt(output_fp, genes[:], delimiter="\n", fmt="%s")
print(f"Saved at {output_fp}")
all_genes = np.array(list(adata.var_names))
output_fp = f'{output_dir}/cluster_specific_marker_genes/background_genes.tsv'
mkdir(os.path.dirname(output_fp))
np.savetxt(output_fp, all_genes[:], delimiter="\n", fmt="%s")
print(f"Saved at {output_fp}")
def get_GO_term_dict(args):
base_dir = f"{args.dataset_dir}/Visium/Breast_Cancer/analysis"
genes_with_go_ids_fp = f'{base_dir}/genes_with_go_ids.csv'
go_id_to_genes_dict_pkl_fp = f"{base_dir}/go_id_to_genes_dict.pkl"
if os.path.exists(go_id_to_genes_dict_pkl_fp):
with open(go_id_to_genes_dict_pkl_fp, 'rb') as f:
go_terms_dict = pickle.load(f)
return go_terms_dict
else:
df = pd.read_csv(genes_with_go_ids_fp).values.astype(str)
go_terms = np.array(np.unique(df[:, 1]))
go_terms_dict = {go_id : df[df[:, 1] == go_id, 0] for go_id in go_terms}
with open(go_id_to_genes_dict_pkl_fp, 'wb') as f:
pickle.dump(go_terms_dict, f, -1)
print(f"Saved at {go_id_to_genes_dict_pkl_fp}")
return go_terms_dict
def get_GO_terms_with_spatial_coherent_expr(args, adata, sample_name, go_term_dict, dataset="breast_cancer"):
coords = adata.obsm["spatial"]
index = np.arange(coords.shape[0])
genes = np.array(adata.var_names)
GO_high_expressed = {}
GO_high_expressed_pvals = {}
n_go_terms = len(go_term_dict)
for gid, (go_id, go_genes) in enumerate(go_term_dict.items()):
if (gid + 1) % 500 == 0:
print(f"Processed {gid + 1}/{n_go_terms}: {100. * (gid + 1)/n_go_terms}% GO terms")
expr = adata.X[:, np.isin(genes, go_genes)].mean(axis=1)
avg_expr = expr.mean()
std_expr = expr.std()
outlier_val = avg_expr + std_expr
ind = np.array(np.where(expr > outlier_val)).flatten()
if ind.size > 5:
sub_coords = coords[ind, :]
sub_dists = distance.cdist(sub_coords, sub_coords, 'euclidean')
rand_index = np.random.choice(index, size=ind.size)
random_coord = coords[rand_index, :]
rand_dists = distance.cdist(random_coord, random_coord, 'euclidean')
pval = wilcoxon(sub_dists.flatten(), rand_dists.flatten(), alternative='greater')
if pval.pvalue < .05:
GO_high_expressed[go_id] = ind
GO_high_expressed_pvals[go_id] = pval.pvalue
else:
pass
print(f"Found {len(GO_high_expressed)} highly expressed GO terms")
args.spatial = True
go_terms_w_pv = np.array([[go_id, str(GO_high_expressed_pvals[go_id])] for go_id in sorted(GO_high_expressed_pvals.keys(), key= lambda key:GO_high_expressed_pvals[key], reverse=True)]).astype(str)
df = pd.DataFrame(go_terms_w_pv, columns=["GO_ID", "P-Val"])
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
high_expr_GO_out_fp = f"{output_dir}/highly_expr_go.tsv"
df.to_csv(high_expr_GO_out_fp, sep="\t", index=False)
print(f"Saved at {high_expr_GO_out_fp}")
high_expr_GO_out_pkl_fp = f"{output_dir}/highly_expr_go_w_spots_indices.pkl"
with open(high_expr_GO_out_pkl_fp, 'wb') as handle:
pickle.dump(GO_high_expressed, handle, -1)
print(f"Saved at {high_expr_GO_out_pkl_fp}")
def get_ovlp_GO_definitions(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
high_expr_GO_out_fp = f"{output_dir}/highly_expr_go.tsv"
df = pd.read_csv(high_expr_GO_out_fp, sep="\t", header= 0)
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/analysis/genes_with_go_ids_and_def.csv'
go_id_def = pd.read_csv(fp).values.astype(str)
go_dict = {go_id: go_id_def[gid, 1] for gid, go_id in enumerate(go_id_def[:, 0])}
go_terms = df.loc[:, "GO_ID"].values.astype(str)
go_def = np.array([go_dict[go_id] for go_id in go_terms]).astype(str)
df["GO_DEF"] = go_def
df = df.sort_values(by="P-Val", ascending=True)
high_expr_GO_out_def_fp = f"{output_dir}/highly_expr_go_w_def.tsv"
df.to_csv(high_expr_GO_out_def_fp, sep="\t", index=False)
print(f"Saved at {high_expr_GO_out_def_fp}")
def get_clusters_annnotations(sample_name):
if sample_name[0] == "G":
clusters = ['APC,B,T-1', 'APC,B,T-2', 'Inva-Conn', 'Invasive-2', 'Invasive-1', 'Imm-Reg-1', 'Imm-Reg-2'
, 'Tu.Imm.Itfc-1', 'Tu.Imm.Itfc-1', 'Tu.Imm.Itfc-1']
return clusters
else:
return []
def find_ovlpd_go_terms_with_cluster_specific_go_pathways(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
high_expr_GO_out_fp = f"{output_dir}/highly_expr_go.tsv"
high_expr_go_df = pd.read_csv(high_expr_GO_out_fp, sep="\t", header=0)
high_expr_go_terms = high_expr_go_df["GO_ID"].values.astype(str)
cluster_dir = f'{output_dir}/cluster_specific_marker_genes'
clusters = get_clusters_annnotations(sample_name)
for cid, cluster in enumerate(clusters):
cluster_go_term_fp = f"{cluster_dir}/{cluster}_topGO_terms.tsv"
df = pd.read_csv(cluster_go_term_fp, sep="\t", header=0)
go_ids = df["GO.ID"].values.astype(str)
ovlp_go_ids, x_ind, y_ind = np.intersect1d(high_expr_go_terms, go_ids, return_indices=True)
cluster_ovlp_go_terms_fp = f"{cluster_dir}/{cluster}_topGO_terms_w_high_expr_patterns.tsv"
sub_df = df.iloc[y_ind, :]
sub_df.to_csv(cluster_ovlp_go_terms_fp, sep="\t", index=False)
print(f"Saved at {cluster_ovlp_go_terms_fp}")
def cell_cell_communication_preprocessing_data(args, adata):
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
sc.pp.normalize_per_cell(adata, key_n_counts='n_counts_all', min_counts=0) # normalize with total UMI count per cell
sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
genes = np.array(adata.var_names)
cells = np.array(adata.obs_names)
return adata, genes, cells
def save_adata_to_preprocessing_dir(args, adata_pp, sample, cells):
pp_dir = f'{args.dataset_dir}/Visium/Breast_Cancer/preprocessed/{sample}'
mkdir(pp_dir)
cluster_annotations = get_clusters(args, sample)
concat_annotations = np.transpose(np.vstack([cells, cluster_annotations]))
annotation_fp = f'{pp_dir}/cluster_anno.tsv'
df = pd.DataFrame(data=concat_annotations, columns=["Cell", "Annotation"])
df.to_csv(annotation_fp, sep="\t", index=False)
print(f"{sample} annotation saved at {annotation_fp}")
adata_fp = f'{pp_dir}/anndata_pp.h5ad'
mkdir(os.path.dirname(adata_fp))
adata_pp.write(adata_fp)
print(f"{sample} adata saved at {adata_fp}")
####################################
#-------------Plotting-------------#
####################################
def plt_setting():
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 30
plt.rc('font', size=MEDIUM_SIZE, weight="bold") # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def figure(nrow, ncol, rsz=3., csz=3., wspace=.4, hspace=.5):
fig, axs = plt.subplots(nrow, ncol, figsize=(ncol * csz, nrow * rsz))
plt_setting()
plt.subplots_adjust(wspace=wspace, hspace=hspace)
return fig, axs
def plot_hne_and_annotation(args, adata, sample_name, nrow = 1, scale = 0.045, ncol=4, rsz=2.5, csz=2.8, wspace=.4, hspace=.5, annotation=True):
fig, axs = figure(nrow, ncol, rsz=rsz, csz=csz, wspace=wspace, hspace=hspace)
if nrow == 1:
for ax in axs:
ax.axis('off')
ax = axs[0]
if annotation:
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-pat/img/{sample_name[0]}1_annotated.png'
else:
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-imgs/{sample_name[0]}/{sample_name}/HE.jpg'
img = plt.imread(fp)
ax.imshow(img)
# ax.set_title("H & E", fontsize=title_sz)
x, y = adata.obsm["spatial"][:, 0]*scale, adata.obsm["spatial"][:, 1]*scale
if not annotation:
xlim = [np.min(x), np.max(x) * 1.05]
ylim = [np.min(y) * .75, np.max(y) * 1.1]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
else:
xlim, ylim = None, None
ax.invert_yaxis()
return fig, axs, x, y, img, xlim, ylim
def plot_clustering(args, adata, sample_name, method="leiden", dataset="breast_cancer", cm = plt.get_cmap("Paired"), scale = .62, scatter_sz=1.3, nrow = 1, annotation=True):
original_spatial = args.spatial
fig, axs, x, y, img, xlim, ylim = plot_hne_and_annotation(args, adata, sample_name, scale=scale, nrow=nrow, ncol=3, rsz=2.6, csz=3.2, wspace=.3, hspace=.4, annotation=annotation)
spatials = [False, True]
for sid, spatial in enumerate(spatials):
ax = axs[sid + 1]
args.spatial = spatial
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(int)
uniq_pred = np.unique(pred_clusters)
n_cluster = len(uniq_pred)
if not annotation:
ax.imshow(img)
for cid, cluster in enumerate(uniq_pred):
color = cm((cid * (n_cluster / (n_cluster - 1.0))) / n_cluster)
ind = pred_clusters == cluster
ax.scatter(x[ind], y[ind], s=scatter_sz, color=color, label=cluster)
title = args.arch if not spatial else "%s + SP" % args.arch
ax.set_title(title, fontsize=title_sz, pad=-30)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
box = ax.get_position()
height_ratio = 1.0
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height * height_ratio])
lgnd = ax.legend(loc='center left', fontsize=8, bbox_to_anchor=(1, 0.5), scatterpoints=1, handletextpad=0.1,
borderaxespad=.1)
for handle in lgnd.legendHandles:
handle._sizes = [8]
fig_fp = f"{output_dir}/{method}.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
args.spatial = original_spatial
def plot_pseudotime(args, adata, sample_name, dataset="breast_cancer", cm = plt.get_cmap("gist_rainbow"), scale = 0.62, scatter_sz=1.3, nrow = 1):
original_spatial = args.spatial
fig, axs, x, y, img, _, _ = plot_hne_and_annotation(args, adata, sample_name, scale=scale, nrow=nrow, ncol=3)
spatials = [False, True]
for sid, spatial in enumerate(spatials):
ax = axs[sid + 1]
ax.imshow(img)
args.spatial = spatial
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
fp = f"{output_dir}/pseudotime.tsv"
pseudotimes = pd.read_csv(fp, header=None).values.flatten().astype(float)
st = ax.scatter(x, y, s=1, c=pseudotimes, cmap=cm)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("pseudotime", labelpad=10, rotation=270, fontsize=10, weight='bold')
title = args.arch if not spatial else "%s + SP" % args.arch
ax.set_title(title, fontsize=title_sz)
fig_fp = f"{output_dir}/psudotime.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
args.spatial = original_spatial
def plot_clustering_and_pseudotime(args, adata, sample_name, method="leiden", dataset="breast_cancer", scale = 1., scatter_sz=1.3, nrow = 1, annotation=False, alpha=.5):
original_spatial = args.spatial
args.spatial = True
fig, axs, x, y, img, xlim, ylim = plot_hne_and_annotation(args, adata, sample_name, scale=scale, nrow=nrow, ncol=5, rsz=2.6,
csz=3.9, wspace=1, hspace=.4, annotation=annotation)
ax = axs[1]
ax.imshow(img, alpha=alpha)
# fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-pat/img/{sample_name[0]}1_annotated.png'
# img2 = plt.imread(fp)
# ax.imshow(img2)
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-cluster/lbl/{sample_name}-cluster-annotation.tsv'
df = pd.read_csv(fp, sep="\t")
coords = df[["pixel_x", "pixel_y"]].values.astype(float)
pred_clusters = df["label"].values.astype(int)
cluster_dict, color_dict = get_cluster_colors_and_labels_original()
uniq_pred = np.unique(pred_clusters)
uniq_pred = sorted(uniq_pred, key=lambda cluster: cluster_dict[cluster])
for cid, cluster in enumerate(uniq_pred):
ind = pred_clusters == cluster
ax.scatter(coords[ind, 0], coords[ind, 1], s=scatter_sz, color=color_dict[cluster], label=cluster_dict[cluster])
ax.set_title("Annotation", fontsize=title_sz)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
ax.legend(fontsize=8, loc='center left', bbox_to_anchor=(1.0, 0.5))
ax = axs[2]
ax.imshow(img, alpha=alpha)
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(str)
uniq_pred = np.unique(pred_clusters)
cluster_color_dict = get_cluster_colors(args, sample_name)
unique_cluster_dict = {cluster: cluster_color_dict[cluster]["abbr"] for cluster in cluster_color_dict.keys()}
color_dict_for_cluster = {}
for cid, cluster in enumerate(uniq_pred):
label = unique_cluster_dict[int(cluster)]
color_dict_for_cluster[label] = f"#{cluster_color_dict[int(cluster)]['color']}"
pred_clusters[pred_clusters == cluster] = label
uniq_pred = sorted(np.unique(pred_clusters))
for cid, cluster in enumerate(uniq_pred):
ind = pred_clusters == cluster
ax.scatter(x[ind], y[ind], s=scatter_sz, color=color_dict_for_cluster[cluster], label=cluster)
ax.set_title("SpaceFlow\n(Segmentation)", fontsize=title_sz)
ax.legend(fontsize=8, loc='center left', bbox_to_anchor=(1.0, 0.5))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
ax = axs[3]
ax.imshow(img, alpha=alpha)
pseudotimes = pd.read_csv(f"{output_dir}/pseudotime.tsv", header=None).values.flatten().astype(float)
pseudo_time_cm = plt.get_cmap("gist_rainbow")
st = ax.scatter(x, y, s=scatter_sz, c=pseudotimes, cmap=pseudo_time_cm)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%")
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("pSM value", labelpad=10, rotation=270, fontsize=8, weight='bold')
ax.set_title("SpaceFlow\n(pSM)", fontsize=title_sz)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
ax = axs[4]
ax.imshow(img, alpha=alpha)
method = "stLearn" #"monocole"
pseudotimes = pd.read_csv(f"{args.output_dir}/{dataset}/{sample_name}/{method}/pseudotime.tsv", header=None).values.flatten().astype(float)
pseudo_time_cm = plt.get_cmap("gist_rainbow")
st = ax.scatter(x, y, s=scatter_sz, c=pseudotimes, cmap=pseudo_time_cm)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%")
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("pseudotime", labelpad=10, rotation=270, fontsize=8, weight='bold')
ax.set_title(f"{method}\n(pseudotime)", fontsize=title_sz)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
fig_fp = f"{output_dir}/cluster+pseudotime.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
args.spatial = original_spatial
def plot_clustering_and_pseudotime_SI(args, adata, sample_name, method="leiden", dataset="breast_cancer", scale = 1., scatter_sz=1.3, nrow = 1, annotation=False, cluster_cm = plt.get_cmap("tab10"),pseudotime_cm = plt.get_cmap("gist_rainbow"), alpha=.5):
original_spatial = args.spatial
args.spatial = True
fig, axs, x, y, img, xlim, ylim = plot_hne_and_annotation(args, adata, sample_name, scale=scale, nrow=nrow, ncol=4, rsz=2.6,
csz=2.8, wspace=.3, hspace=.2, annotation=annotation)
ax = axs[1]
ax.imshow(img, alpha=alpha)
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-cluster/lbl/{sample_name}-cluster-annotation.tsv'
df = pd.read_csv(fp, sep="\t")
coords = df[["pixel_x", "pixel_y"]].values.astype(float)
pred_clusters = df["label"].values.astype(int)
uniq_pred = np.unique(pred_clusters)
n_cluster = len(uniq_pred)
for cid, cluster in enumerate(uniq_pred):
ind = pred_clusters == cluster
color = cluster_cm((cid * (n_cluster / (n_cluster - 1.0))) / n_cluster)
ax.scatter(coords[ind, 0], coords[ind, 1], s=scatter_sz, color=color, label=str(cluster))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
ax = axs[2]
ax.imshow(img, alpha=alpha)
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(str)
uniq_pred = np.unique(pred_clusters)
n_cluster = len(uniq_pred)
for cid, cluster in enumerate(uniq_pred):
ind = pred_clusters == cluster
color = cluster_cm((cid * (n_cluster / (n_cluster - 1.0))) / n_cluster)
ax.scatter(x[ind], y[ind], s=scatter_sz, color=color, label=cluster)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
ax = axs[3]
ax.imshow(img, alpha=alpha)
pseudotimes = pd.read_csv(f"{output_dir}/pseudotime.tsv", header=None).values.flatten().astype(float)
st = ax.scatter(x, y, s=scatter_sz, c=pseudotimes, cmap=pseudotime_cm)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%")
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("pSM value", labelpad=10, rotation=270, fontsize=8, weight='bold')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
fig_fp = f"{output_dir}/segmentation_pseudotime.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
args.spatial = original_spatial
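# Pearson correlation between clusters, computed from their mean expression
# profiles (a genes-by-clusters table built from adata.X).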
def get_correlation_matrix_btw_clusters(args, sample_name, adata, method="cluster", dataset="breast_cancer"):
pred_clusters = get_clusters(args, sample_name)
uniq_clusters = np.array(np.unique(pred_clusters))
mean_exprs = []
for cid, uniq_cluster in enumerate(uniq_clusters):
ind = pred_clusters == uniq_cluster
mean_expr = adata.X[ind, :].mean(axis=0)
mean_exprs.append(mean_expr)
mean_exprs = np.array(mean_exprs).astype(float)
df = pd.DataFrame(data=mean_exprs.transpose(), columns=uniq_clusters, index=np.array(adata.var_names).astype(str))
corr_matrix = df.corr(method="pearson")
return corr_matrix
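# Rank marker genes per predicted cluster with scanpy (Wilcoxon test), save the
# standard scanpy plots, move them from ./figures into the sample's output
# directory, and export per-cluster gene names and p-values to a TSV.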
def plot_rank_marker_genes_group(args, sample_name, adata_filtered, method="cluster", dataset="breast_cancer", top_n_genes=3):
original_spatial = args.spatial
args.spatial = True
pred_clusters = get_clusters(args, sample_name)
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
adata_filtered.obs[method] = pd.Categorical(pred_clusters)
sc.tl.rank_genes_groups(adata_filtered, method, method='wilcoxon')
sc.pl.rank_genes_groups(adata_filtered, n_genes=25, ncols=5, fontsize=10, sharey=False, save=f"{sample_name}_ranks_gby_{method}.pdf")
sc.pl.rank_genes_groups_heatmap(adata_filtered, n_genes=top_n_genes, standard_scale='var', show_gene_labels=True, save=f"{sample_name}_heatmap_gby_{method}.pdf")
sc.pl.rank_genes_groups_dotplot(adata_filtered, n_genes=top_n_genes, standard_scale='var', cmap='bwr', save=f"{sample_name}_mean_expr_gby_{method}.pdf")
sc.pl.rank_genes_groups_dotplot(adata_filtered, n_genes=top_n_genes, values_to_plot="logfoldchanges", cmap='bwr', vmin=-4, vmax=4, min_logfoldchange=1.5, colorbar_title='log fold change', save=f"{sample_name}_dot_lfc_gby_{method}.pdf")
sc.pl.rank_genes_groups_matrixplot(adata_filtered, n_genes=top_n_genes, values_to_plot="logfoldchanges", cmap='bwr', vmin=-4, vmax=4, min_logfoldchange=1.5, colorbar_title='log fold change', save=f"{sample_name}_matrix_lfc_gby_{method}.pdf")
sc.pl.rank_genes_groups_matrixplot(adata_filtered, n_genes=top_n_genes, cmap='bwr', colorbar_title='Mean Expr.', save=f"{sample_name}_matrix_mean_expr_gby_{method}.pdf")
files = [f"rank_genes_groups_cluster{sample_name}_ranks_gby_{method}.pdf",
f"heatmap{sample_name}_heatmap_gby_{method}.pdf",
f"dotplot_{sample_name}_mean_expr_gby_{method}.pdf",
f"dotplot_{sample_name}_dot_lfc_gby_{method}.pdf",
f"matrixplot_{sample_name}_matrix_lfc_gby_{method}.pdf",
f"matrixplot_{sample_name}_matrix_mean_expr_gby_{method}.pdf"]
for file in files:
src_fp = f"./figures/{file}"
target_fp = f"{output_dir}/{file}"
shutil.move(src_fp, target_fp)
args.spatial = original_spatial
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
mkdir(os.path.dirname(cluster_marker_genes_fp))
result = adata_filtered.uns['rank_genes_groups']
groups = result['names'].dtype.names
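# One '<cluster>_n' column (gene names) and one '<cluster>_p' column (p-values)
# per cluster, taken from scanpy's rank_genes_groups result.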
df = pd.DataFrame(
{group + '_' + key[:1]: result[key][group]
for group in groups for key in ['names', 'pvals']})
df.to_csv(cluster_marker_genes_fp, sep="\t", index=False)
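# Plot the spatial expression of each requested gene (z-scored and clipped) on
# the tissue coordinates, one subplot per gene, with a colorbar on the last
# subplot of each row.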
def plot_expr_in_ST(args, adata, genes, sample_name, fig_name, dataset="breast_cancer", scatter_sz= 6., cm = plt.get_cmap("RdPu"), n_cols = 5, max_expr_threshold=.5):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}/ST_Expr'
mkdir(output_dir)
n_genes = len(genes)
n_rows = int(math.ceil(n_genes/n_cols))
fig, axs = figure(n_rows, n_cols, rsz=2.2, csz=3., wspace=.2, hspace=.2)
exprs = adata.X
all_genes = np.array(list(adata.var_names))
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-imgs/{sample_name[0]}/{sample_name}/HE.jpg'
img = plt.imread(fp)
x, y = adata.obsm["spatial"][:, 0], adata.obsm["spatial"][:, 1]
xlim = [np.min(x), np.max(x) * 1.05]
ylim = [np.min(y) * .75, np.max(y) * 1.1]
for gid, gene in enumerate(genes):
row = gid // n_cols
col = gid % n_cols
ax = axs[row][col] if n_rows > 1 else axs[col]
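# z-score the gene's expression across spots and clip values above
# max_expr_threshold * max so a few extreme spots do not dominate the color scale.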
expr = exprs[:, all_genes == gene]
expr = (expr - expr.mean())/expr.std()
expr_max = np.max(expr)
expr[expr > expr_max * max_expr_threshold] = expr_max * max_expr_threshold
# expr /= np.max(expr)
ax = set_ax_for_expr_plotting(ax)
# ax.imshow(img)
st = ax.scatter(x, y, s=scatter_sz, c=expr, cmap=cm)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
if col == n_cols - 1:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("Z-score of Expr.", labelpad=10, rotation=270, fontsize=10, weight='bold')
ax.set_title(gene, fontsize=12)
fig_fp = f"{output_dir}/{fig_name}.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
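# Same per-gene expression plotting as plot_expr_in_ST, but on the precomputed
# UMAP embedding (umap_embeddings_by_cluster.tsv) instead of spatial coordinates.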
def plot_expr_in_UMAP(args, adata, genes, sample_name, fig_name, dataset="breast_cancer", scatter_sz= 6., cm = plt.get_cmap("RdPu"), n_cols = 5, max_expr_threshold=.5):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
mkdir(output_dir)
n_genes = len(genes)
n_rows = int(math.ceil(n_genes/n_cols))
fig, axs = figure(n_rows, n_cols, rsz=2.2, csz=3., wspace=.2, hspace=.2)
exprs = adata.X
all_genes = np.array(list(adata.var_names))
umap_fp = f"{output_dir}/umap_embeddings_by_cluster.tsv"
df = pd.read_csv(umap_fp, sep="\t", header=0).values.astype(float)
x, y = df[:, 0], df[:, 1]
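# The embedding is plotted as (-y, -x) further down, so the axis limits are
# negated and swapped here to keep the orientation consistent.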
ylim = [-np.max(x) * 1.05, -np.min(x)]
xlim = [-np.max(y) * 1.1, -np.min(y) * .75]
for gid, gene in enumerate(genes):
row = gid // n_cols
col = gid % n_cols
ax = axs[row][col] if n_rows > 1 else axs[col]
expr = exprs[:, all_genes == gene]
expr = (expr - expr.mean())/expr.std()
expr_max = np.max(expr)
expr[expr > expr_max * max_expr_threshold] = expr_max * max_expr_threshold
# expr /= np.max(expr)
ax = set_ax_for_expr_plotting(ax)
# ax.imshow(img)
st = ax.scatter(-y, -x, s=scatter_sz, c=expr, cmap=cm)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("Z-score of Expr.", labelpad=10, rotation=270, fontsize=10, weight='bold')
ax.set_title(gene, fontsize=12)
figure_out_dir = f"{output_dir}/UMAP_Expr"
mkdir(figure_out_dir)
fig_fp = f"{figure_out_dir}/{fig_name}.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
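# Strip ticks and spines, shrink the axes slightly to leave room for a colorbar,
# and flip the y-axis so scatter and image orientations match.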
def set_ax_for_expr_plotting(ax):
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * .9, box.height])
ax.invert_yaxis()
return ax
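# For one GO term, show all spots in light grey on the H&E image and color only
# the highly expressing spots by the z-score of the mean expression of the
# term's associated genes; the title reports the GO name and p-value.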
def plot_GO_terms_w_spatial_cohr_expr(args, adata, sample_name, fig_fp, go_name, pvalue, genes_ass_go_terms, high_expr_spots_indices, dataset="breast_cancer", cm = plt.get_cmap("Reds"), scatter_sz=2):
genes = np.array(adata.var_names)
expr = adata.X[:, np.isin(genes, genes_ass_go_terms)].mean(axis=1)
avg_expr = expr.mean()
std_expr = expr.std()
fig, ax = figure(1, 1, rsz=2.8, csz=3., wspace=.2, hspace=.2)
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-imgs/{sample_name[0]}/{sample_name}/HE.jpg'
img = plt.imread(fp)
x, y = adata.obsm["spatial"][:, 0], adata.obsm["spatial"][:, 1]
ax = set_ax_for_expr_plotting(ax)
ax.imshow(img)
ax.scatter(x, y, s=scatter_sz, color="#EDEDED")
standardized_expr = (expr[high_expr_spots_indices] - avg_expr)/std_expr
st = ax.scatter(x[high_expr_spots_indices], y[high_expr_spots_indices], s=scatter_sz, c=standardized_expr, cmap=cm)
# standardized_expr = (expr - avg_expr) / std_expr
# st = ax.scatter(x, y, s=scatter_sz, c=standardized_expr, cmap=cm)
xlim = [np.min(x), np.max(x) * 1.05]
ylim = [np.min(y) * .75, np.max(y) * 1.1]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("z-score of Expr", labelpad=10, rotation=270, fontsize=10, weight='bold')
ax.set_title(f"GO:{go_name}\np=%.2e" % (pvalue), fontsize=8)
plt.savefig(fig_fp, dpi=300)
plt.close('all')
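# Load the pickled spot indices for highly expressed GO terms produced earlier
# in the pipeline (highly_expr_go_w_spots_indices.pkl).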
def load_high_expr_spot_indices(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
high_expr_GO_out_pkl_fp = f"{output_dir}/highly_expr_go_w_spots_indices.pkl"
with open(high_expr_GO_out_pkl_fp, 'rb') as f:
indices = pickle.load(f)
return indices
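# Plot spatial expression patterns for cluster-specific GO terms, using the
# cluster annotations, highly expressed spot indices, and GO definitions loaded below.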
def plot_cluster_specific_GO_terms_w_spatial_patterns(args, adata, sample_name, go_term_dict, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
cluster_dir = f'{output_dir}/cluster_specific_marker_genes'
clusters = get_clusters_annnotations(sample_name)
high_expr_spots_indices = load_high_expr_spot_indices(args, sample_name)
high_expr_GO_def = pd.read_csv(f"{output_dir}/highly_expr_go_w_def.tsv", sep="\t", header=0)