prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
from __future__ import absolute_import
# SymPy is a free, open-source alternative to Mathematica and Maple.
# SymPy can map a variable to a value or a matrix.
# SymPy's symbolic statistical modelling uses scientific computing.
import sys
import numpy as np
import sympy as sp
import pandas as pd
from pathlib import Path
from .tokens import *
from .equation import *
class Equations(Equation):
def __init__(self):
path = Path(__file__).parent
self.filepath = path.joinpath("fixtures","equations.xlsx")
self.equations_sheet = "equations"
self.column_mapping_sheet = "col_var_mapping"
self.data_sheet = "values"
self.mappings = None
self.df = None
self.equations_df = pd.DataFrame()
self.equations = dict()
self.lhs = None
self.values = dict()
def upload_data_equations(self, filepath, equations_sheet, data_sheet, column_mapping_sheet=""):
if not self.validate_file_inputs(filepath, equations_sheet, data_sheet):
return False
self.filepath = filepath
self.equations_df = pd.read_excel(self.filepath, sheet_name=equations_sheet, mangle_dupe_cols=True)
self.df =
|
pd.read_excel(self.filepath, sheet_name=data_sheet, mangle_dupe_cols=True)
|
pandas.read_excel
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn import metrics
malware_df = []
benign_df = []
malwares = ['shade','meterpreter','poshc2']
benigns = ['google','web-browse']
for mal in malwares:
with open(f"./{mal}/spl.log") as fl:
malware_df.append(pd.read_json(fl, lines=True))
for name in benigns:
with open(f"./{name}/spl.log") as fl:
benign_df.append(pd.read_json(fl, lines=True))
malware_df =
|
pd.concat(malware_df)
|
pandas.concat
|
from pandas import Timestamp, DataFrame, concat, to_datetime
from .api_ref import kline_base
from .utls import fetch
BAR_MULTI = {
"1d": 1,
"1m": 360,
"5m": 75,
"15m": 25,
}
def historical_kline(
symbol: str,
start: Timestamp,
end: Timestamp,
period: str,
type: str = "before",
batch: int = 100
):
start_epoch = int(start.timestamp() * 1000)
end_epoch = int(end.timestamp() * 1000)
estimated_bar_num = ((end - start).days + 1) * BAR_MULTI[period]
all_dfs = []
while True:
query = f"&symbol={symbol}&begin={start_epoch}&period={period}&type={type}&count={min(batch, estimated_bar_num)}"
url = kline_base + query
raw = fetch(url)
df_tmp =
|
DataFrame(raw['item'], columns=raw['column'])
|
pandas.DataFrame
|
import torch
import numpy as np
import pandas as pd
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set_theme()
sns.set(font_scale=3, rc={'text.usetex' : False})
sns.set_theme()
sns.set_style('whitegrid')
import glob
import re
import math
import torch.optim
import torch
import argparse
#from torchvision import models, datasets, transforms
try:
from tqdm import tqdm
except ImportError:
def tqdm(x): return x
def process_epochs(epochs, dirname):
fig = plt.figure()
columns = pd.Index(range(0, len(epochs)), name='layer')
df = pd.DataFrame(epochs, index=['epoch'], columns=columns)
df = df.melt()
s = df.plot(x='layer', y='value', kind='scatter', ylabel='epoch')
s.set(ylabel="epoch")
plt.savefig(fname=os.path.join(dirname, 'epochs.pdf'))
return
def select_min(df):
"""Select the test with the minimal error (usually 0)"""
Idx = pd.IndexSlice
df_min = None
n_layers = len(df.columns.levels[0])
#columns = df.columns.name
indices = np.zeros(n_layers, dtype=int)
for idx in range(n_layers):
# replace NaN with 0
val_min = df.loc[:, (idx, 'error')].min()
mask = df.loc[:, (idx, 'error')] == val_min
indices[idx] = df.loc[mask, (idx, 'loss')].idxmin() # if several min, take the min of them
# the indices for the try that has the minimum training
# error at that epoch
# remove the column index 'try'
cols = pd.MultiIndex.from_product(df.columns.levels, names=df.columns.names) # all but the try
df_min =
|
pd.DataFrame(columns=cols, index=[1])
|
pandas.DataFrame
|
# Databricks notebook source
slides_html="""
<iframe src="https://docs.google.com/presentation/d/1wNQaCy5drc7C5R-bZWp-PA7GLef09SAOr7jCs18uibg/embed?start=true&loop=true&delayms=4000" frameborder="0" width="900" height="560" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>
"""
displayHTML(slides_html)
# COMMAND ----------
# MAGIC %md
# MAGIC #Abstracting Real World Data from Oncology Notes: Entity Extraction
# MAGIC [MT ONCOLOGY NOTES](https://www.mtsamplereports.com/) comprises millions of EHR records of patients. It contains semi-structured data like demographics, insurance details, and a lot more, but most importantly, it also contains free-text data like real encounters and notes.
# MAGIC Here we show how to use Spark NLP's existing models to process raw text and extract highly specialized cancer information that can be used for various downstream use cases, including:
# MAGIC - Staff demand analysis according to specialties.
# MAGIC - Preparing reimbursement-ready data with billable codes.
# MAGIC - Analysis of risk factors of patients and symptoms.
# MAGIC - Analysis of cancer disease and symptoms.
# MAGIC - Drug usage analysis for inventory management.
# MAGIC - Preparing timeline of procedures.
# MAGIC - Relations between internal body parts and procedures.
# MAGIC - Analysis of procedures used on oncological events.
# MAGIC - Checking assertion status of oncological findings.
# COMMAND ----------
# MAGIC %md
# MAGIC #0. Initial configurations
# COMMAND ----------
import os
import json
import string
import numpy as np
import pandas as pd
import sparknlp
import sparknlp_jsl
from sparknlp.base import *
from sparknlp.util import *
from sparknlp.annotator import *
from sparknlp_jsl.base import *
from sparknlp_jsl.annotator import *
from sparknlp.pretrained import ResourceDownloader
from pyspark.sql import functions as F
from pyspark.ml import Pipeline, PipelineModel
from sparknlp.training import CoNLL
pd.set_option('max_colwidth', 100)
pd.set_option('display.max_columns', 100)
pd.set_option('display.expand_frame_repr', False)
print('sparknlp.version : ',sparknlp.version())
print('sparknlp_jsl.version : ',sparknlp_jsl.version())
spark
# COMMAND ----------
# MAGIC %md
# MAGIC ## Download oncology notes
# MAGIC
# MAGIC In this notebook we will use the transcribed medical reports from [www.mtsamples.com](www.mtsamples.com).
# MAGIC
# MAGIC You can download those reports with the script [here](https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/databricks/python/healthcare_case_studies/mt_scrapper.py).
# MAGIC
# MAGIC We will use a slightly modified version of some clinical notes downloaded from [www.mtsamples.com](www.mtsamples.com).
# COMMAND ----------
notes_path='/FileStore/HLS/nlp/data/'
delta_path='/FileStore/HLS/nlp/delta/jsl/'
# COMMAND ----------
dbutils.fs.mkdirs(notes_path)
os.environ['notes_path']=f'/dbfs{notes_path}'
# COMMAND ----------
# MAGIC %sh
# MAGIC cd $notes_path
# MAGIC wget https://hls-eng-data-public.s3.amazonaws.com/data/mt_onc_50.zip
# MAGIC unzip mt_onc_50.zip
# COMMAND ----------
dbutils.fs.ls(f'{notes_path}/mt_onc_50/')
# COMMAND ----------
# MAGIC %md
# MAGIC ## Read Data and Write to Bronze Delta Layer
# MAGIC
# MAGIC There are 50 clinical notes stored in the Delta table. We read the data and write the raw notes into bronze Delta tables.
# COMMAND ----------
df = sc.wholeTextFiles(f'{notes_path}/mt_onc_50/').toDF().withColumnRenamed('_1','path').withColumnRenamed('_2','text')
display(df.limit(5))
# COMMAND ----------
df.count()
# COMMAND ----------
df.write.format('delta').mode('overwrite').save(f'{delta_path}/bronze/mt-oc-notes')
display(dbutils.fs.ls(f'{delta_path}/bronze/mt-oc-notes'))
# COMMAND ----------
sample_text = df.limit(1).select("text").collect()[0]
# COMMAND ----------
# MAGIC %md
# MAGIC ## Setup initial NLP pipelines and stages
# MAGIC First let's define all stages that are common among all downstream pipelines
# COMMAND ----------
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
documentAssemblerResolver = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("ner_chunks")
sentenceDetector = SentenceDetectorDLModel.pretrained("sentence_detector_dl_healthcare","en","clinical/models") \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer()\
.setInputCols(["sentence"])\
.setOutputCol("token")
word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\
.setInputCols(["sentence", "token"])\
.setOutputCol("embeddings")
# COMMAND ----------
base_stages = [
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings
]
# COMMAND ----------
# MAGIC %md
# MAGIC ## Visualize the Entities Using the Spark NLP Display Library
# COMMAND ----------
# MAGIC %md
# MAGIC First, we will create a NER pipeline. Then we can see the labeled entities in the text.
# COMMAND ----------
# Cancer
bionlp_ner = MedicalNerModel.pretrained("ner_bionlp", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("bionlp_ner")\
.setBatchSize(128)\
.setIncludeConfidence(False)
bionlp_ner_converter = NerConverter() \
.setInputCols(["sentence", "token", "bionlp_ner"]) \
.setOutputCol("bionlp_ner_chunk")\
.setWhiteList(["Cancer"])
# Clinical Terminology
jsl_ner = MedicalNerModel.pretrained("jsl_ner_wip_clinical", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("jsl_ner")\
.setBatchSize(128)\
.setIncludeConfidence(False)
jsl_ner_converter = NerConverter() \
.setInputCols(["sentence", "token", "jsl_ner"]) \
.setOutputCol("jsl_ner_chunk")\
.setWhiteList(["Oncological", "Symptom", "Treatment"])
# COMMAND ----------
# MAGIC %md
# MAGIC We used two different NER models (`jsl_ner_wip_clinical` and `bionlp_ner`), so we need to merge their outputs with a chunk merger. The two models produce different oncology-related entity labels, so we will change `Cancer` entities to `Oncological` via the `setReplaceDictResource` parameter. This parameter reads the mapping from a CSV file. Before merging the entities, we create that CSV file with a single row: `Cancer,Oncological`.
# COMMAND ----------
dbutils.fs.put('/tmp/replace_dict.csv','Cancer,Oncological',overwrite=True)
chunk_merger = ChunkMergeApproach()\
.setInputCols("bionlp_ner_chunk","jsl_ner_chunk")\
.setOutputCol("final_ner_chunk")\
.setReplaceDictResource('/tmp/replace_dict.csv',"text", {"delimiter":","})
ner_pipeline= Pipeline(
stages = base_stages+[
bionlp_ner,
bionlp_ner_converter,
jsl_ner,
jsl_ner_converter,
chunk_merger]
)
empty_data = spark.createDataFrame([['']]).toDF("text")
ner_model = ner_pipeline.fit(empty_data)
# COMMAND ----------
# MAGIC %md
# MAGIC Now we will visualize a sample text with `NerVisualizer`. Since `NerVisualizer` works with a LightPipeline, we will create a `light_model` from our `ner_model`.
# COMMAND ----------
light_model = LightPipeline(ner_model)
ann_text = light_model.fullAnnotate(sample_text)[0]
ann_text.keys()
# COMMAND ----------
from sparknlp_display import NerVisualizer
visualiser = NerVisualizer()
# Change color of an entity label
visualiser.set_label_colors({'ONCOLOGICAL':'#ff2e51', 'TREATMENT': '#3bdeff', 'SYMPTOM': '#00ff40' })
ner_vis = visualiser.display(ann_text, label_col='final_ner_chunk',return_html=True)
displayHTML(ner_vis)
# COMMAND ----------
# MAGIC %md
# MAGIC # 1. ICD-10 code extraction
# MAGIC In this step we get ICD-10 codes using entity resolvers and use the data for various use cases.
# MAGIC We can use the `hcc_billable` entity resolver to get ICD-10-CM codes for the identified entities. What is unique about this resolver is that it also provides the HCC risk factor and billable status for each ICD code. We can use this information for many downstream tasks.
# COMMAND ----------
# MAGIC %md
# MAGIC Now we will transform our dataframe using the `ner_model` that we already created, and then collect the `ner_chunks` into a list to use with the resolver LightPipeline.
# COMMAND ----------
ner_res = ner_model.transform(df)
# COMMAND ----------
# MAGIC %md
# MAGIC Optionally, we can also store the `ner_res` data in the bronze Delta layer for future accessibility.
# COMMAND ----------
ner_res.repartition('path').write.format('delta').mode('overwrite').save(f'{delta_path}/bronze/ner-res-notes')
# COMMAND ----------
ner_pdf = ner_res.select("path", F.explode(F.arrays_zip('final_ner_chunk.result',
'final_ner_chunk.metadata')).alias("cols"))\
.select("path", F.expr("cols['0']").alias("final_chunk"),
F.expr("cols['1']['entity']").alias("entity"))\
.toPandas()
ner_chunks = list(ner_pdf.final_chunk)
display(ner_pdf)
# COMMAND ----------
# MAGIC %md
# MAGIC We are creating a resolver PipelineModel with the `document_assembler`, the `sbert_jsl_medium_uncased` embeddings, and the `sbertresolve_icd10cm_slim_billable_hcc_med` resolver.
# COMMAND ----------
sbert_embedder = BertSentenceEmbeddings.pretrained("sbert_jsl_medium_uncased", 'en', 'clinical/models')\
.setInputCols(["ner_chunks"])\
.setOutputCol("sentence_embeddings")
icd10_resolver = SentenceEntityResolverModel.pretrained("sbertresolve_icd10cm_slim_billable_hcc_med","en", "clinical/models")\
.setInputCols(["ner_chunks", "sentence_embeddings"]) \
.setOutputCol("icd10_code")\
.setDistanceFunction("EUCLIDEAN")
icd_pipelineModel = PipelineModel(stages=[
documentAssemblerResolver,
sbert_embedder,
icd10_resolver
])
# COMMAND ----------
icd10_hcc_lp = LightPipeline(icd_pipelineModel)
icd10_hcc_result = icd10_hcc_lp.fullAnnotate(ner_chunks)
# COMMAND ----------
# MAGIC %md
# MAGIC Now we will create a pandas DataFrame to show the results clearly. We will walk through `icd10_hcc_result` line by line and take the ICD-10 code (`icd10_code`), confidence level (`confidence`), all possible codes (`all_k_results`), resolutions of all possible codes (`all_k_resolutions`), and HCC details (`all_k_aux_labels`) for each ICD-10 code.
# COMMAND ----------
tuples = []
for i in range(len(icd10_hcc_result)):
for x,y in zip(icd10_hcc_result[i]["ner_chunks"], icd10_hcc_result[i]["icd10_code"]):
tuples.append((ner_pdf.path.iloc[i],x.result, ner_pdf.entity.iloc[i], y.result, y.metadata["confidence"], y.metadata["all_k_results"], y.metadata["all_k_resolutions"], y.metadata["all_k_aux_labels"]))
icd10_hcc_pdf = pd.DataFrame(tuples, columns=["path", "final_chunk", "entity", "icd10_code", "confidence", "all_codes", "resolutions", "hcc_list"])
codes = []
resolutions = []
hcc_all = []
for code, resolution, hcc in zip(icd10_hcc_pdf['all_codes'], icd10_hcc_pdf['resolutions'], icd10_hcc_pdf['hcc_list']):
codes.append( code.split(':::'))
resolutions.append(resolution.split(':::'))
hcc_all.append(hcc.split(":::"))
icd10_hcc_pdf['all_codes'] = codes
icd10_hcc_pdf['resolutions'] = resolutions
icd10_hcc_pdf['hcc_list'] = hcc_all
# COMMAND ----------
# MAGIC %md
# MAGIC The `billable`, `hcc_status` and `hcc_score` values are separated by `||` in the `hcc_list` column, and we will split them into lists.
# COMMAND ----------
def extract_billable(bil):
billable = []
status = []
score = []
for b in bil:
billable.append(b.split("||")[0])
status.append(b.split("||")[1])
score.append(b.split("||")[2])
return (billable, status, score)
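# e.g. extract_billable(["1||Hcc||18"]) -> (["1"], ["Hcc"], ["18"])
# (a hypothetical aux label; the "billable||status||score" layout is assumed from how the parts are used below)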
icd10_hcc_pdf["hcc_status"] = icd10_hcc_pdf["hcc_list"].apply(extract_billable).apply(pd.Series).iloc[:,1]
icd10_hcc_pdf["hcc_score"] = icd10_hcc_pdf["hcc_list"].apply(extract_billable).apply(pd.Series).iloc[:,2]
icd10_hcc_pdf["billable"] = icd10_hcc_pdf["hcc_list"].apply(extract_billable).apply(pd.Series).iloc[:,0]
icd10_hcc_pdf.drop("hcc_list", axis=1, inplace= True)
icd10_hcc_pdf['icd_codes_names'] = icd10_hcc_pdf['resolutions'].apply(lambda x : x[0].split("[")[0])
icd10_hcc_pdf['icd_code_billable'] = icd10_hcc_pdf['billable'].apply(lambda x : x[0])
# COMMAND ----------
# MAGIC %md
# MAGIC ### Write `icd10_hcc_df` to Delta
# MAGIC Now we write the resolved ICD-10 codes, which also carry the HCC status corresponding to each code, as one of the silver Delta tables in our clinical lakehouse.
# COMMAND ----------
icd10_hcc_df = spark.createDataFrame(icd10_hcc_pdf)
icd10_hcc_df.write.format('delta').mode('overwrite').save(f'{delta_path}/silver/icd10-hcc-df')
# COMMAND ----------
display(icd10_hcc_df.limit(10))
# COMMAND ----------
# MAGIC %md
# MAGIC ## Preparing reimbursement-ready data with billable codes
# MAGIC
# MAGIC Here, we will check how many of the ICD codes are billable.
# COMMAND ----------
print(icd10_hcc_pdf['icd_code_billable'].value_counts())
# COMMAND ----------
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(3,4), dpi=200)
plt.pie(icd10_hcc_pdf['icd_code_billable'].value_counts(),
labels = ["billable", "not billable"],
autopct = "%1.1f%%"
)
plt.title("Ratio Billable & Non-billable Codes", size=10)
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC As we can see, some of the best-matching codes are not billable. For such indications we can find codes that are relevant as well as billable.
# COMMAND ----------
icd10_oncology_mapping = {"C81-C96": "Malignant neoplasms of lymphoid, hematopoietic and related tissue",
"C76-C80": "Malignant neoplasms of ill-defined, other secondary and unspecified sites",
"D00-D09": "In situ neoplasms",
"C51-C58": "Malignant neoplasms of female genital organs",
"C43-C44": "Melanoma and other malignant neoplasms of skin",
"C15-C26": "Malignant neoplasms of digestive organs",
"C73-C75": "Malignant neoplasms of thyroid and other endocrine glands",
"D60-D64": "Aplastic and other anemias and other bone marrow failure syndromes",
"E70-E88": "Metabolic disorders",
"G89-G99": "Other disorders of the nervous system",
"R50-R69": "General symptoms and signs",
"R10-R19": "Symptoms and signs involving the digestive system and abdomen",
"Z00-Z13": "Persons encountering health services for examinations"}
def map_to_parent(x):
charcode = x[0].lower()
numcodes = int(x[1])
for k, v in icd10_oncology_mapping.items():
lower, upper = k.split('-')
if charcode >= lower[0].lower() and numcodes >= int(lower[1]):
if charcode < upper[0].lower():
return v
elif charcode == upper[0].lower() and numcodes <= int(upper[1]):
return v
# COMMAND ----------
icd10_hcc_pdf["onc_code_desc"] = icd10_hcc_pdf["icd10_code"].apply(map_to_parent).fillna("-")
# COMMAND ----------
best_paid_icd_matches = []
indication_with_no_billable_icd = []
for i_, row in icd10_hcc_pdf.iterrows():
if '1' not in row['billable']:
indication_with_no_billable_icd.append([row['final_chunk'],
row['resolutions'][0],
row['all_codes'][0],
row['billable'][0],
row['hcc_score'][0],
row['onc_code_desc'],
"-" ])
else:
n_zero_ind = list(row['billable']).index('1')
best_paid_icd_matches.append([row['final_chunk'],
row['resolutions'][n_zero_ind],
row['all_codes'][n_zero_ind],
row['billable'][n_zero_ind],
row['hcc_score'][n_zero_ind],
row['onc_code_desc'],
n_zero_ind])
best_icd_mapped_pdf = pd.DataFrame(best_paid_icd_matches, columns=['ner_chunk', 'code_desc', 'code' , 'billable',
'corresponding_hcc_score', 'onc_code_desc', 'nearest_billable_code_pos'])
best_icd_mapped_pdf['corresponding_hcc_score'] = pd.to_numeric(best_icd_mapped_pdf['corresponding_hcc_score'], errors='coerce')
best_icd_mapped_pdf.head()
# COMMAND ----------
# MAGIC %md
# MAGIC **All chunks have been mapped to payable ICD codes**
# COMMAND ----------
print(best_icd_mapped_pdf.billable.value_counts())
# COMMAND ----------
# MAGIC %md
# MAGIC ### Write `best_icd_mapped_df` to Delta
# MAGIC Now we can write the reimbursement-ready data with billable codes into a gold Delta layer, which can be accessed for reporting and BI.
# COMMAND ----------
best_icd_mapped_df = spark.createDataFrame(best_icd_mapped_pdf)
best_icd_mapped_df.write.format('delta').mode('overwrite').save(f'{delta_path}/gold/best-icd-mapped')
# COMMAND ----------
display(best_icd_mapped_df.limit(10))
# COMMAND ----------
# MAGIC %md
# MAGIC # 2. Get Drug codes from the notes
# MAGIC
# MAGIC We will create a new pipeline to get drug codes. As the NER model, we are using `ner_posology_large`, and we set the NerConverter's WhiteList to `['DRUG']` in order to get only drug entities.
# COMMAND ----------
## to get drugs
drugs_ner_ing = MedicalNerModel.pretrained("ner_posology_large", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("ner_drug")\
.setIncludeConfidence(False)
drugs_ner_converter_ing = NerConverter() \
.setInputCols(["sentence", "token", "ner_drug"]) \
.setOutputCol("ner_chunk")\
.setWhiteList(["DRUG"])
pipeline_rxnorm_ingredient = Pipeline(
stages = [
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
drugs_ner_ing,
drugs_ner_converter_ing])
data_ner = spark.createDataFrame([['']]).toDF("text")
rxnorm_ner_model = pipeline_rxnorm_ingredient.fit(data_ner)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Visualize Drug Entities
# MAGIC
# MAGIC Now we will visualize a sample text with `NerVisualizer`.
# COMMAND ----------
# MAGIC %md
# MAGIC `NerVisualizer` works with a LightPipeline, so we will create an `rxnorm_ner_lp` from our `rxnorm_ner_model`.
# COMMAND ----------
rxnorm_ner_lp = LightPipeline(rxnorm_ner_model)
ann_text = rxnorm_ner_lp.fullAnnotate(sample_text)[0]
print(ann_text.keys())
# COMMAND ----------
# Creating the visualizer
from sparknlp_display import NerVisualizer
visualiser = NerVisualizer()
# Change color of an entity label
visualiser.set_label_colors({'DRUG':'#008080'})
ner_vis = visualiser.display(ann_text, label_col='ner_chunk',return_html=True)
# Displaying the visualizer
displayHTML(ner_vis)
# COMMAND ----------
# MAGIC %md
# MAGIC Now we will collect the RxNorm NER chunks into a list to use in the resolver pipeline.
# COMMAND ----------
rxnorm_code_res_df = rxnorm_ner_lp.transform(df)
# COMMAND ----------
rxnorm_code_res_pdf = rxnorm_code_res_df.select("path", F.explode(F.arrays_zip('ner_chunk.result',
'ner_chunk.metadata')).alias("cols"))\
.select("path", F.expr("cols['0']").alias("ner_chunk"),
F.expr("cols['1']['entity']").alias("entity")).toPandas()
rxnorm_ner_chunks = list(rxnorm_code_res_pdf.ner_chunk)
# COMMAND ----------
# MAGIC %md
# MAGIC We will create our resolver pipeline and get the RxNorm codes of these NER chunks.
# COMMAND ----------
sbert_embedder = BertSentenceEmbeddings.pretrained("sbiobert_base_cased_mli", 'en', 'clinical/models')\
.setInputCols(["ner_chunks"])\
.setOutputCol("sentence_embeddings")
rxnorm_resolver = SentenceEntityResolverModel.pretrained("sbiobertresolve_rxnorm","en", "clinical/models")\
.setInputCols(["ner_chunks", "sentence_embeddings"]) \
.setOutputCol("rxnorm_code")\
.setDistanceFunction("EUCLIDEAN")
rxnorm_pipelineModel = PipelineModel(stages=[
documentAssemblerResolver,
sbert_embedder,
rxnorm_resolver
])
# COMMAND ----------
rxnorm_resolver_lp = LightPipeline(rxnorm_pipelineModel)
# COMMAND ----------
rxnorm_code_res = rxnorm_resolver_lp.fullAnnotate(rxnorm_ner_chunks)
# COMMAND ----------
# MAGIC %md
# MAGIC We select the columns we need and convert the result to a pandas DataFrame. The values in the `all_codes` and `resolutions` columns are separated by `:::`, and we convert these columns to lists.
# COMMAND ----------
tuples = []
for i in range(len(rxnorm_code_res)):
for x,y in zip(rxnorm_code_res[i]["ner_chunks"], rxnorm_code_res[i]["rxnorm_code"]):
tuples.append((rxnorm_code_res_pdf.path.iloc[i],x.result, y.result, y.metadata["confidence"], y.metadata["all_k_results"], y.metadata["all_k_resolutions"]))
rxnorm_res_cleaned_pdf = pd.DataFrame(tuples, columns=["path", "drug_chunk", "rxnorm_code", "confidence", "all_codes", "resolutions"])
codes = []
resolutions = []
for code, resolution in zip(rxnorm_res_cleaned_pdf['all_codes'], rxnorm_res_cleaned_pdf['resolutions']):
codes.append(code.split(':::'))
resolutions.append(resolution.split(':::'))
rxnorm_res_cleaned_pdf['all_codes'] = codes
rxnorm_res_cleaned_pdf['resolutions'] = resolutions
rxnorm_res_cleaned_pdf['drugs'] = rxnorm_res_cleaned_pdf['resolutions'].apply(lambda x : x[0])
# COMMAND ----------
display(rxnorm_res_cleaned_pdf)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Write `rxnorm_res_cleaned_df` to Delta
# COMMAND ----------
rxnorm_res_cleaned_df = spark.createDataFrame(rxnorm_res_cleaned_pdf)
rxnorm_res_cleaned_df.write.format('delta').mode('overwrite').save(f'{delta_path}/gold/rxnorm-res-cleaned')
# COMMAND ----------
display(rxnorm_res_cleaned_df.limit(10))
# COMMAND ----------
# MAGIC %md
# MAGIC Now we check all posology entities (`DRUG`, `FREQUENCY`, `DURATION`, `STRENGTH`, `FORM`, `DOSAGE` and `ROUTE`) and their RxNorm codes by using the `ner_posology_greedy` model without a WhiteList.
# COMMAND ----------
## to get drugs
drugs_ner_greedy = MedicalNerModel.pretrained("ner_posology_greedy", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("ner_drug")\
.setIncludeConfidence(False)
drugs_ner_converter_ing = NerConverter() \
.setInputCols(["sentence", "token", "ner_drug"]) \
.setOutputCol("ner_chunk")
pipeline_rxnorm_greedy = Pipeline(
stages = [
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
drugs_ner_greedy,
drugs_ner_converter_ing])
data_ner = spark.createDataFrame([['']]).toDF("text")
rxnorm_model_greedy = pipeline_rxnorm_greedy.fit(data_ner)
# COMMAND ----------
# MAGIC %md
# MAGIC Visualize Greedy Algorithm Entities
# COMMAND ----------
# sample_text = df.limit(1).select("text").collect()[0]
rxnorm_greedy_lp = LightPipeline(rxnorm_model_greedy)
ann_text = rxnorm_greedy_lp.fullAnnotate(sample_text)[0]
print(ann_text.keys())
# COMMAND ----------
# Creating the visualizer
from sparknlp_display import NerVisualizer
visualiser = NerVisualizer()
# Change color of DRUG entity label
visualiser.set_label_colors({'DRUG':'#008080'})
ner_vis = visualiser.display(ann_text, label_col='ner_chunk',return_html=True)
# Displaying the visualizer
displayHTML(ner_vis)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's run our greedy chunks through the RxNorm resolver to see what changes.
# COMMAND ----------
rxnorm_code_greedy_res_df = rxnorm_model_greedy.transform(df)
# COMMAND ----------
rxnorm_code_greedy_res_pdf = rxnorm_code_greedy_res_df.select("path", F.explode(F.arrays_zip('ner_chunk.result',
'ner_chunk.metadata')).alias("cols"))\
.select("path", F.expr("cols['0']").alias("ner_chunk"),
F.expr("cols['1']['entity']").alias("entity")).toPandas()
rxnorm_ner_greedy_chunks = list(rxnorm_code_greedy_res_pdf.ner_chunk)
# COMMAND ----------
sbert_embedder = BertSentenceEmbeddings.pretrained("sbiobert_base_cased_mli", 'en', 'clinical/models')\
.setInputCols(["ner_chunks"])\
.setOutputCol("sentence_embeddings")
rxnorm_resolver = SentenceEntityResolverModel.pretrained("sbiobertresolve_rxnorm","en", "clinical/models")\
.setInputCols(["ner_chunks", "sentence_embeddings"]) \
.setOutputCol("rxnorm_code")\
.setDistanceFunction("EUCLIDEAN")
rxnorm_greedy_pipelineModel = PipelineModel(stages=[
documentAssemblerResolver,
sbert_embedder,
rxnorm_resolver
])
# COMMAND ----------
rxnorm_greedy_lp = LightPipeline(rxnorm_greedy_pipelineModel)
# COMMAND ----------
rxnorm_greedy_code_res = rxnorm_greedy_lp.fullAnnotate(rxnorm_ner_greedy_chunks)
# COMMAND ----------
tuples = []
for i in range(len(rxnorm_greedy_code_res)):
for x,y in zip(rxnorm_greedy_code_res[i]["ner_chunks"], rxnorm_greedy_code_res[i]["rxnorm_code"]):
tuples.append((rxnorm_code_greedy_res_pdf.path.iloc[i],x.result, y.result, y.metadata["confidence"], y.metadata["all_k_results"], y.metadata["all_k_resolutions"]))
rxnorm_greedy_res =
|
pd.DataFrame(tuples, columns=["path", "drug_chunk", "rxnorm_code", "confidence", "all_codes", "resolutions"])
|
pandas.DataFrame
|
"""Module providing functions to plot data collected during sleep studies."""
import datetime
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticks
import pandas as pd
import seaborn as sns
from fau_colors import colors_all
from biopsykit.utils.datatype_helper import AccDataFrame, GyrDataFrame, ImuDataFrame, SleepEndpointDict
_sleep_imu_plot_params = {
"background_color": ["#e0e0e0", "#9e9e9e"],
"background_alpha": [0.3, 0.3],
}
_bbox_default = dict(
fc=(1, 1, 1, plt.rcParams["legend.framealpha"]),
ec=plt.rcParams["legend.edgecolor"],
boxstyle="round",
)
def sleep_imu_plot(
data: Union[AccDataFrame, GyrDataFrame, ImuDataFrame],
datastreams: Optional[Union[str, Sequence[str]]] = None,
sleep_endpoints: Optional[SleepEndpointDict] = None,
downsample_factor: Optional[int] = None,
**kwargs,
) -> Tuple[plt.Figure, Iterable[plt.Axes]]:
"""Draw plot to visualize IMU data during sleep, and, optionally, add sleep endpoints information.
Parameters
----------
data : :class:`~pandas.DataFrame`
data to plot. Data must either be acceleration data (:obj:`~biopsykit.utils.datatype_helper.AccDataFrame`),
gyroscope data (:obj:`~biopsykit.utils.datatype_helper.GyrDataFrame`), or IMU data
(:obj:`~biopsykit.utils.datatype_helper.ImuDataFrame`).
datastreams : str or list of str, optional
list of datastreams indicating which type of data should be plotted or ``None`` to only plot acceleration data.
If more than one type of datastream is specified each datastream is plotted row-wise in its own subplot.
Default: ``None``
sleep_endpoints : :obj:`~biopsykit.utils.datatype_helper.SleepEndpointDict`
dictionary with sleep endpoints to add to plot or ``None`` to only plot IMU data.
downsample_factor : int, optional
downsample factor to apply to raw input data before plotting or ``None`` to not downsample data before
plotting (downsample factor 1). Default: ``None``
**kwargs
optional arguments for plot configuration.
To configure which type of sleep endpoint annotations to plot:
* ``plot_sleep_onset``: whether to plot sleep onset annotations or not: Default: ``True``
* ``plot_wake_onset``: whether to plot wake onset annotations or not: Default: ``True``
* ``plot_bed_start``: whether to plot bed interval start annotations or not: Default: ``True``
* ``plot_bed_end``: whether to plot bed interval end annotations or not: Default: ``True``
* ``plot_sleep_wake``: whether to plot vspans of detected sleep/wake phases or not: Default: ``True``
To style general plot appearance:
* ``axs``: pre-existing axes for the plot. Otherwise, a new figure and axes objects are created and
returned.
* ``figsize``: tuple specifying figure dimensions
* ``palette``: color palette to plot different axes from input data
To style axes:
* ``xlabel``: label of x axis. Default: "Time"
* ``ylabel``: label of y axis. Default: "Acceleration :math:`[m/s^2]`" for acceleration data and
"Angular Velocity :math:`[°/s]`" for gyroscope data
To style legend:
* ``legend_loc``: location of legend. Default: "lower left"
* ``legend_fontsize``: font size of legend labels. Default: "smaller"
Returns
-------
fig : :class:`~matplotlib.figure.Figure`
figure object
axs : list of :class:`~matplotlib.axes.Axes`
list of subplot axes objects
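Examples
--------
A minimal sketch with synthetic accelerometer data (the ``acc_*`` column names
and the 10 Hz datetime index are assumptions for illustration only):
>>> import numpy as np
>>> import pandas as pd
>>> index = pd.date_range("2021-01-01 23:00", periods=1000, freq="100ms", tz="Europe/Berlin")
>>> acc = pd.DataFrame(np.random.randn(1000, 3), index=index, columns=["acc_x", "acc_y", "acc_z"])
>>> fig, axs = sleep_imu_plot(acc, downsample_factor=10)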
"""
axs: List[plt.Axes] = kwargs.pop("ax", kwargs.pop("axs", None))
sns.set_palette(kwargs.get("palette", sns.light_palette(getattr(colors_all, "fau"), n_colors=4, reverse=True)[:-1]))
if datastreams is None:
datastreams = ["acc"]
if isinstance(datastreams, str):
# ensure list
datastreams = [datastreams]
fig, axs = _sleep_imu_plot_get_fig_axs(axs, len(datastreams), **kwargs)
downsample_factor = _sleep_imu_plot_get_downsample_factor(downsample_factor)
if len(datastreams) != len(axs):
raise ValueError(
"Number of datastreams to be plotted must match number of provided subplots! Expected {}, got {}.".format(
len(datastreams), len(axs)
)
)
for ax, ds in zip(axs, datastreams):
_sleep_imu_plot(
data=data,
datastream=ds,
downsample_factor=downsample_factor,
sleep_endpoints=sleep_endpoints,
ax=ax,
**kwargs,
)
fig.tight_layout()
fig.autofmt_xdate(rotation=0, ha="center")
return fig, axs
def _sleep_imu_plot_get_fig_axs(axs: List[plt.Axes], nrows: int, **kwargs):
figsize = kwargs.get("figsize", None)
if isinstance(axs, plt.Axes):
# ensure list (if only one Axes object is passed to sleep_imu_plot() instead of a list of Axes objects)
axs = [axs]
if axs is None:
fig, axs = plt.subplots(figsize=figsize, nrows=nrows)
else:
fig = axs[0].get_figure()
if isinstance(axs, plt.Axes):
# ensure list (if nrows == 1 only one axes object will be created, not a list of axes)
axs = [axs]
return fig, axs
def _sleep_imu_plot_get_downsample_factor(downsample_factor: int):
if downsample_factor is None:
downsample_factor = 1
# ensure int
downsample_factor = int(downsample_factor)
if downsample_factor < 1:
raise ValueError("'downsample_factor' must be >= 1!")
return downsample_factor
def _sleep_imu_plot(
data: pd.DataFrame,
datastream: str,
downsample_factor: int,
sleep_endpoints: SleepEndpointDict,
ax: plt.Axes,
**kwargs,
):
legend_loc = kwargs.get("legend_loc", "lower left")
legend_fontsize = kwargs.get("legend_fontsize", "smaller")
ylabel = kwargs.get("ylabel", {"acc": "Acceleration [$m/s^2$]", "gyr": "Angular Velocity [$°/s$]"})
xlabel = kwargs.get("xlabel", "Time")
if isinstance(data.index, pd.DatetimeIndex):
plt.rcParams["timezone"] = data.index.tz.zone
data_plot = data.filter(like=datastream)[::downsample_factor]
data_plot.plot(ax=ax)
if sleep_endpoints is not None:
kwargs.setdefault("ax", ax)
_sleep_imu_plot_add_sleep_endpoints(sleep_endpoints=sleep_endpoints, **kwargs)
if isinstance(data_plot.index, pd.DatetimeIndex):
# TODO add axis style for non-Datetime axes
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
ax.xaxis.set_minor_locator(mticks.AutoMinorLocator(6))
ax.set_ylabel(ylabel[datastream])
ax.set_xlabel(xlabel)
ax.legend(loc=legend_loc, fontsize=legend_fontsize, framealpha=1.0)
def _sleep_imu_plot_add_sleep_endpoints(sleep_endpoints: SleepEndpointDict, **kwargs):
bed_start = pd.to_datetime(sleep_endpoints["bed_interval_start"])
bed_end = pd.to_datetime(sleep_endpoints["bed_interval_end"])
sleep_onset = pd.to_datetime(sleep_endpoints["sleep_onset"])
wake_onset =
|
pd.to_datetime(sleep_endpoints["wake_onset"])
|
pandas.to_datetime
|
import logging
import itertools
import numpy
import pandas
def trade_pair(pair_code, bid, ask, volume):
"""
Computes the balance after the operation takes place.
Example:
XXLMXXBT 38092.21 0.000008210 0.000008340 121.618 --> With a volume of 1 we go long 0.000008210 XXBT and short 1 XXLM
:param pair_code:
:param bid:
:param ask:
:param volume:
:return:
"""
currency_first = pair_code[:4]
currency_second = pair_code[4:]
balance = {currency_first: 0, currency_second: 0}
trade = None
if volume > 0:
allowed_volume = min(volume, bid['volume'])
capped = numpy.NaN
if allowed_volume < volume:
capped = allowed_volume
balance = {currency_first: allowed_volume * -1, currency_second: allowed_volume * bid['price']}
trade = {'direction': 'buy', 'pair': pair_code, 'quantity': allowed_volume, 'price': bid['price'],
'capped': capped}
elif volume < 0:
allowed_volume = min(abs(volume), ask['volume'])
capped = numpy.NaN
if allowed_volume < abs(volume):
capped = allowed_volume
balance = {currency_first: allowed_volume, currency_second: allowed_volume * ask['price'] * -1}
trade = {'direction': 'sell', 'pair': pair_code, 'quantity': allowed_volume, 'price': ask['price'],
'capped': capped}
return balance, trade
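# A minimal usage sketch (hypothetical order-book entries, made up for illustration),
# matching the XXLMXXBT docstring example above:
#
#   bid = {'price': 0.000008210, 'volume': 121.618}
#   ask = {'price': 0.000008340, 'volume': 95.0}
#   balance, trade = trade_pair('XXLMXXBT', bid, ask, volume=1)
#   # balance == {'XXLM': -1, 'XXBT': 8.21e-06}; trade records direction 'buy', quantity 1, capped NaN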
def buy_currency_using_pair(currency, volume, pair_code, bid, ask):
"""
:param currency:
:param volume: amount to buy denominated in currency
:param pair_code:
:param bid:
:param ask:
:return:
"""
logging.info('buying {} {} using {}'.format(volume, currency, pair_code))
if pair_code[4:] == currency:
# Direct quotation
logging.debug('direct quotation')
target_volume = volume / bid['price']
balance, performed_trade = trade_pair(pair_code, bid, ask, round(target_volume, 10))
else:
# Indirect quotation
logging.debug('indirect quotation')
balance, performed_trade = trade_pair(pair_code, bid, ask, volume * -1)
return balance, performed_trade
def sell_currency_using_pair(currency, volume, pair_code, bid, ask):
"""
:param currency:
:param volume: amount to buy denominated in currency
:param pair_code:
:param bid:
:param ask:
:return:
"""
logging.info('selling {} {} using {}'.format(volume, currency, pair_code))
if pair_code[4:] == currency:
# Direct quotation
logging.debug('direct quotation')
target_volume = -1 * volume / ask['price']
balance, performed_trade = trade_pair(pair_code, bid, ask, round(target_volume, 10))
else:
# Indirect quotation
logging.debug('indirect quotation')
balance, performed_trade = trade_pair(pair_code, bid, ask, volume)
return balance, performed_trade
def calculate_arbitrage_opportunity(pair_1, pair_bid_1, pair_ask_1, pair_2, pair_bid_2, pair_ask_2, pair_3, pair_bid_3,
pair_ask_3, skip_capped=True):
"""
:param pair_1:
:param pair_bid_1:
:param pair_ask_1:
:param pair_2:
:param pair_bid_2:
:param pair_ask_2:
:param pair_3:
:param pair_bid_3:
:param pair_ask_3:
:param skip_capped:
:return: (trades, balances)
"""
pairs = [pair_1, pair_2, pair_3]
pair_bids = [pair_bid_1, pair_bid_2, pair_bid_3]
pair_asks = [pair_ask_1, pair_ask_2, pair_ask_3]
results = list()
for first, second, third in itertools.permutations([0, 1, 2]):
currency_initial = pairs[first][4:]
initial_bid = pair_bids[first]
initial_ask = pair_asks[first]
if currency_initial in pairs[second]:
next_pair = pairs[second]
next_bid = pair_bids[second]
next_ask = pair_asks[second]
final_pair = pairs[third]
final_bid = pair_bids[third]
final_ask = pair_asks[third]
else:
next_pair = pairs[third]
next_bid = pair_bids[third]
next_ask = pair_asks[third]
final_pair = pairs[second]
final_bid = pair_bids[second]
final_ask = pair_asks[second]
if next_pair[:4] != currency_initial:
currency_next = next_pair[:4]
else:
currency_next = next_pair[4:]
balance_initial, trade_initial = buy_currency_using_pair(currency_initial, 1, pairs[first], initial_bid,
initial_ask)
balance_next, trade_next = sell_currency_using_pair(currency_initial, balance_initial[currency_initial],
next_pair, next_bid, next_ask)
balance_final, trade_final = sell_currency_using_pair(currency_next, balance_next[currency_next], final_pair,
final_bid, final_ask)
balance1_series = pandas.Series(balance_initial, name='initial')
balance2_series = pandas.Series(balance_next, name='next')
balance3_series =
|
pandas.Series(balance_final, name='final')
|
pandas.Series
|
from collections import defaultdict
from datetime import datetime
from fleiss_kappa import fleiss_kappa
import csv
import convert_m2
import sys
import fce_api as fd
import itertools
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import re
import settings
def get_annotations(golden=defaultdict(list)):
"""
Retrieves all annotations per sentence from AMT batch
Args:
golden: expert annotation per sentence: opt
Returns:
result_annotations: dict of the form sentence-> annotations
"""
result_annotations = defaultdict(list)
with open(settings.AMT_FILE) as file:
csv_reader = csv.DictReader(file)
for row in csv_reader:
sentence = row['Input.sentence']
answer = json.loads(row['Answer.ChosenWord'])
for annotation in answer['selectedTokens']:
result_annotations[sentence].append(annotation['start'])
for key in result_annotations.keys():
for error_index in golden[key]:
result_annotations[key].append(error_index)
return result_annotations
def create_confusion_matrix(data, predictions):
"""
Produces a confusion matrix in a form of a dictionary from (gold_label,guess_label)` pairs to counts.
Args:
data: list containing the gold labels.
predictions: list containing the prediction labels
Returns:
confusion matrix in form of dictionary with counts for (gold_label, guess_label)
"""
confusion = defaultdict(int)
for y_gold, y_guess in zip(data, predictions):
confusion[(y_gold, y_guess)] += 1
return confusion
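# Example: with gold labels [1, 1, 0] and predictions [1, 0, 0], the returned
# dict is {(1, 1): 1, (1, 0): 1, (0, 0): 1}.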
def plot_confusion_matrix_dict(matrix_dict, classes=None, rotation=45, outside_label=''):
"""
Plots the confusion matrix
Args:
matrix_dict: the dict of confusion matrix - output of create_confusion_matrix
classes: list containing the classes for the category labels, if empty, whole numbering will be used for
category names
rotation: the degree orientation of the axis labels
outside_label: the label to disregard - excluded by default
"""
labels = set([y for y, _ in matrix_dict.keys()] + [y for _, y in matrix_dict.keys()])
sorted_labels = sorted(labels, key=lambda x: -x)
matrix = np.zeros((len(sorted_labels), len(sorted_labels)))
for i1, y1 in enumerate(sorted_labels):
for i2, y2 in enumerate(sorted_labels):
if y1 != outside_label or y2 != outside_label:
matrix[i1, i2] = matrix_dict[y1, y2]
threshold = matrix.max() / 2.
for i, j in itertools.product(range(matrix.shape[0]), range(matrix.shape[1])):
plt.text(j, i, int(matrix[i, j]),
horizontalalignment="center",
color="white" if matrix[i, j] > threshold else "black")
plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.Blues)
plt.colorbar()
if classes is None:
classes = sorted_labels
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=rotation)
plt.yticks(tick_marks, classes)
plt.xlabel('turker labels')
plt.ylabel('gold labels')
plt.tight_layout()
plt.show()
def full_evaluation_table(confusion_matrix, classes=list()):
"""
Produces a pandas data-frame with Precision, F1 and Recall for all labels.
Args:
confusion_matrix: the confusion matrix to calculate metrics from.
classes: the categories of the confusion matrix
Returns:
a pandas Dataframe with one row per gold label, and one more row for the aggregate of all labels.
"""
labels = sorted(list({l for l, _ in confusion_matrix.keys()} | {l for _, l in confusion_matrix.keys()}))
if len(labels) == len(classes):
labels = classes
gold_counts = defaultdict(int)
guess_counts = defaultdict(int)
for (gold_label, guess_label), count in confusion_matrix.items():
if gold_label != "None":
gold_counts[gold_label] += count
gold_counts["[All]"] += count
if guess_label != "None":
guess_counts[guess_label] += count
guess_counts["[All]"] += count
result_table = []
for label in labels:
if label != "None":
result_table.append((label, gold_counts[label], guess_counts[label], *evaluate(confusion_matrix, {label})))
result_table.append(("[All]", gold_counts["[All]"], guess_counts["[All]"], *evaluate(confusion_matrix)))
return
|
pd.DataFrame(result_table, columns=('Label', 'Gold', 'Guess', 'Precision', 'Recall', 'F1'))
|
pandas.DataFrame
|
from pathlib import Path
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from message_ix import Scenario, macro
from message_ix.models import MACRO
from message_ix.testing import SCENARIO, make_westeros
W_DATA_PATH = Path(__file__).parent / "data" / "westeros_macro_input.xlsx"
MR_DATA_PATH = Path(__file__).parent / "data" / "multiregion_macro_input.xlsx"
class MockScenario:
def __init__(self):
self.data = pd.read_excel(MR_DATA_PATH, sheet_name=None, engine="openpyxl")
for name, df in self.data.items():
if "year" in df:
df = df[df.year >= 2030]
self.data[name] = df
def has_solution(self):
return True
def var(self, name, **kwargs):
df = self.data["aeei"]
# Add extra commodity to be removed
extra_commod = df[df.sector == "i_therm"].copy()
extra_commod["sector"] = "bar"
# Add extra region to be removed
extra_region = df[df.node == "R11_AFR"].copy()
extra_region["node"] = "foo"
df = pd.concat([df, extra_commod, extra_region])
if name == "DEMAND":
df = df.rename(columns={"sector": "commodity"})
elif name in ["COST_NODAL_NET", "PRICE_COMMODITY"]:
df = df.rename(columns={"sector": "commodity", "value": "lvl"})
df["lvl"] = 1e3
return df
@pytest.fixture(scope="class")
def westeros_solved(test_mp):
yield make_westeros(test_mp, solve=True, quiet=True)
@pytest.fixture(scope="class")
def westeros_not_solved(westeros_solved):
yield westeros_solved.clone(keep_solution=False)
def test_calc_valid_data_file(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
def test_calc_invalid_data(westeros_solved):
with pytest.raises(TypeError, match="neither a dict nor a valid path"):
macro.Calculate(westeros_solved, list())
with pytest.raises(ValueError, match="not an Excel data file"):
macro.Calculate(westeros_solved, Path(__file__).joinpath("other.zip"))
def test_calc_valid_data_dict(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
c = macro.Calculate(s, data)
c.read_data()
# Test for selecting desirable years specified in config from the Excel input
def test_calc_valid_years(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
# Adding an arbitrary year
arbitrary_yr = 2021
gdp_extra_yr = data["gdp_calibrate"].iloc[0, :].copy()
gdp_extra_yr["year"] = arbitrary_yr
data["gdp_calibrate"] = data["gdp_calibrate"].append(gdp_extra_yr)
# Check the arbitrary year is not in config
assert arbitrary_yr not in data["config"]["year"]
# But it is in gdp_calibrate
assert arbitrary_yr in set(data["gdp_calibrate"]["year"])
# And macro does calibration without error
c = macro.Calculate(s, data)
c.read_data()
def test_calc_no_solution(westeros_not_solved):
s = westeros_not_solved
pytest.raises(RuntimeError, macro.Calculate, s, W_DATA_PATH)
def test_config(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
assert "config" in c.data
assert "sector" in c.data["config"]
# Removing a column from config and testing
data = c.data.copy()
data["config"] = c.data["config"][["node", "sector"]]
try:
macro.Calculate(s, data)
except KeyError as error:
assert 'Missing config data for "level"' in str(error)
# Removing config completely and testing
data.pop("config")
try:
macro.Calculate(s, data)
except KeyError as error:
assert "Missing config in input data" in str(error)
c.read_data()
assert c.nodes == set(["Westeros"])
assert c.sectors == set(["light"])
def test_calc_data_missing_par(westeros_solved):
s = westeros_solved
data =
|
pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
|
pandas.read_excel
|
#! /usr/bin/python
import json
import altair
import pandas
import datetime
import bs4
import os
import csv
import statistics
import locale
# define constants
# TODO: Clean up to remove duplicate calls for yesterday
workingDir = os.getcwd()
yesterdayDate = datetime.date.today() - datetime.timedelta(1)
yesterday = yesterdayDate.strftime('%Y-%m-%d')
yesterdayDay = yesterdayDate.day
yesterdayDayName = yesterdayDate.strftime("%A")
yesterdayMonth = yesterdayDate.month
yesterdayMonthName = yesterdayDate.strftime("%B")
yesterdayYear= yesterdayDate.year
yesterdayYearName = yesterdayDate.strftime("%Y")
locale.setlocale(locale.LC_ALL, 'en_CA')
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
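# e.g. ordinal(1) -> '1st', ordinal(2) -> '2nd', ordinal(11) -> '11th', ordinal(23) -> '23rd'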
#load weather data
#weatherData = pandas.read_csv('weatherData'+counterList['VictoriaWeatherStation'][0]+'.csv',parse_dates=['Date']).set_index('Date')
weatherData = pandas.read_csv('weatherData114.csv',parse_dates=['Date']).set_index('Date')
#read in counters list
counterList = pandas.read_csv('countersToVisualize.csv',parse_dates=['FirstDate','FirstFullYear'], dtype={'VictoriaWeatherStation': str})
for index,row in counterList.iterrows():
print(row[['CounterID']][0])
counterName = row[['CounterName']][0]
counterStartDate = row[['FirstDate']][0]
#load data
countFile = "counts-" + str(row[['CounterID']][0]) + ".csv"
countExportFile = "counts-" + str(row[['CounterID']][0]) + "-export.csv"
countData = pandas.read_csv(countFile,parse_dates=['Date'])
specialDateFile = "specialDates.csv"
specialDateData =
|
pandas.read_csv(specialDateFile,parse_dates=['Date'])
|
pandas.read_csv
|
import sys, os, gzip, shutil
import datetime as dt
import pandas as pd
import numpy as np
import holidays
#UK path
# PEMSDIR = r'D:\PeMS'
# OUTDIR = r'D:\PeMS'
#SFCTA paths
PEMSDIR = r'Q:\Data\Observed\Streets\PeMS\CMP'
OUTDIR = r'Q:\CMP\LOS Monitoring 2021\PeMS\test_output'
monitor_loc = pd.read_csv(os.path.join(PEMSDIR,'pems_monitoring_locations.csv'))
stations = pd.read_csv(os.path.join(PEMSDIR, r'D4_Data_2021\station_meta\d04_text_meta_2021_03_19.txt'), delimiter="\t")
#PeMS stations in SF
sf_stations = stations[stations['County']==75]
data_type = 'station_5min'
district = 4
ca_holidays = holidays.UnitedStates(state='CA')
obs_pct_min = 20 # Minimum observation percentage requirement
sample_pct_min = 50 # Minimum sample percentage requirement
def get_dir(base, year=2021, data_type='station_hour', district=4):
if data_type in ['station_hour','station_5min','station_meta']:
return os.path.join(PEMSDIR, r'D{}_Data_{}\{}'.format(district, year, data_type))
def get_columns(data_type, num_cols):
if data_type == 'station_meta':
columns = ['station','route','dir','district','county','city','state_postmile','abs_postmile','latitude','longitude',
'length','type','lanes','name','user_id_1','user_id_2','user_id_3','user_id_4']
if data_type == 'station_hour':
columns = ['timestamp', 'station', 'district', 'route', 'dir', 'lane_type', 'station_length',
'samples', 'obs_pct', 'total_flow', 'avg_occupancy', 'avg_speed',
'delay_35','delay_40','delay_45','delay_50','delay_55','delay_60']
for i in range(0, int((num_cols - 18) / 3)):
columns += [f'lane_{i}_flow',
f'lane_{i}_avg_occ',
f'lane_{i}_avg_speed',
]
if data_type == 'station_5min':
columns = ['timestamp', 'station', 'district', 'route', 'dir', 'lane_type', 'station_length',
'samples', 'obs_pct', 'total_flow', 'avg_occupancy', 'avg_speed']
for i in range(0, int((num_cols - 12) / 5)):
columns += [f'lane_{i}_samples',
f'lane_{i}_flow',
f'lane_{i}_avg_occ',
f'lane_{i}_avg_speed',
f'lane_{i}_avg_obs',
]
return columns
unzip = False
source = 'gz' # or 'zip','text','txt'
save_h5 = True
data_type = 'station_5min'
sep = ','
for year in np.arange(2021,2022):
year_dfs = []
path = get_dir(PEMSDIR, year, data_type, district)
outpath = os.path.join(OUTDIR,'pems')
contents = os.listdir(path)
gzs = filter(lambda x: os.path.splitext(x)[1] == '.gz', contents)
txts = filter(lambda x: os.path.splitext(x)[1] == '.txt', contents)
if source == 'gz':
files = gzs
compression = 'gzip'
else:
files = txts
compression = None
header = 0 if data_type == 'station_meta' else None
for f in files:
print(f)
try:
df = pd.read_csv(os.path.join(path, f),
sep=sep,
header=header,
index_col=False,
parse_dates=[0],
infer_datetime_format=True,
compression=compression)
except Exception as e:
print(e)
print('trying no quotechar...')
try:
df = pd.read_csv(os.path.join(path, f),
sep=sep,
header=header,
index_col=False,
parse_dates=[0],
infer_datetime_format=True,
quotechar=None,
compression=compression)
except Exception as e2:
print(e2)
continue
try:
df.columns = get_columns(data_type, len(df.columns))
except Exception as e3:
print(e3)
continue
if data_type == 'station_meta':
y, m, d = f.replace('d{:02d}_text_meta_'.format(district),'').replace('.txt','').split('_')
ts = dt.datetime(int(y), int(m), int(d))
date = ts.date()
df['timestamp'] = ts
df['date'] = date
df['year'] = y
df['month'] = m
df['day'] = d
meta.append(df)
elif data_type == 'station_hour':
df['date'] = df['timestamp'].map(lambda x: x.date())
df['year'] = df['timestamp'].map(lambda x: x.year)
df['month'] = df['timestamp'].map(lambda x: x.month)
df['day'] = df['timestamp'].map(lambda x: x.day)
df['hour'] = df['timestamp'].map(lambda x: x.hour)
df['day_of_week'] = df['timestamp'].map(lambda x: x.weekday())
df['is_holiday'] = df['timestamp'].map(lambda x: x.date() in ca_holidays)
year_dfs.append(df)
elif data_type == 'station_5min':
df['date'] = df['timestamp'].map(lambda x: x.date())
df['year'] = df['timestamp'].map(lambda x: x.year)
df['month'] = df['timestamp'].map(lambda x: x.month)
df['day'] = df['timestamp'].map(lambda x: x.day)
df['hour'] = df['timestamp'].map(lambda x: x.hour)
df['minute'] = df['timestamp'].map(lambda x: x.minute)
df['day_of_week'] = df['timestamp'].map(lambda x: x.weekday())
df['is_holiday'] = df['timestamp'].map(lambda x: x.date() in ca_holidays)
year_dfs.append(df)
y =
|
pd.concat(year_dfs)
|
pandas.concat
|
import shutil
from logging import Logger
from pathlib import Path
import pandas as pd
from the_census._config import Config
from the_census._persistence.interface import ICache
from the_census._utils.log.factory import ILoggerFactory
from the_census._utils.timer import timer
LOG_PREFIX = "[On-Disk Cache]"
class OnDiskCache(ICache[pd.DataFrame]):
_config: Config
_logger: Logger
def __init__(self, config: Config, logger_factory: ILoggerFactory) -> None:
self._config = config
self._logger = logger_factory.getLogger(__name__)
self._cache_path = Path(
f"{config.cache_dir}/{config.year}/{config.dataset}/{config.survey}"
)
if not self._config.should_cache_on_disk:
self._logger.debug("Not creating an on-disk cache")
return
self._logger.debug(f"creating cache for {self._cache_path}")
self.__set_up_on_disk_cache()
@timer
def __set_up_on_disk_cache(self) -> None:
self._logger.debug("setting up on disk cache")
if not self._config.should_load_from_existing_cache:
self._logger.debug("purging on disk cache")
if Path(self._config.cache_dir).exists():
shutil.rmtree(self._config.cache_dir)
self._cache_path.mkdir(parents=True, exist_ok=True)
@timer
def put(self, resource: str, data: pd.DataFrame) -> bool:
if not self._config.should_cache_on_disk:
return True
path = self._cache_path.joinpath(Path(resource))
if path.exists():
self._logger.debug(f'resource "{resource}" already exists; terminating')
return False
path.parent.mkdir(parents=True, exist_ok=True)
self._logger.debug(f'persisting "{path}" on disk')
data.to_csv(str(path.absolute()), index=False)
return True
@timer
def get(self, resource: str) -> pd.DataFrame:
if (
not self._config.should_load_from_existing_cache
or not self._config.should_cache_on_disk
):
return
|
pd.DataFrame()
|
pandas.DataFrame
|
import argparse
import time
from timm.utils.metrics import mAP_score
from torch._C import dtype
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
import numpy as np
from timm.data import create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, resume_checkpoint, load_checkpoint, convert_splitbn_model
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
# from timm.data import LoadImagesAndLabels,preprocess,LoadImagesAndLabelsV2,LoadImagesAndSoftLabels
from timm.utils import ApexScaler, auc_score
from timm.utils import Visualizer
from timm.data import get_riadd_train_transforms, get_riadd_valid_transforms,get_riadd_test_transforms
from timm.data import RiaddDataSet,RiaddDataSet9Classes
import os
from tqdm import tqdm
import random
import torch.distributed as dist
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
#os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
CFG = {
'seed': 42,
'img_size': 224,
'valid_bs': 10,
'num_workers': 4,
'num_classes': 29,
'tta': 3,
'models': [#'b6-ns-768/tf_efficientnet_b6_ns-768-fold0-model_best.pth.tar',
#'b5-ns-960/tf_efficientnet_b5_ns-960-fold0-model_best.pth.tar',
'20210910-205105-vit_base_patch16_384-384/model_best.pth.tar'],
'base_img_path': 'C:/Users/AI/Desktop/student_Manuel/datasets/RIADD_cropped/Evaluation_Set/Evaluation',
'weights': [1]
}
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def validate(model, loader):
model.eval()
preds = []
pbar = tqdm(enumerate(loader), total=len(loader))
with torch.no_grad():
for batch_idx, (input, target) in pbar:
input = input.cuda()
target = target.cuda()
target = target.float()
output = model(input)
preds.append(output.sigmoid().to('cpu').numpy())
predictions = np.concatenate(preds)
return predictions
if __name__ == '__main__':
from sklearn.model_selection import KFold,StratifiedKFold,GroupKFold
import pandas as pd
import torch.utils.data as data
seed_everything(CFG['seed'])
data_ = pd.read_csv('C:/Users/AI/Desktop/student_Manuel/datasets/RIADD_cropped/Evaluation_Set/RFMiD_Validation_Labels.csv')
test_index = [i for i in range(data_.shape[0])]
test_data = data_.iloc[test_index, :].reset_index(drop=True)
#print(test_data.head())
#print(test_index)
test_transforms = get_riadd_test_transforms(CFG)
test_dataset = RiaddDataSet(image_ids = test_data,transform = test_transforms, baseImgPath = CFG['base_img_path'])
test_data_loader = data.DataLoader( test_dataset,
batch_size=CFG['valid_bs'],
shuffle=False,
num_workers=CFG['num_workers'],
pin_memory=True,
drop_last=False,
sampler = None)
imgIds = test_data.iloc[test_index,0].tolist()
target_cols = test_data.iloc[test_index, 1:].columns.tolist()
#print(target_cols)
test =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Class to process raw TomTom MultiNet data into a network dataset.
Copyright 2022 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import time
import functools
import datetime
import os
import uuid
import enum
from lxml import etree
import arcpy
CURDIR = os.path.dirname(os.path.abspath(__file__))
LNG_CODES = {
"ALB": "sq", # Albanian
"ALS": "", # Alsacian
"ARA": "ar", # Arabic
"BAQ": "eu", # Basque
"BAT": "", # Baltic (Other)
"BEL": "be", # Belarusian
"BET": "be", # Belarusian (Latin)
"BOS": "bs", # Bosnian
"BRE": "br", # Breton
"BUL": "bg", # Bulgarian
"BUN": "bg", # Bulgarian (Latin)
"BUR": "my", # Burmese
"CAT": "ca", # Catalan
"CEL": "", # Celtic (Other)
"CHI": "zh", # Chinese, Han Simplified
"CHL": "zh", # Chinese, Mandarin Pinyin
"CHT": "zh", # Chinese, Han Traditional
"CTN": "zh", # Chinese, Cantonese Pinyin
"CZE": "cs", # Czech
"DAN": "da", # Danish
"DUT": "nl", # Dutch
"ENG": "en", # English
"EST": "et", # Estonian
"FAO": "fo", # Faroese
"FIL": "", # Filipino
"FIN": "fi", # Finnish
"FRE": "fr", # French
"FRY": "fy", # Frisian
"FUR": "", # Friulian
"GEM": "", # Franco-Provencal
"GER": "de", # German
"GLA": "gd", # Gaelic (Scots)
"GLE": "ga", # Irish
"GLG": "gl", # Galician
"GRE": "el", # Greek (Modern)
"GRL": "el", # Greek (Latin Transcription)
"HEB": "he", # Hebrew
"HIN": "hi", # Hindi
"HUN": "hu", # Hungarian
"ICE": "is", # Icelandic
"IND": "id", # Indonesian
"ITA": "it", # Italian
"KHM": "km", # Khmer
"KOL": "ko", # Korean (Latin)
"KOR": "ko", # Korean
"LAD": "", # Ladin
"LAO": "lo", # Lao
"LAT": "la", # Latin
"LAV": "lv", # Latvian
"LIT": "lt", # Lithuanian
"LTZ": "lb", # Letzeburgesch
"MAC": "mk", # Macedonian
"MAP": "", # Austronesian (Other)
"MAT": "mk", # Macedonian (Latin Transcription)
"MAY": "ms", # Malaysian
"MLT": "mt", # Maltese
"MOL": "mo", # Moldavian
"MYN": "", # Mayan Languages
"NOR": "no", # Norwegian
"OCI": "oc", # Occitan
"PAA": "", # Papuan-Australian (Other)
"POL": "pl", # Polish
"POR": "pt", # Portuguese
"PRO": "", # Provencal
"ROA": "", # Romance (Other)
"ROH": "rm", # Raeto-Romance
"ROM": "", # Romani
"RUL": "ru", # Russian (Latin Transcription)
"RUM": "ro", # Romanian
"RUS": "ru", # Russian
"SCC": "sh", # Serbian (Latin)
"SCO": "gd", # Scots
"SCR": "sh", # Croatian
"SCY": "sh", # Serbian (Cyrillic)
"SLA": "cu", # Slavic
"SLO": "sk", # Slovak
"SLV": "sv", # Slovenian
"SMC": "", # Montenegrin (Cyrillic)
"SMI": "se", # Lapp (Sami)
"SML": "", # Montenegrin (Latin)
"SPA": "es", # Spanish
"SRD": "sc", # Sardinian
"SWE": "sv", # Swedish
"THA": "th", # Thai
"THL": "th", # Thai (Latin)
"TUR": "tr", # Turkish
"UKL": "uk", # Ukranian (Latin)
"UKR": "uk", # Ukranian
"UND": "", # Undefined
"VAL": "ca", # Valencian
"VIE": "vi", # Vietnamese
"WEL": "cy", # Welsh
"WEN": "", # Sorbian (Other)
}
PRINT_TIMINGS = False # Set to True to log timings for various methods (primarily for debugging and development)
def timed_exec(func):
"""Measure time in seconds to execute a function.
This function is meant to be used as a decorator on a function.
Args:
func: The decorated function that is being timed
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Wrap the function to be run."""
# Using an inner function so the timing can happen directly around the function under test.
def inner_func():
t0 = time.time()
return_val = func(*args, **kwargs)
if PRINT_TIMINGS:
arcpy.AddMessage(f"Time to run {func.__name__}: {time.time() - t0}")
return return_val
return inner_func()
return wrapper
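# Illustrative usage sketch (hypothetical helper, not part of the original toolbox): any function
# decorated with @timed_exec is timed, and the elapsed seconds are written via arcpy.AddMessage
# only when PRINT_TIMINGS is True.
@timed_exec
def _demo_timed_sum(n):
    """Sum the first n integers; defined only to illustrate the decorator."""
    return sum(range(n))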
class TimeZoneType(enum.Enum):
"""Defines the time zone type to use."""
NoTimeZone = 1
Single = 2
Table = 3
class UnitType(enum.Enum):
"""Defines whether the units are imperial or metric."""
Imperial = 1
Metric = 2
class MultiNetInputData:
"""Defines a collection of MultiNet inputs to process."""
def __init__(
self, network_geometry_fc, maneuvers_geometry_fc, maneuver_path_idx_table, sign_info_table, sign_path_table,
restrictions_table, network_profile_link_table=None, historical_speed_profiles_table=None,
rds_tmc_info_table=None, logistics_truck_routes_table=None, logistics_lrs_table=None, logistics_lvc_table=None
):
"""Initialize an input MultiNet dataset with all the appropriate feature classes and tables"""
self.nw = network_geometry_fc
self.mn = maneuvers_geometry_fc
self.mp = maneuver_path_idx_table
self.si = sign_info_table
self.sp = sign_path_table
self.rs = restrictions_table
self.hsnp = network_profile_link_table
self.hspr = historical_speed_profiles_table
self.rd = rds_tmc_info_table
self.ltr = logistics_truck_routes_table
self.lrs = logistics_lrs_table
self.lvc = logistics_lvc_table
self.required_tables = [self.nw, self.mn, self.mp, self.si, self.sp, self.rs]
self.required_traffic_tables = [self.hsnp, self.hspr]
self.required_logistics_tables = [self.lrs, self.lvc]
def validate_data(self, check_traffic, check_logistics):
"""Validate that the data exists and has the required fields."""
# Check that all tables that need to be specified are specified.
for table in self.required_tables:
if not table:
arcpy.AddError("Required MultiNet input table not specified.")
return False
if check_traffic:
for table in self.required_traffic_tables:
if not table:
arcpy.AddError("Required MultiNet traffic input table not specified.")
return False
if check_logistics:
for table in self.required_logistics_tables:
if not table:
arcpy.AddError("Required MultiNet Logistics input table not specified.")
return False
# Verify existence of tables and appropriate schema
required_fields = {
self.nw: [
("ID", "Double"),
("FEATTYP", "SmallInteger"),
("F_JNCTID", "Double"),
("T_JNCTID", "Double"),
("PJ", "SmallInteger"),
("METERS", "Double"),
("NET2CLASS", "SmallInteger"),
("NAME", "String"),
("FOW", "SmallInteger"),
("FREEWAY", "SmallInteger"),
("BACKRD", "SmallInteger"),
("TOLLRD", "SmallInteger"),
("RDCOND", "SmallInteger"),
("PRIVATERD", "SmallInteger"),
("CONSTATUS", "String"),
("ONEWAY", "String"),
("F_ELEV", "SmallInteger"),
("T_ELEV", "SmallInteger"),
("KPH", "SmallInteger"),
("MINUTES", "Single"), # Float
("NTHRUTRAF", "SmallInteger"),
("ROUGHRD", "SmallInteger"),
],
self.mn: [
("ID", "Double"),
("JNCTID", "Double"),
("FEATTYP", "SmallInteger")
],
self.mp: [
("ID", "Double"),
("TRPELID", "Double"),
("SEQNR", "Integer")
],
self.si: [
("ID", "Double"),
("INFOTYP", "String"),
("TXTCONT", "String"),
("TXTCONTLC", "String"),
("CONTYP", "SmallInteger"),
("SEQNR", "Integer"),
("DESTSEQ", "Integer"),
("RNPART", "SmallInteger")
],
self.sp: [
("ID", "Double"),
("TRPELID", "Double"),
("SEQNR", "Integer")
],
self.rs: [
("ID", "Double"),
("VT", "SmallInteger"),
("DIR_POS", "SmallInteger"),
("RESTRTYP", "String")
],
self.hsnp: [
("NETWORK_ID", "Double"),
("VAL_DIR", "SmallInteger"),
("SPFREEFLOW", "SmallInteger"),
("SPWEEKDAY", "SmallInteger"),
("SPWEEKEND", "SmallInteger"),
("SPWEEK", "SmallInteger"),
("PROFILE_1", "SmallInteger"),
("PROFILE_2", "SmallInteger"),
("PROFILE_3", "SmallInteger"),
("PROFILE_4", "SmallInteger"),
("PROFILE_5", "SmallInteger"),
("PROFILE_6", "SmallInteger"),
("PROFILE_7", "SmallInteger")
],
self.hspr: [
("PROFILE_ID", "SmallInteger"),
("TIME_SLOT", "Integer"),
("REL_SP", "Single")
],
self.rd: [
("ID", "Double"),
("RDSTMC", "String")
],
self.ltr: [
("ID", "Double"),
("PREFERRED", "SmallInteger"),
("RESTRICTED", "SmallInteger")
],
self.lrs: [
("ID", "Double"),
("SEQNR", "SmallInteger"),
("RESTRTYP", "String"),
("VT", "SmallInteger"),
("RESTRVAL", "SmallInteger"),
("LIMIT", "Double"),
("UNIT_MEAS", "SmallInteger")
],
self.lvc: [
("ID", "Double"),
("SEQNR", "SmallInteger")
]
}
for table in [t for t in required_fields if t]:
if not arcpy.Exists(table):
arcpy.AddError(f"Input table {table} does not exist.")
return False
actual_fields = {(f.name, f.type) for f in arcpy.ListFields(table)}
if not set(required_fields[table]).issubset(actual_fields):
arcpy.AddError(
f"Input table {table} does not have the correct schema. Required fields: {required_fields[table]}")
return False
if int(arcpy.management.GetCount(table).getOutput(0)) == 0:
arcpy.AddWarning(f"Input table {table} has no rows.")
# Everything is valid
return True
class MultiNetProcessor:
def __init__(
self, out_folder: str, gdb_name: str, in_multinet: MultiNetInputData, unit_type: UnitType,
include_historical_traffic: bool, include_logistics: bool,
time_zone_type: TimeZoneType, time_zone_name: str = "", in_time_zone_table=None,
time_zone_ft_field: str = None, time_zone_tf_field: str = None, build_network: bool = True
):
"""Initialize a class to process MultiNet data into a network dataset."""
self.in_multinet = in_multinet
self.unit_type = unit_type
self.include_historical_traffic = include_historical_traffic
self.include_logistics = include_logistics
self.time_zone_type = time_zone_type
self.time_zone_name = time_zone_name
self.in_time_zone_table = in_time_zone_table
self.time_zone_ft_field = time_zone_ft_field
self.time_zone_tf_field = time_zone_tf_field
self.build_network = build_network
self.out_folder = out_folder
self.gdb_name = gdb_name
if not self.gdb_name.endswith(".gdb"):
self.gdb_name += ".gdb"
self.feature_dataset = os.path.join(self.out_folder, self.gdb_name, "Routing")
self.streets = os.path.join(self.feature_dataset, "Streets")
self.turns = os.path.join(self.feature_dataset, "RestrictedTurns")
self.road_splits = os.path.join(self.out_folder, self.gdb_name, "Streets_RoadSplits")
self.signposts = os.path.join(self.feature_dataset, "Signposts")
self.signposts_streets = os.path.join(self.out_folder, self.gdb_name, "Signposts_Streets")
self.streets_profiles = os.path.join(self.out_folder, self.gdb_name, "Streets_DailyProfiles")
self.profiles = os.path.join(self.out_folder, self.gdb_name, "DailyProfiles")
self.streets_tmc = os.path.join(self.out_folder, self.gdb_name, "Streets_TMC")
self.time_zone_table = os.path.join(self.out_folder, self.gdb_name, "TimeZones")
self.network = os.path.join(self.feature_dataset, "Routing_ND")
self.out_sr = arcpy.Describe(self.in_multinet.nw).spatialReference
# Maps VT field codes to restriction names
self.vt_field_map = {
0: "AllVehicles_Restricted",
11: "PassengerCars_Restricted",
12: "ResidentialVehicles_Restricted",
16: "Taxis_Restricted",
17: "PublicBuses_Restricted"
}
self.restriction_field_names = [self.vt_field_map[vt] for vt in sorted(self.vt_field_map)]
# Historical traffic base field names
self.historical_traffic_fields = ["Weekday", "Weekend", "AllWeek"]
# Logistics Truck Routes field names
self.ltr_fields = [
"NationalSTAARoute", "NationalRouteAccess", "DesignatedTruckRoute", "TruckBypassRoad",
"NoCommercialVehicles", "ImmediateAccessOnly", "TrucksRestricted"
]
# Global dataframes and variables used by multiple processes and initialized later
self.r_df = None # Restrictions table indexed by ID for quick lookups
self.mp_df = None # Maneuver paths table indexed by ID for quick lookups
self.streets_df = None # Dataframe of output streets indexed by ID for quick lookups
self.lrs_df = None # Dataframe of logistics LRS table
self.unique_lrs_df = None # Dataframe holding unique combinations of logistics restriction data
self.max_turn_edges = None # Maximum number of edges participating in a turn
self.fc_id = None # Streets feature class dataset ID used in Edge#FCID fields
def process_multinet_data(self):
"""Process multinet data into a network dataset."""
# Validate the input data
if not self._validate_inputs():
return
# Create the output location
self._create_feature_dataset()
# Read in some tables we're going to need to reference later in multiple places
self._read_and_index_restrictions()
self._read_and_index_maneuver_paths()
self._read_and_index_logistics_tables()
# Create the output Streets feature class and populate it
self._copy_streets()
self._detect_and_delete_duplicate_streets()
self._populate_streets_fields()
# We're now done with the Logistics restrictions table, so clear the variable to free up memory
del self.lrs_df
self.lrs_df = None
# Read in output streets for future look-ups
self._read_and_index_streets()
# Create and populate the turn feature class
self._create_turn_fc()
self._generate_turn_features()
# We're now done with the restrictions table, so clear the variable to free up memory
del self.r_df
self.r_df = None
# Create and populate the road forks table
self._create_road_forks_table()
self._populate_road_forks()
# We're now done with the maneuver path table, so clear the variable to free up memory
del self.mp_df
self.mp_df = None
# Create and populate Signposts and Signposts_Streets
self._create_signposts_fc()
self._create_signposts_streets_table()
self._populate_signposts_and_signposts_streets()
# Create and populate historical traffic tables
if self.include_historical_traffic:
self._create_and_populate_streets_profiles_table()
self._create_and_populate_profiles_table()
self._create_and_populate_streets_tmc_table()
# We're done with the streets table, so clear the variable to free up memory
del self.streets_df
self.streets_df = None
# Handle time zone table if needed
if self.time_zone_type != TimeZoneType.NoTimeZone:
self._handle_time_zone()
# Create the network dataset from a template and build it
self._create_and_build_nd()
@timed_exec
def _validate_inputs(self):
"""Validate the input data."""
arcpy.AddMessage("Validating inputs...")
# Do some simple checks
if not os.path.exists(self.out_folder):
arcpy.AddMessage(f"Output folder {self.out_folder} does not exist.")
return False
if os.path.exists(os.path.join(self.out_folder, self.gdb_name)):
arcpy.AddMessage(f"Output geodatabase {os.path.join(self.out_folder, self.gdb_name)} already exists.")
return False
if self.out_sr.name == "Unknown":
arcpy.AddError("The input data has an unknown spatial reference.")
return False
# Make sure the license is available.
if arcpy.CheckExtension("network").lower() == "available":
arcpy.CheckOutExtension("network")
else:
arcpy.AddError("The Network Analyst extension license is unavailable.")
return False
# Check the input data
if not self.in_multinet.validate_data(self.include_historical_traffic, self.include_logistics):
return False
arcpy.AddMessage("Inputs validated successfully.")
return True
@timed_exec
def _create_feature_dataset(self):
"""Create the output geodatabase and feature dataset."""
arcpy.AddMessage(f"Creating output geodatabase and feature dataset at {self.feature_dataset}...")
arcpy.management.CreateFileGDB(self.out_folder, self.gdb_name)
arcpy.management.CreateFeatureDataset(
os.path.dirname(self.feature_dataset),
os.path.basename(self.feature_dataset),
self.out_sr
)
@timed_exec
def _copy_streets(self):
"""Copy the network geometry feature class to the target feature dataset and add fields."""
arcpy.AddMessage("Copying input network geometry feature class to target feature dataset...")
# Filter out address area boundary elements
nw_layer = arcpy.management.MakeFeatureLayer(self.in_multinet.nw, "NW layer", "FEATTYP <> 4165").getOutput(0)
# Construct field mappings to use when copying the original data.
field_mappings = arcpy.FieldMappings()
# Add all the fields from the input data
field_mappings.addTable(self.in_multinet.nw)
# Add the TOLLRDDIR and restriction fields and historical traffic fields if relevant.
field_mappings = field_mappings.exportToString()
field_mappings += self._create_string_field_map("TOLLRDDIR", "Text", 2)
for restr_field in self.restriction_field_names:
field_mappings += self._create_string_field_map(f"FT_{restr_field}", "Text", 1)
field_mappings += self._create_string_field_map(f"TF_{restr_field}", "Text", 1)
if self.include_historical_traffic:
for trf_fld in self.historical_traffic_fields:
field_mappings += self._create_string_field_map(f"FT_{trf_fld}", "Short")
field_mappings += self._create_string_field_map(f"TF_{trf_fld}", "Short")
field_mappings += self._create_string_field_map(f"FT_{trf_fld}Minutes", "Float")
field_mappings += self._create_string_field_map(f"TF_{trf_fld}Minutes", "Float")
if self.in_multinet.ltr:
for ltr_field in self.ltr_fields:
field_mappings += self._create_string_field_map(ltr_field, "Text", 1)
if self.include_logistics:
# Derive a list of logistics restriction field names based on data from the LRS table
for _, record in self.unique_lrs_df.iterrows():
field_mappings += self._create_string_field_map(
record["FieldName"], record["FieldType"], record["FieldLength"])
# Copy the input network geometry feature class to the target feature dataset
arcpy.conversion.FeatureClassToFeatureClass(
nw_layer, self.feature_dataset, os.path.basename(self.streets), field_mapping=field_mappings)
# Update the fc_id that will be used to relate back to this Streets feature class in Edge#FCID fields
self.fc_id = arcpy.Describe(self.streets).DSID
@timed_exec
def _detect_and_delete_duplicate_streets(self):
"""Determine if there are duplicate street IDs, and if so, delete them."""
# Duplicate street features occur along tile boundaries.
# Use Pandas to identify duplicate ID values and associated OIDs to delete.
with arcpy.da.SearchCursor(self.streets, ["OID@", "ID"]) as cur:
id_df = pd.DataFrame(cur, columns=["OID", "ID"])
duplicate_streets = id_df[id_df.duplicated(subset="ID")]["OID"].to_list()
# If there are any duplicates, delete them.
if duplicate_streets:
duplicate_streets = [str(oid) for oid in duplicate_streets]
oid_field = arcpy.Describe(self.streets).OIDFieldName
arcpy.AddMessage("Duplicate streets were detected and will be removed.")
where = f"{oid_field} IN ({', '.join(duplicate_streets)})"
layer_name = "Temp_Streets"
arcpy.management.MakeFeatureLayer(self.streets, layer_name, where)
arcpy.management.DeleteRows(layer_name)
@timed_exec
def _create_turn_fc(self):
"""Create the turn feature class and add necessary fields."""
assert self.max_turn_edges is not None
arcpy.AddMessage("Creating turn feature class...")
arcpy.na.CreateTurnFeatureClass(self.feature_dataset, os.path.basename(self.turns), self.max_turn_edges)
# Add restriction fields
# The ID field is added to easily relate this back to the original data but is not required by the network.
field_defs = [["ID", "DOUBLE"]] + [[field, "TEXT", "", 1] for field in self.restriction_field_names]
arcpy.management.AddFields(self.turns, field_defs)
@timed_exec
def _create_road_forks_table(self):
"""Create the road forks table Streets_RoadSplits with the correct schema."""
arcpy.AddMessage("Creating road forks table...")
arcpy.management.CreateTable(os.path.dirname(self.road_splits), os.path.basename(self.road_splits))
# Schema for the road forks table:
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/data-management/add-fields.htm
# The ID field is added to easily relate this back to the original data but is not required by the schema.
field_defs = [
["ID", "DOUBLE"],
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"],
["Branch0FCID", "LONG"],
["Branch0FID", "LONG"],
["Branch0FrmPos", "DOUBLE"],
["Branch0ToPos", "DOUBLE"],
["Branch1FCID", "LONG"],
["Branch1FID", "LONG"],
["Branch1FrmPos", "DOUBLE"],
["Branch1ToPos", "DOUBLE"],
["Branch2FCID", "LONG"],
["Branch2FID", "LONG"],
["Branch2FrmPos", "DOUBLE"],
["Branch2ToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.road_splits, field_defs)
@timed_exec
def _create_signposts_fc(self):
"""Create the Signposts feature class with correct schema."""
arcpy.AddMessage("Creating Signposts feature class...")
arcpy.management.CreateFeatureclass(
os.path.dirname(self.signposts), os.path.basename(self.signposts),
"POLYLINE", has_m="DISABLED", has_z="DISABLED"
)
# Schema for the signposts feature class:
# https://pro.arcgis.com/en/pro-app/latest/help/analysis/networks/signposts.htm
field_defs = [
["ExitName", "TEXT", "ExitName", 24],
]
for i in range(10):
field_defs += [
[f"Branch{i}", "TEXT", f"Branch{i}", 180],
[f"Branch{i}Dir", "TEXT", f"Branch{i}Dir", 5],
[f"Branch{i}Lng", "TEXT", f"Branch{i}Lng", 2],
[f"Toward{i}", "TEXT", f"Toward{i}", 180],
[f"Toward{i}Lng", "TEXT", f"Toward{i}Lng", 2],
]
arcpy.management.AddFields(self.signposts, field_defs)
@timed_exec
def _create_signposts_streets_table(self):
"""Create the Signposts_Streets table with correct schema."""
arcpy.AddMessage("Creating Signposts_Streets table...")
arcpy.management.CreateTable(os.path.dirname(self.signposts_streets), os.path.basename(self.signposts_streets))
# Schema for the Signposts_Streets table:
# https://pro.arcgis.com/en/pro-app/latest/help/analysis/networks/signposts.htm
field_defs = [
["SignpostID", "LONG"],
["Sequence", "LONG"],
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.signposts_streets, field_defs)
@timed_exec
def _create_and_populate_streets_profiles_table(self):
"""Create the Streets_DailyProfiles table."""
if not self.include_historical_traffic:
return
arcpy.AddMessage("Creating and populating Streets_DailyProfiles table...")
assert self.streets_df is not None # Sanity check
# Create the table with desired schema
arcpy.management.CreateTable(
os.path.dirname(self.streets_profiles),
os.path.basename(self.streets_profiles),
self.in_multinet.hsnp # Template table used to define schema
)
field_defs = [
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.streets_profiles, field_defs)
# Insert rows
desc = arcpy.Describe(self.in_multinet.hsnp)
input_fields = [f.name for f in desc.fields if f.name != desc.OIDFieldName]
output_fields = input_fields + [f[0] for f in field_defs]
network_id_idx = input_fields.index("NETWORK_ID")
val_dir_idx = input_fields.index("VAL_DIR")
with arcpy.da.InsertCursor(self.streets_profiles, output_fields) as cur:
for row in arcpy.da.SearchCursor(
self.in_multinet.hsnp, input_fields, "SPFREEFLOW > 0 And VAL_DIR IN (2, 3)"
):
# Initialize the row to insert with all the values from the original table
new_row = [val for val in row]
# Calculate the additional, new fields: EdgeFCID, EdgeFID, EdgeFrmPos, EdgeToPos
street_id = np.int64(row[network_id_idx])
try:
# Find the street record associated with this street profile record
edge_fid = self.streets_df.loc[street_id]["OID"]
except KeyError:
arcpy.AddWarning((
f"The Streets table is missing an entry with ID {row[network_id_idx]}, which is used in the "
"network profile link historical traffic table."))
# Just skip this row and don't add it
continue
val_dir = row[val_dir_idx]
if val_dir == 2:
edge_from_pos = 0
edge_to_pos = 1
elif val_dir == 3:
edge_from_pos = 1
edge_to_pos = 0
else:
# This should never happen because of our where clause, but check just in case
continue
new_row += [self.fc_id, edge_fid, edge_from_pos, edge_to_pos]
# Insert the row
cur.insertRow(new_row)
@timed_exec
def _create_and_populate_profiles_table(self):
"""Create the DailyProfiles table."""
if not self.include_historical_traffic:
return
arcpy.AddMessage("Creating and populating DailyProfiles table...")
# Create the table with correct schema
arcpy.management.CreateTable(os.path.dirname(self.profiles), os.path.basename(self.profiles))
field_defs = [["ProfileID", "SHORT"]]
added_minutes = 0
midnight = datetime.datetime(2021, 1, 1, 0, 0, 0) # Initialize midnight on an arbitrary date
# Add a field for each 5-minute increment until 11:55 at night
while added_minutes < 1440:
current_time = midnight + datetime.timedelta(minutes=added_minutes)
field_defs.append([f"SpeedFactor_{current_time.strftime('%H%M')}", "FLOAT"])
added_minutes += 5
arcpy.management.AddFields(self.profiles, field_defs)
# Read the Historical Speed Profiles table into a temporary dataframe so we can quickly sort it. Normally we
# could sort it using the da.SearchCursor's sql clause, but the ORDER BY option doesn't work with shapefile
# tables.
fields = ["PROFILE_ID", "TIME_SLOT", "REL_SP"]
with arcpy.da.SearchCursor(self.in_multinet.hspr, fields) as cur:
hspr_df = pd.DataFrame(cur, columns=fields)
hspr_df = hspr_df.sort_values("PROFILE_ID").groupby(["PROFILE_ID"])
# Insert the rows
output_fields = [f[0] for f in field_defs]
with arcpy.da.InsertCursor(self.profiles, output_fields) as cur:
# Loop through the records in the HSPR table and calculate the SpeedFactor fields accordingly
for profile_id, group in hspr_df:
# Initialize a new row with the ProfileID and defaulting all the SpeedFactor fields to None.
new_row = [profile_id] + [None] * (len(output_fields) - 1)
# Iterate through the records in this group and populate the SpeedFactor fields
for _, record in group.iterrows():
# Figure out which SpeedFactor field this record is for based on the TIME_SLOT field value
# The TIME_SLOT field indicates the time of day as measured in seconds since midnight. Since the
# granularity is 5 minutes, the TIME_SLOT values are all multiples of 300 (e.g., TIME_SLOT=0
# represents 12:00am, TIME_SLOT=300 represents 12:05am, TIME_SLOT=600 represents 12:10am, etc.).
# Add 1 to the index because ProfileID is the first field in the row
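# Worked example: TIME_SLOT=600 yields int(600 / 300) + 1 = 3, i.e. the third entry after
# ProfileID in output_fields, which is the SpeedFactor_0010 (12:10am) field.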
time_slot_index = int((record['TIME_SLOT'] / 300)) + 1
new_row[time_slot_index] = record["REL_SP"] / 100
# Check if the row is missing any values, and if so, default them to 1 and add a warning.
if None in new_row:
arcpy.AddWarning((
"The Historical Speed Profiles table has incomplete TIME_SLOT records for PROFILE_ID "
f"{profile_id}. The missing values have been filled in with a value of 1."
))
new_row = [val if val is not None else 1 for val in new_row]
# Finally, insert the row
cur.insertRow(new_row)
@timed_exec
def _create_and_populate_streets_tmc_table(self):
if not self.include_historical_traffic or not self.in_multinet.rd:
return
arcpy.AddMessage("Creating and populating Streets_TMC table...")
assert self.streets_df is not None # Sanity check
arcpy.management.CreateTable(os.path.dirname(self.streets_tmc), os.path.basename(self.streets_tmc))
field_defs = [
["ID", "DOUBLE"],
["TMC", "TEXT", "TMC", 9],
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.streets_tmc, field_defs)
with arcpy.da.InsertCursor(self.streets_tmc, [f[0] for f in field_defs]) as cur:
for row in arcpy.da.SearchCursor(self.in_multinet.rd, ["ID", "RDSTMC"]):
id = row[0]
# The TMC field value comes from the last 9 characters of the RDSTMC field of the RD table.
rdstmc = row[1]
tmc = rdstmc[-9:]
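# The first character of RDSTMC ('+' or '-') encodes the direction of travel and is mapped to
# EdgeFrmPos/EdgeToPos below.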
try:
# Find the street record associated with this street profile record
edge_fid = self.streets_df.loc[np.int64(row[0])]["OID"]
except KeyError:
arcpy.AddWarning((
f"The Streets table is missing an entry with ID {id}, which is used in the RDS-TMC Information "
"(RD) historical traffic table."))
# Just skip this row and don't add it
continue
if rdstmc[0] == "+":
edge_from_pos = 0
edge_to_pos = 1
elif rdstmc[0] == "-":
edge_from_pos = 1
edge_to_pos = 0
else:
arcpy.AddWarning((
"The RDS-TMC Information (RD) historical traffic table has an invalid RDSTMC field value for "
f"ID {id}."
))
continue
cur.insertRow([id, tmc, self.fc_id, edge_fid, edge_from_pos, edge_to_pos])
@timed_exec
def _read_and_index_restrictions(self):
"""Read in the restrictions table and index it for quick lookups."""
arcpy.AddMessage("Reading and grouping restrictions table...")
where = f"VT IN ({', '.join([str(vt) for vt in self.vt_field_map])})"
fields = ["ID", "VT", "DIR_POS", "RESTRTYP"]
with arcpy.da.SearchCursor(self.in_multinet.rs, fields, where) as cur:
self.r_df = pd.DataFrame(cur, columns=fields)
# Cast the ID column from its original double to an explicit int64 so we can use it for indexing and lookups
self.r_df = self.r_df.astype({"ID": np.int64})
# Index the dataframe by ID for quick retrieval later, and sort the index to make those lookups even faster
self.r_df.set_index("ID", inplace=True)
self.r_df.sort_index(inplace=True)
@timed_exec
def _read_and_index_maneuver_paths(self):
"""Read in the maneuver paths table and index it for quick lookups."""
arcpy.AddMessage("Reading and grouping maneuver paths table...")
fields = ["ID", "TRPELID", "SEQNR"]
with arcpy.da.SearchCursor(self.in_multinet.mp, fields) as cur:
# Explicitly read it in using int64 to convert the double-based ID field for easy indexing and lookups
self.mp_df = pd.DataFrame(cur, columns=fields, dtype=np.int64)
# Index the dataframe by ID for quick retrieval later, and sort the index to make those lookups even faster
self.mp_df.set_index("ID", inplace=True)
self.mp_df.sort_index(inplace=True)
# Determine the max number of edges participating in a turn. This will be used when creating the turn feature
# class to initialize the proper number of fields.
self.max_turn_edges = int(self.mp_df["SEQNR"].max())
@timed_exec
def _read_and_index_historical_traffic(self):
"""Read and index historical traffic tables."""
if not self.include_historical_traffic:
# Sanity check
return None
fields = ["NETWORK_ID", "VAL_DIR", "SPWEEKDAY", "SPWEEKEND", "SPWEEK"]
with arcpy.da.SearchCursor(self.in_multinet.hsnp, fields, "VAL_DIR IN (2, 3)") as cur:
# Explicitly read it in using int64 to convert the double-based ID field for easy indexing and lookups
hsnp_df =
|
pd.DataFrame(cur, columns=fields, dtype=np.int64)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 19:41:56 2021
@author: u0117123
"""
#Import modules
import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression
#Input variables
Validation_Area="Tervuren"
#Reference objects with features path
refObjectPath = r'C:\Users\u0117123\Box Sync\FWO\WP1\Point-cloud-extractions\processing\5_Clustering_classification\Reference'
ClusteredObjectPath = r'C:\Users\u0117123\Box Sync\FWO\WP1\Point-cloud-extractions\processing\5_Clustering_classification\3SA\r10\RF_all'
#%% LOGISTIC REGRESSION MODEL
### STEP 1 ### IMPORT DATA
data_density_loop_all = pd.read_csv(refObjectPath + "\data_density_loop_Reference.csv", sep=";", index_col=(0))
data_density_loop = data_density_loop_all.loc[data_density_loop_all['location'] != Validation_Area]
data_density_loop['height7_1'] = data_density_loop['height7']/data_density_loop['height1']
data_density_loop['height7_2'] = data_density_loop['height7']/data_density_loop['height2']
data_density_loop['height5_1'] = data_density_loop['height5']/data_density_loop['height1']
data_density_loop['height10_2'] = data_density_loop['height10']/data_density_loop['height2']
data_density_loop['height10_1'] = data_density_loop['height10']/data_density_loop['height1']
columns_x = ["min_z", "max_z", "min_slope_rel", "max_slope_rel", "area",
"m_z_chm","m_nr_returns", "3D_dens","height7_1", "height5_1",
"height10_2", "height10_1", "height7_2"]
data_density_loop_x = data_density_loop[columns_x] #independent variables
data_density_loop_ground_p_density = data_density_loop[["ground_p_density"]]
data_density_loop_y = data_density_loop[["Type"]] #Response variable
#Convert response variable to binary values (shrub = 1; tree = 0)
shrub = ["shrub"]
data_density_loop_y["y"] = np.where(data_density_loop_y["Type"].isin(shrub), "1", "0")
data_density_loop_y = data_density_loop_y.drop(['Type'], axis=1)
# convert dataframe response variable to matrix
conv_arr = data_density_loop_y.values
y_array = conv_arr.ravel()
#%%## STEP 2 ### Check for correlations
import matplotlib.pyplot as plt
import seaborn as sns
# Create correlation matrix and select the upper triangle
cor_matrix = data_density_loop_x.corr().abs()
plt.figure(figsize = (20,10)) # Size of the figure
sns.heatmap(data_density_loop_x.corr().abs(),annot = True)
plt.show()
upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
#print(upper_tri)
# Dropping the columns with correlation > 95%
to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.95)] #height5_1, height10_1
#print(); print(to_drop)
data_density_loop_x_dropCorr = data_density_loop_x.drop(to_drop, axis=1)
#print(); print(data_density_loop_x_dropCorr.head())
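# Sketch of the masking step (illustrative shape, not from the data): np.triu(np.ones((3, 3)), k=1)
# keeps only the cells above the diagonal, so each feature pair is inspected once; any column with an
# absolute correlation > 0.95 in that upper triangle (here height5_1 and height10_1) lands in to_drop.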
#%%## STEP 3 ### Cross validation loop
#merge independent variables and dependent variable
data_density_loop_xy_dropCorr =
|
pd.concat([data_density_loop_x_dropCorr,data_density_loop_y], axis=1)
|
pandas.concat
|
"""
Prepare training and testing datasets as CSV dictionaries 2.0 (Further modification required for GBM)
Created on 04/26/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in(slide, label, root_dir, sldnum):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
fac = 500
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '_{}.png'.format(str(sldnum)) in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = int(re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0])
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
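# Rounding x and y down to even values maps the level-0/level-1 grid coordinates onto the coarser
# level-2 grid, e.g. cells (6, 4) and (7, 5) both look up the level-2 tile at (6, 4).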
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
# Get all svs images with their labels as one file; level is the tile resolution level
def big_image_sum(pmd, path='../tiles/', dict_file='../tcia_pathology_slides.tsv',
ref_file='../gbm_all_subtype_collections.2019-10-13.tsv'):
refdict = {'low': 0, 'high': 1, False: 0, True: 1, 'normal': 0, 'short': 1, 'long': 2}
dct = pd.read_csv(dict_file, sep='\t', header=0)
# dct = dct.loc[dct['used_in_proteome'] == True]
ref = pd.read_csv(ref_file, sep='\t', header=0)
ref = ref.dropna(subset=[pmd])
ref[pmd] = ref[pmd].replace(refdict)
big_images = []
if pmd == 'telomere':
normalimg = intersection(ref.loc[ref[pmd] == 0]['case'].tolist(), dct['case_id'].tolist())
normalsld = dct[dct['case_id'].isin(normalimg)]['slide_id'].tolist()
shortimg = intersection(ref.loc[ref[pmd] == 1]['case'].tolist(), dct['case_id'].tolist())
shortsld = dct[dct['case_id'].isin(shortimg)]['slide_id'].tolist()
longimg = intersection(ref.loc[ref[pmd] == 2]['case'].tolist(), dct['case_id'].tolist())
longsld = dct[dct['case_id'].isin(longimg)]['slide_id'].tolist()
for i in normalsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 0, path + "{}/".format(pctnum), sldnum])
for i in shortsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 1, path + "{}/".format(pctnum), sldnum])
for i in longsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 2, path + "{}/".format(pctnum), sldnum])
else:
negimg = intersection(ref.loc[ref[pmd] == 0]['case'].tolist(), dct['case_id'].tolist())
negsld = dct[dct['case_id'].isin(negimg)]['slide_id'].tolist()
posimg = intersection(ref.loc[ref[pmd] == 1]['case'].tolist(), dct['case_id'].tolist())
possld = dct[dct['case_id'].isin(posimg)]['slide_id'].tolist()
for i in negsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 0, path + "{}/".format(pctnum), sldnum])
for i in possld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 1, path + "{}/".format(pctnum), sldnum])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path', 'sldnum'])
return datapd
# separate into training and testing; each type has the same separation ratio on big images
# test and train csv files contain the tiles' paths.
def set_sep(alll, path, cls, cut=0.3, batchsize=24):
trlist = []
telist = []
valist = []
CPTAC = alll
for i in range(cls):
subset = CPTAC.loc[CPTAC['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
test =
|
pd.concat(telist)
|
pandas.concat
|
"""
Prepare model outputs from SAC SMA for processing with the FFC
<NAME>, UC Davis, 2021
"""
import glob
import pandas as pd
from datetime import datetime as dt
files = glob.glob('data_inputs/SAC_SMA_outputs/*')
for file in files:
df = pd.read_table(file, sep=" ")
year = df['Year']
month = df['Month']
day = df['Day']
new_date_col = pd.Series(index=range(len(year)), name='date')
for index, date in enumerate(day):
date_string = str(day[index]) + '-' + str(month[index]) + '-' + str(year[index])
dt_obj = dt.strptime(date_string, '%d-%m-%Y')
dt_string = dt_obj.strftime('%m/%d/%Y')
new_date_col[index] = dt_string
flow = df['Flow_cfs']
ffc_file =
|
pd.concat([new_date_col, flow], axis=1)
|
pandas.concat
|
import pandas as pd
import numpy as np
import copy
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_selection import mutual_info_classif, SelectKBest
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from os import listdir
from os.path import isfile, join
import sys
import math
from sklearn.metrics import accuracy_score, f1_score
import re
from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \
get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \
get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \
get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \
get_flesch_reading_ease_vector, get_sentence_count, get_word_count
from sklearn.preprocessing import StandardScaler, Normalizer
# Chapter 7.1.1. method to trim features with a low sum, e.g. n-grams with a total count lower than 5
def trim_df_sum_feature(par_df, par_n):
par_df = par_df.fillna(value=0)
columns = par_df.columns.to_numpy()
data_array = par_df.to_numpy(dtype=float)
sum_arr = data_array.sum(axis=0)
# reduce n if 0 features would be returned
while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0:
par_n -= 1
positions = list(np.where(sum_arr < par_n))
columns = np.delete(columns, positions)
data_array = np.delete(data_array, positions, axis=1)
return pd.DataFrame(data=data_array, columns=columns)
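# Minimal sketch of the trimming behaviour (toy counts, not from the thesis corpus): with par_n=5,
# a "rare" column summing to 1 is dropped while a "the" column summing to 7 survives, e.g.
# trim_df_sum_feature(pd.DataFrame({"the": [3, 4], "rare": [1, 0]}), 5) keeps only the "the" column.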
# Chapter 7.1.1. method to trim features with low occurrence over all articles
def trim_df_by_occurrence(par_df, n):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum()
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] > n:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
# Chapter 7.1.1. Process of filtering out features with low occurrence and saving the filtered features to a new file
def filter_low_occurrence():
df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_occurrence(df_bow, 1)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_occurrence(word_n_gram, 1)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False)
for n in range(2, 6):
char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}")
char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5)
print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}")
char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False)
char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}")
char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5)
print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}")
char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False)
char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}")
char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5)
print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}")
char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False)
df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Words before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_occurrence(df_f_word, 1)
print(f"Function Words after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False)
for n in range(2, 6):
pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1)
print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False)
# Chapter 7.1.2. method to filter words based on document frequency
def trim_df_by_doc_freq(par_df, par_doc_freq):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum() / len(par_df)
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] < par_doc_freq:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
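# Minimal usage sketch (toy data, not from the thesis corpus): "the" appears in every row
# (document frequency 1.0 >= 0.5) and is dropped, "rare" appears in 1 of 4 rows (0.25) and is kept.
_demo_doc_freq = trim_df_by_doc_freq(
    pd.DataFrame({"the": [1, 2, 1, 3], "rare": [1, None, None, None]}), 0.5)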
# Chapter 7.1.2 Process of filtering out features with high document frequency and saving the filtered features to a new file
def filter_high_document_frequency():
# Filter words with high document frequency
df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False)
df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Word before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_doc_freq(df_f_word, 0.5)
print(f"Function Word after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8",
nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False)
# Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence)
def get_rel_frequency(par_df_count, par_df_len_metric_vector):
df_rel_freq = pd.DataFrame(columns=par_df_count.columns)
for index, row in par_df_count.iterrows():
df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index]))
return df_rel_freq
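# Worked example of the division: a count row [2, 6] for an article with word_count 4 becomes the
# relative-frequency row [0.5, 1.5]; the index of the count frame must line up with the index of the
# length-metric vector for the lookup par_df_len_metric_vector[index] to succeed.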
# Chapter 7.1.4. Whole process of the chapter: compute the individual relative frequency of each feature, compare
# the correlation with the article length for the absolute and the relative variant, and save the features with the
# estimated relative frequency to a new file
def individual_relative_frequency():
df_len_metrics = pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
# different metrics for individual relative frequencies
metrics = ['word_count', 'char_count', 'sentence_count']
for m in metrics:
# The csv is placed in a folder based on the metric for the individual relative frequency
path = f'daten/4_relative_frequency/{m}'
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
x = pd.read_csv(f"daten/4_relative_frequency/{m}/{f}",
sep=',', encoding="utf-8", nrows=2500).fillna(value=0)
x_rel = get_rel_frequency(x, df_len_metrics[m])
# Save the CSV with relative frequency
x_rel.to_csv(
f"daten/4_relative_frequency/{f.split('.')[0]}"
f"_rel.csv", index=False)
# Correlation is always between the metrics and the word_count
x['word_count'] = df_len_metrics['word_count']
x_rel['word_count'] = df_len_metrics['word_count']
# only on the test data 60/40 split
x_train, x_test = train_test_split(x, test_size=0.4, random_state=42)
x_train_rel, x_test_rel = train_test_split(x_rel, test_size=0.4, random_state=42)
# Calculate the median correlation
print(f"{f}_abs: {x_train.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
print(f"{f}_rel: {x_train_rel.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
# Chapter 7.2.1 First step of the iterative filter: Rank the features
def sort_features_by_score(par_x, par_y, par_select_metric):
# Get a sorted ranking of all features by the selected metric
selector = SelectKBest(par_select_metric, k='all')
selector.fit(par_x, par_y)
# Sort the features by their score
return pd.DataFrame(dict(feature_names=par_x.columns, scores=selector.scores_)).sort_values('scores',
ascending=False)
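# Minimal usage sketch (toy data, not from the thesis corpus): ranks two candidate features by their
# mutual information with the label and returns them ordered by descending score.
_demo_ranking = sort_features_by_score(
    pd.DataFrame({"f1": [0.0, 1.0, 0.0, 1.0, 0.0, 1.0], "f2": [0.3, 0.1, 0.4, 0.2, 0.5, 0.6]}),
    pd.Series([0, 1, 0, 1, 0, 1]),
    mutual_info_classif)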
# Chapter 7.2.1 method to get the best percentile for GNB
def get_best_percentile_gnb(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
gnb = GaussianNB()
best_perc_round = par_iter - 1  # If no other point is found, use the highest number of features (-1 because indexing starts at 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# GNB Training
result_list.append(
cross_val_score(gnb, x_new_training, par_y_train, cv=cv, n_jobs=-1, scoring='accuracy').mean())
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Choose the best percentage if each of the 5 following points is no higher than its predecessor, if they all stay
# within 0.5% of the first point, or if they are all more than 2% below the first point
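# Example with step=1: if the accuracies measured at 11%-15% of the features all stay within 0.5%
# of the accuracy at 10%, the loop stops here and best_perc_round points at the 10% entry
# (len(result_list) - 6).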
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"GNB Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Tradeoff of 1% accuracy for a smaller percentage of features:
# as long as some earlier maximum within 1% accuracy uses at least 2% fewer features, prefer it
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If there is no room below the current best for the tradeoff, there is no better percentage
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it were less than 0, the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for SVC
def get_best_percentile_svc(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
# Parameter for SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
best_perc_round = par_iter - 1  # If no other point is found, use the highest number of features (-1 because indexing starts at 0)
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# SVC Test
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Choose the best percentage if each of the 5 following points is no higher than its predecessor, if they all stay
# within 0.5% of the first point, or if they are all more than 2% below the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"SVC Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Tradeoff of 1% accuracy for a smaller percentage of features:
# as long as some earlier maximum within 1% accuracy uses at least 2% fewer features, prefer it
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If there is no room below the current best for the tradeoff, there is no better percentage
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it were less than 0, the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for KNN
def get_best_percentile_knn(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
best_perc_round = par_iter - 1  # If no other point is found, use the highest number of features (-1 because indexing starts at 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# Parameter for KNN
# Some Values from 3 to square of samples
neighbors = [i for i in range(3, int(math.sqrt(len(x_new_training.index))), 13)]
neighbors += [1, 3, 5, 11, 19, 36]
if int(math.sqrt(len(x_new_training.index))) not in neighbors:
neighbors.append(int(math.sqrt(len(x_new_training.index))))
# No more neighbors than samples-2
neighbors = [x for x in neighbors if x < len(x_new_training.index) - 2]
# remove duplicates
neighbors = list(set(neighbors))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN Training
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Choose the best percentage if each of the 5 following points is no higher than its predecessor, if they all stay
# within 0.5% of the first point, or if they are all more than 2% below the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"KNN Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Tradeoff of up to 1% accuracy for a smaller percentage of features
# As long as there is an earlier maximum with at most 1% less accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If there are not at least 2% left for the tradeoff, there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it is less than 0, clamp it to 0 (a negative index would count from the end)
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y >= best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percentage of features is the step size plus the best round times the step size
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
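# Usage sketch (illustrative, variable names are placeholders): the three percentile
# searches share the same call pattern and are driven by the ranking produced by
# sort_features_by_score, e.g.:
# df_sorted = sort_features_by_score(x_train, y_train, mutual_info_classif)
# best_perc, best_round, scores = get_best_percentile_knn(x_train, y_train, 50, df_sorted, 1.0)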
# Chapter 7.2.1 Filter the feature based on the estimated best percentile and save it into a new file
def print_filter_feature_percentile(par_path, par_df_sorted_features, par_percent, par_x, par_file_name):
# select the 1 percent of the features (len/100) multiplied by par_best_percent
number_features = round(par_percent * (len(par_x.columns) / 100))
# Keep at least one feature if the selected percentage yields less than 1
number_features = 1 if number_features < 1 else number_features
feature_list = par_df_sorted_features['feature_names'][:number_features].tolist()
# print the name of the features in a file
original_stdout = sys.stdout
with open(f'{par_path}selected_features/{par_file_name}_filtered.txt', 'w', encoding="utf-8") as f:
sys.stdout = f
print(f"Features: {len(feature_list)}")
print(f"{feature_list}")
sys.stdout = original_stdout
# select the best features from the original dataset
par_x[feature_list].to_csv(f"{par_path}csv_after_filter/{par_file_name}_filtered.csv", index=False)
# Chapter 7.2.1 Complete process of the iterative Filter
def iterative_filter_process(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_authors != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb, best_round_gnb, result_list_gnb = get_best_percentile_gnb(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_knn, best_round_knn, result_list_knn = get_best_percentile_knn(x_train, y_train, 50,
df_sorted_features, step_perc)
# select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_gnb, x, "gnb_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, "svc_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_knn, x, "knn_" + filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as f:
sys.stdout = f
print(f"best_perc_gnb: ({best_perc_gnb}|{result_list_gnb[best_round_gnb]})\n"
f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n"
f"best_perc_knn: ({best_perc_knn}|{result_list_knn[best_round_knn]})")
sys.stdout = original_stdout
# draw diagram
len_list = [len(result_list_gnb), len(result_list_svc), len(result_list_knn)]
plt.plot([i * step_perc for i in range(1, len(result_list_gnb) + 1)], result_list_gnb, 'r-', label="gnb")
plt.plot(best_perc_gnb, result_list_gnb[best_round_gnb], 'rx')
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.plot([i * step_perc for i in range(1, len(result_list_knn) + 1)], result_list_knn, 'b-', label="knn")
plt.plot(best_perc_knn, result_list_knn[best_round_knn], 'bx')
plt.axis([step_perc, (max(len_list) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, max(len_list) + 1)], columns=['percent'])
df_gnb = pd.DataFrame(data=result_list_gnb, columns=['gnb'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_knn = pd.DataFrame(data=result_list_knn, columns=['knn'])
df_accuracy = pd.concat([df_percent, df_gnb, df_svc, df_knn], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
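# Illustrative helper sketch (hypothetical, not called by the original pipeline): the
# iterative filter reads and writes the sub-folders used above, so they have to exist
# under par_path. The folder names are taken from the read/write calls in this file;
# "results" is only needed by the comparison functions further below.
def ensure_iterative_filter_dirs(par_path):
    import os
    for sub_dir in ["csv_before_filter", "csv_after_filter", "selected_features",
                    "best_perc", "diagrams", "accuracy", "results"]:
        os.makedirs(f"{par_path}{sub_dir}", exist_ok=True)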
# Chapter 8.1. and later, basically the process of the iterative filter only with the svc classifier
def iterative_filter_process_svm(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_authors != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for svc
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
# select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as out_f:
sys.stdout = out_f
print(f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n")
sys.stdout = original_stdout
# draw diagram
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.axis([step_perc, (len(result_list_svc) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, len(result_list_svc) + 1)], columns=['percent'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_accuracy = pd.concat([df_percent, df_svc], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 7.2.1. Get the accuracy of the features before the iterative filter, results in table 18
def get_accuracy_before_iterative_filter():
gnb_result_list, svc_result_list, knn_result_list, gnb_time_list, svc_time_list, knn_time_list \
= [], [], [], [], [], []
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
path = f'daten/5_iterative_filter/csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# Get the feature names for the table
feature_list = [re.search("(.+?(?=_rel))", f).group(1) for f in files]
for f in files:
print(f)
x = pd.read_csv(f"daten/5_iterative_filter/csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
gnb_result_list.append(score)
gnb_time_list.append(time_taken)
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC time for {f}: {time_taken}")
svc_result_list.append(score)
svc_time_list.append(time_taken)
# Parameter for KNN
# Some values from 3 up to the square root of the number of features
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN time for {f}: {time_taken}")
knn_result_list.append(score)
knn_time_list.append(time_taken)
# create dataframe with the scores and times
df_results = pd.DataFrame()
df_results['feature'] = feature_list
df_results['score_gnb'] = gnb_result_list
df_results['time_gnb'] = gnb_time_list
df_results['score_svc'] = svc_result_list
df_results['time_svc'] = svc_time_list
df_results['score_knn'] = knn_result_list
df_results['time_knn'] = knn_time_list
return df_results
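# Usage sketch (illustrative, output path is a placeholder): the returned dataframe holds
# the scores and times for table 18 and can be written to disk, e.g.:
# get_accuracy_before_iterative_filter().to_csv("daten/5_iterative_filter/accuracy_before_filter.csv", index=False)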
# Chapter 7.2.1. Get the accuracy of the features after the iterative filter, results in table 18
def get_accuracy_after_iterative_filter():
df_gnb_result = pd.DataFrame(columns=['feature', 'score_gnb', 'time_gnb'])
df_svc_result = pd.DataFrame(columns=['feature', 'score_svc', 'time_svc'])
df_knn_result = pd.DataFrame(columns=['feature', 'score_knn', 'time_knn'])
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
# path = f'daten/5_iterative_filter/csv_after_filter'
path = f'daten/5_iterative_filter/5_iterative_filter/csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
for f in files:
print(f)
# Get the feature name for the table
feature = re.search(".{4}(.+?(?=_rel))", f).group(1)
# x = pd.read_csv(f"daten/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x = pd.read_csv(f"daten/5_iterative_filter/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8",
nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# Select the classifier by the start of the filename
if f.split("_")[0] == "gnb":
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
df_gnb_result = df_gnb_result.append(pd.DataFrame(data={'feature': [feature], 'score_gnb': [score],
'time_gnb': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "svc":
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC training time for {f}: {time_taken}")
df_svc_result = df_svc_result.append(pd.DataFrame(data={'feature': [feature], 'score_svc': [score],
'time_svc': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "knn":
# Parameter for KNN
# Some values from 3 up to the square root of the number of features
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN test time for {f}: {time_taken}")
df_knn_result = df_knn_result.append(pd.DataFrame(data={'feature': [feature], 'score_knn': [score],
'time_knn': [time_taken]}), ignore_index=True)
df_merge = pd.merge(df_gnb_result, df_knn_result, on="feature", how='outer')
df_merge = pd.merge(df_merge, df_svc_result, on="feature", how='outer')
return df_merge
# Get n articles for a given number of authors. Required for setups with different numbers of authors and articles
def get_n_article_by_author(par_df, par_label_count, par_article_count):
df_articles = pd.DataFrame(columns=['label_encoded', 'text'])
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if list_article_count[labels.index(row['label_encoded'])] != 0:
d = {'label_encoded': [row['label_encoded']], 'text': [row['text']]}
df_articles = df_articles.append(pd.DataFrame.from_dict(d), ignore_index=True)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return df_articles
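# Minimal alternative sketch (illustrative, not used by the pipeline): the same selection
# logic built with a plain list of dicts and a single DataFrame construction at the end,
# which avoids the DataFrame.append calls above (DataFrame.append was removed in pandas 2.0).
# The function name is hypothetical.
def get_n_article_by_author_concat(par_df, par_label_count, par_article_count):
    rows = []
    # only keep entries of the "par_label_count" first labels
    par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
    labels = np.unique(par_df['label_encoded'].values).tolist()
    list_article_count = [par_article_count for _ in labels]
    for _, row in par_df.iterrows():
        if list_article_count[labels.index(row['label_encoded'])] != 0:
            rows.append({'label_encoded': row['label_encoded'], 'text': row['text']})
            list_article_count[labels.index(row['label_encoded'])] -= 1
        if sum(list_article_count) == 0:
            break
    return pd.DataFrame(rows, columns=['label_encoded', 'text'])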
# Return indices for n articles for a given number of authors. Required for setups with different
# numbers of authors and articles
def get_n_article_index_by_author(par_df, par_label_count, par_article_count):
index_list = []
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if row['label_encoded'] in labels:
if list_article_count[labels.index(row['label_encoded'])] != 0:
index_list.append(index)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return index_list
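# Toy example (illustrative, assumes a default RangeIndex): for a dataframe whose
# 'label_encoded' column is [1, 1, 2, 2, 3], get_n_article_index_by_author(df, 2, 1)
# keeps only labels <= 2 and returns the first index per remaining label, i.e. [0, 2].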
# Method to estimate the f1 score of the test data for GNB
def get_f1_for_gnb(par_x_train, par_x_test, par_y_train, par_y_test):
gnb = GaussianNB()
# GNB fit
gnb.fit(par_x_train, par_y_train)
# score on test data
gnb_score = f1_score(gnb.predict(par_x_test), par_y_test, average='micro')
return gnb_score
# Method to estimate the f1 score of the test data for SVC
def get_f1_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = f1_score(svc.predict(par_x_test), par_y_test, average='micro')
return svc_score
# Method to estimate the f1 score of the test data for KNN
def get_f1_for_knn(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# define param grid for knn, neighbors has the be lower than samples
neighbors = [1, 3, 5, 11, 19, 36, 50]
# number of neighbors must be less than number of samples
neighbors = [x for x in neighbors if x < len(par_x_test)]
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(par_x_train, par_y_train)
# predict test data
knn_score = f1_score(knn.predict(par_x_test), par_y_test, average='micro')
return knn_score
# Method to estimate the accuracy of the test data for SVC
def get_accuracy_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = accuracy_score(svc.predict(par_x_test), par_y_test)
return svc_score
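# Usage sketch (illustrative): all scoring helpers expect an already prepared train/test
# split; cv must not be larger than the smallest class count in the training data, e.g.:
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# print(get_f1_for_svc(x_train, x_test, y_train, y_test, 10))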
# Chapter 7.3.1. comparison of the word length feature alternatives
def compare_word_length_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'wl_matrix_gnb': [], 'wl_matrix_svc': [], 'wl_matrix_knn': [],
'wl_matrix_bins_20_30_gnb': [], 'wl_matrix_bins_20_30_svc': [], 'wl_matrix_bins_20_30_knn': [],
'wl_matrix_bins_10_20_gnb': [], 'wl_matrix_bins_10_20_svc': [], 'wl_matrix_bins_10_20_knn': [],
'wl_matrix_20_gnb': [], 'wl_matrix_20_svc': [], 'wl_matrix_20_knn': [],
'wl_avg_gnb': [], 'wl_avg_svc': [], 'wl_avg_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
word_count = get_word_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["wl_matrix", "wl_matrix_bins_20_30", "wl_matrix_bins_10_20", "wl_avg", "wl_matrix_20"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "wl_matrix":
x = get_rel_frequency(get_word_length_matrix(df_article).fillna(value=0), word_count['word_count'])
elif feature == "wl_matrix_bins_20_30":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 20, 30).fillna(value=0),
word_count['word_count'])
elif feature == "wl_matrix_bins_10_20":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 10, 20).fillna(value=0),
word_count['word_count'])
elif feature == "wl_avg":
x = get_average_word_length(df_article)
elif feature == "wl_matrix_20":
x = get_word_length_matrix_with_margin(df_article, 20)
# Scale the data, otherwise high counts in wl_matrix can dominate and hyperparameter optimization for svc
# takes a long time because of small differences from the average
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
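# Usage sketch (illustrative, output path is a placeholder):
# compare_word_length_features().to_csv("daten/6_feature_analysis/results/word_length.csv", index=False)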
# Chapter 7.3.2. comparison of the digit feature alternatives
def compare_digit_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'digit_sum_gnb': [], 'digit_sum_svc': [], 'digit_sum_knn': [],
'digits_gnb': [], 'digits_svc': [], 'digits_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
char_count = get_char_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["digit_sum", "digits"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "digit_sum":
x = get_rel_frequency(get_sum_digits(df_article).fillna(value=0), char_count['char_count'])
elif feature == "digits":
x = get_rel_frequency(get_digits(df_article).fillna(value=0), char_count['char_count'])
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 4-6
def compare_word_4_6_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w4g_gnb': [], 'w4g_svc': [], 'w4g_knn': [],
'w5g_gnb': [], 'w5g_svc': [], 'w5g_knn': [],
'w6g_gnb': [], 'w6g_svc': [], 'w6g_knn': []}
# load the data
df_w4g = pd.read_csv("daten/6_feature_analysis/input_data/word_4_gram_rel.csv", sep=',', encoding="utf-8")
df_w5g = pd.read_csv("daten/6_feature_analysis/input_data/word_5_gram_rel.csv", sep=',', encoding="utf-8")
df_w6g = pd.read_csv("daten/6_feature_analysis/input_data/word_6_gram_rel.csv", sep=',', encoding="utf-8")
for author_texts in list_author_texts:
# indices for article for n authors with m texts
index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# Get the scores for every feature
for feature in ["w4g", "w5g", "w6g"]:
# select the indices from the article rows by the given indices
if feature == "w4g":
x = df_w4g.iloc[index_list]
elif feature == "w5g":
x = df_w5g.iloc[index_list]
elif feature == "w6g":
x = df_w6g.iloc[index_list]
# Delete features which only occur once
x = trim_df_by_occurrence(x, 1)
# reset the indices to have an order from 0 to authors * texts per author - 1
x = x.reset_index(drop=True)
y = df_all_texts.iloc[index_list]['label_encoded']
y = y.reset_index(drop=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 2-3
def compare_word_2_3_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w2g_gnb': [], 'w2g_svc': [], 'w2g_knn': [],
'w3g_gnb': [], 'w3g_svc': [], 'w3g_knn': []}
for author_texts in list_author_texts:
print(f"Texte pro Autor: {author_texts}")
# indices for article for n authors with m texts
index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# select the indices from the article rows by the given indices
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
print(f"Artikel: {len(df_balanced.index)}")
# extract the features
df_w2g = get_word_n_grams(df_balanced, 2)
df_w3g = get_word_n_grams(df_balanced, 3)
# Preprocessing steps
word_count = get_word_count(df_balanced)
df_w2g = preprocessing_steps_pos_tag_n_grams(df_w2g, word_count['word_count'])
df_w3g = preprocessing_steps_pos_tag_n_grams(df_w3g, word_count['word_count'])
# Scale the data, otherwise the SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_w2g[df_w2g.columns] = scaler.fit_transform(df_w2g[df_w2g.columns])
df_w3g[df_w3g.columns] = scaler.fit_transform(df_w3g[df_w3g.columns])
label = df_balanced['label_encoded']
# Train/Test 60/40 split
df_w2g_train, df_w2g_test, df_w3g_train, df_w3g_test, label_train, label_test = \
train_test_split(df_w2g, df_w3g, label, test_size=0.4, random_state=42, stratify=label)
# Get the scores for every feature
for feature in ["w2g", "w3g"]:
# select the indices from the article rows by the given indices
# iterative filter
# returns df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
if feature == "w2g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w2g_train, df_w2g_test, label_train, 1.0, mutual_info_classif)
elif feature == "w3g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w3g_train, df_w3g_test, label_train, 1.0, mutual_info_classif)
# Do not use the iterative filter for gnb because of bad results; re-split the unfiltered feature instead
x_gnb_train, x_gnb_test, label_train, label_test = \
train_test_split(df_w2g if feature == "w2g" else df_w3g, label, test_size=0.4, random_state=42, stratify=label)
print(f"cv: {cv}")
print(f"Train Labels: {label_train.value_counts()}")
print(f"Test Labels: {label_test.value_counts()}")
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.4. comparison of the different lengths of char ngrams
# Chapter 7.3.4. whole process of the comparison of the char-n-gram features
def compare_char_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
extract_n_gram_features_to_csv(df_balanced, par_base_path, number_authors, number_texts)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
compare_char_affix_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_affix_n_grams.csv", index=False)
compare_char_word_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_word_n_grams.csv", index=False)
compare_char_punct_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_punct_n_grams.csv", index=False)
# Chapter 7.3.4. char-affix-ngrams
def compare_char_affix_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_affix_2_gnb': [], 'c_affix_2_svc': [], 'c_affix_2_knn': [],
'c_affix_3_gnb': [], 'c_affix_3_svc': [], 'c_affix_3_knn': [],
'c_affix_4_gnb': [], 'c_affix_4_svc': [], 'c_affix_4_knn': [],
'c_affix_5_gnb': [], 'c_affix_5_svc': [], 'c_affix_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_affix_2", "c_affix_3", "c_affix_4", "c_affix_5"]:
# read the data based on n, texts and authors
if feature == "c_affix_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
elif feature == "c_affix_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
elif feature == "c_affix_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
elif feature == "c_affix_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
# Scale the data, otherwise the SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-word-ngrams
def compare_char_word_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_word_2_gnb': [], 'c_word_2_svc': [], 'c_word_2_knn': [],
'c_word_3_gnb': [], 'c_word_3_svc': [], 'c_word_3_knn': [],
'c_word_4_gnb': [], 'c_word_4_svc': [], 'c_word_4_knn': [],
'c_word_5_gnb': [], 'c_word_5_svc': [], 'c_word_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_word_2", "c_word_3", "c_word_4", "c_word_5"]:
# read the data based on n, texts and authors
if feature == "c_word_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
elif feature == "c_word_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
elif feature == "c_word_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
elif feature == "c_word_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
# Scale the data, otherwise the SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-punct-ngrams
def compare_char_punct_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_punct_2_gnb': [], 'c_punct_2_svc': [], 'c_punct_2_knn': [],
'c_punct_3_gnb': [], 'c_punct_3_svc': [], 'c_punct_3_knn': [],
'c_punct_4_gnb': [], 'c_punct_4_svc': [], 'c_punct_4_knn': [],
'c_punct_5_gnb': [], 'c_punct_5_svc': [], 'c_punct_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_punct_2", "c_punct_3", "c_punct_4", "c_punct_5"]:
# read the data based on n, texts and authors
if feature == "c_punct_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
elif feature == "c_punct_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
elif feature == "c_punct_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
elif feature == "c_punct_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
# Scale the data, otherwise the SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. Print the char-n-gram features in different files
def extract_n_gram_features_to_csv(par_df, par_base_path, par_number_authors, par_number_texts):
char_count = get_char_count(par_df)
# n from 2-5
for n in range(2, 6):
ca_ng = get_char_affix_n_grams(par_df, n)
preprocessing_steps_char_n_grams(ca_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_affix_{n}_gram.csv", index=False)
cw_ng = get_char_word_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cw_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_word_{n}_gram.csv", index=False)
cp_ng = get_char_punct_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cp_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_punct_{n}_gram.csv", index=False)
print(f"Extraction Round {n - 1} done")
return True
# combined preprocessing steps of the pos-tag-n-grams
def preprocessing_steps_pos_tag_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_by_occurrence(par_feature, 1)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
# combined preprocessing steps of the char-n-grams
def preprocessing_steps_char_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_sum_feature(par_feature, 5)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
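# Usage sketch (illustrative, df_balanced is a placeholder for a balanced article frame):
# typical call order for a char n-gram feature, mirroring extract_n_gram_features_to_csv above.
# char_count = get_char_count(df_balanced)
# cw_ng = get_char_word_n_grams(df_balanced, 3)
# cw_ng = preprocessing_steps_char_n_grams(cw_ng, char_count['char_count'])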
# Feature selection with the iterative filter without printing the results in a file
def feature_selection_iterative_filter(par_x_train, par_x_test, par_y_train, par_step, par_classif):
df_sorted_features = sort_features_by_score(par_x_train, par_y_train, par_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb = get_best_percentile_gnb(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_svc = get_best_percentile_svc(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_knn = get_best_percentile_knn(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
# select the 1 percent of the features (len/100) multiplied by par_best_percent
# select the best features from the original dataset
df_x_train_gnb = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_gnb = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_svc = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_svc = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_knn = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_knn = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
return df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
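# Usage sketch (illustrative): the six returned frames keep the row order of the inputs,
# so the same label vectors can be reused for all three classifiers, e.g.:
# x_gnb_tr, x_gnb_te, x_svc_tr, x_svc_te, x_knn_tr, x_knn_te = \
#     feature_selection_iterative_filter(x_train, x_test, y_train, 1.0, mutual_info_classif)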
# Chapter 7.3.5. function to compare the pos-tag-n-grams
def compare_pos_tag_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'pos_2_gnb': [], 'pos_2_svc': [], 'pos_2_knn': [],
'pos_3_gnb': [], 'pos_3_svc': [], 'pos_3_knn': [],
'pos_4_gnb': [], 'pos_4_svc': [], 'pos_4_knn': [],
'pos_5_gnb': [], 'pos_5_svc': [], 'pos_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["pos_2", "pos_3", "pos_4", "pos_5"]:
# read the data based on n, texts and authors
if feature == "pos_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
elif feature == "pos_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
elif feature == "pos_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
elif feature == "pos_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
# Scale the data, otherwise the SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.5. complete process of the pos-tag-n-grams comparison
def compare_pos_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
word_count = get_word_count(df_balanced)
# extract features and preprocessing
for n in range(2, 6):
pt_ng = get_pos_tags_n_grams(df_balanced, n)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_before_filter/"
f"a{number_authors}_t{number_texts}_pos_tag_{n}_gram.csv", index=False)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
# 2-grams for svc are not filtered; overwrite the filtered output with the unfiltered version for svc
pt_ng = get_pos_tags_n_grams(df_balanced, 2)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_after_filter/"
f"svc_a{number_authors}_t{number_texts}_pos_tag_2_gram_filtered.csv", index=False)
compare_pos_tag_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/pos_tag_n_grams.csv", index=False)
# Method to print all features for different counts of authors and texts
# Including all Preprocessing steps and filtering
def print_all_features_svc(par_base_path, par_article_path):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
author_counts = [2, 3, 4, 5, 10, 15, 25]
text_counts = [5, 10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# get all the features
df_bow = get_bow_matrix(df_balanced)
df_word_2g = get_word_n_grams(df_balanced, 2)
df_word_count = get_word_count(df_balanced)
df_word_length = get_word_length_matrix_with_margin(df_balanced, 20)
df_yules_k = get_yules_k(df_balanced)
sc_label_vector = ["!", "„", "“", "§", "$", "%", "&", "/", "(", ")", "=", "?", "{", "}", "[", "]", "\\",
"@", "#",
"‚", "‘", "-", "_", "+", "*", ".", ",", ";"]
special_char_matrix = get_special_char_matrix(df_balanced, sc_label_vector)
sc_label_vector = ["s_char:" + sc for sc in sc_label_vector]
df_special_char = pd.DataFrame(data=special_char_matrix, columns=sc_label_vector)
df_char_affix_4g = get_char_affix_n_grams(df_balanced, 4)
df_char_word_3g = get_char_word_n_grams(df_balanced, 3)
df_char_punct_3g = get_char_punct_n_grams(df_balanced, 3)
df_digits = get_sum_digits(df_balanced)
df_fwords = get_function_words(df_balanced)
df_pos_tags = get_pos_tags(df_balanced)
df_pos_tag_2g = get_pos_tags_n_grams(df_balanced, 2)
df_start_pos, df_end_pos = get_sentence_end_start(df_balanced)
df_start_end_pos = pd.concat([df_start_pos, df_end_pos], axis=1)
df_fre = get_flesch_reading_ease_vector(df_balanced)
# 7.1.1 Remove low occurrence
df_bow = trim_df_by_occurrence(df_bow, 1)
df_word_2g = trim_df_by_occurrence(df_word_2g, 1)
df_fwords = trim_df_by_occurrence(df_fwords, 1)
df_pos_tag_2g = trim_df_by_occurrence(df_pos_tag_2g, 1)
df_char_affix_4g = trim_df_sum_feature(df_char_affix_4g, 5)
df_char_word_3g = trim_df_sum_feature(df_char_word_3g, 5)
df_char_punct_3g = trim_df_sum_feature(df_char_punct_3g, 5)
# 7.1.2 Remove high frequency
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
df_word_2g = trim_df_by_doc_freq(df_word_2g, 0.5)
df_fwords = trim_df_by_doc_freq(df_fwords, 0.5)
# 7.1.4 individual relative frequency
df_len_metrics = pd.concat([get_char_count(df_balanced), get_sentence_count(df_balanced),
df_word_count], axis=1)
df_bow = get_rel_frequency(df_bow.fillna(value=0), df_len_metrics['word_count'])
df_word_2g = get_rel_frequency(df_word_2g.fillna(value=0), df_len_metrics['word_count'])
df_word_length = get_rel_frequency(df_word_length.fillna(value=0), df_len_metrics['word_count'])
df_special_char = get_rel_frequency(df_special_char.fillna(value=0), df_len_metrics['char_count'])
df_char_affix_4g = get_rel_frequency(df_char_affix_4g.fillna(value=0), df_len_metrics['char_count'])
df_char_word_3g = get_rel_frequency(df_char_word_3g.fillna(value=0), df_len_metrics['char_count'])
df_char_punct_3g = get_rel_frequency(df_char_punct_3g.fillna(value=0), df_len_metrics['char_count'])
df_digits = get_rel_frequency(df_digits.fillna(value=0), df_len_metrics['char_count'])
df_fwords = get_rel_frequency(df_fwords.fillna(value=0), df_len_metrics['word_count'])
df_pos_tags = get_rel_frequency(df_pos_tags.fillna(value=0), df_len_metrics['word_count'])
df_pos_tag_2g = get_rel_frequency(df_pos_tag_2g.fillna(value=0), df_len_metrics['word_count'])
df_start_end_pos = get_rel_frequency(df_start_end_pos.fillna(value=0), df_len_metrics['sentence_count'])
# Print to CSV
# Files for iterative filter
df_bow.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}_bow.csv", index=False)
df_word_2g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_word_2_gram.csv", index=False)
df_char_affix_4g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_affix_4_gram.csv", index=False)
df_char_word_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_word_3_gram.csv", index=False)
df_char_punct_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_punct_3_gram.csv", index=False)
df_fwords.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_function_words.csv", index=False)
# Files not for iterative filter directly in after filter folder
df_word_count.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_count.csv", index=False)
df_word_length.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_length.csv", index=False)
df_yules_k.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_yules_k.csv", index=False)
df_special_char.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_special_char.csv", index=False)
df_digits.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_digits.csv", index=False)
df_pos_tags.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag.csv", index=False)
df_pos_tag_2g.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram.csv", index=False)
df_start_end_pos.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_start_end.csv", index=False)
df_fre.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}_fre.csv", index=False)
print(f"Extraction for {number_authors} authors with {number_texts} texts done. Starting iterative filter")
# Run the iterative filter
iterative_filter_process_svm(par_base_path, df_balanced, number_texts, number_authors)
# create a dataframe with the combined features for a specific number of authors and texts
# features can be excluded by name
def create_df_combined_features(par_path, par_num_texts, par_num_authors, par_exclude):
path = f'{par_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
# exclude a feature by regex
regex = re.compile(f'.*{par_exclude}')
files = [i for i in files if not regex.match(i)]
df_all = pd.DataFrame()
# combine all features
for feature in files:
df_feature = pd.read_csv(f"{par_path}csv_after_filter/{feature}", sep=',', encoding="utf-8")
df_all = pd.concat([df_all, df_feature], axis=1)
return df_all
# Chapter 8.4. comparison of normalization and standardization
def compare_normalization_standardization(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_f1_results = {'without': [], 'standard': [], 'normal': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the number of CV splits for the hyperparameter tuning; it cannot exceed the number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# fold counts between 5 and 10 are unusual, so cap at 5
cv = 5 if cv > 5 else cv
else:
cv = 10
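# Worked example of the rule above (illustrative text counts, not taken from the data):
# number_texts=5 gives int(5 * 0.4) = 2 folds, number_texts=15 gives 6 which is capped
# to 5, and number_texts=25 falls into the else branch and uses 10 folds.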
label = df_balanced['label_encoded']
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
df_features = create_df_combined_features(par_feature_path, number_texts, number_authors, "nothing")
# standardization of features
df_features_stand = copy.deepcopy(df_features)
scaler = StandardScaler()
df_features_stand[df_features_stand.columns] = \
scaler.fit_transform(df_features_stand[df_features_stand.columns])
# normalization of features
df_features_norm = copy.deepcopy(df_features)
normalizer = Normalizer()
df_features_norm[df_features_norm.columns] = \
normalizer.fit_transform(df_features_norm[df_features_norm.columns])
x_train, x_test, x_train_stand, x_test_stand, x_train_norm, x_test_norm, label_train, label_test = \
train_test_split(df_features, df_features_stand, df_features_norm, label,
test_size=0.4, random_state=42, stratify=label)
# append the results
dic_f1_results['without'].append(get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
dic_f1_results['standard'].append(get_f1_for_svc(x_train_stand, x_test_stand, label_train,
label_test, cv))
dic_f1_results['normal'].append(get_f1_for_svc(x_train_norm, x_test_norm, label_train,
label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_f1_results)
# Chapter 8.5.1. Comparison of the individual features, data for table 21
def compare_single_features(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_results = {'number_authors': [], 'number_texts': []}
path = f'{par_feature_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# get unique values for the list of the features
feature_list = list(set([re.search(r"a\d+_t\d+_(.+?(?=$))", f).group(1) for f in files]))
for feature in feature_list:
dic_results[feature] = []
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the number of CV splits for the hyperparameter tuning; it cannot exceed the number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# fold counts between 5 and 10 are unusual, so cap at 5
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_results['number_authors'].append(number_authors)
dic_results['number_texts'].append(number_texts)
for feature in feature_list:
df_feature = pd.read_csv(
f"{par_feature_path}csv_after_filter/a{number_authors}_t{number_texts}_{feature}")
# standardization of features
scaler = StandardScaler()
df_feature[df_feature.columns] = \
scaler.fit_transform(df_feature[df_feature.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_feature, label, test_size=0.4, random_state=42, stratify=label)
dic_results[feature].append(
get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_results)
# Chapter 8.5.2. Get the values of the difference functions, data for table 22
def get_feature_function_difference(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts =
|
pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
|
pandas.read_csv
|
import joblib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from reports import mdl_results
from rolldecayestimators import logarithmic_decrement
from rolldecayestimators import lambdas
from sklearn.pipeline import Pipeline
from rolldecayestimators import measure
from rolldecayestimators.direct_estimator_cubic import EstimatorQuadraticB, EstimatorCubic, EstimatorQuadratic
def get_models_zero_speed():
mask = mdl_results.df_rolldecays['ship_speed'] == 0
df_rolldecays_zero = mdl_results.df_rolldecays.loc[mask].copy()
return _get_models(df=df_rolldecays_zero)
def get_models_speed():
mask = mdl_results.df_rolldecays['ship_speed'] > 0
df_rolldecays = mdl_results.df_rolldecays.loc[mask].copy()
return _get_models(df=df_rolldecays)
def get_models():
df = mdl_results.df_rolldecays
return _get_models(df=df)
def _get_models(df):
models = {}
for id, row in df.iterrows():
model_file_path = '../../models/KVLCC2_%i.pkl' % id
models[id] = joblib.load(model_file_path)['estimator']
return models
def gather_results(models):
df_results = pd.DataFrame()
for id, model in models.items():
row = mdl_results.df_rolldecays.loc[id]
scale_factor = row.scale_factor
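# Displacement volume scales with the cube of the geometric scale factor and GM
# scales linearly, hence the division by scale_factor**3 and scale_factor below.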
meta_data = {
'Volume' : row.Volume/(scale_factor**3),
'rho' : row.rho,
'g' : row.g,
'GM' : row.gm/scale_factor,
}
results = model.result_for_database(meta_data=meta_data)
results =
|
pd.Series(results, name=id)
|
pandas.Series
|
import numpy as np
import pandas as pd
import pytest
@pytest.mark.functions
def test_change_type(dataframe):
df = dataframe.change_type(column_name="a", dtype=float)
assert df["a"].dtype == float
@pytest.mark.functions
def test_change_type_keep_values():
df =
|
pd.DataFrame(["a", 1, True], columns=["col1"])
|
pandas.DataFrame
|
import requests
import re
from bs4 import BeautifulSoup as bs
import pandas as pd
"""
References:
Title: pandas
Author: pandas Team
Availability: https://github.com/pandas-dev/pandas
Version: 1.2.4
Title: BeautifulSoup4
Author: BeautifulSoup Team
Availability: https://github.com/akalongman/python-beautifulsoup/blob/master/LICENSE
Version: 4.9.3
"""
#loads list of ufc events
r2 = requests.get('http://ufcstats.com/statistics/events/completed?page=all')
#convert to beautiful soup
event_list_soup = bs(r2.content,features='lxml')
# links to each event
links = [str(i['href']) for i in event_list_soup.find_all('a',attrs= {'class':'b-link b-link_style_black'}, href = True, text=True)]
#empty list to add all fight stats
l =[]
# loads event information
r = requests.get('http://ufcstats.com/event-details/9ddfb3369a3b0394')
# convert to beautiful soup
event_page = bs(r.content,features= 'lxml')
#loads list of ufc events
r2 = requests.get('http://ufcstats.com/statistics/events/completed?page=all')
#convert to beautiful soup
event_list_soup = bs(r2.content,features='lxml')
# links to each event
links = [str(i['href']) for i in event_list_soup.find_all('a',attrs= {'class':'b-link b-link_style_black'}, href = True, text=True)]
# event info column headers
event_column_info = [str(e.get_text().strip()) for e in event_page.find_all("th",attrs= { 'class':'b-fight-details__table-col'})]
event_column_info.insert(0,'W/L')
event_column_info [1]= 'Fighter1'
event_column_info.insert(2,'Fighter2')
event_column_info[3] = 'F1_Kd'
event_column_info.insert(4,'F2_Kd')
event_column_info[5] = 'F1_Str'
event_column_info.insert(6,'F2_Str')
event_column_info[7] = 'F1_Td'
event_column_info.insert(8,'F2_Td')
event_column_info[9] = 'F1_Sub'
event_column_info.insert(10,'F2_Sub')
event_column_info.remove('Sub')
print(event_column_info)
for link in links:
# loads event information
r = requests.get(link)
# convert to beautiful soup
event_page = bs(r.content, features='lxml')
# event information
table_row = event_page.find("tbody").find_all('tr')
for tr in table_row:
td = tr.find_all("td")
row = [str(tr.get_text().strip())for tr in td]
formatted_row = [info for segments in row for info in segments.splitlines()]
final_info = [row for row in formatted_row if row.strip()]
final_info = [info.strip() for info in final_info]
if final_info[1] == 'nc' or final_info[1] == 'draw':
del final_info[1]
if final_info[13] not in ('1','2','3','4','5'):
del final_info[13]
print(final_info)
l.append(final_info)
match_frame =
|
pd.DataFrame(data=l,columns=event_column_info)
|
pandas.DataFrame
|
import multiprocessing as mp
import re
import traceback
from typing import Dict, Optional, Union
import numpy as np
import pandas as pd
from mne import BaseEpochs, pick_types
from mne.io import BaseRaw, read_raw_fif
from mne.time_frequency import psd_welch
from scipy.integrate import simpson
from .. import logger
from ..utils._checks import (
_check_n_jobs,
_check_participants,
_check_path,
_check_type,
_check_value,
)
from ..utils._docs import fill_doc
from ..utils.list_files import list_raw_fif
from .epochs import make_fixed_length_epochs, reject_epochs
@fill_doc
def psd_avg_band(
folder,
participants: Union[int, list, tuple],
duration: float,
overlap: float,
reject: Optional[Union[Dict[str, float], str]],
fmin: float,
fmax: float,
average: str = "mean",
n_jobs: int = 1,
):
"""Compute the PSD.
Average by frequency band for the given participants using the welch
method.
Parameters
----------
folder : path-like
Path to the folder containing preprocessed files.
%(participants)s
%(psd_duration)s
%(psd_overlap)s
%(psd_reject)s
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
average : 'mean' | 'integrate'
How to average the frequency bin/spectrum. Either 'mean' to calculate
the arithmetic mean of all bins or 'integrate' to use Simpson's rule to
compute integral from samples.
n_jobs : int
Number of parallel jobs used. Must not exceed the core count. Can be -1
to use all cores.
Returns
-------
%(df_psd)s
"""
folder = _check_path(folder, item_name="folder", must_exist=True)
participants = _check_participants(participants)
_check_type(fmin, ("numeric",), item_name="fmin")
_check_type(fmax, ("numeric",), item_name="fmax")
_check_type(average, (str,), item_name="average")
_check_value(average, ("mean", "integrate"), item_name="average")
n_jobs = _check_n_jobs(n_jobs)
# create input_pool
input_pool = list()
for participant in participants:
fnames = list_raw_fif(folder / str(participant).zfill(3))
for fname in fnames:
if fname.parent.name != "Online":
continue
input_pool.append(
(
participant,
fname,
duration,
overlap,
reject,
fmin,
fmax,
average,
)
)
assert 0 < len(input_pool) # sanity check
# compute psds
with mp.Pool(processes=n_jobs) as p:
results = p.starmap(_psd_avg_band, input_pool)
# construct dataframe
psd_dict = dict()
for participant, session, run, psds, ch_names in results:
for phase in psds:
_add_data_to_dict(
psd_dict,
participant,
session,
run,
phase,
psds[phase],
ch_names,
)
return
|
pd.DataFrame.from_dict(psd_dict, orient="columns")
|
pandas.DataFrame.from_dict
|
import statistics
import time
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn import decomposition
from sklearn import preprocessing
from sklearn import cluster
from sklearn import linear_model
from sklearn import ensemble
from sklearn import cross_validation
from sklearn.metrics import mean_absolute_error
def get_actual_y(data):
return data.groupby('Id').mean()[['Expected']]
def simplest_predictions(train, test):
# Build simplest model for reference
median_predictions = get_actual_y(test)
median_predictions['Expected'] = train['Expected'].median()
return median_predictions
# Kaggle example
def marshall_palmer(ref, minutes_past):
#print "Estimating rainfall from {0} observations".format(len(minutes_past))
# how long is each observation valid?
valid_time = np.zeros_like(minutes_past)
valid_time[0] = minutes_past.iloc[0]
for n in range(1, len(minutes_past)):
valid_time[n] = minutes_past.iloc[n] - minutes_past.iloc[n-1]
valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)
valid_time = valid_time / 60.0
# sum up rainrate * validtime
sum = 0
for dbz, hours in zip(ref, valid_time):
# See: https://en.wikipedia.org/wiki/DBZ_(meteorology)
if np.isfinite(dbz):
mmperhr = pow(pow(10, dbz/10)/200, 0.625)
sum = sum + mmperhr * hours
return sum
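# Sanity check for the Z-R relation used above (Z = 200 * R**1.6, so
# R = (10**(dBZ / 10) / 200) ** 0.625): a 30 dBZ echo held for a full hour
# corresponds to roughly (1000 / 200) ** 0.625 ≈ 2.73 mm of rain, e.g.
# assert abs(marshall_palmer(pd.Series([30.0]), pd.Series([30.0])) - 2.73) < 0.01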
# Kaggle example
# each unique Id is an hour of data at some gauge
def myfunc(hour):
#rowid = hour['Id'].iloc[0]
# sort hour by minutes_past
hour = hour.sort('minutes_past', ascending=True)
est = marshall_palmer(hour['Ref'], hour['minutes_past'])
return est
def cluster_data(train_raw, test_raw):
# Normalize before building PCA components
cluster_size = 7
train = train_raw.fillna(-1)
test = test_raw.fillna(-1)
train_norm = preprocessing.scale(train.loc[:,['Ref','RefComposite','RhoHV','Zdr','Kdp']])
pca = decomposition.PCA(n_components=5).fit(train_norm)
train_pca = pca.transform(train_norm)
# Cluster measurements based on PCA components
clusterer = cluster.KMeans(n_clusters=cluster_size, n_init=15, max_iter=300, init='k-means++').fit(train_pca)
train_categories = clusterer.predict(train_pca)
train_dummies = pd.get_dummies(train_categories)
col_names = []
for i in range(0,cluster_size):
col_names.append('cat' + str(i))
train_dummies.columns = col_names
train_dummies.set_index(train.index, inplace=True)
train_dummies['Id'] = train_raw['Id']
train_raw = pd.concat([train_raw, train_dummies.drop('Id', axis=1)], axis=1)
test_norm = preprocessing.scale(test.loc[:,['Ref','RefComposite','RhoHV','Zdr','Kdp']])
test_pca = pca.transform(test_norm)
test_dummies = pd.get_dummies(clusterer.predict(test_pca))
test_dummies.columns = col_names
test_dummies.set_index(test.index, inplace=True)
test_dummies['Id'] = test_raw['Id']
test_raw = pd.concat([test_raw, test_dummies.drop('Id', axis=1)], axis=1)
return [train_raw, test_raw]
def predict(train, test):
predictions = get_actual_y(test)
predictions['Expected'] = train['Expected'].median()
# train, test = cluster_data(train, test)
# Group data by id
train = prep_and_filter_data(train)
test = prep_and_filter_data(test)
# Random Forest using all data
full_tree_train_data = train.dropna()
full_tree_test_data = test.dropna()
model = ensemble.RandomForestRegressor(n_estimators=N_EST, max_depth=MAX_D, n_jobs=-1, min_samples_split=MIN_LEAF, max_features=MAX_FEATURES, criterion="mae")
full_tree_test_data['predictions'] = model.fit(X=full_tree_train_data[full_tree_train_data.columns.difference(['Id','Expected'])], y=full_tree_train_data['Expected']).predict(X=full_tree_test_data[full_tree_test_data.columns.difference(['Id','Expected'])])
# Random Forest using only means
partial_tree_train_data = train[train.count(1) < 45][train['Ref_mean'].notnull()][train['RhoHV_mean'].notnull()][train['Zdr_mean'].notnull()][train['Kdp_mean'].notnull()]
partial_tree_train_data = partial_tree_train_data.loc[:,['Ref_mean','RhoHV_mean','Zdr_mean','Kdp_mean','Expected']].copy()
partial_tree_test_data = test[test.count(1) < 45][test['Ref_mean'].notnull()][test['RhoHV_mean'].notnull()][test['Zdr_mean'].notnull()][test['Kdp_mean'].notnull()]
partial_tree_test_data = partial_tree_test_data.loc[:,['Ref_mean','RhoHV_mean','Zdr_mean','Kdp_mean','Expected']].copy()
partial_model = ensemble.RandomForestRegressor(n_estimators=N_EST, max_depth=MAX_D, n_jobs=-1, min_samples_split=MIN_LEAF, max_features='auto', criterion="mae")
partial_tree_test_data['predictions'] = partial_model.fit(X=partial_tree_train_data[partial_tree_train_data.columns.difference(['Id','Expected'])], y=partial_tree_train_data['Expected']).predict(X=partial_tree_test_data[partial_tree_test_data.columns.difference(['Id','Expected'])])
for i in partial_tree_test_data.index:
predictions.loc[i,'Expected'] = partial_tree_test_data.loc[i,'predictions']
predictions.loc[full_tree_test_data.index,'Expected'] = full_tree_test_data.loc[:,'predictions']
return predictions
def run(data):
data = data.sample(1000000)
errors = list()
med_errors = list()
for t1, t2 in cross_validation.KFold(data.shape[0], n_folds=10, shuffle=True):
# Prep data - still raw
train = data.iloc[t1]
test = data.iloc[t2]
y = get_actual_y(test)
e = error_rate(y['Expected'], predict(train, test)['Expected'])
med_e = error_rate(y['Expected'], simplest_predictions(train, test)['Expected'])
errors.append(e)
med_errors.append(med_e)
print("Median error rate: {} --- Error rate: {}".format(med_e, e))
print("Difference: {}".format(med_e - e))
print("Avg median error: {} ({})".format(statistics.mean(med_errors), statistics.stdev(med_errors)))
print("Avg error: {} ({})".format(statistics.mean(errors), statistics.stdev(errors)))
print("Difference in errors: {}".format(statistics.mean(med_errors) - statistics.mean(errors)))
def error_rate(expected, predicted):
# MAE
return (expected - predicted).abs().mean()
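# prep_and_filter_data (below) aggregates the sweeps per gauge Id into *_mean and
# *_median features, keeps only hours with a positive mean reflectivity and drops
# hours whose mean Expected value is 70 mm or more.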
def prep_and_filter_data(data):
means = data.groupby('Id').mean()
means.columns += '_mean'
medians = data.groupby('Id').median()
medians.columns += '_median'
comb = pd.concat([means, medians], axis=1)
#comb.drop('Expected_std', axis=1, inplace=True)
comb = comb[comb['Ref_mean'] > 0]
comb = comb[comb['Expected_mean'] < 70]
comb['Expected'] = comb['Expected_mean']
comb.drop('Expected_mean', inplace=True, axis=1)
return comb
# Data + features
data_raw = pd.read_csv('input/train_clean.csv', usecols=[0,3,11,15,19,23])
MAX_FEATURES='auto'; N_EST=30; MAX_D=None; MIN_LEAF=1000;
run(data_raw)
train_raw = pd.read_csv('input/train_clean.csv')
test_raw =
|
pd.read_csv('input/test.csv')
|
pandas.read_csv
|
"""
Query the local database for the period returns of a single stock or index code and for treasury bill rates.
"""
import pandas as pd
from cnswd.utils import sanitize_dates
from cnswd.mongodb import get_db
from cnswd.websource.wy import get_main_index
from trading_calendars import get_calendar
DAILY_COLS = ['date', 'change_pct']
TREASURY_COL_MAPS = {
'm0': 'cash',
'm1': '1month',
'm2': '2month',
'm3': '3month',
'm6': '6month',
'm9': '9month',
'y1': '1year',
'y3': '3year',
'y5': '5year',
'y7': '7year',
'y10': '10year',
'y15': '15year',
'y20': '20year',
'y30': '30year',
'y40': '40year',
'y50': '50year',
}
def query(collection, start, end):
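# Field names in the source collection are Chinese: '日期' is the trade date and
# '涨跌幅' the daily percentage change; they are renamed via DAILY_COLS downstream.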
predicate = {'日期': {'$gte': start, '$lte': end}}
projection = {'日期': 1, '涨跌幅': 1, '_id': 0}
sort = [('日期', 1)]
cursor = collection.find(predicate, projection, sort=sort)
df = pd.DataFrame.from_records(cursor)
return df
def _get_single_stock_equity(symbol, start_date, end_date, is_index,
index_name):
start_date, end_date = sanitize_dates(start_date, end_date)
db_name = 'wy_index_daily' if is_index else 'wy_stock_daily'
db = get_db(db_name)
collection = db[symbol]
df = query(collection, start_date, end_date)
df.columns = DAILY_COLS
df['change_pct'] = df['change_pct'] / 100.0
df['date'] =
|
pd.to_datetime(df['date'])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 21:23:55 2018
@author: <NAME>
"""
import os
import csv
import math
import itertools
import keras.utils as ku
import pandas as pd
import numpy as np
from nltk.util import ngrams
from models import model_shared_stateful as stf
from support_modules.readers import log_reader as lr
from support_modules import role_discovery as rl
from support_modules import nn_support as nsup
from support_modules import support as sup
def training_model(timeformat, args, no_loops=False):
"""Main method of the training module.
Args:
timeformat (str): event-log date-time format.
args (dict): parameters for training the network.
no_loops (boolean): remove loops fom the event-log (optional).
"""
parameters = dict()
log = lr.LogReader(os.path.join('input_files', args['file_name']),
timeformat, timeformat, one_timestamp=True)
_, resource_table = rl.read_resource_pool(log, sim_percentage=0.50)
# Role discovery
log_df_resources = pd.DataFrame.from_records(resource_table)
log_df_resources = log_df_resources.rename(index=str, columns={"resource": "user"})
# Dataframe creation
log_df = pd.DataFrame.from_records(log.data)
log_df = log_df.merge(log_df_resources, on='user', how='left')
log_df = log_df[log_df.task != 'Start']
log_df = log_df[log_df.task != 'End']
log_df = log_df.reset_index(drop=True)
if no_loops:
log_df = nsup.reduce_loops(log_df)
# Index creation
ac_index = create_index(log_df, 'task')
ac_index['start'] = 0
ac_index['end'] = len(ac_index)
index_ac = {v: k for k, v in ac_index.items()}
rl_index = create_index(log_df, 'role')
rl_index['start'] = 0
rl_index['end'] = len(rl_index)
index_rl = {v: k for k, v in rl_index.items()}
# Load embedded matrix
ac_weights = load_embedded(index_ac, 'ac_'+ args['file_name'].split('.')[0]+'.emb')
rl_weights = load_embedded(index_rl, 'rl_'+ args['file_name'].split('.')[0]+'.emb')
# Calculate relative times
log_df = add_calculated_features(log_df, ac_index, rl_index)
# Split validation datasets
log_df_train, log_df_test = nsup.split_train_test(log_df, 0.3) # 70%/30%
# Input vectorization
vec = vectorization(log_df_train, ac_index, rl_index, args)
# Parameters export
output_folder = os.path.join('output_files', sup.folder_id())
if not os.path.exists(output_folder):
os.makedirs(output_folder)
os.makedirs(os.path.join(output_folder, 'parameters'))
parameters['event_log'] = args['file_name']
parameters['exp_desc'] = args
parameters['index_ac'] = index_ac
parameters['index_rl'] = index_rl
parameters['dim'] = dict(samples=str(np.sum([x.shape[0] for x in vec['prefixes']['x_ac_inp']])),
time_dim=str(vec['prefixes']['x_ac_inp'][0].shape[1]),
features=str(len(ac_index)))
parameters['max_tbtw'] = vec['max_tbtw']
sup.create_json(parameters, os.path.join(output_folder,
'parameters',
'model_parameters.json'))
sup.create_csv_file_header(log_df_test.to_dict('records'),
os.path.join(output_folder,
'parameters',
'test_log.csv'))
# print([x.shape for x in vec['prefixes']['x_ac_inp']])
stf.training_model(vec, ac_weights, rl_weights, output_folder, args)
# =============================================================================
# Load embedded matrix
# =============================================================================
def load_embedded(index, filename):
"""Loading of the embedded matrices.
Args:
index (dict): index of activities or roles.
filename (str): filename of the matrix file.
Returns:
numpy array: array of weights.
"""
weights = list()
input_folder = os.path.join('input_files', 'embedded_matix')
with open(os.path.join(input_folder, filename), 'r') as csvfile:
filereader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in filereader:
cat_ix = int(row[0])
if index[cat_ix] == row[1].strip():
weights.append([float(x) for x in row[2:]])
csvfile.close()
return np.array(weights)
# =============================================================================
# Pre-processing: n-gram vectorization
# =============================================================================
def vectorization(log_df, ac_index, rl_index, args):
"""Example function with types documented in the docstring.
Args:
log_df (dataframe): event log data.
ac_index (dict): index of activities.
rl_index (dict): index of roles.
args (dict): parameters for training the network
Returns:
dict: Dictionary that contains all the LSTM inputs.
"""
if args['norm_method'] == 'max':
max_tbtw = np.max(log_df.tbtw)
norm = lambda x: x['tbtw']/max_tbtw
log_df['tbtw_norm'] = log_df.apply(norm, axis=1)
log_df = reformat_events(log_df, ac_index, rl_index)
elif args['norm_method'] == 'lognorm':
logit = lambda x: math.log1p(x['tbtw'])
log_df['tbtw_log'] = log_df.apply(logit, axis=1)
max_tbtw = np.max(log_df.tbtw_log)
norm = lambda x: x['tbtw_log']/max_tbtw
log_df['tbtw_norm'] = log_df.apply(norm, axis=1)
log_df = reformat_events(log_df, ac_index, rl_index)
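# Illustrative value for the 'lognorm' branch (not taken from a real log): a gap
# of 600 s gives log1p(600) ≈ 6.40, which is then divided by the largest log-time
# in the log so that every tbtw_norm lands in [0, 1].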
vec = {'prefixes':dict(), 'next_evt':dict(), 'max_tbtw':max_tbtw}
# n-gram definition
vec['prefixes']['x_ac_inp'] = list()
vec['prefixes']['x_rl_inp'] = list()
vec['prefixes']['xt_inp'] = list()
vec['next_evt']['y_ac_inp'] = list()
vec['next_evt']['y_rl_inp'] = list()
vec['next_evt']['yt_inp'] = list()
for i, _ in enumerate(log_df):
ac_n_grams = list(ngrams(log_df[i]['ac_order'], args['n_size'],
pad_left=True, left_pad_symbol=0))
rl_n_grams = list(ngrams(log_df[i]['rl_order'], args['n_size'],
pad_left=True, left_pad_symbol=0))
tn_grams = list(ngrams(log_df[i]['tbtw'], args['n_size'],
pad_left=True, left_pad_symbol=0))
x_ac_inp = np.array([ac_n_grams[0]])
x_rl_inp = np.array([rl_n_grams[0]])
xt_inp = np.array([tn_grams[0]])
y_ac_inp = np.array(ac_n_grams[1][-1])
y_rl_inp = np.array(rl_n_grams[1][-1])
yt_inp = np.array(tn_grams[1][-1])
for j in range(1, len(ac_n_grams)-1):
x_ac_inp = np.concatenate((x_ac_inp, np.array([ac_n_grams[j]])), axis=0)
x_rl_inp = np.concatenate((x_rl_inp, np.array([rl_n_grams[j]])), axis=0)
xt_inp = np.concatenate((xt_inp, np.array([tn_grams[j]])), axis=0)
y_ac_inp = np.append(y_ac_inp, np.array(ac_n_grams[j+1][-1]))
y_rl_inp = np.append(y_rl_inp, np.array(rl_n_grams[j+1][-1]))
yt_inp = np.append(yt_inp, np.array(tn_grams[j+1][-1]))
xt_inp = xt_inp.reshape((xt_inp.shape[0], xt_inp.shape[1], 1))
y_ac_inp = ku.to_categorical(y_ac_inp, num_classes=len(ac_index))
y_rl_inp = ku.to_categorical(y_rl_inp, num_classes=len(rl_index))
vec['prefixes']['x_ac_inp'].append(x_ac_inp)
vec['prefixes']['x_rl_inp'].append(x_rl_inp)
vec['prefixes']['xt_inp'].append(xt_inp)
vec['next_evt']['y_ac_inp'].append(y_ac_inp)
vec['next_evt']['y_rl_inp'].append(y_rl_inp)
vec['next_evt']['yt_inp'].append(yt_inp)
return vec
def add_calculated_features(log_df, ac_index, rl_index):
"""Appends the indexes and relative time to the dataframe.
Args:
log_df: dataframe.
ac_index (dict): index of activities.
rl_index (dict): index of roles.
Returns:
Dataframe: The dataframe with the calculated features added.
"""
ac_idx = lambda x: ac_index[x['task']]
log_df['ac_index'] = log_df.apply(ac_idx, axis=1)
rl_idx = lambda x: rl_index[x['role']]
log_df['rl_index'] = log_df.apply(rl_idx, axis=1)
log_df['tbtw'] = 0
log_df['tbtw_norm'] = 0
log_df = log_df.to_dict('records')
log_df = sorted(log_df, key=lambda x: (x['caseid'], x['end_timestamp']))
for _, group in itertools.groupby(log_df, key=lambda x: x['caseid']):
trace = list(group)
for i, _ in enumerate(trace):
if i != 0:
trace[i]['tbtw'] = (trace[i]['end_timestamp'] -
trace[i-1]['end_timestamp']).total_seconds()
return
|
pd.DataFrame.from_records(log_df)
|
pandas.DataFrame.from_records
|
#from subprocess import Popen, check_call
#import os
import pandas as pd
import numpy as np
import math
import PySimpleGUI as sg
import webbrowser
# Read Data
csv_path1 = "output/final_data.csv"
prop_df = pd.read_csv(csv_path1)
n = prop_df.shape[0]
prop_df.sort_values(by=["PRICE"],ascending=True,inplace=True)
prop_df.index = range(len(prop_df.index))
prop_df_old = prop_df.copy()
# Read Languages
csvLanguage = "data_sets/languages_spoken.csv"
lang_df = pd.read_csv(csvLanguage)
languages = [lang for lang in lang_df.columns.tolist() if lang not in ["Community Area","Community Area Name","PREDOMINANT NON-ENGLISH LANGUAGE (%)","TOTAL"]]
languages.sort()
# Add locations
local = prop_df["LOCATION"].unique().tolist()
local.sort()
local = ["NONE"] + local
sg.theme('BluePurple')
# House Fact Column
col_fact = [
[sg.Text('Address:',size=(12,1)),sg.Text(size=(30,1), key='address')],
[sg.Text('Location:',size=(12,1)),sg.Text(size=(30,1), key='location')],
[sg.Text('Price:',size=(12,1)),sg.Text(size=(30,1),key='price')],
[sg.Text('HOA:',size=(12,1)),sg.Text(size=(30,1),key='hoa')],
[sg.Text('Tax Year:',size=(12,1)),sg.Text(size=(30,1),key='taxYear')],
[sg.Text('Tax Assessed:',size=(12,1)),sg.Text(size=(30,1),key='assessTax')],
[sg.Text('SquareFeet:',size=(12,1)),sg.Text(size=(30,1), key='sqft')],
[sg.Text('Year Built:',size=(12,1)),sg.Text(size=(30,1),key='year')]
]
col_fact2 = [
[sg.Text('# of Beds:',size=(20,1)),sg.Text(size=(12,1),key='beds')],
[sg.Text('# of Bathrooms:',size=(20,1)),sg.Text(size=(12,1),key='baths')],
[sg.Text('Sold Date:',size=(20,1)),sg.Text(size=(12,1),key='soldDT')],
[sg.Text('Sold Price:',size=(20,1)),sg.Text(size=(12,1),key='soldP')],
[sg.Text('Zestimate:',size=(20,1)),sg.Text(size=(12,1),key='zest')],
[sg.Text('Est Tax:',size=(20,1)),sg.Text(size=(12,1),key='estTax')],
[sg.Text('Property Type:',size=(20,1)),sg.Text(size=(12,1),key="propType")]
]
# Commute Column
col_commute1 = [
[sg.Text('Commute Time:',size=(14,1)),sg.Text(size=(10,1),key='kommute')],
[sg.Text('# of Transfers:',size=(14,1)),sg.Text(size=(10,1),key='kommuteTransfers')],
[sg.Text('Walking Time:',size=(14,1)),sg.Text(size=(10,1),key='kommuteWalk')]
]
col_commute2 = [
[sg.Frame(layout=[[sg.Listbox(values=[],size=(20,5),key='kommuteSteps')]],title="Commute Steps:",title_color="blue")]
]
# Grocery Column
col_grocery = [
[sg.Frame(layout=[[sg.Listbox(values=[],size=(30,5),key='storeWalk')]],title="Grocery Stores(walking):",title_color="blue"),
sg.Frame(layout=[[sg.Listbox(values=[],size=(30,5),key='storeDrive')]],title="Grocery Stores(driving):",title_color="blue") ]
]
# Crime Column
col_crime = [
[sg.Text('GUN',size=(10,1)),sg.Text(size=(10,1),key='crimeGun')],
[sg.Text('MURDER',size=(10,1)),sg.Text(size=(10,1),key='crimeMurder')],
[sg.Text('DRUG',size=(10,1)),sg.Text(size=(10,1),key='crimeDrug')],
[sg.Text('HUMAN',size=(10,1)),sg.Text(size=(10,1),key='crimeHuman')],
[sg.Text('THEFT',size=(10,1)),sg.Text(size=(10,1),key='crimeTheft')],
[sg.Text('OTHER',size=(10,1)),sg.Text(size=(10,1),key='crimeOther')]
]
# SocioEconomic Column
col_socio = [
[sg.Text('Percent of aged 25+ without HS diploma:',size=(30,1)),sg.Text(size=(8,1),key='hsDiploma')],
[sg.Text('Percent of households below poverty:',size=(30,1)),sg.Text(size=(8,1),key='homePoverty')],
[sg.Text('Percent of housing crowded:',size=(30,1)),sg.Text(size=(8,1),key='homeCrowded')],
[sg.Text('Percent of aged 16+ unemployed:',size=(30,1)),sg.Text(size=(8,1),key='unemployed')],
[sg.Text('Percent aged under 18 or over 64:',size=(30,1)),sg.Text(size=(8,1),key='aged')],
[sg.Text('Per capita income:',size=(30,1)),sg.Text(size=(8,1),key='income')]
]
# Language Column
col_language = [
[sg.Text('Select Language 1: '),
sg.InputCombo(tuple(languages), key='lang1', default_value="CHINESE", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang1")],
[sg.Text('Select Language 2: '),
sg.InputCombo(tuple(languages), key='lang2', default_value="SPANISH", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang2")],
[sg.Text('Select Language 3: '),
sg.InputCombo(tuple(languages), key='lang3', default_value="POLISH", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang3")],
[sg.Text('Select Language 4: '),
sg.InputCombo(tuple(languages), key='lang4', default_value="RUSSIAN", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang4")],
[sg.Text('Select Language 5: '),
sg.InputCombo(tuple(languages), key='lang5', default_value="AFRICAN LANGUAGES", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang5")],
[sg.Text('Select Language 6: '),
sg.InputCombo(tuple(languages), key='lang6', default_value="GREEK", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang6")]
]
# Button Column
col_button = [
[sg.Button('',image_filename="images/thumbsDown.png",image_size=(100,100),image_subsample=5,border_width=0,key="dislike"),sg.Text(' ' * 25),
sg.Button('',image_filename="images/unsure.png",image_size=(100,100),image_subsample=3,border_width=0,key="unsure"),sg.Text(' ' * 25),
sg.Button('',image_filename="images/thumbsUp.png",image_size=(100,100),image_subsample=5,border_width=0,key="like") ]
]
# Score Column
col_score = [
[sg.Text("Your Rating: ",size=(15,1)),sg.Text(size=(10,1),key="rate")],
[sg.Text("Predicted Score: ",size=(15,1)),sg.Text(size=(10,1),key="score")]
]
layout = [[sg.Text('Is this house Hot or Not?',font=('Helvetica', 20))],
[sg.Frame(layout=[[sg.Text('User Select: '),sg.InputCombo(('MM','XY'),size=(10,1),key='user',default_value='MM',enable_events=True)]],title="SELECT USER",title_color="blue"),
sg.Frame(layout=[[sg.Text("View Select: "),sg.InputCombo(('ALL','UNRATED', 'RATED'), key='userRated', default_value="ALL", enable_events=True,size=(20, 1))]],
title="RATING VIEW",title_color="blue")],
[sg.Text('Sort by: '),
sg.InputCombo(('COMMUTE_TIME','WALKING_TIME', 'PRICE'), key='sortBy', default_value="PRICE", enable_events=True,size=(20, 1)),
sg.Radio("Ascending",group_id="radio1",key="ascend",default=True,enable_events=True),
sg.Radio("Descending",group_id="radio1",key="descend",enable_events=True),
sg.Button('Save Work and Exit'),
sg.Text(" "*5),sg.Column(col_score,background_color="red")],
[sg.Text('Filter by Location: '),
sg.InputCombo(local,key='filter', default_value="NONE", enable_events=True,size=(20, 1))],
[sg.Frame(layout = [[sg.Listbox(values=prop_df["ADDRESS"],
size=(30, 12), key='-home-', enable_events=True)]],title="Home Selection:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_fact,background_color="grey"),
sg.Column(col_fact2,background_color="grey")]],title="General Information:",title_color="blue")
],
[sg.Frame(layout = [[sg.Column(col_commute1,background_color="purple"),
sg.Column(col_commute2,background_color="purple")]],title="Commute Information:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_grocery,background_color="blue")]],title="Grocery Information:",title_color="blue")],
[sg.Frame(layout = [[sg.Column(col_crime,background_color="green")]],title="Crime Statistics:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_socio,background_color="magenta")]],title="Socioeconomic Statistics:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_language,background_color="orange")]],title="Language Spoken (%)",title_color="blue")],
[sg.Column(col_button,justification="center")]
]
window = sg.Window('Housing Dating App', layout)
while True: # Event Loop
event, values = window.read()
print(event, values)
print("EVENT: ", event)
print("VALUE: ", values)
if event in ["-home-"]:
print(values["-home-"][0])
i = prop_df["ADDRESS"].tolist().index(values["-home-"][0])
if event in ['Save Work and Exit',None]:
break
if event in ['sortBy','ascend','descend']:
print("ITEM1: ",values['sortBy'])
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
if event in ['filter','userRated','user']:
print("ITEM1: ",values['filter'])
print("ITEM2: ",values['userRated'])
if values['filter'] in ["NONE"]:
if values['userRated'] in ['ALL']:
prop_df = prop_df_old.copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['UNRATED']:
prop_df = prop_df_old.loc[pd.isnull(prop_df_old[values['user']+"_RATING"])].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['RATED']:
prop_df = prop_df_old.loc[pd.notnull(prop_df_old[values['user']+"_RATING"])].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
else:
if values['userRated'] in ['ALL']:
prop_df = prop_df_old.loc[prop_df_old["LOCATION"] == values["filter"]].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['UNRATED']:
prop_df = prop_df_old.loc[(prop_df_old["LOCATION"] == values["filter"]) & (pd.isnull(prop_df_old[values['user']+"_RATING"]))].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['RATED']:
prop_df = prop_df_old.loc[(prop_df_old["LOCATION"] == values["filter"]) & (pd.notnull(prop_df_old[values['user']+"_RATING"]))].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
if event in ["lang1"]:
window['perLang1'].update(str(f'{prop_df[values["lang1"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang2"]:
window['perLang2'].update(str(f'{prop_df[values["lang2"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang3"]:
window['perLang3'].update(str(f'{prop_df[values["lang3"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang4"]:
window['perLang4'].update(str(f'{prop_df[values["lang4"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang5"]:
window['perLang5'].update(str(f'{prop_df[values["lang5"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang6"]:
window['perLang6'].update(str(f'{prop_df[values["lang6"]][i]/prop_df["TOTAL"][i]:.2%}'))
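# The rating buttons write 3 (like), 2 (unsure) or 1 (dislike) into the selected
# user's rating column (MM_RATING or XY_RATING). In the UNRATED view the rated row
# is removed from the working frame; otherwise the rating is stored in place and
# the selection advances to the next address.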
if event in ["-home-","like","unsure","dislike"]:
if n > 0:
id = prop_df_old["ADDRESS"].tolist().index(prop_df["ADDRESS"][i])
if event == "like":
prop_df_old.at[id,values['user']+"_RATING"] = 3
if values['userRated'] in ['UNRATED']:
prop_df.drop(prop_df.index[i],inplace=True)
prop_df.index = range(len(prop_df.index))
n = prop_df.shape[0]
if i == n:
i = n-1
window.Element("-home-").Update(prop_df["ADDRESS"])
else:
prop_df.at[i,values['user']+"_RATING"] = 3
if i < n-1:
i += 1
if event == "unsure":
prop_df_old.at[id,values['user']+"_RATING"] = 2
if values['userRated'] in ['UNRATED']:
prop_df.drop(prop_df.index[i],inplace=True)
prop_df.index = range(len(prop_df.index))
n = prop_df.shape[0]
if i == n:
i = n-1
window.Element("-home-").Update(prop_df["ADDRESS"])
else:
prop_df.at[i,values['user']+"_RATING"] = 2
if i < n-1:
i += 1
if event == "dislike":
prop_df_old.at[id,values['user']+"_RATING"] = 1
if values['userRated'] in ['UNRATED']:
prop_df.drop(prop_df.index[i],inplace=True)
prop_df.index = range(len(prop_df.index))
n = prop_df.shape[0]
if i == n:
i = n-1
window.Element("-home-").Update(prop_df["ADDRESS"])
else:
prop_df.at[i,values['user']+"_RATING"] = 1
if i < n-1:
i += 1
window.Element("-home-").update(set_to_index=i,scroll_to_index=max(0,i-3))
if n > 0:
webbrowser.open(prop_df['URL'][i])
#call_url = prop_df['URL'][i]
#mycmd = r'start chrome /new-tab {}'.format(call_url)
#try:
# os.system("taskkill /F /IM chrome.exe")
#except:
# pass
#p1 = Popen(mycmd,shell=True)
window['address'].update(prop_df['ADDRESS'][i])
window['location'].update(prop_df['LOCATION'][i])
if pd.isnull(prop_df['SQFT'][i]):
window['sqft'].update("")
else:
window['sqft'].update(math.floor(prop_df['SQFT'][i]))
if pd.isnull(prop_df['YEAR'][i]):
window['year'].update("")
else:
window['year'].update(prop_df['YEAR'][i])
if pd.isnull(prop_df['LAST_SOLD_DATE'][i]):
window['soldDT'].update("")
else:
window['soldDT'].update(prop_df['LAST_SOLD_DATE'][i])
if pd.isnull(prop_df["ZESTIMATE"][i]):
window['zest'].update("$")
else:
window['zest'].update("$" + str(f'{math.floor(prop_df["ZESTIMATE"][i]):,}'))
if
|
pd.isnull(prop_df["LAST_SOLD_PRICE"][i])
|
pandas.isnull
|
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
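# For example (hypothetical inputs): tuples=[(0, 1.0, pd.Timestamp('2015-01-06'))]
# with start_date 2015-01-04 and end_date 2015-01-08 yields a frame indexed by
# (at_date, knowledge_date) in which sid 0's estimate is NaN before 2015-01-06
# and forward-filled to 1.0 from that date on.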
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_load_one_day()
Tests that a single day of estimates data with multiple columns is loaded
correctly by the quarter loader under test.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AttributeError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
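# Illustrative note (not from the original source): itertools.product here yields
# 12 test cases in total -- each of the 6 critical dates paired with both 1 and 2
# announcements out.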
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
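# Illustrative note (not from the original source): with sids [0, 10, 20], the
# expression above expands to [0, 1, ..., 9, 10, 11, ..., 19, 20], i.e. every
# integer between consecutive real sids plus the last real sid, so the tests can
# verify that adjustments are applied only to the sids that actually have data.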
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
|
##############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
################################################################################
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from numpy import inf
from sklearn.linear_model import LassoCV, RidgeCV
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pylab as plt
try:
    get_ipython().magic(u'matplotlib inline')
except Exception:
    pass  # not running inside IPython/Jupyter; skip the inline magic
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
from sklearn.metrics import classification_report, confusion_matrix
from functools import reduce
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from autoviml.QuickML_Stacking import QuickML_Stacking
from autoviml.Transform_KM_Features import Transform_KM_Features
from autoviml.QuickML_Ensembling import QuickML_Ensembling
from autoviml.Auto_NLP import Auto_NLP, select_top_features_from_SVD
import xgboost as xgb
import sys
import copy
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict
##################################################################################
def find_rare_class(classes, verbose=0):
######### Print the % count of each class in a Target variable #####
"""
Works on Multi Class too. Prints the percentage count of each class in the target variable.
It returns the name of the Rare class (the one with the minimum class member count).
This can also be helpful in using it as pos_label in Binary and Multi Class problems.
"""
counts = OrderedDict(Counter(classes))
total = sum(counts.values())
if verbose >= 1:
print(' Class -> Counts -> Percent')
for cls in counts.keys():
print("%6s: % 7d -> % 5.1f%%" % (cls, counts[cls], counts[cls]/total*100))
if type(pd.Series(counts).idxmin())==str:
return pd.Series(counts).idxmin()
else:
return int(pd.Series(counts).idxmin())
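# Illustrative usage sketch (not part of the original module), assuming a small
# binary target where class 1 is the rare one:
#
#   >>> find_rare_class([0, 0, 0, 1], verbose=1)
#    Class -> Counts -> Percent
#        0:       3 ->  75.0%
#        1:       1 ->  25.0%
#   1
#
# The returned rare-class label (here 1) can be used as pos_label in sklearn metrics.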
###############################################################################
def return_factorized_dict(ls):
"""
###### Factorize any list of values in a data frame using this neat function
If your data has any NaNs, it automatically marks them as -1 and returns that for NaNs.
Returns a dictionary mapping previous values with new values.
"""
factos = pd.unique(pd.factorize(ls)[0])
categs = pd.unique(pd.factorize(ls)[1])
if -1 in factos:
categs = np.insert(categs,np.where(factos==-1)[0][0],np.nan)
return dict(zip(categs,factos))
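# Illustrative usage sketch (not part of the original module): factorizing a list
# that contains a missing value maps NaN to -1 and the remaining categories to
# consecutive integers starting at 0.
#
#   >>> return_factorized_dict(['a', 'b', np.nan, 'a'])
#   {'a': 0, 'b': 1, nan: -1}
#
# The resulting dict is what later gets applied via .map() to both train and test.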
#############################################################################################
from sklearn.metrics import confusion_matrix
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
adjusted=False):
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
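# Illustrative usage sketch (not part of the original module): the score is the
# mean of per-class recalls, so always predicting the majority class is penalized
# on imbalanced data.
#
#   >>> balanced_accuracy_score([0, 0, 0, 1], [0, 0, 0, 0])
#   0.5   # recall is 1.0 for class 0 and 0.0 for class 1
#
# With adjusted=True the result is rescaled so that random guessing maps to 0.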
#############################################################################################
import os
def check_if_GPU_exists():
GPU_exists = False
try:
from tensorflow.python.client import device_lib
dev_list = device_lib.list_local_devices()
print('Number of GPUs = %d' %len(dev_list))
for i in range(len(dev_list)):
if 'GPU' == dev_list[i].device_type:
GPU_exists = True
print('%s available' %dev_list[i].device_type)
except:
print('')
if not GPU_exists:
try:
os.environ['NVIDIA_VISIBLE_DEVICES']
print('GPU available on this device')
return True
except:
print('No GPU available on this device')
return False
else:
return True
#############################################################################################
def analyze_problem_type(train, targ,verbose=0):
"""
This function analyzes a Target Variable and finds out whether it is a
Regression or Classification type problem
"""
if train[targ].dtype != 'int64' and train[targ].dtype != float :
if train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
else:
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
elif train[targ].dtype == 'int64' or train[targ].dtype == float :
if len(train[targ].unique()) == 1:
print('Error in data set: Only one class in Target variable. Check input and try again')
sys.exit()
elif len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
elif train[targ].dtype == bool:
model_class = 'Binary_Classification'
elif train[targ].dtype == 'int64':
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else :
model_class = 'Regression'
return model_class
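# Illustrative usage sketch (not part of the original module), using a toy frame:
#
#   >>> df = pd.DataFrame({'y': [0, 1, 0, 1, 1]})
#   >>> analyze_problem_type(df, 'y')
#   'Binary_Classification'
#
# A float target with many distinct values would come back as 'Regression' instead.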
#######
def convert_train_test_cat_col_to_numeric(start_train, start_test, col,str_flag=True):
"""
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
"""
start_train = copy.deepcopy(start_train)
start_test = copy.deepcopy(start_test)
missing_flag = False
new_missing_col = ''
if start_train[col].isnull().sum() > 0:
missing_flag = True
if str_flag:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype(str)
else:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype('category')
if len(start_train[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Train data set %s column with %d data types. Fixing it...' %(
col, len(start_train[col].apply(type).value_counts())))
train_categs = start_train[col].value_counts().index.tolist()
else:
train_categs = np.unique(start_train[col]).tolist()
if not isinstance(start_test,str) :
if start_test[col].isnull().sum() > 0:
#### In some rare cases, Test data has missing values while Train data doesn't.
#### This section takes care of those rare cases: we need to create the missing-flag
#### column in both train and test in that case.
if not missing_flag:
missing_flag = True
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
##### THis is to take care of Missing_Flag in start_test data set!!
start_test[new_missing_col] = 0
start_test.loc[start_test[col].isnull(),new_missing_col]=1
if str_flag:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype(str)
else:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype('category')
else:
#### In some rare cases, there are missing values in train but not in test data!
#### In those cases, we need to create a new_missing_col in test data in addition to train
start_test[new_missing_col] = 0
if len(start_test[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Test data set %s column with %d data types. Fixing it...' %(
col, len(start_test[col].apply(type).value_counts())))
test_categs = start_test[col].value_counts().index.tolist()
test_categs = [x if isinstance(x,str) else str(x) for x in test_categs]
start_test[col] = start_test[col].astype(str).values
else:
test_categs = np.unique(start_test[col]).tolist()
if not isinstance(start_test,str) :
categs_all = np.unique( train_categs + test_categs).tolist()
dict_all = return_factorized_dict(categs_all)
else:
dict_all = return_factorized_dict(train_categs)
start_train[col] = start_train[col].map(dict_all)
if not isinstance(start_test,str) :
start_test[col] = start_test[col].map(dict_all)
return start_train, start_test, missing_flag, new_missing_col
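# Illustrative usage sketch (not part of the original module), assuming toy train
# and test frames that share a categorical column 'city' with a NaN only in train:
#
#   >>> tr = pd.DataFrame({'city': ['NY', 'LA', np.nan]})
#   >>> te = pd.DataFrame({'city': ['LA', 'SF']})
#   >>> tr2, te2, miss_flag, miss_col = convert_train_test_cat_col_to_numeric(tr, te, 'city')
#
# Both frames come back with 'city' label-encoded from one shared mapping; because
# train had a NaN, miss_flag is True and miss_col is 'city_Missing_Flag'.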
#############################################################################################################
def flatten_list(list_of_lists):
final_ls = []
for each_item in list_of_lists:
if isinstance(each_item,list):
final_ls += each_item
else:
final_ls.append(each_item)
return final_ls
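# Illustrative usage sketch (not part of the original module): only one level of
# nesting is flattened, which is all the feature-list handling below requires.
#
#   >>> flatten_list([['a', 'b'], 'c', ['d']])
#   ['a', 'b', 'c', 'd']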
#############################################################################################################
import scipy as sp
def Auto_ViML(train, target, test='',sample_submission='',hyper_param='RS', feature_reduction=True,
scoring_parameter='logloss', Boosting_Flag=None, KMeans_Featurizer=False,
Add_Poly=0, Stacking_Flag=False, Binning_Flag=False,
Imbalanced_Flag=False, verbose=0):
"""
#########################################################################################################
############# This is not an Officially Supported Google Product! #########################
#########################################################################################################
#### Automatically Build Variant Interpretable Machine Learning Models (Auto_ViML) ######
#### Developed by <NAME> ######
###### Version 0.1.652 #######
##### GPU UPGRADE!! Now with Auto_NLP. Best Version to Download or Upgrade. May 15,2020 ######
###### Auto_ViML with Auto_NLP combines structured data with NLP for Predictions. #######
#########################################################################################################
#Copyright 2019 Google LLC #######
# #######
#Licensed under the Apache License, Version 2.0 (the "License"); #######
#you may not use this file except in compliance with the License. #######
#You may obtain a copy of the License at #######
# #######
# https://www.apache.org/licenses/LICENSE-2.0 #######
# #######
#Unless required by applicable law or agreed to in writing, software #######
#distributed under the License is distributed on an "AS IS" BASIS, #######
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #######
#See the License for the specific language governing permissions and #######
#limitations under the License. #######
#########################################################################################################
#### Auto_ViML was designed for building a High Performance Interpretable Model With Fewest Vars. ###
#### The "V" in Auto_ViML stands for Variant because it tries Multiple Models and Multiple Features ###
#### to find the Best Performing Model for any data set.The "i" in Auto_ViML stands " Interpretable"###
#### since it selects the fewest Features to build a simpler, more interpretable model. This is key. ##
#### Auto_ViML is built mostly using Scikit-Learn, Numpy, Pandas and Matplotlib. Hence it should run ##
#### on any Python 2 or Python 3 Anaconda installations. You won't have to import any special ####
#### Libraries other than "SHAP" library for SHAP values which provides more interpretability. #####
#### But if you don't have it, Auto_ViML will skip it and show you the regular feature importances. ###
#########################################################################################################
#### INPUTS: ###
#########################################################################################################
#### train: could be a datapath+filename or a dataframe. It will detect which is which and load it.####
#### test: could be a datapath+filename or a dataframe. If you don't have any, just leave it as "". ###
#### submission: must be a datapath+filename. If you don't have any, just leave it as empty string.####
#### target: name of the target variable in the data set. ####
#### sep: if you have a separator in the file such as "," or "\t" mention it here. Default is ",". ####
#### scoring_parameter: if you want your own scoring parameter such as "f1" give it here. If not, #####
#### it will assume the appropriate scoring param for the problem and it will build the model.#####
#### hyper_param: Tuning options are GridSearch ('GS'), RandomizedSearch ('RS')and now HyperOpt ('HO')#
#### Default setting is 'GS'. Auto_ViML with HyperOpt is approximately 3X Faster than Auto_ViML###
#### feature_reduction: Default = 'True' but it can be set to False if you don't want automatic ####
#### feature_reduction since in Image data sets like digits and MNIST, you get better #####
#### results when you don't reduce features automatically. You can always try both and see. #####
#### KMeans_Featurizer = True: Adds a cluster label to features based on KMeans. Use for Linear. #####
#### False (default) = For Random Forests or XGB models, leave it False since it may overfit.####
#### Boosting Flag: you have 3 possible choices (default is False): #####
#### None = This will build a Linear Model #####
#### False = This will build a Random Forest or Extra Trees model (also known as Bagging) #####
#### True = This will build an XGBoost model #####
#### Add_Poly: Default is 0. It has 2 additional settings: #####
#### 1 = Add interaction variables only such as x1*x2, x2*x3,...x9*10 etc. #####
#### 2 = Add Interactions and Squared variables such as x1**2, x2**2, etc. #####
#### Stacking_Flag: Default is False. If set to True, it will add an additional feature which #####
#### is derived from predictions of another model. This is used in some cases but may result#####
#### in overfitting. So be careful turning this flag "on". #####
#### Binning_Flag: Default is False. It set to True, it will convert the top numeric variables #####
#### into binned variables through a technique known as "Entropy" binning. This is very #####
#### helpful for certain datasets (especially hard to build models). #####
#### Imbalanced_Flag: Default is False. If set to True, it will downsample the "Majority Class" #####
#### in an imbalanced dataset and make the "Rare" class at least 5% of the data set. This is ####
#### the ideal threshold in my mind to make a model learn. Do it for Highly Imbalanced data.#####
#### verbose: This has 3 possible states: #####
#### 0 = limited output. Great for running this silently and getting fast results. #####
#### 1 = more charts. Great for knowing how results were and making changes to flags in input. #####
#### 2 = lots of charts and output. Great for reproducing what Auto_ViML does on your own. #####
#########################################################################################################
#### OUTPUTS: #####
#########################################################################################################
#### model: It will return your trained model #####
#### features: the fewest number of features in your model to make it perform well #####
#### train_modified: this is the modified train dataframe after removing and adding features #####
#### test_modified: this is the modified test dataframe with the same transformations as train #####
################# A D D I T I O N A L N O T E S ###########
#### Finally, it writes your submission file to disk in the current directory called "mysubmission.csv"
#### This submission file is ready for you to show it clients or submit it to competitions. #####
#### If no submission file was given but as long as you give it a test file name, it will create #####
#### a submission file for you named "mySubmission.csv". #####
#### Auto_ViML works on any Multi-Class, Multi-Label Data Set. So you can have many target labels #####
#### You don't have to tell Auto_ViML whether it is a Regression or Classification problem. #####
#### Suggestions for a Scoring Metric: #####
#### If you have Binary Class and Multi-Class in a Single Label, Choose Accuracy. It will ######
#### do very well. If you want something better, try roc_auc even for Multi-Class which works. ######
#### You can try F1 or Weighted F1 if you want something complex or for Multi-Class. ######
#### Note that For Imbalanced Classes (<=5% classes), it automatically adds Class Weights. ######
#### Also, Note that it handles Multi-Label automatically so you can send Train data ######
#### with multiple Labels (Targets) and it will automatically predict for each Label. ######
#### Finally this is Meant to Be a Fast Algorithm, so use it for just quick POCs ######
#### This is Not Meant for Production Problems. It produces great models but it is not Perfect! ######
######################### HELP OTHERS! PLEASE CONTRIBUTE! OPEN A PULL REQUEST! ##########################
#########################################################################################################
"""
##### These copies are to make sure that the originals are not destroyed ####
CPU_count = os.cpu_count()
test = copy.deepcopy(test)
orig_train = copy.deepcopy(train)
orig_test = copy.deepcopy(test)
train_index = train.index
if not isinstance(test, str):
test_index = test.index
start_test = copy.deepcopy(orig_test)
####### These are Global Settings. If you change them here, it will ripple across the whole code ###
corr_limit = 0.70 #### This decides what the cut-off for defining highly correlated vars to remove is.
scaling = 'MinMax' ### This decides whether to use MinMax scaling or Standard Scaling ("Std").
first_flag = 0 ## This is just a setting to detect the first time certain values are set
seed= 99 ### this maintains repeatability of the whole ML pipeline here ###
subsample=0.7 #### Leave this low so the models generalize better. Increase it if you want overfit models
col_sub_sample = 0.7 ### Leave this low for the same reason above
poly_degree = 2 ### this create 2-degree polynomial variables in Add_Poly. Increase if you want more degrees
booster = 'gbtree' ### this is the booster for XGBoost. The other option is "Linear".
n_splits = 5 ### This controls the number of splits for Cross Validation. Increasing will take longer time.
matplotlib_flag = True #(default) This is for drawing SHAP values. If this is False, initJS is used.
early_stopping = 20 #### Early stopping rounds for XGBoost ######
encoded = '_Label_Encoded' ### This is the tag we add to feature names in the end to indicate they are label encoded
catboost_limit = 0.4 #### The catboost_limit represents the percentage of num vars in data. Any lower, CatBoost is used.
cat_code_limit = 100 #### If the number of dummy variables to create in a data set exceeds this, CatBoost is the default Algorithm used
one_hot_size = 500 #### This determines the max length of one_hot_max_size parameter of CatBoost algorithm
Alpha_min = -3 #### The lowest value of Alpha in LOGSPACE that is used in CatBoost
Alpha_max = 2 #### The highest value of Alpha in LOGSPACE that is used in Lasso or Ridge Regression
Cs = [0.001,0.005,0.01,0.05,0.1,0.25,0.5,1,2,4,6,10,20,30,40,50,100,150,200,400,800,1000,2000]
#Cs = np.logspace(-4,3,40) ### The list of values of C used in Logistic Regression
tolerance = 0.001 #### This tolerance is needed to speed up Logistic Regression. Otherwise, SAGA takes too long!!
#### 'lbfgs' is the fastest one but doesn't provide accurate results. Newton-CG is slower but accurate!
#### SAGA is extremely slow. Even slower than Newton-CG. Liblinear is the fastest and as accurate as Newton-CG!
solvers = ['liblinear'] ### Other solvers for Logistic Regression model: ['newton-cg','lbfgs','saga','liblinear']
solver = 'liblinear' ### This is the next fastest solver after liblinear. Useful for Multi-class problems!
penalties = ['l2','l1'] ### This is to determine the penalties for LogisticRegression
n_steps = 6 ### number of estimator steps between 100 and max_estims
max_depth = 10 ##### This limits the max_depth used in decision trees and other classifiers
max_features = 10 #### maximum number of features in a random forest model or extra trees model
warm_start = True ### This is to set the warm_start flag for the ExtraTrees models
bootstrap = True #### Set this flag to control whether to bootstrap variables or not.
n_repeats = 1 #### This is for repeated KFold and StratifiedKFold - this changes the folds every time
Bins = 30 ### This is for plotting probabilities in a histogram. For small data sets, 30 is enough.
top_nlp_features = 100 ### This sets a limit on the number of features added by each NLP transformer!
removed_features_threshold = 5 #### This triggers the Truncated_SVD if number of removed features from XGB exceeds this!
calibrator_flag = False ### In Multi-class data sets, a CalibratedClassifier works better than regular classifiers!
max_class_length = 1 ### It turns out the number of classes is directly correlated to Estimated Time. Hence this!
print('############## D A T A S E T A N A L Y S I S #######################')
########## I F CATBOOST IS REQUESTED, THEN CHECK IF IT IS INSTALLED #######################
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
from catboost import CatBoostClassifier, CatBoostRegressor
#### Similarly for Random Forests Model, it takes too long with Grid Search, so MAKE IT RandomizedSearch!
if not Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise, Random Forests will take too long for 10,000+ rows')
elif Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if not isinstance(Boosting_Flag, str):
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise XGBoost will take too long for 10,000+ rows.')
########### T H I S I S W H E R E H Y P E R O P T P A R A M S A R E S E T #########
if hyper_param == 'HO':
########### HyperOpt related objective functions are defined here #################
from hyperopt import hp, tpe
from hyperopt.fmin import fmin
from hyperopt import Trials
from autoviml.custom_scores_HO import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores_HO import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores_HO import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores_HO import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores_HO import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores_HO import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores_HO import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores_HO import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores_HO import gini_samples_recall, gini_macro_recall, gini_micro_recall
else:
from autoviml.custom_scores import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores import gini_samples_recall, gini_macro_recall, gini_micro_recall
###### If hyper_param = 'GS', it takes a LOOOONG TIME with "SAGA" solver for LogisticRegression.
#### Hence to speed it up you need to change the tolerance threshold to something bigger
if hyper_param == 'GS':
tolerance = 0.01 #### This tolerance is bigger to speed up Logistic Regression. Otherwise, SAGA takes too long!!
########## This is where some more default parameters are set up ######
    data_dimension = orig_train.shape[0]*orig_train.shape[1] ### number of cells in the entire data set.
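    ### Illustrative: a 50,000-row x 25-column data set has 1,250,000 cells, which crosses the 1-million threshold below and triggers the larger-data defaults.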
if data_dimension > 1000000:
### if data dimension exceeds 1 million, then reduce no of params
no_iter=30
early_stopping = 10
test_size = 0.20
max_iter = 10000
Bins = 100
top_nlp_features = 300
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 5000
else:
max_estims = 400
else:
max_estims = 400
else:
if orig_train.shape[0] <= 1000:
no_iter=20
test_size = 0.1
max_iter = 4000
top_nlp_features = 250
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 3000
else:
max_estims = 300
else:
max_estims = 300
early_stopping = 4
else:
no_iter=30
test_size = 0.15
max_iter = 7000
top_nlp_features = 200
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 4000
else:
max_estims = 350
else:
max_estims = 350
early_stopping = 6
#### The warnings from Sklearn are so annoying that I have to shut it off ####
import warnings
warnings.filterwarnings("ignore")
def warn(*args, **kwargs):
pass
warnings.warn = warn
### First_Flag is merely a flag for the first time you want to set values of variables
if scaling == 'MinMax':
SS = MinMaxScaler()
elif scaling == 'Std':
SS = StandardScaler()
else:
SS = MinMaxScaler()
### Make target into a list so that we can uniformly process the target label
if not isinstance(target, list):
target = [target]
model_label = 'Single_Label'
elif isinstance(target, list):
if len(target)==1:
model_label = 'Single_Label'
elif len(target) > 1:
model_label = 'Multi_Label'
else:
print('Target variable is neither a string nor a list. Please check input and try again!')
return
##### This is where we run the Traditional models to compare them to XGB #####
start_time = time.time()
####################################################################################
##### Set up your Target Labels and Classes Properly Here #### Label Encoding #####
#### This is for Classification Problems Only where you do Label Encoding of Target
mldict = lambda: defaultdict(mldict)
label_dict = mldict()
first_time = True
print('Training Set Shape = {}'.format(orig_train.shape))
print(' Training Set Memory Usage = {:.2f} MB'.format(orig_train.memory_usage().sum() / 1024**2))
if not isinstance(orig_test,str):
print('Test Set Shape = {}'.format(orig_test.shape))
print(' Test Set Memory Usage = {:.2f} MB'.format(orig_test.memory_usage().sum() / 1024**2))
print('%s Target: %s' %(model_label,target))
###### Now analyze what problem we have here ####
try:
modeltype = analyze_problem_type(train, target[0],verbose)
except:
print('Cannot find the Target variable in data set. Please check input and try again')
return
for each_target in target:
#### Make sure you don't move these 2 lines: they need to be reset for every target!
#### HyperOpt will not do Trials beyond max_evals - so only if you reset here, it will do it again.
if hyper_param == 'HO':
params_dict = {}
bayes_trials = Trials()
############ THIS IS WHERE OTHER DEFAULT PARAMS ARE SET ###############
c_params = dict()
r_params = dict()
if modeltype == 'Regression':
scv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
eval_metric = 'rmse'
objective = 'reg:squarederror'
model_class = 'Regression'
start_train = copy.deepcopy(orig_train)
else:
if len(np.unique(train[each_target])) == 2:
model_class = 'Binary-Class'
elif len(np.unique(train[each_target])) > 2:
model_class = 'Multi-Class'
##### If multi-class happens, then you absolutely need to do SMOTE. Otherwise, you don't get good results!
#### Unfortunately SMOTE blows up when the data set is large -> so better to turn it off!
print('ALERT! Setting Imbalanced_Flag to True in Auto_ViML for Multi_Classification problems improves results!')
#Imbalanced_Flag = True
else:
print('Target label %s has less than 2 classes. Stopping' %each_target)
return
### This is for Classification Problems Only ########
print('Shuffling the data set before training')
start_train = orig_train.sample(frac=1.0, random_state=seed)
scv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
if modeltype != 'Regression':
rare_class_orig = find_rare_class(orig_train[each_target].values,verbose=1)
            ### Perform Label Transformation only for Classification Problems ####
classes = np.unique(orig_train[each_target])
if first_time:
if hyper_param == 'GS':
print('Using GridSearchCV for Hyper Parameter Tuning. This is slow. Switch to RS for faster tuning...')
elif hyper_param == 'RS':
print('Using RandomizedSearchCV for Hyper Parameter Tuning. This is 3X faster than GridSearchCV...')
else:
print('Using HyperOpt which is approximately 3X Faster than GridSearchCV but results vary...')
first_time = False
if len(classes) > 2:
##### If Boosting_Flag = True, change it to False here since Multi-Class XGB is VERY SLOW!
max_class_length = len(classes)
if Boosting_Flag:
                    print('CAUTION: In Multi-Class Boosting (more than 2 classes), TRAINING WILL TAKE A LOT OF TIME!')
objective = 'multi:softmax'
eval_metric = "mlogloss"
else:
max_class_length = 2
eval_metric="logloss"
objective = 'binary:logistic'
### Do Label Encoding when the Target Classes in each Label are Strings or Multi Class ###
if type(start_train[each_target].values[0])==str or str(start_train[each_target].dtype
)=='category' or sorted(np.unique(start_train[each_target].values))[0] != 0:
### if the class is a string or if it has more than 2 classes, then use Factorizer!
label_dict[each_target]['values'] = start_train[each_target].values
#### Factorizer is the easiest way to convert target in train and predictions in test
#### This takes care of some classes that are present in train and not in predictions
### and vice versa. Hence it is better than Label Encoders which breaks when above happens.
train_targ_categs = list(start_train[each_target].value_counts().index)
if len(train_targ_categs) == 2:
majority_class = [x for x in train_targ_categs if x != rare_class_orig]
dict_targ_all = {majority_class[0]: 0, rare_class_orig: 1}
else:
dict_targ_all = return_factorized_dict(train_targ_categs)
start_train[each_target] = start_train[each_target].map(dict_targ_all)
label_dict[each_target]['dictionary'] = copy.deepcopy(dict_targ_all)
label_dict[each_target]['transformer'] = dict([(v,k) for (k,v) in dict_targ_all.items()])
label_dict[each_target]['classes'] = copy.deepcopy(train_targ_categs)
class_nums = list(dict_targ_all.values())
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
print('String or Multi Class target: %s transformed as follows: %s' %(each_target,dict_targ_all))
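                ### Illustrative example: a binary target with values ['ham','spam'] where 'spam' is the rare class gets mapped as {'ham': 0, 'spam': 1}, so the rare class is always encoded as 1.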
rare_class = find_rare_class(start_train[each_target].values)
else:
### Since the each_target here is already numeric, you don't have to modify it
start_train[each_target] = start_train[each_target].astype(int).values
rare_class = find_rare_class(start_train[each_target].values)
label_dict[each_target]['values'] = start_train[each_target].values
label_dict[each_target]['classes'] = np.unique(start_train[each_target].values)
class_nums = np.unique(start_train[each_target].values)
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
label_dict[each_target]['transformer'] = []
label_dict[each_target]['dictionary'] = dict(zip(classes,classes))
print(' Target %s is already numeric. No transformation done.' %each_target)
if rare_class != 1:
print('Alert! Rare Class is not 1 but %s in this data set' %rare_class)
else:
#### In Regression problems, max_class_length is artificially set to one.
#### It turns out that Estimated Time is correlated to number of classes in data set. Hence we use this!
max_class_length = 1
###########################################################################################
#### This is where we start doing the iterative hyper tuning parameters #####
params_dict = defaultdict(list)
accu_mean = []
error_rate = []
###### This is where we do the training and hyper parameter tuning ########
orig_preds = [x for x in list(orig_train) if x not in target]
count = 0
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(orig_train[orig_preds], verbose)
##### Classify Columns ################
id_cols = var_df['id_vars']
nlp_columns = var_df['nlp_vars']
date_cols = var_df['date_vars']
del_cols = var_df['cols_delete']
factor_cols = var_df['factor_vars']
numvars = var_df['continuous_vars']+var_df['int_vars']
cat_vars = var_df['string_bool_vars']+var_df['discrete_string_vars']+var_df[
'cat_vars']+var_df['factor_vars']+var_df['num_bool_vars']
num_bool_vars = var_df['num_bool_vars']
#######################################################################################
preds = [x for x in orig_preds if x not in id_cols+del_cols+date_cols+target]
if len(id_cols+del_cols+date_cols)== 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(id_cols+del_cols+date_cols))
################## This is where real code begins ###################################################
GPU_exists = check_if_GPU_exists()
###### This is where we set the CPU and GPU parameters for XGBoost
param = {}
if Boosting_Flag:
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
model_name = 'CatBoost'
hyper_param = None
else:
model_name = 'XGBoost'
else:
model_name = 'XGBoost'
elif Boosting_Flag is None:
model_name = 'Linear'
else:
model_name = 'Forests'
##### Set the Scoring Parameters here based on each model and preferences of user ##############
cpu_params = {}
if model_name == 'XGBoost':
##### WE should keep CPU params as backup in case GPU fails!
cpu_params['nthread'] = -1
cpu_params['tree_method'] = 'hist'
cpu_params['grow_policy'] = 'depthwise'
cpu_params['max_depth'] = max_depth
cpu_params['max_leaves'] = 0
cpu_params['verbosity'] = 0
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
cpu_params['num_parallel_tree'] = 1
if GPU_exists:
param['nthread'] = -1
param['tree_method'] = 'gpu_hist'
param['grow_policy'] = 'depthwise'
param['max_depth'] = max_depth
param['max_leaves'] = 0
param['verbosity'] = 0
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
param['num_parallel_tree'] = 1
else:
param = copy.deepcopy(cpu_params)
validation_metric = copy.deepcopy(scoring_parameter)
elif model_name.lower() == 'catboost':
if model_class == 'Binary-Class':
catboost_scoring = 'Accuracy'
validation_metric = 'Accuracy'
loss_function='Logloss'
elif model_class == 'Multi-Class':
catboost_scoring = 'AUC'
validation_metric = 'AUC:type=Mu'
loss_function='MultiClass'
else:
loss_function = 'RMSE'
validation_metric = 'RMSE'
catboost_scoring = 'RMSE'
else:
validation_metric = copy.deepcopy(scoring_parameter)
########## D A T A P R E P R O C E S S I N G H E R E ##########################
print('############# D A T A P R E P A R A T I O N #############')
if start_train.isnull().sum().sum() > 0:
print('Filling missing values with "missing" placeholder and adding a column for missing_flags')
else:
print('No Missing Values in train data set')
copy_preds = copy.deepcopy(preds)
missing_flag_cols = []
if len(copy_preds) > 0:
dict_train = {}
for f in copy_preds:
if f in nlp_columns:
                #### You have to skip this for NLP columns ##############
continue
missing_flag = False
if start_train[f].dtype == object:
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,True)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif start_train[f].dtype == np.int64 or start_train[f].dtype == np.int32 or start_train[f].dtype == np.int16:
### if there are integer variables, don't scale them. Leave them as is.
fill_num = start_train[f].min() - 1
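                ### Illustrative: if an integer column spans 3..97 and has NaNs, fill_num = 2; the NaNs become 2 and a new '<col>_Missing_Flag' column marks which rows were imputed.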
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num).astype(int)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num).astype(int)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif f in factor_cols:
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,False)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
else:
### for all numeric variables, fill missing values with 1 less than min.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
###########################################################################################
if orig_train.isnull().sum().sum() > 0:
### If there are missing values in remaining features print it here ####
top5 = orig_train.isnull().sum().sort_values(ascending=False).index.tolist()[:5]
print(' Columns with most missing values: %s' %(
[x for x in top5 if orig_train[x].isnull().sum()>0]))
print(' and their missing value totals: %s' %([orig_train[x].isnull().sum() for x in
top5 if orig_train[x].isnull().sum()>0]))
if start_train[copy_preds].isnull().sum().sum() == 0:
print('Completed missing value Imputation. No more missing values in train.')
if verbose >= 1:
print(' %d new missing value columns added: %s' %(len(missing_flag_cols),missing_flag_cols))
else:
print('Error: Unable to complete missing value imputation in train. Exiting...')
return
####################################################################################
if type(orig_test) != str:
if start_test[copy_preds].isnull().sum().sum() > 0:
print('Test data still has some missing values. Fix it. Exiting...')
return
else:
print('Test data has no missing values. Continuing...')
###########################################################################################
else:
print(' Could not find any variables in your data set. Please check your dataset and try again')
return
###########################################################################################
print('Completed Label Encoding and Filling of Missing Values for Train and Test Data')
### This is a minor test to make sure that Boolean vars are Integers if they are Numeric!
if len(num_bool_vars) > 0:
### Just make sure that numeric Boolean vars are set as Integer type -> otherwise CatBoost will blow up
for each_bool_num in var_df['num_bool_vars']:
start_train[each_bool_num] = start_train[each_bool_num].astype(int)
if type(start_test) != str:
start_test[each_bool_num] = start_test[each_bool_num].astype(int)
######################################################################################
######### Set your Refit Criterion here - if you want to maximize Precision or Recall do it here ##
if modeltype == 'Regression':
if scoring_parameter in ['log_loss', 'neg_mean_squared_error','mean_squared_error']:
refit_metric = 'rmse'
else:
refit_metric = 'mae'
else:
if scoring_parameter in ['precision', 'precision_score','average_precision']:
refit_metric = 'precision'
elif scoring_parameter in ['logloss', 'log_loss']:
refit_metric = 'log_loss'
elif scoring_parameter in ['recall', 'recall_score']:
refit_metric = 'recall'
elif scoring_parameter in ['f1', 'f1_score','f1_weighted']:
refit_metric = 'f1'
elif scoring_parameter in ['accuracy', 'balanced_accuracy','balanced-accuracy']:
refit_metric = 'balanced_accuracy'
else:
refit_metric = 'balanced_accuracy'
print('%s problem: hyperparameters are being optimized for %s' %(modeltype,refit_metric))
###########################################################################################
### Make sure you remove variables that are highly correlated within data set first
rem_vars = left_subtract(preds,numvars)
if len(numvars) > 0 and feature_reduction:
numvars = remove_variables_using_fast_correlation(start_train,numvars, 'pearson',
corr_limit,verbose)
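        ### Roughly speaking: for every pair of numeric vars whose Pearson correlation exceeds corr_limit, one of the two is dropped here before modeling.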
### Reduced Preds are now free of correlated variables and hence can be used for Poly adds
red_preds = rem_vars + numvars
#### You need to save a copy of this red_preds so you can later on create a start_train
#### with it after each_target cycle is completed. Very important!
orig_red_preds = copy.deepcopy(red_preds)
for each_target in target:
print('\n############# PROCESSING T A R G E T = %s ##########################' %each_target)
######## D E F I N I N G N E W T R A I N and N E W T E S T here #########################
#### This is where we set the orig train data set with multiple labels to the new start_train
#### start_train has the new features added or reduced with the multi targets in one cycle
### That way, we start each train with one target, and then reset it with multi target
#############################################################################################
train = start_train[[each_target]+red_preds]
if type(orig_test) != str:
test = start_test[red_preds]
###### Add Polynomial Variables and Interaction Variables to Train ######
if Add_Poly >= 1:
if Add_Poly == 1:
print('\nAdding only Interaction Variables. This may result in Overfitting!')
elif Add_Poly == 2:
print('\nAdding only Squared Variables. This may result in Overfitting!')
elif Add_Poly == 3:
print('\nAdding Both Interaction and Squared Variables. This may result in Overfitting!')
## Since the data is already scaled, we set scaling to None here ##
### For train data we have to set the fit_flag to True ####
if len(numvars) > 1:
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
train_sel, lm, train_red,md,fin_xvars,feature_xvar_dict = add_poly_vars_select(train,numvars,
each_target,modeltype,poly_degree,Add_Poly,md='',
corr_limit=corr_limit, scaling='None',
fit_flag=True,verbose=verbose)
if len(left_subtract(train_sel,numvars)) > 0:
#### This means that new intxn and poly vars were added. In that case, you can use them as is
                    #### Since these vars were already tested for correlation, there should be no high correlation!
### SO you can take train_sel as the new list of numeric vars (numvars) going forward!
addl_vars = left_subtract(train_sel,numvars)
#numvars = list(set(numvars).intersection(set(train_sel)))
##### Print the additional Interxn and Poly variables here #######
if verbose >= 1:
print(' Intxn and Poly Vars are: %s' %addl_vars)
train = train_red[train_sel].join(train[rem_vars+[each_target]])
red_preds = [x for x in list(train) if x not in [each_target]]
if type(test) != str:
######### Add Polynomial and Interaction variables to Test ################
## Since the data is already scaled, we set scaling to None here ##
### For Test data we have to set the fit_flag to False ####
_, _, test_x_df,_,_,_ = add_poly_vars_select(test,numvars,each_target,
modeltype,poly_degree,Add_Poly,md,
corr_limit, scaling='None', fit_flag=False,
verbose=verbose)
### we need to convert x_vars into text_vars in test_x_df using feature_xvar_dict
test_x_vars = test_x_df.columns.tolist()
test_text_vars = [feature_xvar_dict[x] for x in test_x_vars]
test_x_df.columns = test_text_vars
#### test_red contains reduced variables with orig and substituted poly/intxn variables
test_red = test_x_df[train_sel]
                        #### we should now combine test_red with rem_vars so that it is the same shape as train
test = test_red.join(test[rem_vars])
#### Now we should change train_sel to subst_vars since that is the new list of vars going forward
numvars = copy.deepcopy(train_sel)
else:
#### NO new variables were added. so we can skip the rest of the stuff now ###
#### This means the train_sel is the new set of numeric features selected by add_poly algorithm
red_preds = train_sel+rem_vars
print(' No new variable was added by polynomial features...')
else:
print('\nAdding Polynomial vars ignored since no numeric vars in data')
train_sel = copy.deepcopy(numvars)
else:
### if there are no Polynomial vars, then all numeric variables are selected
train_sel = copy.deepcopy(numvars)
################ A U T O N L P P R O C E S S I N G B E G I N S H E R E !!! ####
if len(nlp_columns) > 0:
for nlp_column in nlp_columns:
nlp_column_train = train[nlp_column].values
if not isinstance(orig_test, str):
nlp_column_test = test[nlp_column].values
train1, test1, best_nlp_transformer,max_features_limit = Auto_NLP(nlp_column,
train, test, each_target, refit_metric,
modeltype, top_nlp_features, verbose,
build_model=False)
########################################################################
if KMeans_Featurizer:
start_time1 = time.time()
##### Do a clustering of word vectors from each NLP_column. This gives great results!
tfidf_term_array = create_tfidf_terms(nlp_column_train, best_nlp_transformer,
is_train=True, max_features_limit=max_features_limit)
print ('Creating word clusters using term matrix of size: %d for Train data set...' %len(tfidf_term_array['terms']))
num_clusters = int(np.sqrt(len(tfidf_term_array['terms']))/2)
if num_clusters < 2:
num_clusters = 2
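                    ### Illustrative: a TF-IDF term matrix with 1,600 terms yields int(sqrt(1600)/2) = 20 word clusters; the floor of 2 only matters for very small vocabularies.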
                    ##### Always set verbose to 0 since the KMeans output is too verbose otherwise!
km = KMeans(n_clusters=num_clusters, random_state=seed, verbose=0)
kme, cluster_labels = return_cluster_labels(km, tfidf_term_array, num_clusters,
is_train=True)
if isinstance(nlp_column, str):
cluster_col = nlp_column + '_word_cluster_label'
else:
cluster_col = str(nlp_column) + '_word_cluster_label'
train1[cluster_col] = cluster_labels
print ('Created one new column: %s using selected NLP technique...' %cluster_col)
if not isinstance(orig_test, str):
tfidf_term_array_test = create_tfidf_terms(nlp_column_test, best_nlp_transformer,
is_train=False, max_features_limit=max_features_limit)
_, cluster_labels_test = return_cluster_labels(kme, tfidf_term_array_test, num_clusters,
is_train=False)
test1[cluster_col] = cluster_labels_test
print ('Created word clusters using same sized term matrix for Test data set...')
print(' Time Taken for creating word cluster labels = %0.0f seconds' %(time.time()-start_time1) )
####### Make sure you include the above new columns created in the predictor variables!
red_preds = [x for x in list(train1) if x not in [each_target]]
train = train1[red_preds+[each_target]]
if not isinstance(orig_test, str):
test = test1[red_preds]
################ A U T O N L P P R O C E S S I N G E N D S H E R E !!! ####
###### We have to detect float variables again since we have created new variables using Auto_NLP!!
train_sel = np.array(red_preds)[(train[red_preds].dtypes==float).values].tolist()
######### A D D D A T E T I M E F E A T U R E S ####################
if len(date_cols) > 0:
#### Do this only if date time columns exist in your data set!
for date_col in date_cols:
print('Processing %s column for date time features....' %date_col)
date_df_train = create_time_series_features(orig_train, date_col)
if not isinstance(date_df_train, str):
date_col_adds = date_df_train.columns.tolist()
print(' Adding %d columns from date time column %s' %(len(date_col_adds),date_col))
train = train.join(date_df_train)
else:
date_col_adds = []
if not isinstance(orig_test, str):
date_df_test = create_time_series_features(orig_test, date_col)
if not isinstance(date_df_test, str):
test = test.join(date_df_test)
red_preds = [x for x in list(train) if x not in [each_target]]
train_sel = train_sel + date_col_adds
######### SELECT IMPORTANT FEATURES HERE #############################
if feature_reduction:
important_features,num_vars, imp_cats = find_top_features_xgb(train,red_preds,train_sel,
each_target,
modeltype,corr_limit,verbose)
else:
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
#####################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
### Training an XGBoost model to find important features
train = train[important_features+[each_target]]
######################################################################
if type(orig_test) != str:
test = test[important_features]
############## F E A T U R E E N G I N E E R I N G S T A R T S N O W ##############
###### From here on we do some Feature Engg using Target Variable with Data Leakage ############
### To avoid Model Leakage, we will now split the Data into Train and CV so that Held Out Data
## is Pure and is unadulterated by learning from its own Target. This is known as Data Leakage.
###################################################################################################
print('Starting Feature Engineering now...')
X = train[important_features]
y = train[each_target]
################ I M P O R T A N T ##################################################
### The reason we don't use train_test_split is because we want only a partial train entropy binned
### If we use the whole of Train for entropy binning then there will be data leakage and our
### cross validation test scores will not be so accurate. So don't change the next 5 lines here!
################ I M P O R T A N T ##################################################
if modeltype == 'Regression':
            skf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
else:
skf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_train_index, cv_index = next(skf.split(X, y))
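        ### Only the first fold from the splitter is used: e.g. with n_splits=5, roughly 80% of the rows become part_train and the remaining ~20% become part_cv.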
################ TRAIN CV TEST SPLIT HERE ##################################################
try:
#### Sometimes this works but other times, it gives an error!
X_train, X_cv = X.loc[cv_train_index], X.loc[cv_index]
y_train, y_cv = y.loc[cv_train_index], y.loc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.loc[cv_train_index]
part_cv = train.loc[cv_index]
except:
#### This works when the above method gives an error!
X_train, X_cv = X.iloc[cv_train_index], X.iloc[cv_index]
y_train, y_cv = y.iloc[cv_train_index], y.iloc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.iloc[cv_train_index]
part_cv = train.iloc[cv_index]
print('Train CV Split completed with', "TRAIN rows:", cv_train_index.shape[0], "CV rows:", cv_index.shape[0])
################ IMPORTANT ENTROPY BINNING FIRST TIME #####################################
############ Add Entropy Binning of Continuous Variables Here ##############################
num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
saved_important_features = copy.deepcopy(important_features) ### these are original features without '_bin' added
#### saved_num_vars is an important variable: it contains the orig_num_vars before they were binned
saved_num_vars = copy.deepcopy(num_vars) ### these are original numeric features without '_bin' added
############### BINNING FIRST TIME ##################################################
if Binning_Flag and len(saved_num_vars) > 0:
#### Do binning only when there are numeric features ####
#### When we Bin the first time, we set the entropy_binning flag to False so
            #### no numeric variables are removed. The next time it is called, the original numeric vars will be removed!
part_train, num_vars, important_features, part_cv = add_entropy_binning(part_train,
each_target, saved_num_vars,
saved_important_features, part_cv,
modeltype, entropy_binning=False,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
### you get the name of the original vars which were binned here in this orig_num_vars variable!
orig_num_vars = left_subtract(saved_num_vars,num_vars)
#### you need to know the name of the binner variables. This is where you get it!
binned_num_vars = left_subtract(num_vars,saved_num_vars)
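            ### Illustrative: if saved_num_vars = ['age','fare'] and only 'age' gets entropy-binned (say into an 'age_bin' column), then orig_num_vars = ['age'], binned_num_vars = ['age_bin'], and the binned column is treated as categorical from here on.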
imp_cats += binned_num_vars
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
####################### KMEANS FIRST TIME ############################
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
if KMeans_Featurizer and len(saved_num_vars) > 0:
### DO KMeans Featurizer only if there are numeric features in the data set!
print(' Adding one Feature named "KMeans_Clusters" based on KMeans_Featurizer_Flag=True...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
                #### Make the number of clusters the same as log10 of the number of rows in Train
num_clusters = int(np.round(max(2,np.log10(train.shape[0]))))
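                ### Illustrative: a Train set with 100,000 rows gives int(np.round(max(2, np.log10(100000)))) = 5 clusters; tiny data sets are floored at 2 clusters.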
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features], num_clusters)
else:
### If it is Regression, you don't have to specify the number of clusters
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features])
#### Since this is returning the each_target in X_train, we need to drop it here ###
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
part_train[km_label] = train_clusters
part_cv[km_label] = cv_clusters
#X_train.drop(each_target,axis=1,inplace=True)
imp_cats.append(km_label)
for imp_cat in imp_cats:
part_train[imp_cat] = part_train[imp_cat].astype(int)
part_cv[imp_cat] = part_cv[imp_cat].astype(int)
####### The features are checked again once we add the cluster feature ####
important_features.append(km_label)
else:
print(' KMeans_Featurizer set to False or there are no numeric vars in data')
km_label = ''
####################### STACKING FIRST TIME ############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('Alert! Stacking can produce Highly Overfit models on Training Data...')
### In order to avoid overfitting, we are going to learn from a small sample of data
### That is why we are using X_train to train on and using it to predict on X_cv!
addcol, stacks1 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_train[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
addcol, stacks2 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_cv[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
part_train = part_train.join(pd.DataFrame(stacks1,index=cv_train_index,
columns=addcol))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
part_cv = part_cv.join(pd.DataFrame(stacks2,index=cv_index,
columns=addcol))
print(' Adding %d Stacking feature(s) to training data' %len(addcol))
###### We make sure that we remove any new features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(X_train,addcol,corr_limit,verbose)
important_features += addcol
###############################################################################
#### part train contains the unscaled original train. It also contains binned and orig_num_vars!
        #### DO NOT TOUCH part_train and part_cv -> we need them to recreate train later!
####################### Now do Feature Scaling Here #################################
part_train_scaled, part_cv_scaled = perform_scaling_numeric_vars(part_train, important_features,
part_cv, model_name, SS)
#### part_train_scaled has both predictor and target variables. Target must be removed!
important_features = find_remove_duplicates(important_features)
X_train = part_train_scaled[important_features]
X_cv = part_cv_scaled[important_features]
#### Remember that the next 2 lines are crucial: if X and y are dataframes, then predict_proba
### will return dataframes or series. Otherwise it will return Numpy array's.
## Be consistent when using dataframes with XGB. That's the best way to keep feature names!
print('############### M O D E L B U I L D I N G B E G I N S ####################')
print('Rows in Train data set = %d' %X_train.shape[0])
print(' Features in Train data set = %d' %X_train.shape[1])
print(' Rows in held-out data set = %d' %X_cv.shape[0])
data_dim = X_train.shape[0]*X_train.shape[1]
### Setting up the Estimators for Single Label and Multi Label targets only
if modeltype == 'Regression':
metrics_list = ['neg_mean_absolute_error' ,'neg_mean_squared_error',
'neg_mean_squared_log_error','neg_median_absolute_error']
eval_metric = "rmse"
if scoring_parameter == 'neg_mean_absolute_error' or scoring_parameter =='mae':
meae_scorer = make_scorer(gini_meae, greater_is_better=False)
scorer = meae_scorer
elif scoring_parameter == 'neg_mean_squared_error' or scoring_parameter =='mse':
mse_scorer = make_scorer(gini_mse, greater_is_better=False)
scorer = mse_scorer
elif scoring_parameter == 'neg_mean_squared_log_error' or scoring_parameter == 'log_error':
msle_scorer = make_scorer(gini_msle, greater_is_better=False)
                print('    Log Error is not recommended since predicted values might be negative and cause errors. Using RMSE instead...')
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
elif scoring_parameter == 'neg_median_absolute_error' or scoring_parameter == 'median_error':
mae_scorer = make_scorer(gini_mae, greater_is_better=False)
scorer = mae_scorer
elif scoring_parameter =='rmse' or scoring_parameter == 'root_mean_squared_error':
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
else:
scoring_parameter = 'rmse'
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
#### HYPER PARAMETERS FOR TUNING ARE SETUP HERE ###
if hyper_param == 'GS':
r_params = {
"Forests": {
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': np.logspace(-5,3),
},
"XGBoost": {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
else:
import scipy as sp
r_params = {
"Forests": {
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': sp.stats.uniform(scale=1000),
},
"XGBoost": {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(2, 10),
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
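            ### Note: the 'CatBoost' learning_rate grid above is np.logspace(Alpha_min, Alpha_max, 40); with the defaults Alpha_min=-3 and Alpha_max=2 set earlier, it spans roughly 0.001 to 100 on a log scale.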
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostRegressor(verbose=1,iterations=max_estims,random_state=99,
one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBRegressor(seed=seed,n_jobs=-1,random_state=seed,subsample=subsample,
colsample_bytree=col_sub_sample,n_estimators=max_estims,
objective=objective)
xgbm.set_params(**param)
elif Boosting_Flag is None:
#xgbm = Lasso(max_iter=max_iter,random_state=seed)
xgbm = Lasso(max_iter=max_iter,random_state=seed)
else:
xgbm = RandomForestRegressor(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,
'max_features': "sqrt"
})
else:
#### This is for Binary Classification ##############################
classes = label_dict[each_target]['classes']
metrics_list = ['accuracy_score','roc_auc_score','logloss', 'precision','recall','f1']
            # Create regularization hyperparameter distribution using the C values defined above ####
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#'max_features': [1,2,5, max_features],
#"criterion":['gini','entropy'],
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
}
c_params["CatBoost"] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'max_features': ['log', "sqrt"] ,
#'class_weight':[None,'balanced']
}
# Create regularization hyperparameter distribution using uniform distribution
if len(classes) == 2:
objective = 'binary:logistic'
if scoring_parameter == 'accuracy' or scoring_parameter == 'accuracy_score':
accuracy_scorer = make_scorer(gini_accuracy, greater_is_better=True, needs_proba=False)
scorer =accuracy_scorer
elif scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer =gini_scorer
elif scoring_parameter == 'auc' or scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_scorer = make_scorer(gini_roc, greater_is_better=True, needs_threshold=True)
scorer =roc_scorer
elif scoring_parameter == 'log_loss' or scoring_parameter == 'logloss':
scoring_parameter = 'neg_log_loss'
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'precision' or scoring_parameter == 'precision_score':
precision_scorer = make_scorer(gini_precision, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =precision_scorer
elif scoring_parameter == 'recall' or scoring_parameter == 'recall_score':
recall_scorer = make_scorer(gini_recall, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =recall_scorer
elif scoring_parameter == 'f1' or scoring_parameter == 'f1_score':
f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =f1_scorer
elif scoring_parameter == 'f2' or scoring_parameter == 'f2_score':
f2_scorer = make_scorer(f2_measure, greater_is_better=True, needs_proba=False)
scorer =f2_scorer
else:
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
#f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
# pos_label=rare_class)
#scorer = f1_scorer
### DO NOT USE NUM CLASS WITH BINARY CLASSIFICATION ######
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance,
warm_start=warm_start, max_iter=max_iter)
else:
xgbm = RandomForestClassifier(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,'oob_score':True,
'max_features': "sqrt"
})
else:
##### This is for MULTI Classification ##########################
objective = 'multi:softmax'
eval_metric = "mlogloss"
if scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = gini_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_auc_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = roc_auc_scorer
elif scoring_parameter == 'average_precision' or scoring_parameter == 'mean_precision':
average_precision_scorer = make_scorer(gini_average_precision,
greater_is_better=True, needs_proba=True)
scorer = average_precision_scorer
elif scoring_parameter == 'samples_precision':
samples_precision_scorer = make_scorer(gini_samples_precision,
greater_is_better=True, needs_proba=True)
scorer = samples_precision_scorer
elif scoring_parameter == 'weighted_precision' or scoring_parameter == 'weighted-precision':
weighted_precision_scorer = make_scorer(gini_weighted_precision,
greater_is_better=True, needs_proba=True)
scorer = weighted_precision_scorer
elif scoring_parameter == 'macro_precision':
macro_precision_scorer = make_scorer(gini_macro_precision,
greater_is_better=True, needs_proba=True)
scorer = macro_precision_scorer
                elif scoring_parameter == 'micro_precision':
                    micro_precision_scorer = make_scorer(gini_micro_precision,
                                                  greater_is_better=True, needs_proba=True)
                    scorer = micro_precision_scorer
elif scoring_parameter == 'samples_recall':
samples_recall_scorer = make_scorer(gini_samples_recall, greater_is_better=True, needs_proba=True)
scorer = samples_recall_scorer
elif scoring_parameter == 'weighted_recall' or scoring_parameter == 'weighted-recall':
weighted_recall_scorer = make_scorer(gini_weighted_recall,
greater_is_better=True, needs_proba=True)
scorer = weighted_recall_scorer
elif scoring_parameter == 'macro_recall':
macro_recall_scorer = make_scorer(gini_macro_recall,
greater_is_better=True, needs_proba=True)
scorer = macro_recall_scorer
elif scoring_parameter == 'micro_recall':
micro_recall_scorer = make_scorer(gini_micro_recall, greater_is_better=True, needs_proba=True)
scorer = micro_recall_scorer
elif scoring_parameter == 'samples_f1':
samples_f1_scorer = make_scorer(gini_samples_f1,
greater_is_better=True, needs_proba=True)
scorer = samples_f1_scorer
elif scoring_parameter == 'weighted_f1' or scoring_parameter == 'weighted-f1':
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
elif scoring_parameter == 'macro_f1':
macro_f1_scorer = make_scorer(gini_macro_f1,
greater_is_better=True, needs_proba=True)
scorer = macro_f1_scorer
elif scoring_parameter == 'micro_f1':
micro_f1_scorer = make_scorer(gini_micro_f1,
greater_is_better=True, needs_proba=True)
scorer = micro_f1_scorer
else:
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
import scipy as sp
if Boosting_Flag:
# Create regularization hyperparameter distribution using uniform distribution
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100, max_estims),
'max_depth': sp.stats.randint(1, 10)
}
c_params['CatBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
num_class= len(classes),
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
if hyper_param == 'GS':
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
}
else:
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
}
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance, multi_class='auto',
max_iter=max_iter, warm_start=False,
)
else:
if hyper_param == 'GS':
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion":['gini','entropy'],
}
else:
c_params["Forests"] = {
##### I have set these to avoid OverFitting which is a problem for small data sets ###
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'class_weight':[None,'balanced']
}
xgbm = RandomForestClassifier(bootstrap=bootstrap, oob_score=True,warm_start=warm_start,
n_estimators=100,max_depth=3,
                                            min_samples_leaf=2, max_features='sqrt',
random_state=seed,n_jobs=-1)
        ###### Now do GridSearchCV or RandomizedSearchCV with Early-stopping ################
if modeltype == 'Regression':
#scoreFunction = {"mse": "neg_mean_squared_error", "mae": "neg_mean_absolute_error"}
#### I have set the Verbose to be False here since it produces too much output ###
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=r_params[model_name],
scoring = scorer,
n_jobs=-1,
cv = scv,
refit = refit_metric,
return_train_score = True,
verbose=0)
elif hyper_param == 'RS':
gs = RandomizedSearchCV(xgbm,
param_distributions = r_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
cv = scv,
n_jobs=-1,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
else:
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=c_params[model_name],
scoring = scorer,
return_train_score = True,
n_jobs=-1,
refit = refit_metric,
cv = scv,
verbose=0)
elif hyper_param == 'RS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = RandomizedSearchCV(xgbm,
param_distributions = c_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
n_jobs=-1,
cv = scv,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
#trains and optimizes the model
eval_set = [(X_train,y_train),(X_cv,y_cv)]
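        ### For XGBoost, early_stopping_rounds monitors the last tuple in eval_set, i.e. the held-out (X_cv, y_cv) pair defined here.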
print('Finding Best Model and Hyper Parameters for Target: %s...' %each_target)
##### Here is where we put the part_train and part_cv together ###########
if modeltype != 'Regression':
### Do this only for Binary Classes and Multi-Classes, both are okay
baseline_accu = 1-(train[each_target].value_counts(1).sort_values())[rare_class]
print(' Baseline Accuracy Needed for Model = %0.2f%%' %(baseline_accu*100))
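            ### Illustrative: for a binary target where the rare class makes up 12% of the training rows, the majority-class baseline accuracy to beat is 88%.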
print('CPU Count = %s in this device' %CPU_count)
if modeltype == 'Regression':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(80000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
else:
if hyper_param == 'GS':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(300000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(10000.*CPU_count)))
elif Boosting_Flag is None:
#### A Linear model is usually the fastest ###########
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(16000.*CPU_count)))
else:
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(100000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(25000.*CPU_count)))
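        ### The rough estimate above is data_dim * max_class_length / (model-specific constant * CPU_count) minutes; e.g. an XGBoost run with 200,000 cells, 2 classes and 4 CPUs under RS gives 200000*2/(40000.*4) = 2.5 minutes (illustrative only).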
##### Since we are using Multiple Models each with its own quirks, we have to make sure it is done this way
##### ############ TRAINING MODEL FIRST TIME WITH X_TRAIN AND TESTING ON X_CV ############
model_start_time = time.time()
################################################################################################################################
##### BE VERY CAREFUL ABOUT MODIFYING THIS NEXT LINE JUST BECAUSE IT APPEARS TO BE A CODING MISTAKE. IT IS NOT!! #############
################################################################################################################################
#######
if Imbalanced_Flag:
if modeltype == 'Regression':
########### In case someone sets the Imbalanced_Flag mistakenly to True and it is Regression, you must set it to False ######
Imbalanced_Flag = False
else:
####### Imbalanced with Classification #################
try:
print('############## Imbalanced Flag on: Training model with SMOTE Oversampling method ###########')
#### The model is the downsampled model Trained on downsampled data sets. ####
model, X_train, y_train = training_with_SMOTE(X_train,y_train,eval_set, gs,
Boosting_Flag, eval_metric,
modeltype, model_name,training=True,
minority_class=rare_class,imp_cats=imp_cats,
calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params = cpu_params,
verbose=verbose)
if isinstance(model, str):
model = copy.deepcopy(gs)
                    #### If the downsampled (SMOTE) model failed, it will just be an empty string, so you try the regular model ###
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
try:
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train,
cat_features=imp_cats,eval_set=(X_cv,y_cv), use_best_model=True,plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats,use_best_model=False,plot=False)
else:
model.fit(X_train, y_train)
#### If SMOTE training succeeds, that model is used to get the best score and becomes the model going forward ##
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
else:
val_keys = list(model.best_score_.keys())
best_score = model.best_score_[val_keys[-1]][validation_metric]
except:
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
best_score = 0
################################################################################################################################
####### Though this next step looks like it is a Coding Mistake by Me, don't change it!!! ###################
####### This is for case when Imbalanced with Classification succeeds, this next step is skipped ############
################################################################################################################################
if not Imbalanced_Flag:
########### This is for both regular Regression and regular Classification Model Training. It is not a Mistake #############
########### In case Imbalanced training fails, this method is also tried. That's why we test the Flag here!! #############
try:
model = copy.deepcopy(gs)
if Boosting_Flag:
if model_name == 'XGBoost':
try:
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train, cat_features=imp_cats,
eval_set=(X_cv,y_cv), use_best_model=True, plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X_train, y_train)
except:
print('Training regular model first time is Erroring: Check if your Input is correct...')
return
try:
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
validation_metric = copy.deepcopy(scoring_parameter)
else:
val_keys = list(model.best_score_.keys())
if 'validation' in val_keys:
validation_metric = list(model.best_score_['validation'].keys())[0]
best_score = model.best_score_['validation'][validation_metric]
else:
validation_metric = list(model.best_score_['learn'].keys())[0]
best_score = model.best_score_['learn'][validation_metric]
except:
print('Error: Not able to print validation metrics. Continuing...')
## TRAINING OF MODELS COMPLETED. NOW GET METRICS on CV DATA ################
print(' Actual training time (in seconds): %0.0f' %(time.time()-model_start_time))
print('########### S I N G L E M O D E L R E S U L T S #################')
if modeltype != 'Regression':
############## This is for Classification Only !! ########################
if scoring_parameter in ['logloss','neg_log_loss','log_loss','log-loss','']:
print('{}-fold Cross Validation {} = {}'.format(n_splits, 'logloss', best_score))
elif scoring_parameter in ['accuracy','balanced-accuracy','balanced_accuracy','roc_auc','roc-auc',
'f1','precision','recall','average-precision','average_precision',
'weighted_f1','weighted-f1','AUC']:
print('%d-fold Cross Validation %s = %0.1f%%' %(n_splits,scoring_parameter, best_score*100))
else:
print('%d-fold Cross Validation %s = %0.1f' %(n_splits,validation_metric, best_score))
else:
######### This is for Regression only ###############
if best_score < 0:
best_score = best_score*-1
if scoring_parameter == '':
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,'RMSE', best_score))
else:
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,validation_metric, best_score))
#### We now need to set the Best Parameters, Fit the Model on Full X_train and Predict on X_cv
### Find what the order of best params are and set the same as the original model ###
if hyper_param == 'RS' or hyper_param == 'GS':
best_params= model.best_params_
print(' Best Parameters for Model = %s' %model.best_params_)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
#### CatBoost does not need too many iterations. Just make sure you set the iterations low after the first time!
if model.get_best_iteration() == 0:
### In some small data sets, the number of iterations becomes zero, hence we set it as a default number
best_params = dict(zip(['iterations','learning_rate'],[1000,model.get_all_params()['learning_rate']]))
else:
best_params = dict(zip(['iterations','learning_rate'],[model.get_best_iteration(),model.get_all_params()['learning_rate']]))
print(' %s Best Parameters for Model: Iterations = %s, learning_rate = %0.2f' %(
model_name, model.get_best_iteration(), model.get_all_params()['learning_rate']))
if hyper_param == 'RS' or hyper_param == 'GS':
#### In the case of CatBoost, we don't do any Hyper Parameter tuning #########
gs = copy.deepcopy(model)
model = gs.best_estimator_
if modeltype == 'Multi_Classification':
try:
if X_cv.shape[0] <= 1000:
# This works well for small data sets; sigmoid is the parametric option
method= 'sigmoid' # 'isotonic' # #
else:
# This works well for large data sets; isotonic is non-parametric
method= 'isotonic'
model = CalibratedClassifierCV(model, method=method, cv="prefit")
model.fit(X_train, y_train)
print('Using a Calibrated Classifier in this Multi_Classification dataset to improve results...')
calibrator_flag = True
except:
calibrator_flag = False
pass
### performed_ensembling starts out False here and is set to True only after ensembling actually completes ##
if model_name.lower() == 'catboost':
print('Best Model selected and its parameters are:\n %s' %model.get_all_params())
else:
print('Best Model selected and its parameters are:\n %s' %model)
performed_ensembling = False
if modeltype != 'Regression':
m_thresh = 0.5
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
if len(classes) <= 2:
print('Finding Best Threshold for Highest F1 Score...')
precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,rare_class])
#precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,1])
try:
f1 = (2*precision*recall)/(precision+recall)
f1 = np.nan_to_num(f1)
m_idx = np.argmax(f1)
m_thresh = thresholds[m_idx]
best_f1 = f1[m_idx]
except:
best_f1 = f1_score(y_cv, y_pred)
m_thresh = 0.5
# retrieve just the probabilities for the positive class
pos_probs = y_proba[:, rare_class]
if verbose >= 1:
# create a histogram of the predicted probabilities for the Rare Class since it will help decide threshold
plt.figure(figsize=(6,6))
plt.hist(pos_probs, bins=Bins, color='g')
plt.title("Model's Predictive Probability Histogram for Rare Class=%s with suggested threshold in red" %rare_class_orig)
plt.axvline(x=m_thresh, color='r', linestyle='--')
plt.show();
print(" Using threshold=0.5. However, %0.3f provides better F1=%0.2f for rare class..." %(m_thresh,best_f1))
###y_pred = (y_proba[:,rare_class]>=m_thresh).astype(int)
predicted = copy.deepcopy(y_proba)
predicted [:,0] = (predicted [:,0] >= (1-m_thresh)).astype('int')
predicted [:,1] = (predicted [:,1] > m_thresh).astype('int')
if m_thresh != 0.5:
y_pred = predicted[:,rare_class]
else:
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
else:
y_pred = model.predict(X_cv)
### This is where you print out the First Model's Results ########
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
if isinstance(y_cv,pd.Series):
y_cv = y_cv.values
print('%s Model Prediction Results on Held Out CV Data Set:' %model_name)
if modeltype == 'Regression':
rmsle_calculated_m = rmse(y_cv, y_pred)
print_regression_model_stats(y_cv, y_pred,'%s Model: Predicted vs Actual for %s'%(model_name,each_target))
else:
if model_name == 'Forests':
if calibrator_flag:
print(' OOB Score = %0.3f' %model.base_estimator.oob_score_)
else:
print(' OOB Score = %0.3f' %model.oob_score_)
rmsle_calculated_m = balanced_accuracy_score(y_cv,y_pred)
if len(classes) == 2:
print(' Regular Accuracy Score = %0.1f%%' %(accuracy_score(y_cv,y_pred)*100))
y_probas = model.predict_proba(X_cv)
rmsle_calculated_m = print_classification_model_stats(y_cv, y_probas, m_thresh)
else:
###### Use a nice classification matrix printing module here #########
print(' Balanced Accuracy Score = %0.1f%%' %(rmsle_calculated_m*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv, y_pred))
###### SET BEST PARAMETERS HERE ######
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if modeltype == 'Regression':
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
try:
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d regressors' %len(cols))
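#### Note: the weights below sum to one: 0.5 for this model's predictions plus 4 x 0.125 for the other regressors.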
ensem_pred = subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])
else:
print(' Calculating regular average ensemble of %d regressors' %len(cols))
ensem_pred = (subm[cols].mean(axis=1))
print('#############################################################################')
performed_ensembling = True
#### Since we have a new ensembled y_pred, make sure it is series or array before printing it!
if isinstance(y_pred,pd.Series):
print_regression_model_stats(y_cv, ensem_pred.values,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
else:
print_regression_model_stats(y_cv, ensem_pred,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
## This is for Classification Problems Only #
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
#### We do Ensembling only if the Stacking_Flag is False. Otherwise, we don't!
try:
classes = label_dict[each_target]['classes']
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d classifiers' %len(cols))
ensem_pred = np.round(subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(cols))
ensem_pred = (subm[cols].mean(axis=1)).astype(int)
print('#############################################################################')
performed_ensembling = True
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
print('No Ensembling of models done since Stacking_Flag = True ')
if verbose >= 1:
if len(classes) == 2:
plot_classification_results(model,X_cv, y_cv, y_pred, classes, class_nums, each_target )
else:
try:
Draw_ROC_MC_ML(model, X_cv, y_cv, each_target, model_name, verbose)
Draw_MC_ML_PR_ROC_Curves(model,X_cv,y_cv)
except:
print('Could not plot PR and ROC curves. Continuing...')
#### In case there are special scoring_parameter requests, you can print it here!
if scoring_parameter == 'roc_auc' or scoring_parameter == 'auc':
if len(classes) == 2:
print(' ROC AUC Score = %0.1f%%' %(roc_auc_score(y_cv, y_proba[:,rare_class])*100))
else:
print(' No ROC AUC score for multi-class problems')
elif scoring_parameter == 'jaccard':
accu_all = jaccard_singlelabel(y_cv, y_pred)
print(' Mean Jaccard Similarity = {:,.1f}%'.format(
accu_all*100))
## This is for multi-label problems ##
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
elif scoring_parameter == 'basket_recall':
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
if not Stacking_Flag and performed_ensembling:
if modeltype == 'Regression':
rmsle_calculated_f = rmse(y_cv, y_pred)
print('After multiple models, Ensemble Model Results:')
print(' RMSE Score = %0.5f' %(rmsle_calculated_f,))
print('#############################################################################')
if rmsle_calculated_f < rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
else:
rmsle_calculated_f = balanced_accuracy_score(y_cv,y_pred)
print('After multiple models, Ensemble Model Results:')
rare_pct = y_cv[y_cv==rare_class].shape[0]/y_cv.shape[0]
print(' Balanced Accuracy Score = %0.3f%%' %(
rmsle_calculated_f*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv,y_pred))
print('#############################################################################')
if rmsle_calculated_f > rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
if verbose >= 1:
if Boosting_Flag:
try:
if model_name.lower() == 'catboost':
plot_xgb_metrics(model,catboost_scoring,eval_set,modeltype,'%s Results' %each_target,
model_name)
else:
plot_xgb_metrics(gs.best_estimator_,eval_metric,eval_set,modeltype,'%s Results' %each_target,
model_name)
except:
print('Could not plot Model Evaluation Results Metrics')
else:
try:
plot_RS_params(gs.cv_results_, scoring_parameter, each_target)
except:
print('Could not plot Cross Validation Parameters')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
print('Training model on complete Train data and Predicting using give Test Data...')
################ I M P O R T A N T: C O M B I N I N G D A T A ######################
#### This is Second time: we combine train and CV into Train and Test Sets #################
train = part_train.append(part_cv)
important_features = [x for x in list(train) if x not in [each_target]]
############################################################################################
###### Now that we have used partial data to make stacking predictors, we can remove them from consideration!
if Stacking_Flag:
important_features = left_subtract(important_features, addcol)
try:
train.drop(addcol,axis=1, inplace=True)
except:
pass
###### Similarly we will have to create KMeans_Clusters again using full Train data!
if KMeans_Featurizer:
important_features = left_subtract(important_features, km_label)
try:
train.drop(km_label,axis=1, inplace=True)
except:
pass
########################## BINNING SECOND TIME ###############################
new_num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
## Now we re-use the saved_num_vars which contained a list of num_vars for binning now!
###### Once again we do Entropy Binning on the Full Train Data Set !!
########################## BINNING SECOND TIME ###############################
if Binning_Flag and len(saved_num_vars) > 0:
### when you bin the second time, you have to send in important_features with original
### numeric variables so that it works on binning only those. Otherwise it will fail.
### Do Entropy Binning only if there are numeric variables in the data set! #####
#### When we Bin this second time, we set the entropy_binning flag to True so
#### that all numeric variables that are binned are removed. This way, only bins remain.
train, num_vars, important_features, test = add_entropy_binning(train, each_target,
orig_num_vars, important_features, test,
modeltype, entropy_binning=True,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
####################### KMEANS SECOND TIME ############################
if KMeans_Featurizer and len(saved_num_vars) > 0:
#### Perform KMeans Featurizer only if there are numeric variables in data set! #########
print('Adding one feature named "KMeans_Clusters" using KMeans_Featurizer...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
#### Make the number of clusters the same as log10 of the number of rows in Train
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features], num_clusters)
else:
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features])
#### Now make sure that the cat features are either string or integers ######
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
train[km_label] = train_cluster
if not isinstance(test, str):
test[km_label] = test_cluster
#X_train.drop(each_target,axis=1,inplace=True)
for imp_cat in imp_cats:
train[imp_cat] = train[imp_cat].astype(int)
if not isinstance(test, str):
test[imp_cat] = test[imp_cat].astype(int)
saved_num_vars.append(km_label) ### You need to add it to this variable list for Scaling later!
important_features.append(km_label)
########################## STACKING SECOND TIME ###############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('CAUTION: Stacking can produce Highly Overfit models on Training Data...')
### This second pass builds the stacking features on the full Train data.
### (The earlier pass used the partial X_cv sample precisely to limit overfitting.)
addcol, stacks1 = QuickML_Stacking(train[important_features],train[each_target],'',
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
#### Unlike the earlier Partial_Train pass, these stacking columns are built from the full Train data.
addcols = copy.deepcopy(addcol)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
train = train.join(pd.DataFrame(stacks1,index=train.index,
columns=addcols))
##### Leaving multiple columns for Stacking is best! Do not do the average of predictions!
print(' Adding %d Stacking feature(s) to training data' %len(addcols))
if not isinstance(orig_test, str):
### For the Test predictions, the stacking models are trained on the full Train data
### and then used to predict on the Test features.
_, stacks2 = QuickML_Stacking(train[important_features],train[each_target],test[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
test = test.join(pd.DataFrame(stacks2,index=test.index,
columns=addcols))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
#test = test.join(pd.DataFrame(stacks2.mean(axis=1).round().astype(int),
# columns=[addcol],index=test.index))
###### We make sure that we remove too many features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(train,addcol,corr_limit,verbose)
important_features += addcols
saved_num_vars.append(addcol) ### You need to add it for binning later!
############################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(saved_important_features)
#important_features = copy.deepcopy(red_preds)
############################################################################################
if model_name.lower() == 'catboost':
print(' Setting best params for CatBoost model from Initial State since you cannot change params to a fitted Catboost model ')
model = xgbm.set_params(**best_params)
print(' Number of Categorical and Integer variables used in CatBoost training = %d' %len(imp_cats))
#### Perform Scaling of Train data a second time using FULL TRAIN data set this time !
#### important_features keeps track of all variables that we need to ensure they are scaled!
train, test = perform_scaling_numeric_vars(train, important_features, test,
model_name, SS)
################ T R A I N I N G M O D E L A S E C O N D T I M E ###################
### The next 2 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
trainm = train[important_features+[each_target]]
red_preds = copy.deepcopy(important_features)
X = trainm[red_preds]
y = trainm[each_target]
eval_set = [()]
##### ############ TRAINING MODEL SECOND TIME WITH FULL_TRAIN AND PREDICTING ON TEST ############
model_start_time = time.time()
if modeltype != 'Regression':
if Imbalanced_Flag:
try:
print('################## Imbalanced Flag Set ############################')
print('Imbalanced Class Training using SMOTE Rare Class Oversampling method...')
model, X, y = training_with_SMOTE(X,y, eval_set, model,
Boosting_Flag, eval_metric,modeltype, model_name,
training=False, minority_class=rare_class,
imp_cats=imp_cats, calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params=cpu_params,
verbose=verbose)
if isinstance(model, str):
#### If SMOTE training failed, it will just be an empty string, so you can try the regular model ###
model = copy.deepcopy(best_model)
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
#### Set the Verbose to 0 since we don't want too much output ##
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
### There is no X_cv hold-out in this second pass, so no eval_set is passed here
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
print('Training regular model second time erroring: Check if Input is correct...')
return
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X, y)
except:
print('Training model second time is Erroring: Check if Input is correct...')
return
print('Actual Training time taken in seconds = %0.0f' %(time.time()-model_start_time))
## TRAINING OF MODELS COMPLETED. NOW START PREDICTIONS ON TEST DATA ################
#### new_cols is to keep track of new prediction columns we are creating #####
new_cols = []
if not isinstance(orig_test, str):
### If there is a test data frame, then let us predict on it #######
### The next 3 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
try:
#### We need the id columns to carry over into the predictions ####
testm = orig_test[id_cols].join(test[red_preds])
except:
### if for some reason id columns are not available, then do without it
testm = test[red_preds]
X_test = testm[red_preds]
else:
##### If there is no Test file, then do a final prediction on Train itself ###
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
testm = orig_train[id_cols].join(trainm[red_preds])
X_test = testm[red_preds]
if modeltype == 'Regression':
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
######## This is for Regression Problems Only ###########
###### If Stacking_ Flag is False, then we do Ensembling #######
if not Stacking_Flag:
try:
new_cols = []
subm =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script
from pandas_profiling.config import Settings
from pandas_profiling.model.summary_helpers_image import (
extract_exif,
hash_image,
is_image_truncated,
open_image,
)
def mad(arr: np.ndarray) -> np.ndarray:
"""Median Absolute Deviation: a "Robust" version of standard deviation.
    Indicates the variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
return np.median(np.abs(arr - np.median(arr)))
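# A minimal usage sketch (illustrative only; the sample values are made up): unlike the
# standard deviation, the MAD is barely moved by the single outlier below.
def _mad_example() -> float:
    return float(mad(np.array([1.0, 2.0, 2.0, 3.0, 100.0])))  # -> 1.0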
def named_aggregate_summary(series: pd.Series, key: str) -> dict:
summary = {
f"max_{key}": np.max(series),
f"mean_{key}": np.mean(series),
f"median_{key}": np.median(series),
f"min_{key}": np.min(series),
}
return summary
def length_summary(series: pd.Series, summary: dict = None) -> dict:
if summary is None:
summary = {}
length = series.str.len()
summary.update({"length": length})
summary.update(named_aggregate_summary(length, "length"))
return summary
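# A small illustrative sketch (made-up input): for ["a", "bbb"] the returned dict holds the
# per-value lengths under "length" plus max/mean/median/min_length aggregates.
def _length_summary_example() -> dict:
    return length_summary(pd.Series(["a", "bbb"]))  # e.g. result["max_length"] == 3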
def file_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
    Returns:
        A dict of Series with file size and created/accessed/modified timestamps.
"""
# Transform
stats = series.map(lambda x: os.stat(x))
def convert_datetime(x: float) -> str:
return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S")
# Transform some more
summary = {
"file_size": stats.map(lambda x: x.st_size),
"file_created_time": stats.map(lambda x: x.st_ctime).map(convert_datetime),
"file_accessed_time": stats.map(lambda x: x.st_atime).map(convert_datetime),
"file_modified_time": stats.map(lambda x: x.st_mtime).map(convert_datetime),
}
return summary
def path_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
    Returns:
        A dict with the common path prefix and value counts of stems, suffixes, names, parents and anchors.
"""
# TODO: optimize using value counts
summary = {
"common_prefix": os.path.commonprefix(series.values.tolist())
or "No common prefix",
"stem_counts": series.map(lambda x: os.path.splitext(x)[0]).value_counts(),
"suffix_counts": series.map(lambda x: os.path.splitext(x)[1]).value_counts(),
"name_counts": series.map(lambda x: os.path.basename(x)).value_counts(),
"parent_counts": series.map(lambda x: os.path.dirname(x)).value_counts(),
"anchor_counts": series.map(lambda x: os.path.splitdrive(x)[0]).value_counts(),
}
summary["n_stem_unique"] = len(summary["stem_counts"])
summary["n_suffix_unique"] = len(summary["suffix_counts"])
summary["n_name_unique"] = len(summary["name_counts"])
summary["n_parent_unique"] = len(summary["parent_counts"])
summary["n_anchor_unique"] = len(summary["anchor_counts"])
return summary
def url_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
    Returns:
        A dict of value counts for the URL scheme, netloc, path, query and fragment parts.
"""
summary = {
"scheme_counts": series.map(lambda x: x.scheme).value_counts(),
"netloc_counts": series.map(lambda x: x.netloc).value_counts(),
"path_counts": series.map(lambda x: x.path).value_counts(),
"query_counts": series.map(lambda x: x.query).value_counts(),
"fragment_counts": series.map(lambda x: x.fragment).value_counts(),
}
return summary
def count_duplicate_hashes(image_descriptions: dict) -> int:
"""
Args:
        image_descriptions: list of image description dicts (as returned by extract_image_information)
    Returns:
        The number of images whose hash duplicates that of an earlier image.
"""
counts = pd.Series(
[x["hash"] for x in image_descriptions if "hash" in x]
).value_counts()
return counts.sum() - len(counts)
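# An illustrative sketch (made-up hashes): three images of which two share a hash count as one duplicate.
def _count_duplicate_hashes_example() -> int:
    return count_duplicate_hashes([{"hash": "a"}, {"hash": "a"}, {"hash": "b"}])  # -> 1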
def extract_exif_series(image_exifs: list) -> dict:
"""
Args:
        image_exifs: list of EXIF dicts, one per image
    Returns:
        A dict with value counts per EXIF key, plus counts of the EXIF keys themselves under "exif_keys".
"""
exif_keys = []
exif_values: dict = {}
for image_exif in image_exifs:
# Extract key
exif_keys.extend(list(image_exif.keys()))
# Extract values per key
for exif_key, exif_val in image_exif.items():
if exif_key not in exif_values:
exif_values[exif_key] = []
exif_values[exif_key].append(exif_val)
series = {"exif_keys": pd.Series(exif_keys, dtype=object).value_counts().to_dict()}
for k, v in exif_values.items():
series[k] = pd.Series(v).value_counts()
return series
def extract_image_information(
path: Path, exif: bool = False, hash: bool = False
) -> dict:
"""Extracts all image information per file, as opening files is slow
Args:
path: Path to the image
exif: extract exif information
hash: calculate hash (for duplicate detection)
Returns:
A dict containing image information
"""
information: dict = {}
image = open_image(path)
information["opened"] = image is not None
if image is not None:
information["truncated"] = is_image_truncated(image)
if not information["truncated"]:
information["size"] = image.size
if exif:
information["exif"] = extract_exif(image)
if hash:
information["hash"] = hash_image(image)
return information
def image_summary(series: pd.Series, exif: bool = False, hash: bool = False) -> dict:
"""
Args:
series: series to summarize
exif: extract exif information
hash: calculate hash (for duplicate detection)
    Returns:
        A dict with image dimension statistics and, optionally, hash and EXIF summaries.
"""
image_information = series.apply(
partial(extract_image_information, exif=exif, hash=hash)
)
summary = {
"n_truncated": sum(
[1 for x in image_information if "truncated" in x and x["truncated"]]
),
"image_dimensions": pd.Series(
[x["size"] for x in image_information if "size" in x],
name="image_dimensions",
),
}
image_widths = summary["image_dimensions"].map(lambda x: x[0])
summary.update(named_aggregate_summary(image_widths, "width"))
image_heights = summary["image_dimensions"].map(lambda x: x[1])
summary.update(named_aggregate_summary(image_heights, "height"))
image_areas = image_widths * image_heights
summary.update(named_aggregate_summary(image_areas, "area"))
if hash:
summary["n_duplicate_hash"] = count_duplicate_hashes(image_information)
if exif:
exif_series = extract_exif_series(
[x["exif"] for x in image_information if "exif" in x]
)
summary["exif_keys_counts"] = exif_series["exif_keys"]
summary["exif_data"] = exif_series
return summary
def get_character_counts(series: pd.Series) -> Counter:
"""Function to return the character counts
Args:
series: the Series to process
Returns:
A dict with character counts
"""
return Counter(series.str.cat())
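# An illustrative sketch (made-up input): the string values are concatenated before counting,
# so ["ab", "b"] yields Counter({"b": 2, "a": 1}).
def _get_character_counts_example() -> Counter:
    return get_character_counts(pd.Series(["ab", "b"]))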
def counter_to_series(counter: Counter) -> pd.Series:
if not counter:
return
|
pd.Series([], dtype=object)
|
pandas.Series
|
from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from sys import byteorder
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_sequence,
is_unsigned_integer_dtype,
pandas_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_indexing_slices_equivalent,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_metadata_equivalent,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import ( # noqa:F401
get_dtype,
get_obj,
)
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.api import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from pandas.core.arrays import (
BaseMaskedArray,
ExtensionArray,
PandasArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_NUMPY_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_NUMPY_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
FLOAT_NUMPY_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_NUMPY_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NARROW_NP_DTYPES = [
np.float16,
np.float32,
np.int8,
np.int16,
np.int32,
np.uint8,
np.uint16,
np.uint32,
]
ENDIAN = {"little": "<", "big": ">"}[byteorder]
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
NP_NAT_OBJECTS = [
cls("NaT", unit)
for cls in [np.datetime64, np.timedelta64]
for unit in [
"Y",
"M",
"W",
"D",
"h",
"m",
"s",
"ms",
"us",
"ns",
"ps",
"fs",
"as",
]
]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index._with_infer(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is np.ndarray or box_cls is np.array:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
"""
Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
"""
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if dtype is None:
return np.asarray(obj)
return extract_array(obj, extract_numpy=True)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
"""make a length k index or n categories"""
x = rands_array(nchars=4, size=n, replace=False)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
"""make a length k IntervalIndex"""
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
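# A short usage sketch (illustrative only, not one of the pandas test helpers):
# makeIntervalIndex(k=4) builds breaks np.linspace(0, 100, 5) = [0, 25, 50, 75, 100],
# i.e. the right-closed intervals (0, 25], (25, 50], (50, 75], (75, 100].
def _make_interval_index_example() -> IntervalIndex:
    return makeIntervalIndex(k=4)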
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeNumericIndex(k=10, name=None, *, dtype):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
if is_integer_dtype(dtype):
values = np.arange(k, dtype=dtype)
if is_unsigned_integer_dtype(dtype):
values += 2 ** (dtype.itemsize * 8 - 1)
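# e.g. for uint64 this adds 2**63, shifting the values into the upper half of the
# unsigned range (beyond what a signed int64 could represent)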
elif is_float_dtype(dtype):
values = np.random.random_sample(k) - np.random.random_sample(1)
values.sort()
values = values * (10 ** np.random.randint(0, 9))
else:
raise NotImplementedError(f"wrong dtype {dtype}")
return NumericIndex(values, dtype=dtype, name=name)
def makeIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="int64")
return Int64Index(base_idx)
def makeUIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="uint64")
return UInt64Index(base_idx)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="float64")
return Float64Index(base_idx)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
N = (k // 2) + 1
rng = range(N)
mi = MultiIndex.from_product([("foo", "bar"), rng], names=names, **kwargs)
assert len(mi) >= k # GH#38795
return mi[:k]
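# A short usage sketch (illustrative only): makeMultiIndex(k=4) keeps the first 4 tuples of
# the product ("foo", "bar") x range(3), i.e. ("foo", 0), ("foo", 1), ("foo", 2), ("bar", 0).
def _make_multi_index_example() -> MultiIndex:
    return makeMultiIndex(k=4)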
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def make_rand_series(name=None, dtype=np.float64):
index = makeStringIndex(_N)
data = np.random.randn(_N)
data = data.astype(dtype, copy=False)
return Series(data, index=index, name=name)
def makeFloatSeries(name=None):
return make_rand_series(name=name)
def makeStringSeries(name=None):
return make_rand_series(name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return
|
DataFrame(data)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 16:15:46 2020
@author: navarrenhn
"""
import pandas as pd
def feed_demand(groups, Lancet_diet):
Region_demands = {}
for name, group in groups:
d = Lancet_diet.copy()
d["GROUP"] = name
#print(d)
##create animal product demands:
d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["pork"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["chicken and other poultry"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["fish"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fishandseafood", "Total"].min())
#d = d.drop(["fish"])
#d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
#d.loc[["eggs"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
##create feed demands:
##need to determine oil production from soymeal production for feed
##feed required for 1 dairy cow per day
##similar feed is assumed for beef cattle being reared on mixed system (pasture/crop-fed)
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn equals sum of fresh and corn in concentrate
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
cow_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [41250, 13750 + (1250*0.86), 750*0.8 ]}##, 500, 500, 500, 250, 250, 1000]}
cow_Lancet_diet_per_day = pd.DataFrame(cow_dict)
cow_Lancet_diet_per_day = cow_Lancet_diet_per_day.set_index(["type"])
cow_feed_per_g_milk = cow_Lancet_diet_per_day["gram"]/25000
##beef production from dairy cows and dairy calf rearing
calf_per_g_milk = (1.5/(25000*365*6)) ##3 calves per cow divided by two as only males are used(?)
##Type A calf of Nguyen 2010 using 8438 kg of feed per 1000 kg carcass weight (= per 660kg edible meat)
##(significantly more soymeal --> look into, maybe change Lancet_diet)
g_calf_per_g_milk = calf_per_g_milk * 214880
cow_feed_per_g_calf = ((cow_Lancet_diet_per_day["gram"]/55000)*8438000)/660000
##One 680 kg Holstein dairy cow delivers 224.52 kg of meat (excluding offal and bones) ##what to do with offal?
g_dairycow_beef_per_g_milk = 224520.0 / 36500000.0 #36500000 g milk in her milk giving time of 4 years
g_beef_per_g_milk = g_calf_per_g_milk + g_dairycow_beef_per_g_milk
##feed demand from classic suckler-cow rearing systems is 20863 kg of feed per 1000kg carcass weight (= per 660kg edible meat) (Nguyen 2010)
cow_feed_per_g_suckler_beef = ((cow_Lancet_diet_per_day["gram"]/55000)*20863000)/660000
##required extra beef production besides dairy cows and their calves to reach demand
required_extra_beef_production = max(d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]].values[0][0] - (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * g_beef_per_g_milk), 0)
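##beef from culled dairy cows and their calves is credited first; only the remaining shortfall
##(never negative, hence the max with 0) has to come from suckler-cow beef production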
##this needs a lamb factor
total_feed_cows_for_Lancet_diet_per_day = (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * g_calf_per_g_milk * cow_feed_per_g_calf) + (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * cow_feed_per_g_milk) + (required_extra_beef_production * cow_feed_per_g_suckler_beef)
##one dutch cow delivers on average 25 liter milk per day and eats 55kg of feed a day
##assuming 3 calves per dairy cow of which half is male so used for slaughter
##one dutch dairy cow is culled after 6 years on average
##if not, how much feed does a meat cow need?
##how much manure do the cows produce? (for effect on N input ratio)
##soybean meal assumed to equal 0.8 times fresh soybean weight as in cow Lancet_diet
##whole grains assumed here
##one dutch egg-laying chicken lays 0.85232877 egg per day amounting to 19400/311.1 = 62.35937 gram egg per day
##one dutch chicken eats 121.3 gram feed per day (both broiler and egg)
##chicken feed based on Rezaei et al (high protein organic Lancet_diet) and ratios based on 1/3 of feeds used in first and 2/3 of last stages of life, byproducts and supplements (under 3%) placed in "other"
##one dutch broiler chicken lives 6 weeks, averages 2446g and delivers 166+547+243+520 = 1476 gram of meat
##is chicken manure used as fertilizer? How much manure does a chicken produce?
chicken_dict = {"type": ["wheat", "soybean meal", "rapeseed", "oats", "peas"], ##"other"],
"gram": [45.95, 21.62*0.8, 4.04, 23.15, 9.7]} ##, 16.84]}
chicken_Lancet_diet_per_day = pd.DataFrame(chicken_dict)
chicken_Lancet_diet_per_day = chicken_Lancet_diet_per_day.set_index(["type"])
chicken_feed_per_g_meat = (chicken_Lancet_diet_per_day["gram"]*42)/1476
chicken_feed_per_g_egg = chicken_Lancet_diet_per_day["gram"]/62.35937
total_feed_meat_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_meat * d.loc[["chicken and other poultry"], ["BMI" , "EAT", "Org"]].values[0][0]
total_feed_egg_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_egg * d.loc[["eggs"], ["BMI" , "EAT", "Org"]].values[0][0]
##feed required for 1 lamb per day
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
##one lamb gives 35.24% of its original weight as meat. One slaughtered lamb weighs 40kg so 40* 0.3524 = 14.096 kg meat per lamb
##feed composition assumed to be similar to milk cow (both pasture raised and ruminants).Feed requirement about 1kg a day (Bello et al, 2016)
##manure production
lamb_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [687.5, 312.5 + (20.8*0.86), 12.5*0.8]} ##, 8.33, 8.33, 8.33, 4.15, 4.15, 16.66]}
lamb_Lancet_diet_per_day = pd.DataFrame(lamb_dict)
lamb_Lancet_diet_per_day = lamb_Lancet_diet_per_day.set_index(["type"])
lamb_feed_per_g_meat = (lamb_Lancet_diet_per_day["gram"]*365)/14096
total_feed_lamb_for_Lancet_diet_per_day = lamb_feed_per_g_meat * d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]].values[0][0]
##need to add beef/lamb ratio
##one slaughtered pig gives on average 57% of its live weight as meat, slaughtered weight is 95.2kg so 95.2*0.57 = 54.264kg meat per fattening pig
##one pig lives 88 days (based on BINternet growth per day) and uses 185.064 kg of feed in its life (based on BINternet feed conversion), so it eats 2.103 kg of feed a day
##feed requirement based on byproducts scenario of Lassaletta et al 2016
##manure production
##swill and molasses assumed to be by-products
##are brans a by-product? Do they require extra production? Assumed to be about 10% of original crop (Feedipedia)
pig_dict = {"type": ["corn", "barley", "brans", "wheat"], ##"swill", "molasses"],
"gram": [378.54, 147.21, 525.75, 630.9]} ##, 210.3, 210.3]}
pig_Lancet_diet_per_day = pd.DataFrame(pig_dict)
pig_Lancet_diet_per_day = pig_Lancet_diet_per_day.set_index(["type"])
pig_feed_per_g_meat = (pig_Lancet_diet_per_day["gram"]*88)/54264
total_feed_pig_for_Lancet_diet_per_day = pig_feed_per_g_meat * d.loc[["pork"], ["BMI" , "EAT", "Org"]].values[0][0]
##create crop demands including demand for feed crops:
##assuming no waste in feedcrops
d.loc[["rice wheat corn and other"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Cereals", "Total"].min())
d.loc[["rice wheat corn and other"], ["BMI" , "EAT", "Org"]] += total_feed_cows_for_Lancet_diet_per_day.loc["corn"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["wheat"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["oats"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["wheat"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["oats"] + total_feed_lamb_for_Lancet_diet_per_day.loc["corn"] + total_feed_pig_for_Lancet_diet_per_day.loc["corn"] + total_feed_pig_for_Lancet_diet_per_day.loc["barley"] + total_feed_pig_for_Lancet_diet_per_day.loc["wheat"]
d.loc[["potatoes and cassava"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Rootsandtubers", "Total"].min())
d.loc[["dry beans lentils and peas"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["dry beans lentils and peas"], ["BMI" , "EAT", "Org"]] += total_feed_meat_chickens_for_Lancet_diet_per_day.loc["peas"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["peas"]
d.loc[["soy foods"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["soy foods"], ["BMI" , "EAT", "Org"]] += total_feed_cows_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_lamb_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["soybean meal"]
d.loc[["peanuts"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["tree nuts"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
#d.loc[["palm oil"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["unsaturated oils"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["unsaturated oils"], ["BMI" , "EAT", "Org"]] += total_feed_meat_chickens_for_Lancet_diet_per_day.loc["rapeseed"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["rapeseed"]
d.loc[["all fruit"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
#d.loc[["all vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
d.loc[["dark green vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
d.loc[["red and orange vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
Region_demands[name] = d.loc[(Lancet_diet["GROUP"] == name)]
return Region_demands
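##a minimal, self-contained sketch of the per-gram feed conversion used above (illustrative only;
##the lamb figures are the ones quoted in the comments: a 40 kg lamb yields 40*0.3524 = 14.096 kg
##of meat and is fed for roughly a year)
def _lamb_feed_per_g_meat_example() -> pd.Series:
    daily_feed = pd.Series({"grass": 687.5, "corn": 312.5 + 20.8*0.86, "soybean meal": 12.5*0.8})
    return (daily_feed*365)/14096 ##gram of each feed type needed per gram of lamb meat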
def feed_remove(groups, Lancet_diet):
Region_demands = {}
for name, group in groups:
d = Lancet_diet.copy()
d["GROUP"] = name
#print(d)
##create animal product demands:
d.loc[["beef and lamb"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["pork"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["chicken and other poultry"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["fish"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Fishandseafood", "Total"].min())
#d = d.drop(["fish"])
#d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
#d.loc[["eggs"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
##create feed demands:
##need to determine oil production from soymeal production for feed
##feed required for 1 dairy cow per day
##similar feed is assumed for beef cattle being reared on mixed system (pasture/crop-fed)
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn equals sum of fresh and corn in concentrate
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
cow_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [41250, 13750 + (1250/0.86), 750/0.8 ]}##, 500, 500, 500, 250, 250, 1000]}
cow_Lancet_diet_per_day = pd.DataFrame(cow_dict)
cow_Lancet_diet_per_day = cow_Lancet_diet_per_day.set_index(["type"])
cow_feed_per_g_milk = cow_Lancet_diet_per_day["gram"]/25000
##beef production from dairy cows and dairy calf rearing
calf_per_g_milk = (1.5/(25000*365*6)) ##3 calves per cow divided by two as only males are used(?)
##Type A calf of Nguyen 2010 using 8438 kg of feed per 1000 kg carcass weight (= per 660kg edible meat)
##(significantly more soymeal --> look into, maybe change Lancet_diet)
g_calf_per_g_milk = calf_per_g_milk * 214880
cow_feed_per_g_calf = ((cow_Lancet_diet_per_day["gram"]/55000)*8438000)/660000
##One 680 kg Holstein dairy cow delivers 224.52 kg of meat (excluding offal and bones) ##what to do with offal?
g_dairycow_beef_per_g_milk = 224520.0 / 36500000.0 #36500000 g milk in her milk giving time of 4 years
g_beef_per_g_milk = g_calf_per_g_milk + g_dairycow_beef_per_g_milk
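##illustrative arithmetic from the figures above (values rounded):
##calf_per_g_milk = 1.5/(25000*365*6) ~ 2.74e-8 calves per g of milk
##g_calf_per_g_milk ~ 2.74e-8 * 214880 ~ 5.9e-3 g of calf beef per g of milk
##g_dairycow_beef_per_g_milk = 224520/36500000 ~ 6.2e-3 g of dairy-cow beef per g of milk
##so roughly 1.2e-2 g of beef is obtained per g of milk produced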
##feed demand from classic suckler-cow rearing systems is 20863 kg of feed per 1000kg carcass weight (= per 660kg edible meat) (Nguyen 2010)
cow_feed_per_g_suckler_beef = ((cow_Lancet_diet_per_day["gram"]/55000)*20863000)/660000
##required extra beef production besides dairy cows and their calves to reach demand
required_extra_beef_production = max(d.loc[["beef and lamb"], ["Org_nf"]].values[0][0] - (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * g_beef_per_g_milk), 0)
##this needs a lamb factor
total_feed_cows_for_Lancet_diet_per_day = (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * g_calf_per_g_milk * cow_feed_per_g_calf) + (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * cow_feed_per_g_milk) + (required_extra_beef_production * cow_feed_per_g_suckler_beef)
##one dutch cow delivers on average 25 liter milk per day and eats 55kg of feed a day
##assuming 3 calves per dairy cow of which half is male so used for slaughter
##one dutch dairy cow is culled after 6 years on average
##if not, how much feed does a meat cow need?
##how much manure do the cows produce? (for effect on N input ratio)
##soybean meal assumed to equal 0.8 times fresh soybean weight as in cow Lancet_diet
##whole grains assumed here
##one dutch egg-laying chicken lays 0.85232877 egg per day amounting to 19400/311.1 = 62.35937 gram egg per day
##one dutch chicken eats 121.3 gram feed per day (both broiler and egg)
##chicken feed based on Rezaei et al (high protein organic Lancet_diet) and ratios based on 1/3 of feeds used in first and 2/3 of last stages of life, byproducts and supplements (under 3%) placed in "other"
##one dutch broiler chicken lives 6 weeks, averages 2446g and delivers 166+547+243+520 = 1476 gram of meat
##is chicken manure used as fertilizer? How much manure does a chicken produce?
chicken_dict = {"type": ["wheat", "soybean meal", "rapeseed", "oats", "peas"], ##"other"],
"gram": [45.95, 21.62/0.8, 4.04, 23.15, 9.7]} ##, 16.84]}
chicken_Lancet_diet_per_day = pd.DataFrame(chicken_dict)
chicken_Lancet_diet_per_day = chicken_Lancet_diet_per_day.set_index(["type"])
chicken_feed_per_g_meat = (chicken_Lancet_diet_per_day["gram"]*42)/1476
chicken_feed_per_g_egg = chicken_Lancet_diet_per_day["gram"]/62.35937
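##feed per g of product: daily ration * 42-day broiler lifespan / 1476 g of edible meat,
##and daily ration / 62.36 g of egg laid per hen per day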
total_feed_meat_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_meat * d.loc[["chicken and other poultry"], ["Org_nf"]].values[0][0]
total_feed_egg_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_egg * d.loc[["eggs"], ["Org_nf"]].values[0][0]
##feed required for 1 lamb per day
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
##one lamb gives 35.24% of its original weight as meat. One slaughtered lamb weighs 40kg so 40* 0.3524 = 14.096 kg meat per lamb
##feed composition assumed to be similar to milk cow (both pasture raised and ruminants).Feed requirement about 1kg a day (Bello et al, 2016)
##manure production
lamb_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [687.5, 312.5 + (20.8/0.86), 12.5/0.8]} ##, 8.33, 8.33, 8.33, 4.15, 4.15, 16.66]}
lamb_Lancet_diet_per_day = pd.DataFrame(lamb_dict)
lamb_Lancet_diet_per_day = lamb_Lancet_diet_per_day.set_index(["type"])
lamb_feed_per_g_meat = (lamb_Lancet_diet_per_day["gram"]*365)/14096
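##the factor 365 implies a full year of feeding per slaughtered lamb, spread over 14096 g of edible meat (40 kg * 0.3524)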
total_feed_lamb_for_Lancet_diet_per_day = lamb_feed_per_g_meat * d.loc[["beef and lamb"], ["Org_nf"]].values[0][0]
##need to add beef/lamb ratio
##one slaughtered pig gives on average 57% of its live weight as meat, slaughtered weight is 95.2kg so 95.2*0.57 = 54.264kg meat per fattening pig
##one pig lives 88 days (based on BINternet growth per day) and uses 185.064 kg of feed in its life (based on BINternet feed conversion), so it eats 2.103 kg of feed a day
##feed requirement based on byproducts scenario of Lassaletta et al 2016
##manure production
##swill and molasses assumed to be by-products
##are brans a by-product? Do they require extra production? Assumed to be about 10% of original crop (Feedipedia)
pig_dict = {"type": ["corn", "barley", "brans", "wheat"], ##"swill", "molasses"],
"gram": [378.54, 147.21, 525.75, 630.9]} ##, 210.3, 210.3]}
pig_Lancet_diet_per_day =
|
pd.DataFrame(pig_dict)
|
pandas.DataFrame
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from tqdm import tqdm, trange
import pandas as pd
import io
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import re
import argparse
from pytorch_transformers import BertTokenizer
from other_func import write_log, preprocess1, preprocessing
from sklearn.model_selection import KFold
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--original_data",
default=None,
type=str,
required=True,
help="The input data file path."
" Should be the .tsv file (or other data file) for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the processed data will be written.")
parser.add_argument("--temp_dir",
default=None,
type=str,
required=True,
help="The output directory where the intermediate processed data will be written.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task.")
parser.add_argument("--log_path",
default=None,
type=str,
required=True,
help="The log file path.")
parser.add_argument("--id_num_neg",
default=None,
type=int,
required=True,
help="The number of admission ids that we want to use for negative category.")
parser.add_argument("--id_num_pos",
default=None,
type=int,
required=True,
help="The number of admission ids that we want to use for positive category.")
parser.add_argument("--random_seed",
default=1,
type=int,
required=True,
help="The random_seed for train/val/test split.")
parser.add_argument("--bert_model",
default="bert-base-uncased",
type=str,
required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
## Other parameters
parser.add_argument("--Kfold",
default=None,
type=int,
required=False,
help="The number of folds that we want ot use for cross validation. "
"Default is not doing cross validation")
args = parser.parse_args()
RANDOM_SEED = args.random_seed
LOG_PATH = args.log_path
TEMP_DIR = args.temp_dir
if os.path.exists(TEMP_DIR) and os.listdir(TEMP_DIR):
raise ValueError("Temp Output directory ({}) already exists and is not empty.".format(TEMP_DIR))
os.makedirs(TEMP_DIR, exist_ok=True)
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
original_df = pd.read_csv(args.original_data, header=None)
original_df.rename(columns={0: "Adm_ID",
1: "Note_ID",
2: "chartdate",
3: "charttime",
4: "TEXT",
5: "Label"}, inplace=True)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
write_log(("New Pre-processing Job Start! \n"
"original_data: {}, output_dir: {}, temp_dir: {} \n"
"task_name: {}, log_path: {}\n"
"id_num_neg: {}, id_num_pos: {}\n"
"random_seed: {}, bert_model: {}").format(args.original_data, args.output_dir, args.temp_dir,
args.task_name, args.log_path,
args.id_num_neg, args.id_num_pos,
args.random_seed, args.bert_model), LOG_PATH)
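# tokenize the notes in chunks of 10000 rows; each processed chunk is saved to a temporary csv and re-assembled further below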
for i in range(int(np.ceil(len(original_df) / 10000))):
write_log("chunk {} tokenize start!".format(i), LOG_PATH)
df_chunk = original_df.iloc[i * 10000:(i + 1) * 10000].copy()
df_processed_chunk = preprocessing(df_chunk, tokenizer)
df_processed_chunk = df_processed_chunk.astype({'Adm_ID': 'int64', 'Note_ID': 'int64', 'Label': 'int64'})
temp_file_dir = os.path.join(TEMP_DIR, 'Processed_{}.csv'.format(i))
df_processed_chunk.to_csv(temp_file_dir, index=False)
df = pd.DataFrame({'Adm_ID': [], 'Note_ID': [], 'TEXT': [], 'Input_ID': [],
'Label': [], 'chartdate': [], 'charttime': []})
for i in range(int(np.ceil(len(original_df) / 10000))):
temp_file_dir = os.path.join(TEMP_DIR, 'Processed_{}.csv'.format(i))
df_chunk = pd.read_csv(temp_file_dir, header=0)
write_log("chunk {} has {} notes".format(i, len(df_chunk)), LOG_PATH)
df = df.append(df_chunk, ignore_index=True)
result = df.Label.value_counts()
write_log(
"In the full dataset Positive Patients' Notes: {}, Negative Patients' Notes: {}".format(result[1],
result[0]),
LOG_PATH)
dead_ID = pd.Series(df[df.Label == 1].Adm_ID.unique())
not_dead_ID = pd.Series(df[df.Label == 0].Adm_ID.unique())
write_log("Total Positive Patients' ids: {}, Total Negative Patients' ids: {}".format(len(dead_ID), len(not_dead_ID)), LOG_PATH)
not_dead_ID_use = not_dead_ID.sample(n=args.id_num_neg, random_state=RANDOM_SEED)
dead_ID_use = dead_ID.sample(n=args.id_num_pos, random_state=RANDOM_SEED)
if args.Kfold is None:
id_val_test_t = dead_ID_use.sample(frac=0.2, random_state=RANDOM_SEED)
id_val_test_f = not_dead_ID_use.sample(frac=0.2, random_state=RANDOM_SEED)
id_train_t = dead_ID_use.drop(id_val_test_t.index)
id_train_f = not_dead_ID_use.drop(id_val_test_f.index)
id_val_t = id_val_test_t.sample(frac=0.5, random_state=RANDOM_SEED)
id_test_t = id_val_test_t.drop(id_val_t.index)
id_val_f = id_val_test_f.sample(frac=0.5, random_state=RANDOM_SEED)
id_test_f = id_val_test_f.drop(id_val_f.index)
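# resulting split per class: 80% train, 10% validation, 10% test (20% held out, then split in half)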
id_test = pd.concat([id_test_t, id_test_f])
test_id_label = pd.DataFrame(data=list(zip(id_test, [1] * len(id_test_t) + [0] * len(id_test_f))),
columns=['id', 'label'])
id_val = pd.concat([id_val_t, id_val_f])
val_id_label = pd.DataFrame(data=list(zip(id_val, [1] * len(id_val_t) + [0] * len(id_val_f))),
columns=['id', 'label'])
id_train =
|
pd.concat([id_train_t, id_train_f])
|
pandas.concat
|
import unittest
from abc import ABC
import numpy as np
import pandas as pd
from toolbox.ml.ml_factor_calculation import ModelWrapper, calc_ml_factor, generate_indexes
from toolbox.utils.slice_holder import SliceHolder
class MyTestCase(unittest.TestCase):
def examples(self):
# index includes non trading days
# exactly 60 occurrences of each ticker
first = pd.Timestamp(year=2010, month=1, day=1)
self.date_index = pd.MultiIndex.from_product(
[pd.date_range(start=first, end=pd.Timestamp(year=2010, month=3, day=1)),
['BOB', 'JEFF', 'CARL']], names=['date', 'symbol'])
self.expected_index_e5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first, first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first, first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_e7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=37), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=37)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=52))),
(SliceHolder(first, first + pd.Timedelta(days=45)),
SliceHolder(first + pd.Timedelta(days=53), first + pd.Timedelta(days=59))),
]
self.expected_index_e5_10_30 = self.turn_to_datetime64(self.expected_index_e5_10_30)
self.expected_index_e7_8_30 = self.turn_to_datetime64(self.expected_index_e7_8_30)
self.expected_index_r5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first + pd.Timedelta(days=5), first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first + pd.Timedelta(days=10), first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first + pd.Timedelta(days=15), first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_r7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=37), first + pd.Timedelta(days=44))),
(SliceHolder(first + pd.Timedelta(days=8), first +
|
pd.Timedelta(days=37)
|
pandas.Timedelta
|
# -*- coding: utf-8 -*-
import pandas as pd
import io, sys, os, datetime
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import base64
import pickle
import plotly.express as px
import matplotlib
matplotlib.use('Agg')
from dash_extensions import Download
from dash_extensions.snippets import send_file
import json
import time
import subprocess
from pathlib import Path
import dash_uploader as du
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BaseFolder = "./"
FULL_PATH = os.path.abspath(BaseFolder)+"/"
sys.path.append(FULL_PATH)
from netcom.netcom import pathwayEnrichment, EdgeR_to_seeds, simulation
#Delete all results older than 5 days
deleteOlFiles_command = "find "+FULL_PATH+"Results/* -type d -ctime +5 -exec rm -rf {} \;"
os.system(deleteOlFiles_command)
try:
with open(BaseFolder+"data/DB/DB.pickle", 'rb') as handle:
DB = pickle.load(handle)
except:
DB = pd.read_pickle(BaseFolder+"data/DB/DB.pickle")
df_el_ = DB['full_enzymes_labels_jun.txt']
df_ecMapping_ = DB['ec_reac_mapping_jun.txt']
df_reactions_ = DB['reactions_3_balanced.txt']
df_ec_to_compoundIndex_ = DB['compound_labels_jun.txt']
def read_edgeR(df):
try:
df_edgeR_grouped = df.groupby("association")['X'].apply(list).to_frame()
except:
df_edgeR_grouped = df.groupby("association")['enzyme'].apply(list).to_frame()
return df_edgeR_grouped
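# read_edgeR groups the uploaded edgeR table by 'association' and collects the enzyme identifiers of each group;
# the try/except tolerates either an 'X' or an 'enzyme' column name in the input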
def createParametersDict(folder):
start = time.time()
#write default parameters pickle file. this will be changed by the user later.
parametersDict = {}
parametersDict["drop_fragment_with_size"] = 1
parametersDict["filter_hubness"] = 25
parametersDict["soft_color_A"] = "green"
parametersDict["dark_color_A"] = "lime"
parametersDict["corrected_p-val"] = 0.05
parametersDict["enrichment_results_slice"] = [0, 100]
parametersDict["figure_name"] = "Figure"
parametersDict["network_layout_iter"] = 75
parametersDict["treatment_col"] = ""
parametersDict["comparison_col"] = ""
parametersDict["Not_associated_col"] = ""
parametersDict["Min_entities_Enrichment"] = 3
parametersDict["Max_entities_Enrichment"] = 25
parametersDict["Enriched_pathways"] = []
parametersDict["Final_folder"] = ""
folder = str(folder).strip("\"").strip("\'")
f = open(folder+"parametersDict.json", "w", encoding="utf8")
json.dump([parametersDict], f)
f.close()
try:
os.system("rm "+folder+"main_process_results_html.pkl")
except:
print()
end = time.time()
print("createParametersDict time (sec):")
print(end - start)
def loadParametersDict(folder):
start = time.time()
folder = str(folder).strip("\"").strip("\'")
f = open(folder+"parametersDict.json", "r")
output = json.load(f)
f.close()
end = time.time()
print("loadParametersDict time (sec):")
print(end - start)
return output[0]
def update_parameters(val, col, folder):
start = time.time()
try:
folder = str(folder).strip("\"").strip("\'")
f = open(folder+"parametersDict.json", "r")
parametersDict = json.load(f)[0]
f.close()
except:
folder = str(folder).strip("\"").strip("\'")
createParametersDict(folder)
f = open(folder+"parametersDict.json", "r")
parametersDict = json.load(f)[0]
f.close()
parametersDict[col] = val
f = open(folder+"parametersDict.json", "w", encoding="utf8")
json.dump([parametersDict], f)
f.close()
end = time.time()
print("update_parameters time (sec):")
print(end - start)
def presentDatasetStatistics(folder):
start = time.time()
print("loading edger")
df = pd.read_csv(folder+"raw_input_edger.csv")
colorsDict={}
colorsDict["treatment_col"] = "blue"
colorsDict["comparison_col"] = "red"
colorsDict["Not_associated"] = "Not_associated"
print("prep colors")
df["Treatment color"] = df["association"].replace(colorsDict)
#VOLCANO PLOT
try:
volcano = px.scatter(df, x="logFC", y="PValue",color="Treatment color",
hover_name="enzyme", log_y=True)
try:
labels = df[["association"]].value_counts().index
values = df[["association"]].value_counts().values
except:
labels = df["association"].value_counts().index
values = df["association"].value_counts().values
pieChart = go.Figure(data=[go.Pie(labels=labels, values=values)])
pvalHist = px.histogram(df, x="PValue")
descriptionGraphs = html.Div(dbc.Row([
dbc.Col(dcc.Graph(
id='volcano-scatter',
#style={'display': 'inline-block'},
figure=volcano
)),
dbc.Col(dcc.Graph(
id='pie-chart',
#style={'display': 'inline-block'},
figure=pieChart
)),
dbc.Col(dcc.Graph(
id='pval-hist',
#style={'display': 'inline-block'},
figure=pvalHist
)),
])
)
end = time.time()
print("presentDatasetStatistics time (sec):")
print(end - start)
#calculate enrichment for keep_pathways.txt file creation
parametersDict = loadParametersDict(folder)
#print(parametersDict)
folder = str(folder).strip("\"").strip("\'")
FinalFolder_ = folder
Seeds_A_input, T1_seeds_tag, ECs_A_input, Seeds_B_input, T2_seeds_tag, ECs_B_input, Seeds_All_input, ECs_All_input=EdgeR_to_seeds(edgeR_row_location=FinalFolder_+"raw_input_edger.csv",
col_treatment_1=parametersDict["treatment_col"],
col_treatment_2=parametersDict["comparison_col"],
outputFolder=FinalFolder_,
input_sep=",")
with open(FinalFolder_+"keep_pathways.txt", 'w') as f:
f.write("\n")
pathways_enzymes_A = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_A_input,
All_B=ECs_B_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["treatment_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
pathways_enzymes_B = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_B_input,
All_B=ECs_A_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["comparison_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
return descriptionGraphs
except Exception as e:
print(e)
end = time.time()
print("presentDatasetStatistics time (sec):")
print(end - start)
def CreateBarPlot(folder):
start = time.time()
parametersDict = loadParametersDict(folder)
folder = str(folder).strip("\"").strip("\'")
FinalFolder_ = folder
df = pd.read_csv(FinalFolder_+"raw_input_edger.csv")
df=read_edgeR(df)
Seeds_A_input, T1_seeds_tag, ECs_A_input, Seeds_B_input, T2_seeds_tag, ECs_B_input, Seeds_All_input, ECs_All_input=EdgeR_to_seeds(edgeR_row_location=FinalFolder_+"raw_input_edger.csv",
col_treatment_1=parametersDict["treatment_col"],
col_treatment_2=parametersDict["comparison_col"],
outputFolder=FinalFolder_,
input_sep=",")
with open(FinalFolder_+"keep_pathways.txt", 'w') as f:
f.write("\n")
pathways_enzymes_A = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_A_input,
All_B=ECs_B_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["treatment_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
pathways_enzymes_B = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_B_input,
All_B=ECs_A_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["comparison_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
df_enzymes_A =
|
pd.read_csv(FinalFolder_+parametersDict["treatment_col"]+"_pathway.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
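# illustrative call (the frame argument is any float DataFrame fixture):
# assert_stat_op_calc('mean', np.mean, float_frame)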
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame with boolean columns that contain NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it needs to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected =
|
pd.Series(data=corrs, index=['a', 'b'])
|
pandas.Series
|
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as tm
from pytest import param
from ibis.pandas.aggcontext import Summarize, window_agg_udf
df = pd.DataFrame(
{
'id': [1, 2, 1, 2],
'v1': [1.0, 2.0, 3.0, 4.0],
'v2': [10.0, 20.0, 30.0, 40.0],
}
)
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(lambda v1: v1.mean(), lambda df: df['v1'].mean(), id='udf',),
param('mean', lambda df: df['v1'].mean(), id='string',),
],
)
def test_summarize_single_series(agg_fn, expected_fn):
"""Test Summarize.agg operating on a single Series."""
aggcontext = Summarize()
result = aggcontext.agg(df['v1'], agg_fn)
expected = expected_fn(df)
assert result == expected
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(lambda v1: v1.mean(), lambda df: df['v1'].mean(), id='udf',),
param('mean', lambda df: df['v1'].mean(), id='string',),
],
)
def test_summarize_single_seriesgroupby(agg_fn, expected_fn):
"""Test Summarize.agg operating on a single SeriesGroupBy."""
aggcontext = Summarize()
df_grouped = df.sort_values('id').groupby('id')
result = aggcontext.agg(df_grouped['v1'], agg_fn)
expected = expected_fn(df_grouped)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(
lambda v1, v2: v1.mean() - v2.mean(),
lambda df: df['v1'].mean() - df['v2'].mean(),
id='two-column',
),
# Two columns, but only the second one is actually used in UDF
param(
lambda v1, v2: v2.mean(),
lambda df: df['v2'].mean(),
id='redundant-column',
),
],
)
def test_summarize_multiple_series(agg_fn, expected_fn):
"""Test Summarize.agg operating on many Series."""
aggcontext = Summarize()
args = [df['v1'], df['v2']]
result = aggcontext.agg(args[0], agg_fn, *args[1:])
expected = expected_fn(df)
assert result == expected
@pytest.mark.parametrize(
'param',
[
(
pd.Series([True, True, True, True]),
pd.Series([1.0, 2.0, 2.0, 3.0]),
),
(
pd.Series([False, True, True, False]),
pd.Series([np.NaN, 2.0, 2.0, np.NaN]),
),
],
)
def test_window_agg_udf(param):
""" Test passing custom window indices for window aggregation."""
mask, expected = param
grouped_data = df.sort_values('id').groupby('id')['v1']
result_index = grouped_data.obj.index
window_lower_indices =
|
pd.Series([0, 0, 2, 2])
|
pandas.Series
|
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import numpy as np
import pandas as pd
import gc
import datetime
import joblib
import warnings
warnings.filterwarnings("ignore")
##### Functions
# 1st function: adds the number of occurrences of each value in the column, to indicate whether the value is frequent or rare.
def frecuency_encoder(cols, df_train, df_test):
for col in cols:
temp = df_train[col].value_counts().to_dict()
df_train[f'{col}_counts'] = df_train[col].map(temp).astype('float32')
temp = df_test[col].value_counts().to_dict()
df_test[f'{col}_counts'] = df_test[col].map(temp).astype('float32')
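# illustrative use (column names below are placeholders, not a prescription):
# frecuency_encoder(['card1', 'addr1'], train, test)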
# 2nd function: applies a list of statistics (aggregations) to a list of columns, grouped by the column specified in group.
def aggregations(cols, group, aggregations, df_train, df_test):
for col in cols:
for aggr in aggregations:
temp = df_train.groupby([group])[col].agg([aggr])
dict_aux = temp[aggr].to_dict()
df_train[f'{col}_{aggr}'] = df_train[group].map(dict_aux).astype('float32')
temp = df_test.groupby([group])[col].agg([aggr])
dict_aux = temp[aggr].to_dict()
df_test[f'{col}_{aggr}'] = df_test[group].map(dict_aux).astype('float32')
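# illustrative use (placeholder names): aggregations(['TransactionAmt'], 'card1', ['mean', 'std'], train, test)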
##### Download of files.
print('Downloading datasets...')
print(' ')
train =
|
pd.read_pickle('/kaggle/input/ieee-cis-preprocessing/train.pkl')
|
pandas.read_pickle
|
# coding: utf8
import torch
import numpy as np
import os
import warnings
import pandas as pd
from time import time
import logging
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from sklearn.utils import column_or_1d
import scipy.sparse as sp
from clinicadl.tools.deep_learning.iotools import check_and_clean
from clinicadl.tools.deep_learning import EarlyStopping, save_checkpoint
#####################
# CNN train / test #
#####################
def train(model, train_loader, valid_loader, criterion, optimizer, resume, log_dir, model_dir, options, logger=None):
"""
Function used to train a CNN.
The best model and checkpoint will be found in the 'best_model_dir' of options.output_dir.
Args:
model: (Module) CNN to be trained
train_loader: (DataLoader) wrapper of the training dataset
valid_loader: (DataLoader) wrapper of the validation dataset
criterion: (loss) function to calculate the loss
optimizer: (torch.optim) optimizer linked to model parameters
resume: (bool) if True, a begun job is resumed
log_dir: (str) path to the folder containing the logs
model_dir: (str) path to the folder containing the models weights and biases
options: (Namespace) ensemble of other options given to the main script.
logger: (logging object) writer to stdout and stderr
"""
from tensorboardX import SummaryWriter
from time import time
if logger is None:
logger = logging
columns = ['epoch', 'iteration', 'time',
'balanced_accuracy_train', 'loss_train',
'balanced_accuracy_valid', 'loss_valid']
if hasattr(model, "variational") and model.variational:
columns += ["kl_loss_train", "kl_loss_valid"]
filename = os.path.join(os.path.dirname(log_dir), 'training.tsv')
if not resume:
check_and_clean(model_dir)
check_and_clean(log_dir)
results_df = pd.DataFrame(columns=columns)
with open(filename, 'w') as f:
results_df.to_csv(f, index=False, sep='\t')
options.beginning_epoch = 0
else:
if not os.path.exists(filename):
raise ValueError('The training.tsv file of the resumed experiment does not exist.')
truncated_tsv = pd.read_csv(filename, sep='\t')
truncated_tsv.set_index(['epoch', 'iteration'], inplace=True)
truncated_tsv.drop(options.beginning_epoch, level=0, inplace=True)
truncated_tsv.to_csv(filename, index=True, sep='\t')
# Create writers
writer_train = SummaryWriter(os.path.join(log_dir, 'train'))
writer_valid = SummaryWriter(os.path.join(log_dir, 'validation'))
# Initialize variables
best_valid_accuracy = -1.0
best_valid_loss = np.inf
epoch = options.beginning_epoch
model.train() # set the model to training mode
train_loader.dataset.train()
early_stopping = EarlyStopping('min', min_delta=options.tolerance, patience=options.patience)
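# early stopping in 'min' mode: training stops once the validation loss fails to improve by at least `tolerance` for `patience` checks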
mean_loss_valid = None
t_beginning = time()
while epoch < options.epochs and not early_stopping.step(mean_loss_valid):
logger.info("Beginning epoch %i." % epoch)
model.zero_grad()
evaluation_flag = True
step_flag = True
tend = time()
total_time = 0
for i, data in enumerate(train_loader, 0):
t0 = time()
total_time = total_time + t0 - tend
if options.gpu:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, train_output = model(imgs)
kl_loss = kl_divergence(z, mu, std)
loss = criterion(train_output, labels) + kl_loss
else:
train_output = model(imgs)
loss = criterion(train_output, labels)
# Back propagation
loss.backward()
del imgs, labels
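# gradient accumulation: the optimizer is only stepped once every `accumulation_steps` mini-batches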
if (i + 1) % options.accumulation_steps == 0:
step_flag = False
optimizer.step()
optimizer.zero_grad()
del loss
# Evaluate the model only when no gradients are accumulated
if options.evaluation_steps != 0 and (i + 1) % options.evaluation_steps == 0:
evaluation_flag = False
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = i + epoch * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], i))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], i))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
tend = time()
logger.debug('Mean time per batch loading: %.10f s'
% (total_time / len(train_loader) * train_loader.batch_size))
# If no step has been performed, raise Exception
if step_flag:
raise Exception('The model has not been updated once in the epoch. The accumulation step may be too large.')
# If no evaluation has been performed, warn the user
elif evaluation_flag and options.evaluation_steps != 0:
warnings.warn('Your evaluation steps are too big compared to the size of the dataset.'
' The model is evaluated only once at the end of the epoch.')
# Always test the results and save them once at the end of the epoch
model.zero_grad()
logger.debug('Last checkpoint at the end of the epoch %d' % epoch)
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = (epoch + 1) * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], len(train_loader)))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], len(train_loader)))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
accuracy_is_best = results_valid["balanced_accuracy"] > best_valid_accuracy
loss_is_best = mean_loss_valid < best_valid_loss
best_valid_accuracy = max(results_valid["balanced_accuracy"], best_valid_accuracy)
best_valid_loss = min(mean_loss_valid, best_valid_loss)
save_checkpoint({'model': model.state_dict(),
'epoch': epoch,
'valid_loss': mean_loss_valid,
'valid_acc': results_valid["balanced_accuracy"]},
accuracy_is_best, loss_is_best,
model_dir)
# Save optimizer state_dict to be able to reload
save_checkpoint({'optimizer': optimizer.state_dict(),
'epoch': epoch,
'name': options.optimizer,
},
False, False,
model_dir,
filename='optimizer.pth.tar')
epoch += 1
os.remove(os.path.join(model_dir, "optimizer.pth.tar"))
os.remove(os.path.join(model_dir, "checkpoint.pth.tar"))
def evaluate_prediction(y, y_pred):
"""
Evaluates different metrics based on the list of true labels and predicted labels.
Args:
y: (list) true labels
y_pred: (list) corresponding predictions
Returns:
(dict) ensemble of metrics
"""
true_positive = np.sum((y_pred == 1) & (y == 1))
true_negative = np.sum((y_pred == 0) & (y == 0))
false_positive = np.sum((y_pred == 1) & (y == 0))
false_negative = np.sum((y_pred == 0) & (y == 1))
accuracy = (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative)
if (true_positive + false_negative) != 0:
sensitivity = true_positive / (true_positive + false_negative)
else:
sensitivity = 0.0
if (false_positive + true_negative) != 0:
specificity = true_negative / (false_positive + true_negative)
else:
specificity = 0.0
if (true_positive + false_positive) != 0:
ppv = true_positive / (true_positive + false_positive)
else:
ppv = 0.0
if (true_negative + false_negative) != 0:
npv = true_negative / (true_negative + false_negative)
else:
npv = 0.0
balanced_accuracy = (sensitivity + specificity) / 2
results = {'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity,
'ppv': ppv,
'npv': npv,
}
return results
def test(model, dataloader, use_cuda, criterion, mode="image", use_labels=True):
"""
Computes the predictions and evaluation metrics.
Args:
model: (Module) CNN to be tested.
dataloader: (DataLoader) wrapper of a dataset.
use_cuda: (bool) if True a gpu is used.
criterion: (loss) function to calculate the loss.
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
use_labels (bool): If True the true_label will be written in output DataFrame and metrics dict will be created.
Returns
(DataFrame) results of each input.
(dict) ensemble of metrics + total loss on mode level.
"""
model.eval()
dataloader.dataset.eval()
if mode == "image":
columns = ["participant_id", "session_id", "true_label", "predicted_label"]
elif mode in ["patch", "roi", "slice"]:
columns = ['participant_id', 'session_id', '%s_id' % mode, 'true_label', 'predicted_label', 'proba0', 'proba1']
else:
raise ValueError("The mode %s is invalid." % mode)
softmax = torch.nn.Softmax(dim=1)
results_df = pd.DataFrame(columns=columns)
total_loss = 0
total_kl_loss = 0
total_time = 0
tend = time()
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
t0 = time()
total_time = total_time + t0 - tend
if use_cuda:
inputs, labels = data['image'].cuda(), data['label'].cuda()
else:
inputs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, outputs = model(inputs)
kl_loss = kl_divergence(z, mu, std)
total_kl_loss += kl_loss.item()
else:
outputs = model(inputs)
if use_labels:
loss = criterion(outputs, labels)
total_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
if mode == "image":
row = [[sub, data['session_id'][idx], labels[idx].item(), predicted[idx].item()]]
else:
normalized_output = softmax(outputs)
row = [[sub, data['session_id'][idx], data['%s_id' % mode][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]]
row_df = pd.DataFrame(row, columns=columns)
results_df = pd.concat([results_df, row_df])
del inputs, outputs, labels
tend = time()
results_df.reset_index(inplace=True, drop=True)
if not use_labels:
results_df = results_df.drop("true_label", axis=1)
metrics_dict = None
else:
metrics_dict = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
metrics_dict['total_loss'] = total_loss
metrics_dict['total_kl_loss'] = total_kl_loss
torch.cuda.empty_cache()
return results_df, metrics_dict
def sort_predicted(model, data_df, input_dir, model_options, criterion, keep_true,
batch_size=1, num_workers=0, gpu=False):
from .data import return_dataset, get_transforms
from torch.utils.data import DataLoader
from copy import copy
if keep_true is None:
return data_df
_, all_transforms = get_transforms(model_options.mode, model_options.minmaxnormalization)
dataset = return_dataset(mode=model_options.mode, input_dir=input_dir,
data_df=data_df, preprocessing=model_options.preprocessing,
train_transformations=None, all_transformations=all_transforms,
params=model_options)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
test_options = copy(model_options)
test_options.gpu = gpu
results_df, _ = test(model, dataloader, gpu, criterion, model_options.mode, use_labels=True)
sorted_df = data_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
results_df = results_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
if keep_true:
return sorted_df[results_df.true_label == results_df.predicted_label].reset_index(drop=True)
else:
return sorted_df[results_df.true_label != results_df.predicted_label].reset_index(drop=True)
#################################
# Voting systems
#################################
def mode_level_to_tsvs(output_dir, results_df, metrics, fold, selection, mode, dataset='train', cnn_index=None):
"""
Writes the outputs of the test function in tsv files.
Args:
output_dir: (str) path to the output directory.
results_df: (DataFrame) the individual results per patch.
metrics: (dict or DataFrame) the performances obtained on a series of metrics.
fold: (int) the fold for which the performances were obtained.
selection: (str) the metrics on which the model was selected (best_acc, best_loss)
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
dataset: (str) the dataset on which the evaluation was performed.
cnn_index: (int) provide the cnn_index only for a multi-cnn framework.
"""
if cnn_index is None:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', selection)
else:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', 'cnn-%i' % cnn_index,
selection)
os.makedirs(performance_dir, exist_ok=True)
results_df.to_csv(os.path.join(performance_dir, '%s_%s_level_prediction.tsv' % (dataset, mode)), index=False,
sep='\t')
if metrics is not None:
metrics["%s_id" % mode] = cnn_index
if isinstance(metrics, dict):
pd.DataFrame(metrics, index=[0]).to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
elif isinstance(metrics, pd.DataFrame):
metrics.to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
else:
raise ValueError("Bad type for metrics: %s. Must be dict or DataFrame." % type(metrics).__name__)
def concat_multi_cnn_results(output_dir, fold, selection, mode, dataset, num_cnn):
"""Concatenate the tsv files of a multi-CNN framework"""
prediction_df = pd.DataFrame()
metrics_df = pd.DataFrame()
for cnn_index in range(num_cnn):
cnn_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', 'cnn-%i' % cnn_index)
performance_dir = os.path.join(cnn_dir, selection)
cnn_pred_path = os.path.join(performance_dir, '%s_%s_level_prediction.tsv' % (dataset, mode))
cnn_metrics_path = os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode))
cnn_pred_df =
|
pd.read_csv(cnn_pred_path, sep='\t')
|
pandas.read_csv
|
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from pytz import timezone
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import my_scorer
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_daily_nwps
class DatasetCreatorPCA:
def __init__(self, project, data=None, n_jobs=1, test=False, dates=None):
if test is None:
raise NotImplementedError('test is none for short-term, not implemented for PCA')
self.data = data
self.is_for_test = test
self.project_name = project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = n_jobs
self.variables = self.static_data['data_variables']
self.logger = create_logger(logger_name=f"log_{self.static_data['project_group']}",
abs_path=self.path_nwp_project,
logger_path=f"log_{self.static_data['project_group']}.log", write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
# Extract dates of power measurements.
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def make_dataset_res(self):
nwp_3d_pickle = 'nwps_3d_test.pickle' if self.is_for_test else 'nwps_3d.pickle'
dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
nwp_3d_pickle = os.path.join(self.path_data, nwp_3d_pickle)
dataset_cnn_pickle = os.path.join(self.path_data, dataset_cnn_pickle)
if not (os.path.exists(nwp_3d_pickle) and os.path.exists(dataset_cnn_pickle)):
data, x_3d = self.get_3d_dataset()
else:
data = joblib.load(nwp_3d_pickle)
x_3d = joblib.load(dataset_cnn_pickle) # FIXME: unused variable
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates)
flag = False
for i, p_dates in enumerate(dates_stack):
t = self.dates[i]
file_name = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except Exception:
continue
if flag:
break
self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
            area_group = self.area_group  # this class stores its own area/resolution settings
            resolution = self.nwp_resolution
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
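                    # derive wind speed and direction from the U/V components;
                    # 45 / arctan(1) equals 180 / pi, so r2d converts radians to degrees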
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def get_3d_dataset(self):
dates_stack = []
for t in self.dates:
            p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47),
                                    freq='H')  # 24 hourly steps covering the next day, 00:00 -> 23:00
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
            dates_stack.append(dates)  # for each measurement date, keep the next day's hourly NWP timestamps
area = self.area_group if isinstance(self.areas, dict) else self.areas
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], p_dates, self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
for i, p_dates in enumerate(dates_stack))
x = np.array([])
data_var = dict()
for var in self.variables:
if (var == 'WS' and self.static_data['type'] == 'wind') or \
(var == 'Flux' and self.static_data['type'] == 'pv'):
data_var[var + '_prev'] = x
data_var[var] = x
data_var[var + '_next'] = x
else:
data_var[var] = x
data_var['dates'] = x
x_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
x_3d = stack_3d(x_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
if self.is_for_test:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
self.logger.info('NWP stacked data saved')
return data_var, x_3d
def train_pca(self, data, components, level):
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
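        # grid-search the RBF kernel width (gamma); my_scorer is assumed to score KernelPCA reconstruction quality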
param_grid = [{
"gamma": np.logspace(-3, 0, 20),
}]
kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.n_jobs)
grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.n_jobs)
grid_search.fit(data_scaled)
kpca = grid_search.best_estimator_
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
joblib.dump({'scaler': scaler, 'kpca': kpca}, fname)
def pca_transform(self, data, components, level):
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_pca(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
def dataset_for_single_farm(self, data, data_path):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
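                # the NWP patch is assumed to be a 5x5 grid centred on the site: (2, 2) is the centre cell,
                # and the index lists below pick groups of surrounding cells before compressing them with kernel PCA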
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
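            # rank features by absolute correlation with the target and persist the column order,
            # so the evaluation dataset can be built with the same ordering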
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
                var_name = 'p_flux' if var == 'Flux' else 'p_wind'
                dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'n_wind'
                dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def make_dataset_res_offline(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_offline(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_offline(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
                        except Exception:
continue
if flag:
break
X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_offline(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
dates_stack = []
for dt in self.dates:
if utc:
                pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48),
                                       freq='H')  # range end reconstructed to mirror make_dataset_res above
"""
Integrated Label Preparation Code
Created on 4/25/2019
@author: RH
"""
#CPTAC initial prep
import pandas as pd
imlist = pd.read_excel('../S043_CPTAC_UCEC_Discovery_Cohort_Study_Specimens_r1_Sept2018.xlsx', header=4)
imlist = imlist[imlist['Group'] == 'Tumor ']
cllist = pd.read_csv('../UCEC_V2.1/waffles_updated.txt', sep='\t', header = 0)
cllist = cllist[cllist['Proteomics_Tumor_Normal'] == 'Tumor']
joined = pd.merge(imlist, cllist, how='inner', on=['Participant_ID'])
joined.to_csv('../joined_PID.csv', index = False)
#CPTAC prep
import pandas as pd
import shutil
import os
import csv
def flatten(l, a):
for i in l:
if isinstance(i, list):
flatten(i, a)
else:
a.append(i)
return a
# Get all images in the root directory
def image_ids_in(root_dir, ignore=['.DS_Store', 'dict.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
dirname = id.split('_')[-2]
ids.append((id, dirname))
return ids
PID = pd.read_csv("../joined_PID.csv", header = 0)
temp = []
ls = []
for idx, row in PID.iterrows():
if "," in row["Parent Sample ID(s)"]:
m = row["Parent Sample ID(s)"].split(',')
for x in m:
w = row
ls.append(x)
temp.append(w)
PID = PID.drop(idx)
temp = pd.DataFrame(temp)
temp["Parent Sample ID(s)"] = ls
PID = PID.append(temp, ignore_index=True)
PID = PID.sort_values(["Parent Sample ID(s)"], ascending=1)
PID.to_csv("../new_joined_PID.csv", header = True, index = False)
PID = pd.read_csv("../new_joined_PID.csv", header = 0)
ref_list = PID["Parent Sample ID(s)"].tolist()
imids = image_ids_in('../CPTAC_img')
inlist = []
outlist = []
reverse_inlist = []
try:
os.mkdir('../CPTAC_img/inlist')
except FileExistsError:
pass
try:
os.mkdir('../CPTAC_img/outlist')
except FileExistsError:
pass
for im in imids:
if im[1] in ref_list:
inlist.append(im[0])
reverse_inlist.append(im[1])
shutil.move('../CPTAC_img/'+str(im[0]), '../CPTAC_img/inlist/'+str(im[0]))
else:
outlist.append(im[0])
shutil.move('../CPTAC_img/' + str(im[0]), '../CPTAC_img/outlist/' + str(im[0]))
csvfile = "../CPTAC_inlist.csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in inlist:
writer.writerow([val])
csvfile = "../CPTAC_outlist.csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in outlist:
writer.writerow([val])
filtered_PID = PID[PID["Parent Sample ID(s)"].isin(reverse_inlist)]
tpdict = {'CN-High': 'Serous-like', 'CN-Low': 'Endometrioid', 'MSI-H': 'MSI', 'POLE': 'POLE', 'Other': 'Other'}
a = filtered_PID['TCGA_subtype']
filtered_PID['Subtype'] = a
filtered_PID.Subtype = filtered_PID.Subtype.replace(tpdict)
filtered_PID = filtered_PID[filtered_PID.Subtype != 'Other']
filtered_PID.to_csv("../filtered_joined_PID.csv", header=True, index=False)
#TCGA prep
import pandas as pd
def flatten(l, a):
for i in l:
if isinstance(i, list):
flatten(i, a)
else:
a.append(i)
return a
image_meta = pd.read_csv('../TCGA_Image_meta.tsv', sep='\t', header=0)
TCGA_list = pd.read_excel('../TCGA_nature12113-s2/datafile.S1.1.KeyClinicalData.xls', header=0)
namelist = []
for idx, row in image_meta.iterrows():
namelist.append(row['File Name'].split('-01Z')[0])
image_meta['bcr_patient_barcode'] = namelist
TCGA_list = TCGA_list.join(image_meta.set_index('bcr_patient_barcode'), on='bcr_patient_barcode')
TCGA_list = TCGA_list.dropna()
labellist = []
TCGA_list = TCGA_list[TCGA_list['IntegrativeCluster'] != "Notassigned"]
TCGA_list = TCGA_list.rename(columns={'IntegrativeCluster': 'label'})
lbdict = {'CN low': 'Endometrioid', 'CN high': 'Serous-like'}
TCGA_list['label'] = TCGA_list['label'].replace(lbdict)
TCGA_list.to_csv('../new_TCGA_list.csv', header=True, index=False)
#Mutation prep
import pandas as pd
import numpy as np
CPTAC = pd.read_csv("../filtered_joined_PID.csv", header=0)
CPTAC_MUT = pd.read_csv('../UCEC_V2.1/UCEC_CPTAC3_meta_table_V2.1.txt', sep='\t', header=0)
TCGA = pd.read_csv('../new_TCGA_list.csv', header=0)
TCGA_MUT = pd.read_csv('../TCGA_MUT/TCGA_clinical/MUT_clinical.tsv', sep='\t', header=0)
from numpy.core.defchararray import array
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
import librosa
def salvar_csv(x_media,y_media,z_media,nome):
dfdict = {}
dfdict["x"] = x_media
dfdict["y"] = y_media
dfdict["z"] = z_media
    df = pd.DataFrame(dfdict)
from typing import Optional
from dataclasses import dataclass
import pandas as pd
from poker.base import unique_values, native_mean, running_mean, running_std, running_median, running_percentile
from poker.document_filter_class import DocumentFilter
pd.set_option('use_inf_as_na', True)
def _ts_concat(dic: dict, index_lst: list) -> pd.DataFrame:
"""Concat a dict of dicts or pd.DataFrames"""
lst_df = []
for key, val in dic.items():
if type(val) != pd.DataFrame:
val = pd.DataFrame(val, index=index_lst)
val.columns = [key + ' ' + col if col != '' else key for col in val.columns]
else:
val.columns = [key]
lst_df.append(val)
final_df = pd.concat(lst_df, axis=1).reset_index()
return final_df
def _ts_hand(data: pd.DataFrame) -> pd.DataFrame:
"""Build Hand related data"""
pos_dic = {'Pre Flop': 0.25, 'Post Flop': 0.50, 'Post Turn': 0.75, 'Post River': 1.0}
# Game Id
g_i_df = pd.DataFrame(data.groupby('Start Time')['Game Id'].last())
g_i_df.columns = ['']
# Time in Hand
t_h_df = pd.DataFrame(data.groupby('Start Time')['Seconds into Hand'].last())
t_h_df.columns = ['']
# Last Position
last_position = data.groupby('Start Time')['Position'].last().tolist()
l_p_df = pd.DataFrame([pos_dic[item] for item in last_position], index=t_h_df.index, columns=[''])
# Win
r_w_p = data.groupby('Start Time')['Win'].last().tolist()
r_w_p = [1 if item is True else 0 for item in r_w_p]
r_w_p_df = pd.DataFrame(running_mean(data=r_w_p, num=5), index=t_h_df.index, columns=[''])
ind_lst = data.groupby('Start Time').last().index.tolist()
lst_dic = {'Seconds per Hand': t_h_df, 'Last Position in Hand': l_p_df, 'Rolling Win Percent': r_w_p_df,
'Game Id': g_i_df}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_position(data: pd.DataFrame) -> pd.DataFrame:
"""Build position related data"""
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
p_bet = {'Pre Flop': [], 'Post Flop': [], 'Post Turn': [], 'Post River': []}
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
t_p_bet[row['Position']] += row['Bet Amount']
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Position Bet': p_bet, 'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_class_counts_seconds(data: pd.DataFrame) -> pd.DataFrame:
"""Build class, counts, and seconds data"""
# Bet, Count, and Time Per Position
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
pos_lst = ['Pre Flop', 'Post Flop', 'Post Turn', 'Post River']
class_lst, short_class_lst = ['Checks', 'Calls', 'Raises'], ['Calls', 'Raises']
c_count = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_seconds = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_bet = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_pot = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_chips = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_pos, t_bet, t_class, t_second = row['Position'], row['Bet Amount'], row['Class'], row['Seconds']
t_key = t_pos + ' ' + t_class
t_c_count[t_key] += 1
if t_c_seconds[t_key] is not None:
t_c_seconds[t_key] = native_mean(data=[t_c_seconds[t_key]] + [t_second])
else:
t_c_seconds[t_key] = t_second
if t_class != 'Checks':
if t_c_bet[t_key] is not None:
t_c_bet[t_key] = native_mean(data=[t_c_bet[t_key]] + [t_bet])
else:
t_c_bet[t_key] = t_bet
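            # bet expressed as a fraction of the pot before this bet (assumes 'Pot Size' already includes the bet)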
bet_pot_per = t_bet / (row['Pot Size'] - t_bet)
if t_c_bet_per_pot[t_key] is not None:
t_c_bet_per_pot[t_key] = native_mean(data=[t_c_bet_per_pot[t_key]] + [bet_pot_per])
else:
t_c_bet_per_pot[t_key] = bet_pot_per
bet_chip_per = t_bet / (row['Player Current Chips'] + t_bet)
if t_c_bet_per_chips[t_key] is not None:
t_c_bet_per_chips[t_key] = native_mean(data=[t_c_bet_per_chips[t_key]] + [bet_chip_per])
else:
t_c_bet_per_chips[t_key] = bet_chip_per
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Class Count': c_count, 'Class Seconds': c_seconds, 'Class Bet': c_bet,
'Class Bet Percent of Pot': c_bet_per_pot, 'Class Bet Percent of Chips': c_bet_per_chips,
'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
@dataclass
class TSanalysis:
"""
Calculate Time Series stats for a player.
:param data: Input DocumentFilter.
:type data: DocumentFilter
:param upper_q: Upper Quantile percent, default is 0.841. *Optional*
:type upper_q: float
:param lower_q: Lower Quantile percent, default is 0.159. *Optional*
:type lower_q: float
:param window: Rolling window, default is 5. *Optional*
:type window: int
:example:
>>> from poker.time_series_class import TSanalysis
>>> docu_filter = DocumentFilter(data=poker, player_index_lst=['DZy-22KNBS'])
>>> TSanalysis(data=docu_filter)
:note: This class expects a DocumentFilter with only one player_index used.
"""
def __init__(self, data: DocumentFilter, upper_q: Optional[float] = 0.841, lower_q: Optional[float] = 0.159,
window: Optional[int] = 5):
self._docu_filter = data
self._window = window
self._upper_q = upper_q
self._lower_q = lower_q
self._df = data.df
hand_df = _ts_hand(data=self._df)
self._hand = hand_df.copy()
position_df = _ts_position(data=self._df)
self._position = position_df.copy()
class_df = _ts_class_counts_seconds(data=self._df)
self._class = class_df.copy()
hand_cols, hand_ind = hand_df.columns, hand_df.index
self._hand_mean = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_std = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_median = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_upper_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_lower_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
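        # rolling statistics over the last `window` hands for every hand-level feature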
for col in hand_cols:
if col not in ['Game Id', 'index', 'Start Time']:
self._hand_mean[col] = running_mean(data=hand_df[col], num=self._window)
self._hand_std[col] = running_std(data=hand_df[col], num=self._window)
self._hand_median[col] = running_median(data=hand_df[col], num=self._window)
self._hand_upper_q[col] = running_percentile(data=hand_df[col], num=self._window, q=upper_q)
self._hand_lower_q[col] = running_percentile(data=hand_df[col], num=self._window, q=lower_q)
pos_cols, pos_ind = position_df.columns, position_df.index
self._position_mean = pd.DataFrame(columns=pos_cols, index=pos_ind)
self._position_std = pd.DataFrame(columns=pos_cols, index=pos_ind)
        self._position_median = pd.DataFrame(columns=pos_cols, index=pos_ind)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from pyarrow.parquet import ParquetFile
from kartothek.serialization import (
CsvSerializer,
DataFrameSerializer,
ParquetSerializer,
default_serializer,
)
from kartothek.serialization._util import ensure_unicode_string_type
TYPE_STABLE_SERIALISERS = [ParquetSerializer()]
SERLIALISERS = TYPE_STABLE_SERIALISERS + [
CsvSerializer(),
CsvSerializer(compress=False),
default_serializer(),
]
type_stable_serialisers = pytest.mark.parametrize("serialiser", TYPE_STABLE_SERIALISERS)
predicate_serialisers = pytest.mark.parametrize(
"serialiser",
[
ParquetSerializer(chunk_size=1),
ParquetSerializer(chunk_size=2),
ParquetSerializer(chunk_size=4),
]
+ SERLIALISERS,
)
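# small chunk_size values are assumed to split the Parquet file into several row groups,
# exercising row-group level predicate pushdown in addition to the default single-group case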
def test_load_df_from_store_unsupported_format(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, "test.unknown")
def test_store_df_to_store(store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
dataframe_format = default_serializer()
assert isinstance(dataframe_format, ParquetSerializer)
key = dataframe_format.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_store_table_to_store(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
table = pa.Table.from_pandas(df)
key = serialiser.store(store, "prefix", table)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip(serialiser, store):
if serialiser in TYPE_STABLE_SERIALISERS:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], b"d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
else:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
# Test that all serialisers can ingest predicate_pushdown_to_io
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["a", "c"], predicate_pushdown_to_io=False
),
df[["a", "c"]],
)
# Test that all serialisers can deal with categories
expected = df[["c", "d"]].copy()
expected["c"] = expected["c"].astype("category")
# Check that the dtypes match but don't care about the order of the categoricals.
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["c", "d"], categories=["c"]
),
expected,
check_categorical=False,
)
# Test restore w/ empty col list
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=[]), df[[]]
)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_missing_column(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]})
key = serialiser.store(store, "prefix", df)
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "x"])
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_empty(serialiser, store):
df = pd.DataFrame({})
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_no_rows(serialiser, store):
df = pd.DataFrame({"a": [], "b": [], "c": []}).astype(object)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
def test_filter_query_predicate_exclusion(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(
store, "test.parquet", predicates=[[("a", "==", 1)]], filter_query="True"
)
def assert_frame_almost_equal(df_left, df_right):
"""
Be more friendly to some dtypes that are not preserved during the roundtrips.
"""
# FIXME: This needs a better documentation
for col in df_left.columns:
if pd.api.types.is_datetime64_dtype(
df_left[col].dtype
) and pd.api.types.is_object_dtype(df_right[col].dtype):
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_datetime64_dtype(df_right[col].dtype):
df_left[col] = pd.to_datetime(df_left[col])
elif (
len(df_left) > 0
and pd.api.types.is_object_dtype(df_left[col].dtype)
and pd.api.types.is_object_dtype(df_right[col].dtype)
):
if isinstance(df_left[col].iloc[0], datetime.date) or isinstance(
df_right[col].iloc[0], datetime.date
):
df_left[col] = pd.to_datetime(df_left[col])
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_categorical_dtype(df_right[col].dtype):
df_left[col] = df_left[col].astype(df_right[col].dtype)
pdt.assert_frame_equal(
df_left.reset_index(drop=True), df_right.reset_index(drop=True)
)
@pytest.mark.parametrize(
"df, read_kwargs",
[
(pd.DataFrame({"string_ü": ["abc", "affe", "banane", "buchstabe_ü"]}), {}),
(pd.DataFrame({"integer_ü": np.arange(4)}), {}),
(pd.DataFrame({"float_ü": [-3.141591, 0.0, 3.141593, 3.141595]}), {}),
(
pd.DataFrame(
{
"date_ü": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": False},
),
(
pd.DataFrame(
{
"date_ü": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": True},
),
(
pd.DataFrame(
{"categorical_ü": list("abcd")},
dtype=pd.api.types.CategoricalDtype(list("abcd"), ordered=True),
),
{},
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown(
store, df, read_kwargs, predicate_pushdown_to_io, serialiser
):
"""
Test predicate pushdown for several types and operations.
The DataFrame parameters all need to be of same length for this test to
work universally. Also the values in the DataFrames need to be sorted in
ascending order.
"""
# All test dataframes need to have the same length
assert len(df) == 4
assert df[df.columns[0]].is_monotonic and df.iloc[0, 0] < df.iloc[-1, 0]
# This is due to the limitation that dates cannot be expressed in
# Pandas' query() method.
if isinstance(serialiser, CsvSerializer) and isinstance(
df.iloc[0, 0], datetime.date
):
pytest.skip("CsvSerialiser cannot filter on dates")
key = serialiser.store(store, "prefix", df)
# Test `<` and `>` operators
expected = df.iloc[[1, 2], :].copy()
predicates = [
[(df.columns[0], "<", df.iloc[3, 0]), (df.columns[0], ">", df.iloc[0, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
    # Test `<=` and `>=` operators
expected = df.iloc[[1, 2, 3], :].copy()
predicates = [
[(df.columns[0], "<=", df.iloc[3, 0]), (df.columns[0], ">=", df.iloc[1, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `==` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "==", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `in` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", [df.iloc[1, 0]])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `!=` operator
expected = df.iloc[[0, 2, 3], :].copy()
predicates = [[(df.columns[0], "!=", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test empty DataFrame
expected = df.head(0)
predicates = [[(df.columns[0], "<", df.iloc[0, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in empty list
expected = df.head(0)
predicates = [[(df.columns[0], "in", [])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in numpy array
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", np.asarray([df.iloc[1, 0], df.iloc[1, 0]]))]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test malformed predicates 1
predicates = []
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Empty predicates"
# Test malformed predicates 2
predicates = [[]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 0 is empty"
# Test malformed predicates 3
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], []]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 1 is empty"
# Test malformed predicates 4
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], ["foo"]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert (
str(exc.value)
== "Invalid predicates: Clause 0 in conjunction 1 should be a 3-tuple, got object of type <class 'str'> instead"
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_big(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [3141590.0, 3141592.0, 3141594.0]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 3141592.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_small(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [0.3141590, 0.3141592, 0.3141594]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 0.3141592)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@type_stable_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_eval_string_types(serialiser, store, predicate_pushdown_to_io):
df = pd.DataFrame({b"a": [1, 2], "b": [3.0, 4.0]})
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
for col in ["a", b"a", "a"]:
predicates = [[(col, "==", 1)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for col in ["b", b"b", "b"]:
predicates = [[(col, "==", 3.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for preds in (
[[("a", "==", 1), ("b", "==", 3.0)]],
[[("a", "==", 1), (b"b", "==", 3.0)]],
[[(b"a", "==", 1), ("b", "==", 3.0)]],
):
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=preds,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@pytest.mark.parametrize(
"df,value",
[
(pd.DataFrame({"u": pd.Series([None], dtype=object)}), "foo"),
(pd.DataFrame({"b": pd.Series([None], dtype=object)}), b"foo"),
(pd.DataFrame({"f": pd.Series([np.nan], dtype=float)}), 1.2),
(
pd.DataFrame({"t": pd.Series([pd.NaT], dtype="datetime64[ns]")}),
pd.Timestamp("2017"),
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown_null_col(
store, df, value, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
expected = df.iloc[[]].copy()
predicates = [[(df.columns[0], "==", value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
check_datetimelike_compat = (
isinstance(value, pd.Timestamp) and not serialiser.type_stable
)
pdt.assert_frame_equal(
result.reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=serialiser.type_stable,
check_datetimelike_compat=check_datetimelike_compat,
)
@pytest.mark.parametrize(
"df, op, value, expected_index",
[
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"==",
None,
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None],
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"!=",
None,
[1],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None, "x"],
[0, 1, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"==",
np.nan,
[0, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"in",
[np.nan],
[0, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"!=",
np.nan,
[1],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"in",
[np.nan, 1.0],
[0, 1, 2],
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_parsing_null_values(
store, df, op, value, expected_index, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
expected = df.iloc[expected_index].copy()
predicates = [[(df.columns[0], op, value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
pdt.assert_frame_equal(
result.reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=serialiser.type_stable,
)
@pytest.mark.parametrize("op", ["<", "<=", ">", ">="])
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_parsing_null_values_failing(
store, op, predicate_pushdown_to_io, serialiser
):
df = pd.DataFrame({"u":
|
pd.Series([1.0, np.nan])
|
pandas.Series
|
import sys
from pathlib import Path
import pandas as pd
import pandas_flavor as pf
from janitor import coalesce
from pandas.api.types import is_numeric_dtype, is_string_dtype
sys.path.append(str(Path.cwd()))
from config import root_dir # noqa: E402
from utils import ( # noqa: E402
get_module_purpose,
map_player_names,
read_args,
read_ff_csv,
retrieve_team_abbreviation,
)
@pf.register_dataframe_method
def add_injury_feature_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Parse each row of injury data by player. Adds the following columns:
* has_dnp_status: Indicates if the player had a DNP (Did Not Play) status
during the week.
* has_limited_status: Indicates if the player had a limited status
during the week.
* most_recent_injury_status: The most recently reported injury status.
* n_injuries: The number of injuries reported for the player.
Args:
df (pd.DataFrame): The dataframe of week-player level injury data.
Returns:
pd.DataFrame: The original dataframe with the added injury columns.
"""
injury_features_lst = list()
for row in df.itertuples(index=False):
status = (
row.mon_status,
row.tue_status,
row.wed_status,
row.thu_status,
row.fri_status,
row.sat_status,
row.sun_status,
)
        # '--' indicates an unknown status. Translate to missing.
status = ["" if x == "--" else x for x in status]
# has DNP tag
has_dnp_tag = int(any([x for x in status if x == "DNP"]))
# has limited tag
has_limited_tag = int(any([x for x in status if x == "Limited"]))
        # an empty string indicates no status was reported during the week
try:
most_recent_status = [x for x in status if x][-1]
except IndexError:
most_recent_status = ""
# count number of injuries
n_injuries = len(row.injury_type.split(","))
feature_row = [
row.name,
row.position,
row.team,
row.week,
row.season_year,
has_dnp_tag,
has_limited_tag,
most_recent_status,
n_injuries,
]
injury_features_lst.append(feature_row)
injury_features_df = pd.DataFrame(
injury_features_lst,
columns=[
"name",
"position",
"team",
"week",
"season_year",
"has_dnp_tag",
"has_limited_tag",
"most_recent_injury_status",
"n_injuries",
],
)
df = pd.merge(
df,
injury_features_df,
how="inner",
on=["name", "position", "team", "week", "season_year"],
)
return df
def _convert_plural_injury_to_singular(injury: str) -> str:
"""Converts a plural injury type to a singular injury type.
For example, 'Broken Arms' becomes 'Broken Arm', or 'Ribs' becomes 'Rib'.
Args:
injury (str): The injury type to convert.
Returns:
str: The singularized injury type.
"""
injury_split = list(injury)
if injury_split[-1] == "s":
return injury[: len(injury_split) - 1]
return injury
@pf.register_dataframe_method
def process_injury_type(df: pd.DataFrame, column: str = "injury_type") -> pd.DataFrame:
"""Formats the injury type column by applying the following transformations:
* Convert all injury text to lower case
* Strip "left" and "right" from the end of the injury text
* Convert 'not injury related', 'non football injury',
and 'load management' to 'non-injury related'
* If multiple conditions are present, take the first injury reported,
splitting on comma or slash
* Convert "abdomen", "core", "stomach" to "abdomen"
* Convert a plural injury type to a singular injury type
Args:
df (pd.DataFrame): The dataframe of week-player level injury data.
column (str): The column name of the injury type column.
Defaults to "injury_type".
Returns:
pd.DataFrame: The original dataframe with the processed injury type column.
"""
injury_type = df[column]
injury_type = ["unknown" if not x else x for x in injury_type]
injury_type = [x.lower() for x in injury_type]
injury_type = [
x.replace("right", "").replace("left", "").strip() for x in injury_type
]
injury_type = [
"not injury related"
if "not injury related" in x
or "non football injury" in x
or "load management" in x
else x
for x in injury_type
]
injury_type = [x.split(",")[0] for x in injury_type]
injury_type = [x.split("/")[0] for x in injury_type]
injury_type = [
"abdomen" if x in ["abdomen", "core", "stomach"] else x for x in injury_type
]
injury_type = [_convert_plural_injury_to_singular(x) for x in injury_type]
df[column] = injury_type
return df
@pf.register_dataframe_method
def add_missing_values_for_non_injured_players(
df: pd.DataFrame, players_df: pd.DataFrame
) -> pd.DataFrame:
"""Identifies players who were not injured during the week by taking the
cross product of the weekly injuries and all players active within a season.
Args:
df (pd.DataFrame): The dataframe of week-player level injury data.
players_df (pd.DataFrame): The dataframe of all players active within a season.
Returns:
pd.DataFrame: All player-week-injury possible combinations.
"""
min_week, max_week = min(df["week"].astype(int)), max(df["week"].astype(int))
all_season_weeks_df = pd.DataFrame({"week": list(range(min_week, max_week + 1))})
    cross_product_players_df = pd.merge(players_df, all_season_weeks_df, how="cross")
import pandas as pd
import matplotlib.pyplot as plt
'''
Introduction
Today we'll dive deep into a dataset all about LEGO. From the dataset we can ask a whole bunch of interesting questions about the history of the LEGO company, its product offering, and which LEGO set ultimately rules them all:
What is the most enormous LEGO set ever created and how many parts did it have?
How did the LEGO company start out? In which year were the first LEGO sets released and how many sets did the company sell when it first launched?
Which LEGO theme has the most sets? Is it one of LEGO's own themes like Ninjago or a theme they licensed like Harry Potter or Marvel Superheroes?
When did the LEGO company really expand its product offering? Can we spot a change in company strategy based on how many themes and sets it released year-on-year?
Did LEGO sets grow in size and complexity over time? Do older LEGO sets tend to have more or fewer parts than newer sets?
Data Source
Rebrickable has compiled data on all the LEGO pieces in existence. I recommend you download the .csv files provided in this lesson.
'''
colors = pd.read_csv('data/colors.csv')
print(colors.head())
# using dot notation
print(colors.name.nunique())
colors.groupby('is_trans').count()
colors.is_trans.value_counts()
sets = pd.read_csv('data/sets.csv')
print(sets.head())
print(sets.tail())
sets.sort_values('year').head()
sets[sets.year == 1949]
sets.sort_values('num_parts', ascending=False).head()
sets_by_year = sets.groupby('year').count()
sets_by_year['set_num'].head()
sets_by_year['set_num'].tail()
#Using Matplotlib
# resizing chart
plt.figure(figsize=(16,10))
#resizing fonts
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# add labels axis
plt.xlabel('Year', fontsize=14)
plt.ylabel('Sets', fontsize=14)
# add limit to y axis
#plt.ylim(0, 800)
# plot the chart sets
#plt.plot(sets_by_year.index, sets_by_year.set_num)
plt.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2])
plt.show()
themes_by_year = sets.groupby('year').agg({'theme_id':pd.Series.nunique})
themes_by_year.rename(columns={'theme_id':'nr_themes'}, inplace=True)
themes_by_year.head()
themes_by_year.tail()
#Using Matplotlib
# resizing chart
plt.figure(figsize=(16,10))
#resizing fonts
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# add labels axis
plt.xlabel('Year', fontsize=14)
plt.ylabel('Themes', fontsize=14)
# add limit to y axis
#plt.ylim(0, 800)
# plot the chart sets
#plt.plot(sets_by_year.index, sets_by_year.set_num)
plt.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2])
plt.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2])
plt.show()
# Two Separate Axes
ax1 = plt.gca() # get current axes
ax2 = ax1.twinx()
ax1.set_xlabel('Year')
ax1.set_ylabel('Number of sets', color='green')
ax2.set_ylabel('Number of themes', color='blue')
ax1.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2], 'g')
ax2.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2], 'b')
plt.show()
avg_parts_set = sets.groupby('year').agg({'num_parts': pd.Series.mean})
avg_parts_set.rename(columns={'num_parts':'average'}, inplace=True)
avg_parts_set.head()
avg_parts_set.tail()
plt.scatter(avg_parts_set.index[:-2], avg_parts_set.average[:-2])
set_theme_count = sets['theme_id'].value_counts()
print(set_theme_count[:5])
themes = pd.read_csv('data/themes.csv')
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import numpy as np
import re
from sklearn.externals import joblib
# This program reads in both train and test data set
# and creates a dataset dictionary
# of cleaned and sanitized data.
# result format:
# {
# 'train': <pandas.DataFrame>
# 'test': <pandas.DataFrame>
# }
# extracts title from a name, i.e.
# extract_title('Caldwell, Mr. <NAME>') = 'Mr.'
def extract_title(name):
m = re.search('[^,]+, ([^\.]+)\..*', name)
return m.group(1)
@click.command()
@click.argument('train_filepath', type=click.Path(exists=True))
@click.argument('test_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(train_filepath, test_filepath, output_filepath):
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
df_train = pd.read_csv(train_filepath, dtype={'Age': np.float64})
df_test = pd.read_csv(test_filepath, dtype={'Age': np.float64})
# combine into one data frame to process at once
df = pd.concat([df_train, df_test])
# keep information which age entries were NaN (helpful for some learners
# think logistic regression vs decision trees)
    df['Age_nan'] = pd.isnull(df['Age'])
##? not sure what this is ...
from numpy.core.numeric import True_
import pandas as pd
import numpy as np
## this function gives detailed info on NaN values of input df
from data_clean import perc_null
#these functions add a date column (x2) and correct mp season format
from data_fix_dates import game_add_mp_date, bet_add_mp_date, fix_mp_season
#these functions assign nhl_names eg 'NYR' to bet, mp, and game;
# functions use simple dictionaries
from data_fix_team_names import bet_to_nhl, mp_to_nhl, game_to_nhl
##these are two different functions for assigning game_id to df_betting, based on team, date, H/A
##one uses df_game as look up table ... other uses df_mp_teams as look up table
from data_bet_add_game_id import mp_to_bet_add_game_id_no_VH
##Stage 1. Import all the files
##file paths
path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/"
Kaggle_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/"
mp_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/"
betting_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/"
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/nhl odds 2007-08.xlsx
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/
##Kaggle files
df_game = pd.read_csv(Kaggle_path+'game.csv')
df_game_team_stats = pd.read_csv(Kaggle_path+'game_teams_stats.csv')
df_game_skater_stats = pd.read_csv(Kaggle_path+'game_skater_stats.csv')
df_game_goalie_stats =
|
pd.read_csv(Kaggle_path+'game_goalie_stats.csv')
|
pandas.read_csv
|
import json
import csv
import pandas
import logging
import logging.handlers
handler = logging.handlers.RotatingFileHandler(
filename="./output/project.log",
maxBytes=1000000
)
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
log = logging.getLogger("project.error")
log.setLevel("INFO")
log.addHandler(handler)
class Index:
def __init__(self):
with open("./sp.json", "r") as configs:
self.config = json.load(configs)
self.data = {}
self.load_data()
def load_data(self):
for dataset in self.config["data"]["dataframe"]:
if dataset["type"] == "csv":
self.data[dataset["name"]] =
|
pandas.read_csv(dataset["path"])
|
pandas.read_csv
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# %% [markdown]
# # Comparative Linguistic Analysis of bioRxiv and PMC
# %%
# %load_ext autoreload
# %autoreload 2
from collections import defaultdict, Counter
import csv
import itertools
from pathlib import Path
import numpy as np
import pandas as pd
import pickle
import spacy
from scipy.stats import chi2_contingency
from tqdm import tqdm_notebook
from annorxiver_modules.corpora_comparison_helper import (
aggregate_word_counts,
dump_to_dataframe,
get_term_statistics,
KL_divergence,
)
# %% [markdown]
# # Full Text Comparison (Global)
# %% [markdown]
# ## Gather Word Frequencies
# %%
biorxiv_count_path = Path("output/total_word_counts/biorxiv_total_count.tsv")
pmc_count_path = Path("output/total_word_counts/pmc_total_count.tsv")
nytac_count_path = Path("output/total_word_counts/nytac_total_count.tsv")
# %%
if not biorxiv_count_path.exists():
biorxiv_corpus_count = aggregate_word_counts(
list(Path("output/biorxiv_word_counts").rglob("*tsv"))
)
dump_to_dataframe(biorxiv_corpus_count, "output/biorxiv_total_count.tsv")
biorxiv_corpus_count.most_common(10)
# %%
if not pmc_count_path.exists():
pmc_corpus_count = aggregate_word_counts(
list(Path("../../pmc/pmc_corpus/pmc_word_counts").rglob("*tsv"))
)
dump_to_dataframe(pmc_corpus_count, "output/pmc_total_count.tsv")
pmc_corpus_count.most_common(10)
# %%
if not nytac_count_path.exists():
nytac_corpus_count = aggregate_word_counts(
list(Path("../../nytac/corpora_stats/output").rglob("*tsv"))
)
dump_to_dataframe(nytac_corpus_count, "output/nytac_total_count.tsv")
nytac_corpus_count.most_common(10)
# %%
biorxiv_total_count_df = pd.read_csv(biorxiv_count_path.resolve(), sep="\t")
pmc_total_count_df = pd.read_csv(pmc_count_path.resolve(), sep="\t")
nytac_total_count_df = pd.read_csv(nytac_count_path.resolve(), sep="\t")
# %%
biorxiv_sentence_length = pickle.load(open("output/biorxiv_sentence_length.pkl", "rb"))
pmc_sentence_length = pickle.load(
open("../../pmc/pmc_corpus/pmc_sentence_length.pkl", "rb")
)
nytac_sentence_length = pickle.load(
open("../../nytac/corpora_stats/nytac_sentence_length.pkl", "rb")
)
# %%
spacy_nlp = spacy.load("en_core_web_sm")
stop_word_list = list(spacy_nlp.Defaults.stop_words)
# %% [markdown]
# ## Get Corpora Comparison Stats
# %%
biorxiv_sentence_len_list = list(biorxiv_sentence_length.items())
biorxiv_data = {
"document_count": len(biorxiv_sentence_length),
"sentence_count": sum(map(lambda x: len(x[1]), biorxiv_sentence_len_list)),
"token_count": biorxiv_total_count_df["count"].sum(),
"stop_word_count": (
biorxiv_total_count_df.query(f"lemma in {stop_word_list}")["count"].sum()
),
"avg_document_length": np.mean(
list(map(lambda x: len(x[1]), biorxiv_sentence_len_list))
),
"avg_sentence_length": np.mean(
list(itertools.chain(*list(map(lambda x: x[1], biorxiv_sentence_len_list))))
),
"negatives": (biorxiv_total_count_df.query(f"dep_tag =='neg'")["count"].sum()),
"coordinating_conjunctions": (
biorxiv_total_count_df.query(f"dep_tag =='cc'")["count"].sum()
),
"coordinating_conjunctions%": (
biorxiv_total_count_df.query(f"dep_tag =='cc'")["count"].sum()
)
/ biorxiv_total_count_df["count"].sum(),
"pronouns": (biorxiv_total_count_df.query(f"pos_tag =='PRON'")["count"].sum()),
"pronouns%": (biorxiv_total_count_df.query(f"pos_tag =='PRON'")["count"].sum())
/ biorxiv_total_count_df["count"].sum(),
"passives": (
biorxiv_total_count_df.query(
f"dep_tag in ['auxpass', 'nsubjpass', 'csubjpass']"
)["count"].sum()
),
"passive%": (
biorxiv_total_count_df.query(
f"dep_tag in ['auxpass', 'nsubjpass', 'csubjpass']"
)["count"].sum()
)
/ biorxiv_total_count_df["count"].sum(),
}
# %%
pmc_sentence_len_list = list(pmc_sentence_length.items())
pmc_data = {
"document_count": len(pmc_sentence_length),
"sentence_count": sum(map(lambda x: len(x[1]), pmc_sentence_len_list)),
"token_count": pmc_total_count_df["count"].sum(),
"stop_word_count": (
pmc_total_count_df.query(f"lemma in {stop_word_list}")["count"].sum()
),
"avg_document_length": np.mean(
list(map(lambda x: len(x[1]), pmc_sentence_len_list))
),
"avg_sentence_length": np.mean(
list(itertools.chain(*list(map(lambda x: x[1], pmc_sentence_len_list))))
),
"negatives": (pmc_total_count_df.query(f"dep_tag =='neg'")["count"].sum()),
"coordinating_conjunctions": (
pmc_total_count_df.query(f"dep_tag =='cc'")["count"].sum()
),
"coordinating_conjunctions%": (
pmc_total_count_df.query(f"dep_tag =='cc'")["count"].sum()
)
/ pmc_total_count_df["count"].sum(),
"pronouns": (pmc_total_count_df.query(f"pos_tag =='PRON'")["count"].sum()),
"pronouns%": (pmc_total_count_df.query(f"pos_tag =='PRON'")["count"].sum())
/ pmc_total_count_df["count"].sum(),
"passives": (
pmc_total_count_df.query(f"dep_tag in ['auxpass', 'nsubjpass', 'csubjpass']")[
"count"
].sum()
),
"passive%": (
pmc_total_count_df.query(f"dep_tag in ['auxpass', 'nsubjpass', 'csubjpass']")[
"count"
].sum()
)
/ pmc_total_count_df["count"].sum(),
}
# %%
nytac_sentence_len_list = list(nytac_sentence_length.items())
nytac_data = {
"document_count": len(nytac_sentence_length),
"sentence_count": sum(map(lambda x: len(x[1]), nytac_sentence_len_list)),
"token_count": nytac_total_count_df["count"].sum(),
"stop_word_count": (
nytac_total_count_df.query(f"lemma in {stop_word_list}")["count"].sum()
),
"avg_document_length": np.mean(
list(map(lambda x: len(x[1]), nytac_sentence_len_list))
),
"avg_sentence_length": np.mean(
list(itertools.chain(*list(map(lambda x: x[1], nytac_sentence_len_list))))
),
"negatives": (nytac_total_count_df.query(f"dep_tag =='neg'")["count"].sum()),
"coordinating_conjunctions": (
nytac_total_count_df.query(f"dep_tag =='cc'")["count"].sum()
),
"coordinating_conjunctions%": (
nytac_total_count_df.query(f"dep_tag =='cc'")["count"].sum()
)
/ nytac_total_count_df["count"].sum(),
"pronouns": (nytac_total_count_df.query(f"pos_tag =='PRON'")["count"].sum()),
"pronouns%": (nytac_total_count_df.query(f"pos_tag =='PRON'")["count"].sum())
/ nytac_total_count_df["count"].sum(),
"passives": (
nytac_total_count_df.query(f"dep_tag in ['auxpass', 'nsubjpass', 'csubjpass']")[
"count"
].sum()
),
"passive%": (
nytac_total_count_df.query(f"dep_tag in ['auxpass', 'nsubjpass', 'csubjpass']")[
"count"
].sum()
)
/ nytac_total_count_df["count"].sum(),
}
# %%
# This dataframe contains document statistics for each Corpus
# document count - the number of documents within the corpus
# Sentence count - the number of sentences within the corpus
# Token count - the number of tokens within the corpus
# Stop word counts - the number of stop words within the corpus
# Average document length - the average number of sentences within a document for a given corpus
# Average sentence length - the average number of words within a sentence for a given corpus
# Negatives - the number of negations (e.g. placing "not" within a sentence) within a given corpus
# Coordinating Conjunctions - the number of coordinating conjunctions (and, but, for etc.) within a given corpus
# Pronouns - the number of pronouns within a given corpus
# Passive - the number of passive words within a given corpus
token_stats_df = pd.DataFrame.from_records(
[biorxiv_data, pmc_data, nytac_data], index=["bioRxiv", "PMC", "NYTAC"]
).T
token_stats_df.to_csv("output/figures/corpora_token_stats.tsv", sep="\t")
token_stats_df
# %% [markdown]
# ## LogLikelihood + Odds Ratio + KL Divergence Calculations
# %% [markdown]
# The goal here is to compare word frequencies between bioRxiv and PubMed Central. The problem when comparing word frequencies is that non-meaningful words (aka stopwords) such as the, of, and, be, etc., appear the most often. To account for this problem, the first step here is to remove those words from the analyses.
# %% [markdown]
# ### Remove Stop words
# %%
biorxiv_total_count_df = (
biorxiv_total_count_df.query(f"lemma not in {stop_word_list}")
.groupby("lemma")
.agg({"count": "sum"})
.reset_index()
.sort_values("count", ascending=False)
)
biorxiv_total_count_df
# %%
pmc_total_count_df = (
pmc_total_count_df.query(f"lemma not in {stop_word_list}")
.groupby("lemma")
.agg({"count": "sum"})
.reset_index()
.sort_values("count", ascending=False)
.iloc[2:]
)
pmc_total_count_df
# %%
nytac_total_count_df = (
nytac_total_count_df.query(f"lemma not in {stop_word_list}")
.groupby("lemma")
.agg({"count": "sum"})
.reset_index()
.sort_values("count", ascending=False)
)
nytac_total_count_df
# %% [markdown]
# ### Calculate LogLikelihoods and Odds ratios
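# As a rough sketch of the kind of statistic computed here (the actual implementation lives in
# `get_term_statistics` from `annorxiver_modules.corpora_comparison_helper` and may differ in detail),
# a Dunning-style log-likelihood compares a term's observed counts in two corpora against the counts
# expected if both corpora used the term at the same rate, and the odds ratio compares its relative
# frequency between the two corpora.
# %%
# Illustrative only -- not the project's helper. Assumes non-zero counts.
# a, b: the term's counts in corpus 1 and corpus 2; n1, n2: total token counts of each corpus.
def term_g2_and_odds(a, b, n1, n2):
    e1 = n1 * (a + b) / (n1 + n2)  # expected count in corpus 1 under a shared rate
    e2 = n2 * (a + b) / (n1 + n2)  # expected count in corpus 2 under a shared rate
    g2 = 2 * (a * np.log(a / e1) + b * np.log(b / e2))  # log-likelihood ratio statistic
    odds = (a / (n1 - a)) / (b / (n2 - b))  # odds ratio of the term between the corpora
    return g2, odds


term_g2_and_odds(a=120, b=40, n1=10_000, n2=9_000)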
# %%
biorxiv_vs_pmc = get_term_statistics(biorxiv_total_count_df, pmc_total_count_df, 100)
biorxiv_vs_pmc.to_csv(
"output/comparison_stats/biorxiv_vs_pmc_comparison.tsv", sep="\t", index=False
)
biorxiv_vs_pmc
# %%
biorxiv_vs_nytac = get_term_statistics(
biorxiv_total_count_df, nytac_total_count_df, 100
)
biorxiv_vs_nytac.to_csv(
"output/comparison_stats/biorxiv_nytac_comparison.tsv", sep="\t", index=False
)
biorxiv_vs_nytac
# %%
pmc_vs_nytac = get_term_statistics(pmc_total_count_df, nytac_total_count_df, 100)
pmc_vs_nytac.to_csv(
"output/comparison_stats/pmc_nytac_comparison.tsv", sep="\t", index=False
)
pmc_vs_nytac
# %% [markdown]
# ## Calculate KL Divergence
# %%
term_grid = [100, 200, 300, 400, 500, 1000, 1500, 2000, 3000, 5000]
kl_data = []
for num_terms in tqdm_notebook(term_grid):
kl_data.append(
{
"num_terms": num_terms,
"KL_divergence": KL_divergence(
biorxiv_total_count_df, pmc_total_count_df, num_terms=num_terms
),
"comparison": "biorxiv_vs_pmc",
}
)
kl_data.append(
{
"num_terms": num_terms,
"KL_divergence": KL_divergence(
biorxiv_total_count_df, nytac_total_count_df, num_terms=num_terms
),
"comparison": "biorxiv_vs_nytac",
}
)
kl_data.append(
{
"num_terms": num_terms,
"KL_divergence": KL_divergence(
pmc_total_count_df, nytac_total_count_df, num_terms=num_terms
),
"comparison": "pmc_vs_nytac",
}
)
# %%
kl_metrics = pd.DataFrame.from_records(kl_data)
kl_metrics.to_csv(
"output/comparison_stats/corpora_kl_divergence.tsv", sep="\t", index=False
)
kl_metrics
# %% [markdown]
# # Preprint to Published View
# %%
mapped_doi_df = (
|
pd.read_csv("../journal_tracker/output/mapped_published_doi.tsv", sep="\t")
|
pandas.read_csv
|
from datetime import datetime
from unittest.mock import mock_open, patch
import numpy as np
import pandas as pd
import pytest
from pgcom import exc
from .conftest import commuter, delete_table, with_table
def create_test_table(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
var_1 timestamp,
var_2 integer NOT NULL PRIMARY KEY,
var_3 text,
var_4 real,
var_5 integer);
"""
def create_test_table_serial(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
id SERIAL PRIMARY KEY,
var_1 timestamp,
var_2 integer NOT NULL,
var_3 text,
var_4 real);
"""
def create_child_table(child_name, parent_name):
return f"""
CREATE TABLE IF NOT EXISTS {child_name} (
var_1 integer,
var_2 integer,
var_3 integer,
FOREIGN KEY (var_1) REFERENCES {parent_name}(var_2));
"""
def create_category_table(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
category_id SERIAL PRIMARY KEY,
category TEXT);
"""
def create_test_table_with_categories(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
var_1 integer NOT NULL PRIMARY KEY,
var_2 text,
var_3 text,
var_4 text);
"""
def create_composite_category_table(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
category_id SERIAL PRIMARY KEY,
category_1 TEXT,
category_2 TEXT,
category_3 TEXT);
"""
def create_test_data():
return pd.DataFrame(
{
"var_1": pd.date_range(datetime.now(), periods=3),
"var_2": [1, 2, 3],
"var_3": ["x", "xx", "xxx"],
"var_4": [1.1, 2.2, 3.3],
"var_5": [1, 2, 3],
}
)
def test_repr():
assert repr(commuter)[0] == "("
assert repr(commuter)[-1] == ")"
@with_table("test_table", create_test_table)
def test_execute():
assert commuter.is_table_exist("test_table")
with pytest.raises(exc.QueryExecutionError) as e:
commuter.execute("SELECT 1 FROM fake_table")
assert e.type == exc.QueryExecutionError
@with_table("test_table", create_test_table)
def test_execute_script():
assert commuter.is_table_exist("test_table")
with patch("builtins.open", mock_open(read_data="DROP TABLE test_table")):
commuter.execute_script("path/to/open")
assert not commuter.is_table_exist("test_table")
@with_table("test_table", create_test_table)
def test_select_insert():
commuter.insert("test_table", create_test_data())
df = commuter.select("SELECT * FROM test_table")
df["date"] = pd.to_datetime(df["var_1"])
assert df["date"][0].date() == datetime.now().date()
assert len(df) == 3
@with_table("test_table", create_test_table)
def test_multiple_select():
commuter.insert("test_table", create_test_data())
n_conn = commuter.get_connections_count()
for i in range(300):
df = commuter.select("SELECT * FROM test_table")
assert len(df) == 3
assert commuter.get_connections_count() - n_conn < 10
def test_insert():
with pytest.raises(exc.QueryExecutionError) as e:
commuter.insert("fake_table", create_test_data())
assert e.type == exc.QueryExecutionError
@with_table("test_table", create_test_table)
def test_select_one():
cmd = "SELECT MAX(var_2) FROM test_table"
value = commuter.select_one(cmd=cmd, default=0)
assert value == 0
commuter.copy_from("test_table", create_test_data())
value = commuter.select_one("SELECT MAX(var_2) FROM test_table")
assert value == 3
cmd = "SELECT MAX(var_2) FROM test_table WHERE var_2 > 10"
value = commuter.select_one(cmd=cmd, default=-1)
assert value == -1
value = commuter.select_one("DROP TABLE test_table", default=1)
assert value == 1
@with_table("test_table", create_test_table)
def test_table_exist():
assert commuter.is_table_exist("test_table")
delete_table(table_name="test_table")
assert not commuter.is_table_exist("test_table")
@with_table("test_table", create_test_table)
def test_copy_from():
commuter.copy_from("test_table", create_test_data())
df = commuter.select("SELECT * FROM test_table")
df["date"] = pd.to_datetime(df["var_1"])
assert df["date"][0].date() == datetime.now().date()
assert len(df) == 3
with pytest.raises(exc.CopyError) as e:
commuter.copy_from("fake_table", create_test_data())
assert e.type == exc.CopyError
@with_table("model.test_table", create_test_table)
def test_copy_from_schema():
assert commuter.is_table_exist("model.test_table")
df = create_test_data()
df["var_2"] = [1, 2, 3.01]
df["new_var_1"] = 1
df.insert(loc=0, column="new_var_2", value=[3, 2, 1])
assert df.shape == (3, 7)
commuter.copy_from("model.test_table", df, format_data=True)
data = commuter.select("SELECT * FROM model.test_table")
data["date"] = pd.to_datetime(data["var_1"])
assert data["date"][0].date() == datetime.now().date()
assert len(data) == 3
commuter.copy_from(
"model.test_table", df, format_data=True, where="var_2 in (1,2,3)"
)
assert data["date"][0].date() == datetime.now().date()
assert len(data) == 3
@with_table("model.test_table", create_test_table_serial)
def test_copy_from_incomplete_data():
df = pd.DataFrame({"var_2": [1, 2, 3], "var_3": ["x", "y", "z"]})
commuter.copy_from("model.test_table", df, format_data=True)
assert commuter.select_one("SELECT COUNT(*) FROM model.test_table") == 3
@with_table("test_table", create_test_table)
def test_format_data():
df = create_test_data()
df["var_5"] = [np.nan, np.nan, 1]
commuter.copy_from("test_table", df, format_data=True)
assert commuter.select_one("SELECT COUNT(*) FROM test_table") == 3
@with_table("test_table", create_test_table)
def test_format_text_columns():
df = create_test_data()
df["var_3"] = ["abc", "abc.abc", "abc,abc"]
commuter.copy_from("test_table", df, format_data=True)
df = commuter.select("SELECT * FROM test_table")
assert df["var_3"].to_list() == ["abc", "abc.abc", "abcabc"]
commuter.execute("DELETE FROM test_table WHERE 1=1")
df["var_3"] = [np.nan, np.nan, np.nan]
commuter.copy_from("test_table", df, format_data=True)
df = commuter.select("SELECT * FROM test_table")
assert df["var_3"].to_list() == [None, None, None]
def test_execute_with_params():
delete_table(table_name="people")
who = "Yeltsin"
age = 72
cmd = "CREATE TABLE IF NOT EXISTS people(name text, age integer)"
commuter.execute(cmd=cmd)
commuter.execute(cmd="INSERT INTO people VALUES (%s, %s)", values=(who, age))
df = commuter.select("SELECT * FROM people")
assert df["age"][0] == 72
assert len(df) == 1
delete_table(table_name="people")
@with_table("model.test_table", create_test_table)
def test_resolve_primary_conflicts():
data = create_test_data()
commuter.copy_from("model.test_table", data)
df = commuter.resolve_primary_conflicts(
"model.test_table", data, where="var_2 in (1,2,3)"
)
assert df.empty
df = commuter.resolve_primary_conflicts(
"model.test_table", data, where=f"var_1 > '{datetime(2020,1,1)}'"
)
assert df.empty
_data = data.copy()
_data["var_2"] = [-1, 2, -3]
df = commuter.resolve_primary_conflicts(
"model.test_table", _data, where=f"var_1 > '{datetime(2020,1,1)}'"
)
assert len(df) == 2
@with_table("model.test_table", create_test_table)
@with_table("child_table", create_child_table, "model.test_table")
def test_resolve_foreign_conflicts():
parent_data = create_test_data()
child_data = pd.DataFrame(
{"var_1": [1, 1, 3, 4, 5], "var_2": [1] * 5, "var_3": ["x"] * 5}
)
commuter.copy_from("model.test_table", parent_data)
df = commuter.resolve_foreign_conflicts(
table_name="child_table",
parent_name="model.test_table",
data=child_data,
where="var_2=1",
)
assert len(df) == 2
@with_table("model.test_table", create_test_table)
def test_insert_row():
commuter.insert_row(
table_name="model.test_table",
var_1=datetime(2019, 12, 9),
var_2=7,
var_3="test",
)
df = commuter.select("SELECT * FROM model.test_table")
assert len(df) == 1
assert df["var_1"][0] == datetime(2019, 12, 9)
@with_table("test_table", create_test_table)
def test_insert_numpy_types():
commuter.insert_row(
table_name="test_table",
var_1=datetime(2019, 12, 9),
var_2=np.int64(7),
var_3="test",
var_4=np.float64(7.1),
)
df = commuter.select("SELECT * FROM test_table")
assert df["var_2"][0] == 7
assert df["var_4"][0] == 7.1
@with_table("test_table", create_test_table)
def test_insert_string_with_quotes():
commuter.insert_row("test_table", var_2=1, var_3="test 'message'")
msg = commuter.select_one("SELECT var_3 FROM test_table")
assert msg == "test 'message'"
@with_table("model.test_table", create_test_table_serial)
def test_insert_row_return():
row_id = commuter.insert_row(
table_name="model.test_table",
return_id="id",
var_1=datetime(2019, 12, 9),
var_2=7,
var_3="test",
)
assert row_id == 1
df = commuter.select("SELECT * FROM model.test_table")
assert len(df) == 1
assert df["var_1"][0] == datetime(2019, 12, 9)
cmd = """
INSERT INTO model.test_table (var_1, var_2, var_3)
VALUES (%s, %s, %s)
"""
row_id = commuter.insert_return(
cmd=cmd, values=(datetime(2019, 12, 9), 8, "test"), return_id="id"
)
assert row_id == 2
with pytest.raises(exc.QueryExecutionError) as e:
_ = commuter.insert_return(
cmd="INSERT INTO model.test_table VALUES (%s,%s)",
values=(1, 1),
return_id="id",
)
assert e.type == exc.QueryExecutionError
sid = commuter.insert_return("DROP TABLE model.test_table")
assert sid == 0
@with_table("test_table", create_test_table)
def test_entry_exist():
commuter.insert("test_table", data=create_test_data())
exist = commuter.is_entry_exist("test_table", var_2=1, var_3="x")
assert exist
exist = commuter.is_entry_exist("test_table", var_2=1, var_3="xxx")
assert not exist
@with_table("test_table", create_test_table)
def test_delete_entry():
commuter.insert("test_table", data=create_test_data())
exist = commuter.is_entry_exist("test_table", var_2=1, var_3="x")
assert exist
commuter.delete_entry("test_table", var_2=1, var_3="x")
exist = commuter.is_entry_exist("test_table", var_2=1, var_3="x")
assert not exist
@with_table("model.test_table", create_test_table)
@with_table("model.category_table", create_category_table)
def test_encode_category():
data =
|
pd.DataFrame({"var_2": [1, 2, 3], "var_3": ["xxx", "x", "xxx"]})
|
pandas.DataFrame
|
import pandas as pd
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import random
from sklearn import svm
from keras.optimizers import Adam
from keras.layers import LeakyReLU
from nltk.stem import WordNetLemmatizer
import operator
from textblob import TextBlob
from nltk.tokenize import sent_tokenize, word_tokenize
import nltk
import re
from wordcloud import WordCloud
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Model
from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
class MBTI():
def __init__(self):
self.csv_path = "mbti_1.csv"
self.df = pd.read_csv(self.csv_path)
self.original_df = self.df.copy()
self.porter = PorterStemmer()
self.lancaster = LancasterStemmer()
self.lemmatizer = WordNetLemmatizer()
self.all_words = {}
def store_clean_df(self):
self.df.to_csv('clean.csv')
def load_clean_df(self):
self.df = pd.read_csv('clean.csv')
def transform_df(self):
# Transform the df into four different df - one for each subproblem (IE,JP,NS,TF)
transformed_df = self.df.copy()
transformed_df['posts'] = transformed_df['posts'].apply(lambda x: x.replace('|||', ''))
transformed_df['posts'] = transformed_df['posts'].apply(lambda x: ''.join([i for i in x if not i.isdigit()]))
counter = 0
print(transformed_df.size)
transformed_df['posts'] = transformed_df.apply(lambda row: nltk.word_tokenize(row['posts']), axis=1)
for row_posts in transformed_df['posts'].tolist():
print(counter)
print(row_posts)
counter+=1
for feature in row_posts:
try:
self.all_words[feature] += 1
                except KeyError:
                    self.all_words[feature] = 1
print('Features found')
self.all_words = dict(sorted(self.all_words.items(), key=operator.itemgetter(1), reverse=True))
keys = list(self.all_words.keys())[:5000]
exists = {}
counter = 0
for word in keys:
counter +=1
print(counter)
exists[word] = []
for row_posts in transformed_df['posts'].tolist():
features = row_posts
exists[word].append(features.count(word))
for word in exists:
transformed_df[word]= exists[word]
del transformed_df['type']
del transformed_df['posts']
IE_df = transformed_df.copy()
del IE_df['JP']
del IE_df['TF']
del IE_df['NS']
del IE_df['Unnamed: 0']
JP_df = transformed_df.copy()
del JP_df['IE']
del JP_df['TF']
del JP_df['NS']
del JP_df['Unnamed: 0']
TF_df = transformed_df.copy()
del TF_df['JP']
del TF_df['IE']
del TF_df['NS']
del TF_df['Unnamed: 0']
NS_df = transformed_df.copy()
del NS_df['JP']
del NS_df['IE']
del NS_df['TF']
del NS_df['Unnamed: 0']
print('Finished')
return IE_df, JP_df, TF_df, NS_df
def post_cleaner(self, post):
post = post.lower()
post = re.sub(
r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''',
'', post, flags=re.MULTILINE)
puncs1 = ['@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '_', '+', '=', '{', '}', '[', ']', '\\', '"',
"'", ';', ':', '<', '>', '/']
for punc in puncs1:
post = post.replace(punc, '')
puncs2 = [',', '.', '?', '!', '\n']
for punc in puncs2:
post = post.replace(punc, ' ')
post = re.sub('\s+', ' ', post).strip()
return post
def perform_eda(self):
# ++++++ Print information and description of the data
#print("+++++++++++ self.df.info:")
print(self.df.info())
types = self.df.type.tolist()
pd.Series(types).value_counts().plot(kind="bar")
plt.savefig("plot1.png")
def stemSentence(self, sentence):
token_words = word_tokenize(sentence)
stem_sentence = []
for word in token_words:
stem_sentence.append(self.lemmatizer.lemmatize(word))
stem_sentence.append(" ")
return "".join(stem_sentence)
def prepare_df(self):
posts = self.df.posts.tolist()
#clean
posts = [self.post_cleaner(post) for post in posts]
#lemmatize
posts = [self.stemSentence(post) for post in posts]
self.df['posts'] = posts
#print(self.df.head(1))
# Create 4 more columns for binary classification - LABEL ENCODING, ONE-HOT ENCODING
map1 = {"I": 0, "E": 1}
map2 = {"N": 0, "S": 1}
map3 = {"T": 0, "F": 1}
map4 = {"J": 0, "P": 1}
self.df['IE'] = self.df['type'].astype(str).str[0]
self.df['IE'] = self.df['IE'].map(map1)
self.df['NS'] = self.df['type'].astype(str).str[1]
self.df['NS'] = self.df['NS'].map(map2)
self.df['TF'] = self.df['type'].astype(str).str[2]
self.df['TF'] = self.df['TF'].map(map3)
self.df['JP'] = self.df['type'].astype(str).str[3]
self.df['JP'] = self.df['JP'].map(map4)
def add_features(self):
# Add new features, such as words per comment, links per comment, images per comment...
self.df['ellipsis_per_comment'] = self.df['posts'].apply(lambda x: x.count('...') / (x.count("|||") + 1))
self.df['words_per_comment'] = self.df['posts'].apply(lambda x: x.count(' ') / (x.count("|||") + 1))
self.df['words'] = self.df['posts'].apply(lambda x: x.count(' '))
self.df['link_per_comment'] = self.df['posts'].apply(lambda x: x.count('http') / (x.count("|||") + 1))
self.df['smiles_per_comment'] = self.df['posts'].apply(lambda x: (x.count(':-)') + x.count(':)') + x.count(':-D') + x.count(':D')) / (x.count("|||") + 1))
self.df['sad'] = self.df['posts'].apply(lambda x: (x.count(':(') + x.count('):') ) / (x.count("|||") + 1))
self.df['heart'] = self.df['posts'].apply(lambda x: x.count('<3') / (x.count("|||") + 1))
self.df['smiling'] = self.df['posts'].apply(lambda x: x.count(';)') / (x.count("|||") + 1))
self.df['exclamation_mark_per_comment'] = self.df['posts'].apply(lambda x: x.count("!") / (x.count("|||") + 1))
self.df['question_mark_per_comment'] = self.df['posts'].apply(lambda x: x.count("?") / (x.count("|||") + 1))
self.df['polarity'] = self.df['posts'].apply(lambda x: TextBlob(x).sentiment.polarity)
def plot(self):
# Plot each category to see if it is balanced - We observe that IE and NS are fairly imbalanced.
binary1 = self.df.IE.tolist()
pd.Series(binary1).value_counts().plot(kind="bar", title="0=I, 1=E")
# plt.show()
plt.savefig("IE.png")
binary1 = self.df.NS.tolist()
|
pd.Series(binary1)
|
pandas.Series
|
import os
import sys
import zipfile
import requests
import shutil
from shapely.geometry.polygon import Polygon
from shapely.ops import transform
import pyproj
from osgeo import gdal
from pandas import DataFrame
import pandas as pd
import numpy as np
from osgeo import gdal
import pickle
from tqdm import tqdm
import click
from threading import Thread
from multiprocessing.dummy import Pool as ThreadPool
import json
import subprocess
# pixel area only depends on latitude (not longitude)
# we re-project WGS84 to cylindrical equal area
def pixel_area(pix_deg):
project = lambda x, y: pyproj.transform(pyproj.Proj(init='epsg:4326'), pyproj.Proj(proj='cea'), x, y)
offset = pix_deg / 2
lts = np.arange(90-offset, -90, -pix_deg)
area = np.empty_like(lts)
lon = 0
for y, lat in enumerate(lts):
pixel1 = Polygon([(lon - offset, lat + offset), (lon + offset, lat + offset), (lon + offset, lat - offset), (lon - offset, lat - offset)])
pixel2 = transform(project, pixel1)
area[y] = pixel2.area
return area
def get_flow_dir(row):
if not os.path.exists(f'tiles/dir/3s/{row.tile}'):
tqdm.write(f'Downloading {row.tile}...')
r = requests.get(row.url + row.tile)
with open(f'tiles/dir/3s/{row.tile}', 'wb') as f:
f.write(r.content)
try:
with zipfile.ZipFile(f'tiles/dir/3s/{row.tile}', 'r') as z:
z.extractall(path = 'tmp/')
flow_dir = gdal.Open(f'tmp/{row.tile[:-9]}/{row.tile[:-9]}/w001001.adf')
geo = flow_dir.GetGeoTransform()
ySize, xSize = flow_dir.RasterYSize, flow_dir.RasterXSize
flow_dir = flow_dir.ReadAsArray()
shutil.rmtree(f'tmp/{row.tile[:-9]}')
# data is padded into a 6000x6000 array (some tiles may be smaller):
array_5x5 = np.ones((6000, 6000), dtype='uint8') * 255
y0 = int(round((geo[3] - row.lat) / geo[5]))
y1 = 6000 - int(round(((row.lat - 5) - (geo[3] + geo[5] * ySize)) / geo[5]))
x0 = int(round((geo[0] - row.lon) / geo[1]))
x1 = 6000 - int(round(((row.lon + 5) - (geo[0] + geo[1] * xSize)) / geo[1]))
array_5x5[y0:y1, x0:x1] = flow_dir
except:
tqdm.write('Not a ZIP file!')
array_5x5 = np.zeros((6000, 6000), dtype='uint8')
return array_5x5
def pass1(cpu, drop_pixel, pix_area, df):
for row in df.iterrows():
process_tile(cpu, drop_pixel, pix_area, row[1], df, True)
def pass2(parallel, drop_pixel, pix_area, df):
i = 0
acc_dict = {}
udlr_dict = {}
while not np.all(df.done):
row = df[df.done==False].iloc[0]
process_tile(0, drop_pixel, pix_area, row, df, False, acc_dict, udlr_dict)
i += 1
if (i % 100) == 0:
print('Compressing tiles...')
compress_tiles(parallel)
for k, v in acc_dict.items():
np.savez_compressed(f'tiles/acc/3s/{k}', a=v)
compress_tiles(parallel)
# create TIFF files
print('Creating TIFF files')
for row in tqdm(df.iterrows()):
name = row[1].tile.replace('_dir_grid.zip', '_acc')
a = np.load(f'tiles/acc/3s/{name}.npz')['a']
driver = gdal.GetDriverByName('GTiff')
ds = driver.Create(f'tiles/acc/3s/{name}.tif', a.shape[1], a.shape[0], 1, gdal.GDT_Float64, ['COMPRESS=LZW'])
ds.SetGeoTransform((row[1].lon, 5/6000, 0.0, row[1].lat, 0.0, -5/6000))
ds.SetProjection('GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433],AUTHORITY[\"EPSG\",\"4326\"]]')
band = ds.GetRasterBand(1)
band.WriteArray(a)
band.SetNoDataValue(0.)
ds = None
os.remove(f'tiles/acc/3s/{name}.npz')
print('Creating VRT (Virtual Dataset)')
subprocess.check_call('gdalbuildvrt acc.vrt tiles/acc/3s/*.tif', shell=True)
def compress_tiles(parallel):
paths = []
for path in ['tmp/udlr', 'tiles/acc/3s']:
for fname in os.listdir(path):
if fname.endswith('.npy'):
paths.append(f'{path}/{fname}')
pool = ThreadPool(parallel)
pool.map(compress_tile, paths)
def compress_tile(path):
a = np.load(path)
np.savez_compressed(path[:-4], a=a)
os.remove(path)
def process_tile(cpu, drop_pixel, pix_area, row, df, first_pass, acc_dict={}, udlr_dict={}):
lat, lon = row['lat'], row['lon']
flow_dir = get_flow_dir(row)
name = row['tile'][:-len('_dir_grid.zip')]
if first_pass:
udlr_in = np.zeros((4, 6000), dtype='float64')
else:
df.loc[(df.lat==lat) & (df.lon==lon), 'done'] = True
if f'udlr_{lat}_{lon}' in udlr_dict:
udlr_in = udlr_dict[f'udlr_{lat}_{lon}']
elif os.path.exists(f'tmp/udlr/udlr_{lat}_{lon}.npz'):
udlr_in = np.load(f'tmp/udlr/udlr_{lat}_{lon}.npz')['a']
elif os.path.exists(f'tmp/udlr/udlr_{lat}_{lon}.npy'):
udlr_in = np.load(f'tmp/udlr/udlr_{lat}_{lon}.npy')
else:
print(f'Nothing to do for {name}')
df.to_pickle('tmp/df.pkl')
return
if (not first_pass) and (f'{name}_acc' in acc_dict):
flow_acc = acc_dict[f'{name}_acc']
elif os.path.exists(f'tiles/acc/3s/{name}_acc.npz'):
flow_acc = np.load(f'tiles/acc/3s/{name}_acc.npz')['a']
elif os.path.exists(f'tiles/acc/3s/{name}_acc.npy'):
flow_acc = np.load(f'tiles/acc/3s/{name}_acc.npy')
else:
flow_acc = np.zeros((6000, 6000), dtype='float64')
udlr_out = np.zeros((4, 6000+2), dtype='float64')
do_inside = first_pass
tqdm.write(f'Processing {name} (inside: {do_inside})')
row_nb = 60
for row_i in tqdm(range(0, 6000, row_nb)):
pix_area2 = pix_area[6000*((90-lat)//5):]
drop_pixel(flow_dir, flow_acc, udlr_in, udlr_out, pix_area2, do_inside, row_i, row_nb)
if first_pass:
np.savez_compressed(f'tiles/acc/3s/{name}_acc', a=flow_acc)
else:
if (f'{name}_acc' in acc_dict) or (len(acc_dict) < 10):
acc_dict[f'{name}_acc'] = flow_acc
else:
first = list(acc_dict.keys())[0]
print(f'Saving {first}')
np.save(f'tiles/acc/3s/{first}', acc_dict[first])
del acc_dict[first]
acc_dict[f'{name}_acc'] = flow_acc
if f'udlr_{lat}_{lon}' in udlr_dict:
del udlr_dict[f'udlr_{lat}_{lon}']
if os.path.exists(f'tiles/acc/3s/{name}_acc.npz'):
os.remove(f'tiles/acc/3s/{name}_acc.npz')
if os.path.exists(f'tmp/udlr/udlr_{lat}_{lon}.npz'):
os.remove(f'tmp/udlr/udlr_{lat}_{lon}.npz')
if os.path.exists(f'tmp/udlr/udlr_{lat}_{lon}.npy'):
os.remove(f'tmp/udlr/udlr_{lat}_{lon}.npy')
var = [[5, 0, 1, 0, (0, 0), (1, -1), 5, -5], [-5, 0, 0, 1, (0, -1), (1, 0), 5, 5], [0, -5, 3, 2, (1, 0), (0, -1), -5, -5], [0, 5, 2, 3, (1, -1), (0, 0), -5, 5]]
for i in range(4):
# do the sides
if np.any(udlr_out[i][1:-1]):
lat2 = lat + var[i][0]
lon2 = lon + var[i][1]
if first_pass:
udlr_name = f'tmp/udlr{cpu}/udlr_{lat2}_{lon2}'
else:
df.loc[(df.lat==lat2) & (df.lon==lon2), 'done'] = False
udlr_name = f'tmp/udlr/udlr_{lat2}_{lon2}'
if (not first_pass) and (os.path.basename(udlr_name) in udlr_dict):
udlr = udlr_dict[os.path.basename(udlr_name)]
elif os.path.exists(f'{udlr_name}.npz'):
udlr = np.load(f'{udlr_name}.npz')['a']
elif os.path.exists(f'{udlr_name}.npy'):
udlr = np.load(f'{udlr_name}.npy')
else:
udlr = np.zeros((4, 6000), dtype='float64')
udlr[var[i][2]] += udlr_out[var[i][3]][1:-1]
if first_pass:
np.savez_compressed(udlr_name, a=udlr)
else:
if (os.path.basename(udlr_name) in udlr_dict) or (len(udlr_dict) < 10):
udlr_dict[os.path.basename(udlr_name)] = udlr
else:
first = list(udlr_dict.keys())[0]
print(f'Saving {first}')
np.save(f'tmp/udlr/{first}', udlr_dict[first])
del udlr_dict[first]
udlr_dict[os.path.basename(udlr_name)] = udlr
if os.path.exists(f'{udlr_name}.npz'):
os.remove(f'{udlr_name}.npz')
# do the corners
if udlr_out[var[i][4]] != 0:
lat2 = lat + var[i][6]
lon2 = lon + var[i][7]
if first_pass:
udlr_name = f'tmp/udlr{cpu}/udlr_{lat2}_{lon2}'
else:
df.loc[(df.lat==lat2) & (df.lon==lon2), 'done'] = False
udlr_name = f'tmp/udlr/udlr_{lat2}_{lon2}'
if (not first_pass) and (os.path.basename(udlr_name) in udlr_dict):
udlr = udlr_dict[os.path.basename(udlr_name)]
elif os.path.exists(f'{udlr_name}.npz'):
udlr = np.load(f'{udlr_name}.npz')['a']
elif os.path.exists(f'{udlr_name}.npy'):
udlr = np.load(f'{udlr_name}.npy')
else:
udlr = np.zeros((4, 6000), dtype='float64')
udlr[var[i][5]] += udlr_out[var[i][4]]
if first_pass:
np.savez_compressed(udlr_name, a=udlr)
else:
if (os.path.basename(udlr_name) in udlr_dict) or (len(udlr_dict) < 10):
udlr_dict[os.path.basename(udlr_name)] = udlr
else:
first = list(udlr_dict.keys())[0]
print(f'Saving {first}')
np.save(f'tmp/udlr/{first}', udlr_dict[first])
del udlr_dict[first]
udlr_dict[os.path.basename(udlr_name)] = udlr
if os.path.exists(f'{udlr_name}.npz'):
os.remove(f'{udlr_name}.npz')
if first_pass:
df.to_pickle(f'tmp/df{cpu}.pkl')
else:
df.to_pickle('tmp/df.pkl')
@click.command()
@click.option('-n', '--numba', is_flag=True, help='Use Numba as the computing backend (otherwise, use Cython).')
@click.option('-p1', '--parallel1', default=1, help='Number of CPU cores to use for first pass.')
@click.option('-p2', '--parallel2', default=1, help='Number of CPU cores to use for second pass.')
@click.option('-r', '--reset', is_flag=True, help="Start the processing from scratch (don't download tiles if already downloaded).")
def acc_flow(numba, parallel1, parallel2, reset):
if reset:
shutil.rmtree('tmp', ignore_errors=True)
shutil.rmtree('tiles/acc', ignore_errors=True)
if numba:
sys.path.append('numba')
print('Using Numba')
else:
sys.path.append('cython')
print('Using Cython')
from drop_pixel import drop_pixel
os.makedirs('tiles/dir/3s', exist_ok=True)
os.makedirs('tiles/acc/3s', exist_ok=True)
for cpu in range(parallel1):
os.makedirs(f'tmp/udlr{cpu}', exist_ok=True)
if os.path.exists('tmp/pix_area.npy'):
pix_area = np.load('tmp/pix_area.npy')
else:
pix_area = pixel_area(5 / 6000)
pix_area = pix_area / np.max(pix_area)
np.save('tmp/pix_area', pix_area)
if os.path.exists('tmp/df.pkl'):
df = pd.read_pickle(f'tmp/df.pkl')
else:
# first pass
print('1st pass')
df = []
df_ok = True
for cpu in range(parallel1):
if os.path.exists(f'tmp/df{cpu}.pkl'):
df.append(pd.read_pickle(f'tmp/df{cpu}.pkl'))
else:
df_ok = False
if not df_ok:
with open('tiles.json') as f:
dire = json.load(f)
urls, tiles, lats, lons = [], [], [], []
for continent in dire:
for tile in dire[continent][1]:
lat = int(tile[1:3])
if tile[0] == 's':
lat = -lat
lon = int(tile[4:7])
if tile[3] == 'w':
lon = -lon
if tile not in tiles:
lats.append(lat + 5) # upper left
lons.append(lon)
tiles.append(tile)
urls.append(dire[continent][0])
df =
|
DataFrame({'lat': lats, 'lon': lons, 'tile': tiles, 'url': urls})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 14:56:53 2020
@author: robson
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
import time
import urllib3
urllib3.disable_warnings()
# Defining variables
colunas = ['Nome','Condição', 'Categoria', 'Abrangência', 'UF', 'Município', 'Coordenada(s)_geográfica(s)', 'Quantidade_de_imagens', 'Quantidade_de_videos',
'Quantidade_de_audios','Descrição_do_bem_imaterial','Localização_específica','Ações_vinculados_Tipo','Ações_vinculados_Nome','Ações_vinculados_Instrumento',
'Ações_vinculados_Situação','Área_de_ocorrência_UF','Área_de_ocorrência_Municípios','Página']
#colunas = ['Nome','Condição', 'Categoria', 'Abrangência', 'UF', 'Município', 'Coordenada(s) geográfica(s)', 'Quantidade de imagens', 'Quantidade de videos',
# 'Quantidade de audios','Descrição','Página']
url_base = "https://sicg.iphan.gov.br/sicg/bemImaterial/rel/"
clases = ['acaoInst','m']
qtd_link = 643  # number of sites to be scraped: 643
df =
|
pd.DataFrame(columns=colunas)
|
pandas.DataFrame
|
import argparse
import csv
import json
import os
import time
from collections import OrderedDict
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import yaml
def _create_model_training_folder(writer, files_to_same=0):
model_checkpoints_folder = os.path.join(writer.log_dir, 'checkpoints')
if not os.path.exists(model_checkpoints_folder):
os.makedirs(model_checkpoints_folder)
def write_score(writer, iter, mode, metrics):
writer.add_scalar(mode + '/loss', metrics.data['loss'], iter)
writer.add_scalar(mode + '/acc', metrics.data['correct'] / metrics.data['total'], iter)
def write_train_val_score(writer, epoch, train_stats, val_stats):
writer.add_scalars('Loss', {'train': train_stats[0],
'val' : val_stats[0],
}, epoch)
writer.add_scalars('Coeff', {'train': train_stats[1],
'val' : val_stats[1],
}, epoch)
writer.add_scalars('Air', {'train': train_stats[2],
'val' : val_stats[2],
}, epoch)
writer.add_scalars('CSF', {'train': train_stats[3],
'val' : val_stats[3],
}, epoch)
writer.add_scalars('GM', {'train': train_stats[4],
'val' : val_stats[4],
}, epoch)
writer.add_scalars('WM', {'train': train_stats[5],
'val' : val_stats[5],
}, epoch)
return
def showgradients(model):
for param in model.parameters():
        print(type(param.data), param.size())
print("GRADS= \n", param.grad)
def datestr():
now = time.gmtime()
return '{}{:02}{:02}_{:02}{:02}'.format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
def save_checkpoint(state, path, filename='last'):
name = os.path.join(path, filename + '_checkpoint.pth.tar')
print(name)
torch.save(state, name)
def load_checkpoint(checkpoint, model, strict=True, optimizer=None, load_seperate_layers=False):
"""Loads model parameters (state_dict) from file_path. If optimizer is provided, loads state_dict of
optimizer assuming it is present in checkpoint.
Args:
checkpoint: (string) filename which needs to be loaded
model: (torch.nn.Module) model for which the parameters are loaded
optimizer: (torch.optim) optional: resume optimizer from checkpoint
"""
checkpoint1 = torch.load(checkpoint, map_location='cpu')
print(checkpoint1.keys())
# e = checkpoint1['state_dict']['embed.weight']
# print(e,e.shape)
# torch.save( e,'/home/iliask/PycharmProjects/MScThesis/checkpoints/SSL/dataset_DM/model_idprnn
# /date_29_10_2021_22.07.39/_embed.pth')
if 'state_dict' in checkpoint1.keys():
pretrained_dict = checkpoint1['state_dict']
else:
pretrained_dict = checkpoint1
model_dict = model.state_dict()
print(pretrained_dict.keys())
print(model_dict.keys())
# # # 1. filter out unnecessary keys
# # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
pretrained_dictnew = {}
for k, v in pretrained_dict.items():
# if 'module.' in k:
# k = k[7:]
pretrained_dictnew[k] = v
print(pretrained_dictnew.keys())
if not os.path.exists(checkpoint):
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
if (not load_seperate_layers):
# model.load_state_dict(checkpoint1['model_dict'] , strict=strict)p
model.load_state_dict(pretrained_dictnew, strict=strict)
epoch = 0
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint1['optimizer_dict'])
return checkpoint1, epoch
def save_model(cpkt_dir, model, optimizer, loss, epoch, name):
save_path = cpkt_dir
make_dirs(save_path)
state = {'epoch' : epoch,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'loss' : loss}
name = os.path.join(cpkt_dir, name + '_checkpoint.pth.tar')
print(name)
torch.save(state, name)
def make_dirs(path):
if not os.path.exists(path):
os.makedirs(path)
def make_dirs_if_not_present(path):
"""
creates new directory if not present
"""
if not os.path.exists(path):
os.makedirs(path)
def create_stats_files(path):
train_f = open(os.path.join(path, 'train.csv'), 'w')
val_f = open(os.path.join(path, 'val.csv'), 'w')
return train_f, val_f
def read_json_file(fname):
with open(fname, 'r') as handle:
return json.load(handle, object_hook=OrderedDict)
def write_json_file(content, fname):
with open(fname, 'w') as handle:
json.dump(content, handle, indent=4, sort_keys=False)
def read_filepaths(file):
paths, labels = [], []
with open(file, 'r') as f:
lines = f.read().splitlines()
for idx, line in enumerate(lines):
if ('/ c o' in line):
break
subjid, path, label = line.split(' ')
paths.append(path)
labels.append(label)
return paths, labels
def read_filepaths2(file):
paths, labels = [], []
with open(file, 'r') as f:
lines = f.read().splitlines()
for idx, line in enumerate(lines):
# print(line, line.split('|'))
if ('/ c o' in line):
break
path, label, dataset = line.split('|')
path = path.split(' ')[-1]
paths.append(path)
labels.append(label)
return paths, labels
class MetricTracker:
def __init__(self, *keys, writer=None, mode='/'):
self.writer = writer
self.mode = mode + '/'
self.keys = keys
# print(self.keys)
self._data =
|
pd.DataFrame(index=keys, columns=['total', 'counts', 'average'])
|
pandas.DataFrame
|
# Author: <NAME>
# Created on: April 2020
# Last modified on: Sep 2020
"""
This script pulls COVID 19 data and creates interactive plots of COVID 19 cases in the world.
"""
#!pip install pycountry_convert
#!pip install requests
#!pip install pandas
#!pip install plotly
import requests as r
import pandas as pd
import plotly.io as pio
import pycountry_convert as pc
import plotly.offline as offline
import plotly.graph_objs as go
import numpy as np
from ipywidgets import widgets
from IPython.display import display, Javascript, Markdown, HTML, clear_output
from ipywidgets import interact, interact_manual, widgets, Layout, VBox, HBox, Button,fixed,interactive
def extract_latest(final_df):
# This function gets latest for each country
conf_dic = {}
latest_arr = []
cont_code_arr = []
country_arr = []
for country in final_df['country']:
latest = float(final_df[final_df['country']==country]['latest'].sum())
cont_code = final_df[final_df['country']==country]['continent code'].unique()[0]
latest_arr.append(latest)
cont_code_arr.append(cont_code)
country_arr.append(country)
conf_dic['country'] = country_arr
conf_dic['continent code'] = cont_code_arr
conf_dic['latest'] = latest_arr
conf_df = pd.DataFrame(conf_dic)
return conf_df
def generate_levels(df,case_type):
# The sunburst plot requires weights (values), labels, and parent (region, or World)
# We build the corresponding table here
# Inspired and adapted from https://pypi.org/project/world-bank-data/
columns = ['labels','parents', 'values']
# Build the levels
# Level 1 - Countries
level1 = df.copy()
# Rename columns
level1.columns = columns
# Add a text column - format values column
level1['text'] = level1['values'].apply(lambda pop:' ' + str(case_type)+ ' Cases: {:,.0f}'.format(pop))
level1['World total'] = level1['values'].sum()
# Create level 2 - Continents
#Group by continent code
level2 = level1.groupby(['parents']).values.sum().reset_index()[['parents', 'parents', 'values']]
# Rename columns
level2.columns = columns
level2['parents'] = 'World'
# move value to text for this level
level2['text'] = level2['values'].apply(lambda pop: ' ' + str(case_type)+ ' Cases: {:,.0f}'.format(pop))
## Create level 3 - world total as of latest date
level3 = pd.DataFrame({'parents': ['World'], 'labels': ['World'],
'values': [level1.groupby("parents").sum().sum()[0]], 'text':['{:,.0f}'.format(level1.groupby("parents").sum().sum()[0])]})
#Create master dataframe with all levels
all_levels = pd.concat([level1,level2], axis=0,sort=True)
return all_levels
def plot_sunburst(df,case_type):
last_date = pd.to_datetime('today').date()
fig = offline.iplot(dict(
data=[dict(type='sunburst', hoverinfo='text', **df,name='Overview')],
layout=dict(title='COVID-19' + ' ' + str(case_type) + ' Cases as of ' + str(last_date),
width=800,height=800,insidetextorientation='radial')),validate=False)
return fig
# Define a function to drop the history.prefix
# Create function drop_prefix
def drop_prefix(self, prefix):
    # str.lstrip() strips a set of characters rather than a prefix (which can corrupt
    # names such as "country" when stripping "coordinates."), so remove the prefix explicitly
    self.columns = [col[len(prefix):] if col.startswith(prefix) else col for col in self.columns]
return self
# Call function
pd.core.frame.DataFrame.drop_prefix = drop_prefix
# Define function which removes history. prefix, and orders the column dates in ascending order
def order_dates(flat_df):
# Drop prefix
flat_df.drop_prefix('history.')
flat_df.drop_prefix("coordinates.")
    # Isolate the date columns and convert their labels to datetime format
sub = flat_df.iloc[:,6:]
sub.columns = pd.to_datetime(sub.columns)
# Sort
sub2 = sub.reindex(sorted(sub.columns), axis=1)
sub3 = flat_df.reindex(sorted(flat_df.columns),axis=1).iloc[:,-5:]
# Concatenate
final = pd.concat([sub2,sub3], axis=1, sort=False)
return final
# We will plot the log projection along with the cumulative number of cases
def plot_log_function(country,final_df,type_case,case):
latest_arr = []
date_arr = []
for item in final_df[final_df.index==country].iloc[:,0:-5].columns:
date_arr.append(item)
latest_arr.append(final_df[final_df.index==country][item].sum())
final_confirmed_red = pd.DataFrame({"Date":date_arr,"CumulativeTotal":latest_arr})
non_cumulative_cases = final_confirmed_red.diff(axis=0)
x = final_confirmed_red.Date
if case==True:
y = final_confirmed_red.CumulativeTotal
else:
y = non_cumulative_cases.CumulativeTotal
npy = np.array(y.to_list())
l_y = np.log10(npy, where=0<npy, out=np.nan*npy)
trace1 = go.Bar(x=x,y=y,name=country)
trace2 = go.Scatter(x=x,y=l_y,name='Log ' + str(country),yaxis='y2')
layout = go.Layout(
title= ('Number of ' + str(type_case) + ' cases for ' + str(country)),
yaxis=dict(title='Total Number of ' + str(type_case).capitalize() + ' Cases',\
titlefont=dict(color='blue'), tickfont=dict(color='blue')),
yaxis2=dict(title=str(type_case).capitalize() + ' Cases (logarithmic scale)', titlefont=dict(color='red'), \
tickfont=dict(color='red'), overlaying='y', side='right'),
showlegend=False)
fig = go.Figure(data=[trace1,trace2],layout=layout)
fig.update_yaxes(showgrid=True)
fig.show()
def draw_results(b):
country = all_the_widgets[0].value
case = all_the_widgets[1].value
clear_output()
display(tab) ## Have to redraw the widgets
plot_log_function(country,final_confirmed,"confirmed",case)
plot_log_function(country,final_deaths,"fatal",case)
if __name__ == "__main__":
# Get the latest data
# Confirmed
try:
API_response_confirmed = r.get("https://covid19api.herokuapp.com/confirmed")
data = API_response_confirmed.json() # Check the JSON Response Content documentation below
confirmed_df = pd.json_normalize(data,record_path=["locations"])
print("Confirmed cases download was successful!")
except:
print("Error: check GitHub is functioning appropriately, check https://covid19api.herokuapp.com/ is not down, check fields were not renamed")
# Deaths
try:
API_response_death = r.get("https://covid19api.herokuapp.com/deaths")
data1 = API_response_death.json() # Check the JSON Response Content documentation below
death_df = pd.json_normalize(data1,record_path=["locations"])
print("Fatal cases download was successful!")
except:
print("Error: check GitHub is functioning appropriately, check https://covid19api.herokuapp.com/ is not down, check fields were not renamed")
# Latest
try:
API_summary = r.get("https://covid19api.herokuapp.com/latest")
data2 = API_summary.json()
summary =
|
pd.json_normalize(data2)
|
pandas.json_normalize
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
from logbook import Logger
import pandas as pd
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR,
MARKETS_CLOSED
)
log = Logger('Realtime Clock')
class RealtimeClock(object):
"""
Realtime clock for live trading.
This class is a drop-in replacement for
:class:`zipline.gens.sim_engine.MinuteSimulationClock`.
The key difference between the two is that the RealtimeClock's event
emission is synchronized to the (broker's) wall time clock, while
MinuteSimulationClock yields a new event on every iteration (regardless of
wall clock).
The :param:`time_skew` parameter represents the time difference between
the Broker and the live trading machine's clock.
"""
def __init__(self,
sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission,
time_skew=pd.Timedelta("0s"),
is_broker_alive=None,
execution_id=None,
stop_execution_callback=None):
today =
|
pd.to_datetime('now', utc=True)
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 16:51:39 2017
@author: AnthonyN
https://www.quantstart.com/articles/Forecasting-Financial-Time-Series-Part-1
Predicting Price Returns
"""
import numpy as np
import pandas as pd
lags = 5
start_test = pd.to_datetime('2017-06-18')
from sklearn.linear_model import LogisticRegression
ts = pd.read_csv(r'data\XMA.csv', index_col='Date')
ts.index =
|
pd.to_datetime(ts.index)
|
pandas.to_datetime
|
from thermostat.stats import combine_output_dataframes
from thermostat.stats import compute_summary_statistics
from thermostat.stats import summary_statistics_to_csv
from .fixtures.thermostats import thermostat_emg_aux_constant_on_outlier
from thermostat.multiple import multiple_thermostat_calculate_epa_field_savings_metrics
from thermostat.exporters import COLUMNS
from scipy.stats import norm, randint
import pandas as pd
import numpy as np
import json
from datetime import datetime
import tempfile
from itertools import islice, cycle
import pytest
def get_fake_output_df(n_columns):
columns = [
'sw_version',
'ct_identifier',
'equipment_type',
'heating_or_cooling',
'station',
'zipcode',
'climate_zone',
'start_date',
'end_date',
'n_days_in_inputfile_date_range',
'n_days_both_heating_and_cooling',
'n_days_insufficient_data',
'n_core_cooling_days',
'n_core_heating_days',
'baseline_percentile_core_cooling_comfort_temperature',
'baseline_percentile_core_heating_comfort_temperature',
'regional_average_baseline_cooling_comfort_temperature',
'regional_average_baseline_heating_comfort_temperature',
'percent_savings_baseline_percentile',
'avoided_daily_mean_core_day_runtime_baseline_percentile',
'avoided_total_core_day_runtime_baseline_percentile',
'baseline_daily_mean_core_day_runtime_baseline_percentile',
'baseline_total_core_day_runtime_baseline_percentile',
'_daily_mean_core_day_demand_baseline_baseline_percentile',
'percent_savings_baseline_regional',
'avoided_daily_mean_core_day_runtime_baseline_regional',
'avoided_total_core_day_runtime_baseline_regional',
'baseline_daily_mean_core_day_runtime_baseline_regional',
'baseline_total_core_day_runtime_baseline_regional',
'_daily_mean_core_day_demand_baseline_baseline_regional',
'mean_demand',
'alpha',
'tau',
'mean_sq_err',
'root_mean_sq_err',
'cv_root_mean_sq_err',
'mean_abs_err',
'mean_abs_pct_err',
'total_core_cooling_runtime',
'total_core_heating_runtime',
'total_auxiliary_heating_core_day_runtime',
'total_emergency_heating_core_day_runtime',
'daily_mean_core_cooling_runtime',
'daily_mean_core_heating_runtime',
'core_cooling_days_mean_indoor_temperature',
'core_cooling_days_mean_outdoor_temperature',
'core_heating_days_mean_indoor_temperature',
'core_heating_days_mean_outdoor_temperature',
'core_mean_indoor_temperature',
'core_mean_outdoor_temperature',
'rhu1_aux_duty_cycle',
'rhu1_emg_duty_cycle',
'rhu1_compressor_duty_cycle',
'rhu1_00F_to_05F',
'rhu1_05F_to_10F',
'rhu1_10F_to_15F',
'rhu1_15F_to_20F',
'rhu1_20F_to_25F',
'rhu1_25F_to_30F',
'rhu1_30F_to_35F',
'rhu1_35F_to_40F',
'rhu1_40F_to_45F',
'rhu1_45F_to_50F',
'rhu1_50F_to_55F',
'rhu1_55F_to_60F',
'rhu1_less10F',
'rhu1_10F_to_20F',
'rhu1_20F_to_30F',
'rhu1_30F_to_40F',
'rhu1_40F_to_50F',
'rhu1_50F_to_60F',
'rhu1_00F_to_05F_aux_duty_cycle',
'rhu1_05F_to_10F_aux_duty_cycle',
'rhu1_10F_to_15F_aux_duty_cycle',
'rhu1_15F_to_20F_aux_duty_cycle',
'rhu1_20F_to_25F_aux_duty_cycle',
'rhu1_25F_to_30F_aux_duty_cycle',
'rhu1_30F_to_35F_aux_duty_cycle',
'rhu1_35F_to_40F_aux_duty_cycle',
'rhu1_40F_to_45F_aux_duty_cycle',
'rhu1_45F_to_50F_aux_duty_cycle',
'rhu1_50F_to_55F_aux_duty_cycle',
'rhu1_55F_to_60F_aux_duty_cycle',
'rhu1_less10F_aux_duty_cycle',
'rhu1_10F_to_20F_aux_duty_cycle',
'rhu1_20F_to_30F_aux_duty_cycle',
'rhu1_30F_to_40F_aux_duty_cycle',
'rhu1_40F_to_50F_aux_duty_cycle',
'rhu1_50F_to_60F_aux_duty_cycle',
'rhu1_00F_to_05F_emg_duty_cycle',
'rhu1_05F_to_10F_emg_duty_cycle',
'rhu1_10F_to_15F_emg_duty_cycle',
'rhu1_15F_to_20F_emg_duty_cycle',
'rhu1_20F_to_25F_emg_duty_cycle',
'rhu1_25F_to_30F_emg_duty_cycle',
'rhu1_30F_to_35F_emg_duty_cycle',
'rhu1_35F_to_40F_emg_duty_cycle',
'rhu1_40F_to_45F_emg_duty_cycle',
'rhu1_45F_to_50F_emg_duty_cycle',
'rhu1_50F_to_55F_emg_duty_cycle',
'rhu1_55F_to_60F_emg_duty_cycle',
'rhu1_less10F_emg_duty_cycle',
'rhu1_10F_to_20F_emg_duty_cycle',
'rhu1_20F_to_30F_emg_duty_cycle',
'rhu1_30F_to_40F_emg_duty_cycle',
'rhu1_40F_to_50F_emg_duty_cycle',
'rhu1_50F_to_60F_emg_duty_cycle',
'rhu1_00F_to_05F_compressor_duty_cycle',
'rhu1_05F_to_10F_compressor_duty_cycle',
'rhu1_10F_to_15F_compressor_duty_cycle',
'rhu1_15F_to_20F_compressor_duty_cycle',
'rhu1_20F_to_25F_compressor_duty_cycle',
'rhu1_25F_to_30F_compressor_duty_cycle',
'rhu1_30F_to_35F_compressor_duty_cycle',
'rhu1_35F_to_40F_compressor_duty_cycle',
'rhu1_40F_to_45F_compressor_duty_cycle',
'rhu1_45F_to_50F_compressor_duty_cycle',
'rhu1_50F_to_55F_compressor_duty_cycle',
'rhu1_55F_to_60F_compressor_duty_cycle',
'rhu1_less10F_compressor_duty_cycle',
'rhu1_10F_to_20F_compressor_duty_cycle',
'rhu1_20F_to_30F_compressor_duty_cycle',
'rhu1_30F_to_40F_compressor_duty_cycle',
'rhu1_40F_to_50F_compressor_duty_cycle',
'rhu1_50F_to_60F_compressor_duty_cycle',
'rhu2_aux_duty_cycle',
'rhu2_emg_duty_cycle',
'rhu2_compressor_duty_cycle',
'rhu2_00F_to_05F',
'rhu2_05F_to_10F',
'rhu2_10F_to_15F',
'rhu2_15F_to_20F',
'rhu2_20F_to_25F',
'rhu2_25F_to_30F',
'rhu2_30F_to_35F',
'rhu2_35F_to_40F',
'rhu2_40F_to_45F',
'rhu2_45F_to_50F',
'rhu2_50F_to_55F',
'rhu2_55F_to_60F',
'rhu2_less10F',
'rhu2_10F_to_20F',
'rhu2_20F_to_30F',
'rhu2_30F_to_40F',
'rhu2_40F_to_50F',
'rhu2_50F_to_60F',
'rhu2_00F_to_05F_aux_duty_cycle',
'rhu2_05F_to_10F_aux_duty_cycle',
'rhu2_10F_to_15F_aux_duty_cycle',
'rhu2_15F_to_20F_aux_duty_cycle',
'rhu2_20F_to_25F_aux_duty_cycle',
'rhu2_25F_to_30F_aux_duty_cycle',
'rhu2_30F_to_35F_aux_duty_cycle',
'rhu2_35F_to_40F_aux_duty_cycle',
'rhu2_40F_to_45F_aux_duty_cycle',
'rhu2_45F_to_50F_aux_duty_cycle',
'rhu2_50F_to_55F_aux_duty_cycle',
'rhu2_55F_to_60F_aux_duty_cycle',
'rhu2_less10F_aux_duty_cycle',
'rhu2_10F_to_20F_aux_duty_cycle',
'rhu2_20F_to_30F_aux_duty_cycle',
'rhu2_30F_to_40F_aux_duty_cycle',
'rhu2_40F_to_50F_aux_duty_cycle',
'rhu2_50F_to_60F_aux_duty_cycle',
'rhu2_00F_to_05F_emg_duty_cycle',
'rhu2_05F_to_10F_emg_duty_cycle',
'rhu2_10F_to_15F_emg_duty_cycle',
'rhu2_15F_to_20F_emg_duty_cycle',
'rhu2_20F_to_25F_emg_duty_cycle',
'rhu2_25F_to_30F_emg_duty_cycle',
'rhu2_30F_to_35F_emg_duty_cycle',
'rhu2_35F_to_40F_emg_duty_cycle',
'rhu2_40F_to_45F_emg_duty_cycle',
'rhu2_45F_to_50F_emg_duty_cycle',
'rhu2_50F_to_55F_emg_duty_cycle',
'rhu2_55F_to_60F_emg_duty_cycle',
'rhu2_less10F_emg_duty_cycle',
'rhu2_10F_to_20F_emg_duty_cycle',
'rhu2_20F_to_30F_emg_duty_cycle',
'rhu2_30F_to_40F_emg_duty_cycle',
'rhu2_40F_to_50F_emg_duty_cycle',
'rhu2_50F_to_60F_emg_duty_cycle',
'rhu2_00F_to_05F_compressor_duty_cycle',
'rhu2_05F_to_10F_compressor_duty_cycle',
'rhu2_10F_to_15F_compressor_duty_cycle',
'rhu2_15F_to_20F_compressor_duty_cycle',
'rhu2_20F_to_25F_compressor_duty_cycle',
'rhu2_25F_to_30F_compressor_duty_cycle',
'rhu2_30F_to_35F_compressor_duty_cycle',
'rhu2_35F_to_40F_compressor_duty_cycle',
'rhu2_40F_to_45F_compressor_duty_cycle',
'rhu2_45F_to_50F_compressor_duty_cycle',
'rhu2_50F_to_55F_compressor_duty_cycle',
'rhu2_55F_to_60F_compressor_duty_cycle',
'rhu2_less10F_compressor_duty_cycle',
'rhu2_10F_to_20F_compressor_duty_cycle',
'rhu2_20F_to_30F_compressor_duty_cycle',
'rhu2_30F_to_40F_compressor_duty_cycle',
'rhu2_40F_to_50F_compressor_duty_cycle',
'rhu2_50F_to_60F_compressor_duty_cycle',
]
string_placeholder = ["PLACEHOLDER"] * n_columns
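    # Indicator and float columns are mostly constant 0s/1s or standard-normal draws,
    # with an occasional None or np.inf mixed in to exercise handling of missing and
    # non-finite values.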
zero_column = [0 if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
for i in randint.rvs(0, 1, size=n_columns)]
one_column = [1 if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
for i in randint.rvs(0, 1, size=n_columns)]
float_column = [i if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
for i in norm.rvs(size=n_columns)]
zipcodes = ["01234", "12345", "23456", "34567", "43210", "54321", "65432", "76543"]
zipcode_column = [i for i in islice(cycle(zipcodes), None, n_columns)]
core_day_set_names = ["cooling_2012", "heating_2012-2013", "cooling_2013"]
core_day_set_name_column = [i for i in islice(cycle(core_day_set_names), None, n_columns)]
data = {
'sw_version': string_placeholder,
'ct_identifier': string_placeholder,
'equipment_type': string_placeholder,
'heating_or_cooling': core_day_set_name_column,
'station': string_placeholder,
'zipcode': zipcode_column,
'climate_zone': string_placeholder,
'start_date': datetime(2011, 1, 1),
'end_date': datetime(2012, 1, 1),
'n_days_both_heating_and_cooling': one_column,
'n_days_in_inputfile_date_range': one_column,
'n_days_insufficient_data': zero_column,
        'n_core_cooling_days': one_column,
        'n_core_heating_days': one_column,
'baseline_percentile_core_cooling_comfort_temperature': float_column,
'baseline_percentile_core_heating_comfort_temperature': float_column,
'regional_average_baseline_cooling_comfort_temperature': float_column,
'regional_average_baseline_heating_comfort_temperature': float_column,
'percent_savings_baseline_percentile': float_column,
'avoided_daily_mean_core_day_runtime_baseline_percentile': float_column,
'avoided_total_core_day_runtime_baseline_percentile': float_column,
'baseline_daily_mean_core_day_runtime_baseline_percentile': float_column,
'baseline_total_core_day_runtime_baseline_percentile': float_column,
'_daily_mean_core_day_demand_baseline_baseline_percentile': float_column,
'percent_savings_baseline_regional': float_column,
'avoided_daily_mean_core_day_runtime_baseline_regional': float_column,
'avoided_total_core_day_runtime_baseline_regional': float_column,
'baseline_daily_mean_core_day_runtime_baseline_regional': float_column,
'baseline_total_core_day_runtime_baseline_regional': float_column,
'_daily_mean_core_day_demand_baseline_baseline_regional': float_column,
'mean_demand': float_column,
'alpha': float_column,
'tau': float_column,
'mean_sq_err': float_column,
'root_mean_sq_err': float_column,
'cv_root_mean_sq_err': float_column,
'mean_abs_err': float_column,
'mean_abs_pct_err': float_column,
'total_core_cooling_runtime': float_column,
'total_core_heating_runtime': float_column,
'total_auxiliary_heating_core_day_runtime': float_column,
'total_emergency_heating_core_day_runtime': float_column,
'daily_mean_core_cooling_runtime': float_column,
'daily_mean_core_heating_runtime': float_column,
'core_cooling_days_mean_indoor_temperature': float_column,
'core_cooling_days_mean_outdoor_temperature': float_column,
'core_heating_days_mean_indoor_temperature': float_column,
'core_heating_days_mean_outdoor_temperature': float_column,
'core_mean_indoor_temperature': float_column,
'core_mean_outdoor_temperature': float_column,
'rhu1_aux_duty_cycle': float_column,
'rhu1_emg_duty_cycle': float_column,
'rhu1_compressor_duty_cycle': float_column,
'rhu1_00F_to_05F': float_column,
'rhu1_05F_to_10F': float_column,
'rhu1_10F_to_15F': float_column,
'rhu1_15F_to_20F': float_column,
'rhu1_20F_to_25F': float_column,
'rhu1_25F_to_30F': float_column,
'rhu1_30F_to_35F': float_column,
'rhu1_35F_to_40F': float_column,
'rhu1_40F_to_45F': float_column,
'rhu1_45F_to_50F': float_column,
'rhu1_50F_to_55F': float_column,
'rhu1_55F_to_60F': float_column,
'rhu1_less10F': float_column,
'rhu1_10F_to_20F': float_column,
'rhu1_20F_to_30F': float_column,
'rhu1_30F_to_40F': float_column,
'rhu1_40F_to_50F': float_column,
'rhu1_50F_to_60F': float_column,
'rhu1_00F_to_05F_aux_duty_cycle': float_column,
'rhu1_05F_to_10F_aux_duty_cycle': float_column,
'rhu1_10F_to_15F_aux_duty_cycle': float_column,
'rhu1_15F_to_20F_aux_duty_cycle': float_column,
'rhu1_20F_to_25F_aux_duty_cycle': float_column,
'rhu1_25F_to_30F_aux_duty_cycle': float_column,
'rhu1_30F_to_35F_aux_duty_cycle': float_column,
'rhu1_35F_to_40F_aux_duty_cycle': float_column,
'rhu1_40F_to_45F_aux_duty_cycle': float_column,
'rhu1_45F_to_50F_aux_duty_cycle': float_column,
'rhu1_50F_to_55F_aux_duty_cycle': float_column,
'rhu1_55F_to_60F_aux_duty_cycle': float_column,
'rhu1_less10F_aux_duty_cycle': float_column,
'rhu1_10F_to_20F_aux_duty_cycle': float_column,
'rhu1_20F_to_30F_aux_duty_cycle': float_column,
'rhu1_30F_to_40F_aux_duty_cycle': float_column,
'rhu1_40F_to_50F_aux_duty_cycle': float_column,
'rhu1_50F_to_60F_aux_duty_cycle': float_column,
'rhu1_00F_to_05F_emg_duty_cycle': float_column,
'rhu1_05F_to_10F_emg_duty_cycle': float_column,
'rhu1_10F_to_15F_emg_duty_cycle': float_column,
'rhu1_15F_to_20F_emg_duty_cycle': float_column,
'rhu1_20F_to_25F_emg_duty_cycle': float_column,
'rhu1_25F_to_30F_emg_duty_cycle': float_column,
'rhu1_30F_to_35F_emg_duty_cycle': float_column,
'rhu1_35F_to_40F_emg_duty_cycle': float_column,
'rhu1_40F_to_45F_emg_duty_cycle': float_column,
'rhu1_45F_to_50F_emg_duty_cycle': float_column,
'rhu1_50F_to_55F_emg_duty_cycle': float_column,
'rhu1_55F_to_60F_emg_duty_cycle': float_column,
'rhu1_less10F_emg_duty_cycle': float_column,
'rhu1_10F_to_20F_emg_duty_cycle': float_column,
'rhu1_20F_to_30F_emg_duty_cycle': float_column,
'rhu1_30F_to_40F_emg_duty_cycle': float_column,
'rhu1_40F_to_50F_emg_duty_cycle': float_column,
'rhu1_50F_to_60F_emg_duty_cycle': float_column,
'rhu1_00F_to_05F_compressor_duty_cycle': float_column,
'rhu1_05F_to_10F_compressor_duty_cycle': float_column,
'rhu1_10F_to_15F_compressor_duty_cycle': float_column,
'rhu1_15F_to_20F_compressor_duty_cycle': float_column,
'rhu1_20F_to_25F_compressor_duty_cycle': float_column,
'rhu1_25F_to_30F_compressor_duty_cycle': float_column,
'rhu1_30F_to_35F_compressor_duty_cycle': float_column,
'rhu1_35F_to_40F_compressor_duty_cycle': float_column,
'rhu1_40F_to_45F_compressor_duty_cycle': float_column,
'rhu1_45F_to_50F_compressor_duty_cycle': float_column,
'rhu1_50F_to_55F_compressor_duty_cycle': float_column,
'rhu1_55F_to_60F_compressor_duty_cycle': float_column,
'rhu1_less10F_compressor_duty_cycle': float_column,
'rhu1_10F_to_20F_compressor_duty_cycle': float_column,
'rhu1_20F_to_30F_compressor_duty_cycle': float_column,
'rhu1_30F_to_40F_compressor_duty_cycle': float_column,
'rhu1_40F_to_50F_compressor_duty_cycle': float_column,
'rhu1_50F_to_60F_compressor_duty_cycle': float_column,
'rhu2_aux_duty_cycle': float_column,
'rhu2_emg_duty_cycle': float_column,
'rhu2_compressor_duty_cycle': float_column,
'rhu2_00F_to_05F': float_column,
'rhu2_05F_to_10F': float_column,
'rhu2_10F_to_15F': float_column,
'rhu2_15F_to_20F': float_column,
'rhu2_20F_to_25F': float_column,
'rhu2_25F_to_30F': float_column,
'rhu2_30F_to_35F': float_column,
'rhu2_35F_to_40F': float_column,
'rhu2_40F_to_45F': float_column,
'rhu2_45F_to_50F': float_column,
'rhu2_50F_to_55F': float_column,
'rhu2_55F_to_60F': float_column,
'rhu2_less10F': float_column,
'rhu2_10F_to_20F': float_column,
'rhu2_20F_to_30F': float_column,
'rhu2_30F_to_40F': float_column,
'rhu2_40F_to_50F': float_column,
'rhu2_50F_to_60F': float_column,
'rhu2_00F_to_05F_aux_duty_cycle': float_column,
'rhu2_05F_to_10F_aux_duty_cycle': float_column,
'rhu2_10F_to_15F_aux_duty_cycle': float_column,
'rhu2_15F_to_20F_aux_duty_cycle': float_column,
'rhu2_20F_to_25F_aux_duty_cycle': float_column,
'rhu2_25F_to_30F_aux_duty_cycle': float_column,
'rhu2_30F_to_35F_aux_duty_cycle': float_column,
'rhu2_35F_to_40F_aux_duty_cycle': float_column,
'rhu2_40F_to_45F_aux_duty_cycle': float_column,
'rhu2_45F_to_50F_aux_duty_cycle': float_column,
'rhu2_50F_to_55F_aux_duty_cycle': float_column,
'rhu2_55F_to_60F_aux_duty_cycle': float_column,
'rhu2_less10F_aux_duty_cycle': float_column,
'rhu2_10F_to_20F_aux_duty_cycle': float_column,
'rhu2_20F_to_30F_aux_duty_cycle': float_column,
'rhu2_30F_to_40F_aux_duty_cycle': float_column,
'rhu2_40F_to_50F_aux_duty_cycle': float_column,
'rhu2_50F_to_60F_aux_duty_cycle': float_column,
'rhu2_00F_to_05F_emg_duty_cycle': float_column,
'rhu2_05F_to_10F_emg_duty_cycle': float_column,
'rhu2_10F_to_15F_emg_duty_cycle': float_column,
'rhu2_15F_to_20F_emg_duty_cycle': float_column,
'rhu2_20F_to_25F_emg_duty_cycle': float_column,
'rhu2_25F_to_30F_emg_duty_cycle': float_column,
'rhu2_30F_to_35F_emg_duty_cycle': float_column,
'rhu2_35F_to_40F_emg_duty_cycle': float_column,
'rhu2_40F_to_45F_emg_duty_cycle': float_column,
'rhu2_45F_to_50F_emg_duty_cycle': float_column,
'rhu2_50F_to_55F_emg_duty_cycle': float_column,
'rhu2_55F_to_60F_emg_duty_cycle': float_column,
'rhu2_less10F_emg_duty_cycle': float_column,
'rhu2_10F_to_20F_emg_duty_cycle': float_column,
'rhu2_20F_to_30F_emg_duty_cycle': float_column,
'rhu2_30F_to_40F_emg_duty_cycle': float_column,
'rhu2_40F_to_50F_emg_duty_cycle': float_column,
'rhu2_50F_to_60F_emg_duty_cycle': float_column,
'rhu2_00F_to_05F_compressor_duty_cycle': float_column,
'rhu2_05F_to_10F_compressor_duty_cycle': float_column,
'rhu2_10F_to_15F_compressor_duty_cycle': float_column,
'rhu2_15F_to_20F_compressor_duty_cycle': float_column,
'rhu2_20F_to_25F_compressor_duty_cycle': float_column,
'rhu2_25F_to_30F_compressor_duty_cycle': float_column,
'rhu2_30F_to_35F_compressor_duty_cycle': float_column,
'rhu2_35F_to_40F_compressor_duty_cycle': float_column,
'rhu2_40F_to_45F_compressor_duty_cycle': float_column,
'rhu2_45F_to_50F_compressor_duty_cycle': float_column,
'rhu2_50F_to_55F_compressor_duty_cycle': float_column,
'rhu2_55F_to_60F_compressor_duty_cycle': float_column,
'rhu2_less10F_compressor_duty_cycle': float_column,
'rhu2_10F_to_20F_compressor_duty_cycle': float_column,
'rhu2_20F_to_30F_compressor_duty_cycle': float_column,
'rhu2_30F_to_40F_compressor_duty_cycle': float_column,
'rhu2_40F_to_50F_compressor_duty_cycle': float_column,
'rhu2_50F_to_60F_compressor_duty_cycle': float_column,
}
    df = pd.DataFrame(data, columns=columns)
    return df
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
from pandas.testing import assert_frame_equal
from gators.feature_generation.polynomial_features import PolynomialFeatures
ks.set_option('compute.default_index_type', 'distributed-sequence')
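# Each fixture below returns (fitted PolynomialFeatures transformer, input frame, expected output);
# generated feature columns follow the '<col>__x__<col>' naming convention.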
@pytest.fixture
def data_inter():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_int16_inter():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.int16)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
).astype(np.int16)
return obj, X, X_expected
@ pytest.fixture
def data_all():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float32)
obj = PolynomialFeatures(
interaction_only=False, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4.],
[3., 4., 5., 9., 12., 15., 16., 20., 25.],
[6., 7., 8., 36., 42., 48., 49., 56., 64.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B',
'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C']
).astype(np.float32)
return obj, X, X_expected
@ pytest.fixture
def data_degree():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=False, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4., 0., 0.,
0., 0., 0., 0., 1., 2., 4., 8.],
[3., 4., 5., 9., 12., 15., 16., 20., 25., 27., 36.,
45., 48., 60., 75., 64., 80., 100., 125.],
[6., 7., 8., 36., 42., 48., 49., 56., 64., 216., 252.,
288., 294., 336., 384., 343., 392., 448., 512.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B', 'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C',
'A__x__A__x__A', 'A__x__A__x__B', 'A__x__A__x__C', 'A__x__B__x__B', 'A__x__B__x__C',
'A__x__C__x__C', 'B__x__B__x__B', 'B__x__B__x__C', 'B__x__C__x__C', 'C__x__C__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_inter_degree():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2., 0.],
[3., 4., 5., 12., 15., 20., 60.],
[6., 7., 8., 42., 48., 56., 336.]]),
columns=['A', 'B', 'C', 'A__x__B',
'A__x__C', 'B__x__C', 'A__x__B__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_subset():
X = pd.DataFrame(np.arange(12).reshape(
3, 4), columns=list('ABCD'), dtype=np.float64)
obj = PolynomialFeatures(
columns=['A', 'B', 'C'], interaction_only=True, degree=2).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 3., 0., 0., 2.],
[4., 5., 6., 7., 20., 24., 30.],
[8., 9., 10., 11., 72., 80., 90.]]),
columns=['A', 'B', 'C', 'D', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_inter_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_int16_inter_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.int16)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
).astype(np.int16)
return obj, X, X_expected
@ pytest.fixture
def data_all_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float32)
obj = PolynomialFeatures(
interaction_only=False, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4.],
[3., 4., 5., 9., 12., 15., 16., 20., 25.],
[6., 7., 8., 36., 42., 48., 49., 56., 64.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B',
'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C']
).astype(np.float32)
return obj, X, X_expected
@ pytest.fixture
def data_degree_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=False, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4., 0., 0.,
0., 0., 0., 0., 1., 2., 4., 8.],
[3., 4., 5., 9., 12., 15., 16., 20., 25., 27., 36.,
45., 48., 60., 75., 64., 80., 100., 125.],
[6., 7., 8., 36., 42., 48., 49., 56., 64., 216., 252.,
288., 294., 336., 384., 343., 392., 448., 512.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B', 'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C',
'A__x__A__x__A', 'A__x__A__x__B', 'A__x__A__x__C', 'A__x__B__x__B', 'A__x__B__x__C',
'A__x__C__x__C', 'B__x__B__x__B', 'B__x__B__x__C', 'B__x__C__x__C', 'C__x__C__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_inter_degree_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2., 0.],
[3., 4., 5., 12., 15., 20., 60.],
[6., 7., 8., 42., 48., 56., 336.]]),
columns=['A', 'B', 'C', 'A__x__B',
'A__x__C', 'B__x__C', 'A__x__B__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_subset_ks():
X = ks.DataFrame(np.arange(12).reshape(
3, 4), columns=list('ABCD'), dtype=np.float64)
obj = PolynomialFeatures(
columns=['A', 'B', 'C'], interaction_only=True, degree=2).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 3., 0., 0., 2.],
[4., 5., 6., 7., 20., 24., 30.],
[8., 9., 10., 11., 72., 80., 90.]]),
columns=['A', 'B', 'C', 'D', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
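# The tests below cover pandas inputs ('_pd'), koalas inputs ('_ks'), and the
# numpy path via transform_numpy ('_np' variants).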
def test_inter_pd(data_inter):
obj, X, X_expected = data_inter
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_inter_ks(data_inter_ks):
obj, X, X_expected = data_inter_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_inter_pd_np(data_inter):
obj, X, X_expected = data_inter
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
@pytest.mark.koalas
def test_inter_ks_np(data_inter_ks):
obj, X, X_expected = data_inter_ks
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
def test_int16_inter_pd(data_int16_inter):
obj, X, X_expected = data_int16_inter
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_int16_inter_ks(data_int16_inter_ks):
obj, X, X_expected = data_int16_inter_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_int16_inter_pd_np(data_int16_inter):
obj, X, X_expected = data_int16_inter
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
@pytest.mark.koalas
def test_int16_inter_ks_np(data_int16_inter_ks):
obj, X, X_expected = data_int16_inter_ks
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
def test_all_pd(data_all):
obj, X, X_expected = data_all
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_all_ks(data_all_ks):
obj, X, X_expected = data_all_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_all_pd_np(data_all):
obj, X, X_expected = data_all
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_all_ks_np(data_all_ks):
obj, X, X_expected = data_all_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_degree_pd(data_degree):
obj, X, X_expected = data_degree
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_degree_ks(data_degree_ks):
obj, X, X_expected = data_degree_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_degree_pd_np(data_degree):
obj, X, X_expected = data_degree
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_degree_ks_np(data_degree_ks):
obj, X, X_expected = data_degree_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
    X_new = pd.DataFrame(X_numpy_new)
    X_expected = pd.DataFrame(X_expected.values)
    assert_frame_equal(X_new, X_expected)
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import gzip
import json
import os
import random
import unittest
from collections import OrderedDict
import numpy as np
import pandas as pd
from monty.json import MontyEncoder, MontyDecoder
from pymatgen.core import yaml
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.lammps.data import (
CombinedData,
ForceField,
LammpsBox,
LammpsData,
Topology,
lattice_2_lmpbox,
structure_2_lmpdata,
)
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps")
class LammpsBoxTest(PymatgenTest):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsBox(
bounds=[
[36.840194, 64.211560],
[41.013691, 68.385058],
[29.768095, 57.139462],
]
)
cls.quartz = LammpsBox(
bounds=[[0, 4.913400], [0, 4.255129], [0, 5.405200]],
tilt=[-2.456700, 0.0, 0.0],
)
def test_volume(self):
obounds = np.array(self.peptide.bounds)
ov = np.prod(obounds[:, 1] - obounds[:, 0])
self.assertEqual(self.peptide.volume, ov)
self.assertAlmostEqual(self.quartz.volume, 113.00733165874873)
def test_get_string(self):
peptide = self.peptide.get_string(5)
peptide_5 = """36.84019 64.21156 xlo xhi
41.01369 68.38506 ylo yhi
29.76809 57.13946 zlo zhi"""
self.assertEqual(peptide, peptide_5)
quartz = self.quartz.get_string(4)
quartz_4 = """0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz"""
self.assertEqual(quartz, quartz_4)
def test_get_box_shift(self):
peptide = self.peptide
self.assertEqual(peptide.get_box_shift([1, 0, 0])[0], 64.211560 - 36.840194)
self.assertEqual(peptide.get_box_shift([0, 0, -1])[-1], 29.768095 - 57.139462)
quartz = self.quartz
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 0, 1]), [0, 0, 5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 1, -1]), [-2.4567, 4.2551, -5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([1, -1, 0]), [4.9134 + 2.4567, -4.2551, 0], 4)
def test_to_lattice(self):
peptide = self.peptide.to_lattice()
np.testing.assert_array_almost_equal(peptide.abc, [27.371367] * 3)
self.assertTrue(peptide.is_orthogonal)
quartz = self.quartz.to_lattice()
np.testing.assert_array_almost_equal(
quartz.matrix,
[[4.913400, 0, 0], [-2.456700, 4.255129, 0], [0, 0, 5.405200]],
)
class LammpsDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsData.from_file(filename=os.path.join(test_dir, "data.peptide"))
cls.ethane = LammpsData.from_file(filename=os.path.join(test_dir, "ethane.data"))
cls.quartz = LammpsData.from_file(filename=os.path.join(test_dir, "data.quartz"), atom_style="atomic")
cls.virus = LammpsData.from_file(filename=os.path.join(test_dir, "virus.data"), atom_style="angle")
cls.tatb = LammpsData.from_file(
filename=os.path.join(test_dir, "tatb.data"),
atom_style="charge",
sort_id=True,
)
def test_structure(self):
quartz = self.quartz.structure
np.testing.assert_array_almost_equal(
quartz.lattice.matrix,
[[4.913400, 0, 0], [-2.456700, 4.255129, 0], [0, 0, 5.405200]],
)
self.assertEqual(quartz.formula, "Si3 O6")
self.assertNotIn("molecule-ID", self.quartz.atoms.columns)
ethane = self.ethane.structure
np.testing.assert_array_almost_equal(ethane.lattice.matrix, np.diag([10.0] * 3))
lbounds = np.array(self.ethane.box.bounds)[:, 0]
coords = self.ethane.atoms[["x", "y", "z"]].values - lbounds
np.testing.assert_array_almost_equal(ethane.cart_coords, coords)
np.testing.assert_array_almost_equal(ethane.site_properties["charge"], self.ethane.atoms["q"])
tatb = self.tatb.structure
frac_coords = tatb.frac_coords[381]
real_frac_coords = frac_coords - np.floor(frac_coords)
np.testing.assert_array_almost_equal(real_frac_coords, [0.01553397, 0.71487872, 0.14134139])
co = Structure.from_spacegroup(194, Lattice.hexagonal(2.50078, 4.03333), ["Co"], [[1 / 3, 2 / 3, 1 / 4]])
ld_co = LammpsData.from_structure(co)
self.assertEqual(ld_co.structure.composition.reduced_formula, "Co")
ni = Structure.from_spacegroup(225, Lattice.cubic(3.50804), ["Ni"], [[0, 0, 0]])
ld_ni = LammpsData.from_structure(ni)
self.assertEqual(ld_ni.structure.composition.reduced_formula, "Ni")
def test_sort_structure(self):
s = Structure(Lattice.cubic(4), ["S", "Fe"], [[0, 0, 0], [0.5, 0.5, 0.5]])
lmp = LammpsData.from_structure(s, is_sort=False)
lmp.write_file("test1.data")
lmp2 = LammpsData.from_file("test1.data", atom_style="charge")
# internally element:type will be {Fe: 1, S: 2},
# therefore without sorting the atom types in structure
# will be [2, 1], i.e., (S, Fe)
self.assertListEqual(lmp2.atoms["type"].values.tolist(), [2, 1])
# with sorting the atom types in structures will be [1, 2]
lmp = LammpsData.from_structure(s, is_sort=True)
lmp.write_file("test1.data")
lmp2 = LammpsData.from_file("test1.data", atom_style="charge")
self.assertListEqual(lmp2.atoms["type"].values.tolist(), [1, 2])
def test_get_string(self):
pep = self.peptide.get_string(distance=7, velocity=5, charge=4)
pep_lines = pep.split("\n")
pep_kws = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"Atoms",
"Velocities",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
kw_inds = {l: i for i, l in enumerate(pep_lines) if l in pep_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)], pep_kws)
# header
pep_header = "\n".join(pep_lines[: kw_inds["Masses"]])
pep_header_7 = """Generated by pymatgen.io.lammps.data.LammpsData
2004 atoms
1365 bonds
786 angles
207 dihedrals
12 impropers
14 atom types
18 bond types
31 angle types
21 dihedral types
2 improper types
36.8401940 64.2115600 xlo xhi
41.0136910 68.3850580 ylo yhi
29.7680950 57.1394620 zlo zhi
"""
self.assertEqual(pep_header, pep_header_7)
# int vs float for coeffs
pep_dihedral_coeff = pep_lines[kw_inds["Dihedral Coeffs"] + 2]
self.assertEqual(pep_dihedral_coeff, "1 0.200 1 180 1.0")
# distance and charge
pep_atom = pep_lines[kw_inds["Atoms"] + 2]
self.assertEqual(
pep_atom,
"1 1 1 0.5100 43.9999300 58.5267800 36.7855000 0 0 0",
)
# velocity
pep_velo = pep_lines[kw_inds["Velocities"] + 2]
self.assertEqual(pep_velo, "1 -0.00067 -0.00282 0.00383")
# no floats in topology sections
pep_topos = "\n".join(pep_lines[kw_inds["Bonds"] :])
self.assertNotIn(".", pep_topos)
c2h6 = self.ethane.get_string(distance=5, charge=3)
c2h6_lines = c2h6.split("\n")
c2h6_kws = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"BondBond Coeffs",
"BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
"AngleAngle Coeffs",
"Atoms",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
kw_inds = {l: i for i, l in enumerate(c2h6_lines) if l in c2h6_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)], c2h6_kws)
# header
c2h6_header = "\n".join(c2h6_lines[: kw_inds["Masses"]])
c2h6_header_5 = """Generated by pymatgen.io.lammps.data.LammpsData
8 atoms
7 bonds
12 angles
9 dihedrals
8 impropers
2 atom types
2 bond types
2 angle types
1 dihedral types
2 improper types
0.21455 10.21454 xlo xhi
0.11418 10.11418 ylo yhi
-10.00014 -0.00015 zlo zhi
"""
self.assertEqual(c2h6_header, c2h6_header_5)
# distance and charge
c2h6_atom = c2h6_lines[kw_inds["Atoms"] + 2]
self.assertEqual(c2h6_atom, "1 1 1 -0.080 4.46291 5.14833 -5.00041" " 0 0 0")
# no floats in topology sections
c2h6_topos = "\n".join(c2h6_lines[kw_inds["Bonds"] :])
self.assertNotIn(".", c2h6_topos)
quartz = self.quartz.get_string(distance=4)
quartz_lines = quartz.split("\n")
quartz_kws = ["Masses", "Atoms"]
kw_inds = {l: i for i, l in enumerate(quartz_lines) if l in quartz_kws}
# header
quartz_header = "\n".join(quartz_lines[: kw_inds["Masses"]])
quartz_header_4 = """Generated by pymatgen.io.lammps.data.LammpsData
9 atoms
2 atom types
0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz
"""
self.assertEqual(quartz_header, quartz_header_4)
# distance
quartz_atom = quartz_lines[kw_inds["Atoms"] + 2]
self.assertEqual(quartz_atom, "1 1 2.3088 0.0000 3.6035")
virus = self.virus.get_string()
virus_lines = virus.split("\n")
pairij_coeff = virus_lines[virus_lines.index("PairIJ Coeffs") + 5]
self.assertEqual(pairij_coeff.strip().split(), ["1", "4", "1", "1.000", "1.12250"])
def test_write_file(self):
filename1 = "test1.data"
self.ethane.write_file(filename=filename1)
c2h6 = LammpsData.from_file(filename1)
pd.testing.assert_frame_equal(c2h6.masses, self.ethane.masses)
pd.testing.assert_frame_equal(c2h6.atoms, self.ethane.atoms)
ff_kw = random.sample(self.ethane.force_field.keys(), 1)[0]
pd.testing.assert_frame_equal(c2h6.force_field[ff_kw], self.ethane.force_field[ff_kw], ff_kw)
topo_kw = random.sample(self.ethane.topology.keys(), 1)[0]
pd.testing.assert_frame_equal(c2h6.topology[topo_kw], self.ethane.topology[topo_kw], topo_kw)
filename2 = "test2.data"
self.virus.write_file(filename=filename2)
v = LammpsData.from_file(filename2, atom_style="angle")
        pd.testing.assert_frame_equal(v.force_field["PairIJ Coeffs"], self.virus.force_field["PairIJ Coeffs"])
import os
import re
from urllib import request
import numpy as np
import pandas as pd
import altair as alt
data_folder = (os.path.join(os.path.dirname(__file__), 'data_files')
if '__file__' in locals() else 'data_files')
COL_REGION = 'Country/Region'
pd.set_option('display.max_colwidth', 300)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 10000)
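# When True, downloaded JHU time series are also cached locally (transposed) under data_files/covid_jhu/.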
SAVE_JHU_DATA = False
class SourceData:
df_mappings = pd.read_csv(os.path.join(data_folder, 'mapping_countries.csv'))
mappings = {'replace.country': dict(df_mappings.dropna(subset=['Name'])
.set_index('Country')['Name']),
'map.continent': dict(df_mappings.set_index('Name')['Continent'])
}
@classmethod
def _cache_csv_path(cls, name):
return os.path.join(data_folder, f'covid_jhu/{name}_transposed.csv')
@classmethod
def _save_covid_df(cls, df, name):
df.T.to_csv(cls._cache_csv_path(name))
@classmethod
def _load_covid_df(cls, name):
df = pd.read_csv(cls._cache_csv_path(name), index_col=0).T
df[df.columns[2:]] = df[df.columns[2:]].apply(pd.to_numeric, errors='coerce')
return df
@classmethod
def _download_covid_df(cls, name):
url = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
f'csse_covid_19_time_series/time_series_covid19_{name}_global.csv')
df = pd.read_csv(url)
return df
@classmethod
def get_covid_dataframe(cls, name):
df = cls._download_covid_df(name)
if SAVE_JHU_DATA:
cls._save_covid_df(df, name)
# rename countries
df[COL_REGION] = df[COL_REGION].replace(cls.mappings['replace.country'])
return df
@staticmethod
def get_dates(df):
return df.columns[~df.columns.isin(['Province/State', COL_REGION, 'Lat', 'Long'])]
class AgeAdjustedData:
# https://population.un.org/wpp/Download/Standard/Population/
# https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/1_Population/WPP2019_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.xlsx
csv_path = os.path.join(data_folder, 'world_pop_age_2020.csv')
class Cols:
# o = original
o4 = '0-4'
o9 = '5-9'
o14 = '10-14'
o19 = '15-19'
o24 = '20-24'
o29 = '25-29'
o34 = '30-34'
o39 = '35-39'
o44 = '40-44'
o49 = '45-49'
o54 = '50-54'
o59 = '55-59'
o64 = '60-64'
o69 = '65-69'
o74 = '70-74'
o79 = '75-79'
o84 = '80-84'
o89 = '85-89'
o94 = '90-94'
o99 = '95-99'
o100p = '100+'
# https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3590771
# ny = new york
ny17 = 'ny17' # 0-17
ny44 = 'ny44' # 18-44
ny64 = 'ny64' # 45-64
ny74 = 'ny74' # 65-74
ny75p = 'ny75p' # 75+
@classmethod
def load(cls):
df_raw = pd.read_csv(cls.csv_path)
df_filt = df_raw[df_raw['Type'].isin(['Subregion', 'Country/Area'])]
df_filt = (df_filt
.drop(columns=['Index', 'Variant', 'Notes', 'Country code', 'Parent code',
'Reference date (as of 1 July)', 'Type'])
.rename(columns={'Region, subregion, country or area *': COL_REGION}))
# adjust country names
df_filt[COL_REGION] = df_filt[COL_REGION].map({
'United States of America': 'US',
'China, Taiwan Province of China': 'Taiwan*',
'United Republic of Tanzania': 'Tanzania',
'Iran (Islamic Republic of)': 'Iran',
'Republic of Korea': 'South Korea',
'Bolivia (Plurinational State of)': 'Bolivia',
'Venezuela (Bolivarian Republic of)': 'Venezuela',
'Republic of Moldova': 'Moldova',
'Russian Federation': 'Russia',
'State of Palestine': 'West Bank and Gaza',
'Côte d\'Ivoire': 'Cote d\'Ivoire',
'Democratic Republic of the Congo': 'Congo (Kinshasa)',
'Congo': 'Congo (Brazzaville)',
'Syrian Arab Republic': 'Syria',
'Myanmar': 'Burma',
'Viet Nam': 'Vietnam',
'Brunei Darussalam': 'Brunei',
'Lao People\'s Democratic Republic': 'Laos'
}).fillna(df_filt[COL_REGION])
df_num = df_filt.set_index(COL_REGION)
# convert to numbers
df_num = df_num.apply(lambda s:
pd.Series(s)
.str.replace(' ', '')
.apply(pd.to_numeric, errors='coerce'))
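        # WPP population counts are reported in thousands of persons, hence the factor of 1000.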
population_s = df_num.sum(1) * 1000
# convert to ratios
df_pct = (df_num.T / df_num.sum(1)).T
        # calculate NY bucket percentages
cols = cls.Cols
df_pct[cols.ny17] = df_pct[[cols.o4, cols.o9,
cols.o14, cols.o19]].sum(1)
df_pct[cols.ny44] = df_pct[[cols.o24, cols.o29,
cols.o34, cols.o39,
cols.o44]].sum(1)
df_pct[cols.ny64] = df_pct[[cols.o49,
cols.o54, cols.o59,
cols.o64]].sum(1)
df_pct[cols.ny74] = df_pct[[cols.o69, cols.o74]].sum(1)
df_pct[cols.ny75p] = df_pct[[cols.o79,
cols.o84, cols.o89,
cols.o94, cols.o99,
cols.o100p]].sum(1)
# check: df_pct[[cols.ny17, cols.ny44, cols.ny64, cols.ny74, cols.ny75p]].sum(1)
# calculate IFR
# https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3590771
# Table 1
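        # IFR per age bucket (0-17, 18-44, 45-64, 65-74, 75+), weighted by each country's population shares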
ifr_s = pd.Series(np.dot(df_pct
[[cols.ny17, cols.ny44, cols.ny64, cols.ny74, cols.ny75p]],
[0.00002, 0.00087, 0.00822, 0.02626, 0.07137]),
index=df_pct.index)
## icu need estimation
## https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf
## 4.4% serious symptomatic cases for UK
## adjusting here by age by using IFRs ratios
## adjusting by UK's past testing bias (14) since the 4.4% figure is for reported cases
icu_percent_s = 0.044 * (ifr_s / ifr_s['United Kingdom']) / 14
return ifr_s, population_s, icu_percent_s
class ScrapedTableBase:
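    """Base class for tables scraped from a web page and cached as CSV under data_files;
    load() scrapes and saves the table on first use."""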
page = 'https://page.com/table'
file_name = 'file.csv'
@classmethod
def csv_path(cls):
return os.path.join(data_folder, cls.file_name)
@classmethod
def scrape(cls):
# !pip install beautifulsoup4
# !pip install lxml
import bs4
# read html
source = request.urlopen(cls.page).read()
soup = bs4.BeautifulSoup(source, 'lxml')
# get pandas df
table = soup.find_all('table')
return pd.read_html(str(table))[0]
@classmethod
def load(cls):
if not os.path.exists(cls.csv_path()):
cls.download()
return pd.read_csv(cls.csv_path())
@classmethod
def download(cls):
df = cls.scrape()
df.to_csv(cls.csv_path(), index=False)
class HostpitalBeds(ScrapedTableBase):
file_name = 'hospital_beds.csv'
page = 'https://en.wikipedia.org/wiki/List_of_countries_by_hospital_beds'
@classmethod
def download(cls):
df_wiki = cls.scrape()
        # clean up df_wiki
df_wiki = df_wiki.droplevel([0, 1], axis=1)
rename_map = {'Country/territory': 'country',
'ICU-CCB beds/100,000 inhabitants': 'icu_per_100k',
df_wiki.columns[df_wiki.columns.str.startswith('Occupancy')][0]: 'occupancy',
'2017': 'beds_per_1000_2017',
}
df_clean = df_wiki.rename(rename_map, axis=1)[rename_map.values()]
df_clean['icu_per_100k'] = pd.to_numeric(df_clean['icu_per_100k'].str
.replace(r'\[\d*\]', ''))
# load df for asian countries
# file manually created from
# https://www.researchgate.net/publication/338520008_Critical_Care_Bed_Capacity_in_Asian_Countries_and_Regions
df_asia = pd.read_csv(os.path.join(data_folder, 'ccb_asian_countries.csv'))
df_clean = pd.concat([df_clean,
df_asia[~df_asia['country'].isin(df_clean['country'])]])
df_clean.to_csv(cls.csv_path(), index=False)
class EmojiFlags(ScrapedTableBase):
file_name = 'emoji_flags.csv'
page = 'https://apps.timwhitlock.info/emoji/tables/iso3166'
emoji_col = 'emoji_code'
@classmethod
def download(cls):
df = cls.scrape()
df_filt = df.rename(columns={'Name': COL_REGION,
'Unicode': cls.emoji_col}
).drop(columns=['Emoji'])
# rename countries
df_filt[COL_REGION] = df_filt[COL_REGION].map({
'United States': 'US',
'Taiwan': 'Taiwan*',
'Macedonia': 'North Macedonia',
'Cape Verde': 'Cabo Verde',
'Saint Vincent and The Grenadines': 'Saint Vincent and the Grenadines',
'Palestinian Territory': 'West Bank and Gaza',
'Côte D\'Ivoire': 'Cote d\'Ivoire',
'Syrian Arab Republic': 'Syria',
'Myanmar': 'Burma',
'Viet Nam': 'Vietnam',
'Brunei Darussalam': 'Brunei',
'Lao People\'s Democratic Republic': 'Laos',
'Czech Republic': 'Czechia',
}).fillna(df_filt[COL_REGION])
# congo
df_filt.loc[df_filt['ISO'] == 'CD', COL_REGION] = 'Congo (Kinshasa)'
df_filt.loc[df_filt['ISO'] == 'CG', COL_REGION] = 'Congo (Brazzaville)'
# convert emoji hex codes to decimal
df_filt[cls.emoji_col] = df_filt[cls.emoji_col].apply(
lambda s: ''.join(f'&#{int(hex, 16)};'
for hex in re.findall(r'U\+(\S+)', s)))
df_filt.to_csv(cls.csv_path(), index=False)
class CovidData:
COL_REGION = COL_REGION
ABS_COLS = ['Cases.total', 'Deaths.total', 'Cases.new', 'Deaths.new']
PER_100K_COLS = [f'{c}.per100k' for c in ABS_COLS]
CASES_COLS = ABS_COLS[::2] + PER_100K_COLS[::2]
EST_COLS = [f'{c}.est' for c in CASES_COLS]
dft_cases = SourceData.get_covid_dataframe('confirmed')
dft_deaths = SourceData.get_covid_dataframe('deaths')
dft_recovered = SourceData.get_covid_dataframe('recovered')
dt_cols_all = SourceData.get_dates(dft_cases)
    cur_date = pd.to_datetime(dt_cols_all[-1])
import tempfile
import shutil
from typing import List, Optional
from pathlib import Path
import numpy as np
import pandas as pd
from xhtml2pdf import pisa
from xhtml2pdf.config.httpconfig import httpConfig
from quara.data_analysis import (
physicality_violation_check,
data_analysis,
)
from quara.data_analysis import computation_time as ctime
from quara.objects.state import State
from quara.objects.povm import Povm
from quara.objects.gate import Gate
from quara.objects.mprocess import MProcess
from quara.protocol.qtomography.estimator import EstimationResult
from quara.simulation import standard_qtomography_simulation_check
from quara.simulation.standard_qtomography_simulation import (
SimulationResult,
StandardQTomographySimulationSetting,
load_simulation_results,
)
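# Working directory for intermediate figure PNGs; assumed to be pointed at a
# temporary directory elsewhere in this module before any figures are written.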
_temp_dir_path = ""
_css = f"""
body {{color: #666666;}}
h1 {{
line-height: 100%;
border-top: 2px #dcdcdc solid;
padding: 20px 0 0 0;
font-size: 25px;}}
h2 {{font-size: 20px;
line-height:90%;
padding: 5px 0 5px 0;
margin: 10px 0 0 0;}}
h3 {{font-size: 15px;
color: #618CBC;
line-height:90%;
padding: 5px 0 5px 0;
margin: 2px 0 0 0;}}
h4 {{color:#EB9348;
font-size: 15px;
-pdf-outline: false;
line-height:90%;
padding: 5px 0 5px 0;
margin: 0 0 0 0;
}}
h5 {{color:#666666;
font-size: 13px;
-pdf-outline: false;
padding: 0 0 0 0;
margin: 0 0 0 0;
line-height:90%;
vertical-align: text-bottom;}}
h6 {{color:#666666;
font-size: 13px;
font-style:italic;
-pdf-outline: false;
padding: 0 0 0 0;
margin: 0 0 0 0;}}
#footer_content {{text-align: right;}}
"""
_table_css = """
table{
border: solid 1px #d3d3d3;
border-collapse: collapse;
border-spacing: 0;
table-layout: fixed;
width:100%;
}
table tr{
border: solid 1px #d3d3d3;
}
table th{
text-align: right;
background-color: #666666;
color: #ffffff;
border: solid 1px #ffffff;
font-size: 13px;
width: 100px;
padding-top: 3px;
padding-right: 3px;
}
table td{
text-align: right;
font-size: 13px;
padding-top: 3px;
padding-right: 3px;
width: 400px;
word-break: break-all;
}
.comp_time_table {
width: 380px;
}
.comp_time_table th{
width: 50px;
}
"""
_table_contents_css = """
pdftoc {
color: #666;
}
pdftoc.pdftoclevel0 {
font-weight: bold;
margin-top: 0.5em;
}
pdftoc.pdftoclevel1 {
margin-left: 1em;
}
pdftoc.pdftoclevel2 {
margin-left: 2em;
}
pdftoc.pdftoclevel3 {
margin-left: 3em;
font-style: italic;
}
"""
_inline_block_css = """
.box{
display: inline-block;
width: 400px;
}
.box_col2{
display: inline-block;
width: 400px;
padding: 0;
}
.box_col3{
display: inline-block;
width: 250px;
padding: 0;
}
.box_col4{
display: inline-block;
width: 190px;
padding: 0;
}
.div_line{
padding: 0 0 0 0;
margin: 0 0 35px 0;
}
"""
_col2_fig_width = 500
_col2_fig_height = 400
def _convert_html2pdf(source_html: str, output_path: str):
httpConfig.save_keys("nosslcheck", True)
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
with open(output_path, "w+b") as f:
pisa_status = pisa.CreatePDF(source_html, dest=f)
return pisa_status.err
def _save_fig_to_tmp_dir(fig: "Figure", fig_name: str) -> str:
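    """Save the figure as <fig_name>.png inside _temp_dir_path and return the file path."""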
dir_path = Path(_temp_dir_path)
path = str(dir_path / f"{fig_name}.png")
dir_path.mkdir(exist_ok=True)
fig.write_image(path)
return path
def _make_graph_trace_seq(
estimation_results: List["EstimationResult"], num_data: List[int], case_id: int
) -> list:
fig_info_list = []
for i, num in enumerate(num_data):
fig = physicality_violation_check.make_graph_trace(
estimation_results, num_data_index=i, num_data=num_data
)
fig_name = f"case={case_id}_trace_num={num}_0"
fig.update_layout(width=_col2_fig_width, height=_col2_fig_height)
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info_list.append(dict(image_path=path, fig=fig, fig_name=fig_name))
return fig_info_list
def _generate_trace_div(fig_info_list: List[dict]) -> str:
col_n = len(fig_info_list) if len(fig_info_list) <= 4 else 4
css_class = f"box_col{col_n}"
div_lines = []
div_line = ""
for i, fig_info in enumerate(fig_info_list):
div_line += f"<div class='{css_class}'><img src={fig_info['image_path']}></div>"
if i % col_n == col_n - 1:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
div_line = ""
else:
if div_line:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
graph_block_html = f"<div class='div_line'>{''.join(div_lines)}</div>"
return graph_block_html
def generate_trace_div(
estimation_results: List["EstimationResult"], num_data: List[int], case_id: int
):
fig_info_list = _make_graph_trace_seq(
estimation_results, num_data=num_data, case_id=case_id
)
div_html = _generate_trace_div(fig_info_list)
return div_html
def _make_graph_sum_vecs_seq(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
true_object: Povm,
) -> List[List["Figure"]]:
fig_info_list_list = []
for num_data_index, num in enumerate(num_data):
figs = physicality_violation_check.make_graphs_sum_vecs(
estimation_results,
true_object,
num_data=num_data,
num_data_index=num_data_index,
)
fig_info_list = []
for alpha, fig in enumerate(figs):
fig_name = f"case={case_id}_trace_num={num}_alpha={alpha}"
fig.update_layout(width=_col2_fig_width, height=_col2_fig_height)
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info_list.append(
dict(image_path=path, fig=fig, fig_name=fig_name, num=num, alpha=alpha)
)
fig_info_list_list.append(fig_info_list)
return fig_info_list_list
def _generate_fig_info_list_list_div(
fig_info_list_list: List[List[dict]], col_n=2
) -> str:
graph_block_html_all = ""
css_class = "box" if col_n <= 2 else "box_col4"
for fig_info_list in fig_info_list_list: # num
num = fig_info_list[0]["num"]
graph_block_html = f"<h5>N={num}</h5>"
div_lines = []
div_line = ""
for i, fig_info in enumerate(fig_info_list): # alpha
div_line += (
f"<div class='{css_class}'><img src={fig_info['image_path']}></div>"
)
if i % col_n == col_n - 1:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
div_line = ""
else:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
graph_block_html_all += graph_block_html + "".join(div_lines)
return graph_block_html_all
def generate_sum_vecs_div(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
true_object: Povm,
col_n: int,
):
fig_info_list_list = _make_graph_sum_vecs_seq(
estimation_results, num_data=num_data, case_id=case_id, true_object=true_object
)
div_html = _generate_fig_info_list_list_div(fig_info_list_list, col_n=col_n)
return div_html
def _generate_graph_eigenvalues_seq(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
true_object: "QOperation",
bin_size: float = 0.0001,
) -> list:
fig_info_list_list = []
for num_data_index in range(len(num_data)):
fig_list = physicality_violation_check.make_graphs_eigenvalues(
estimation_results,
true_object,
num_data=num_data,
num_data_index=num_data_index,
bin_size=bin_size,
)
fig_info_list = []
num = num_data[num_data_index]
for i, fig in enumerate(fig_list):
fig_name = f"case={case_id}_eigenvalues_num={num_data_index}_i={i}"
fig.update_layout(width=_col2_fig_width, height=_col2_fig_height)
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info_list.append(
dict(image_path=path, fig=fig, fig_name=fig_name, num=num)
)
fig_info_list_list.append(fig_info_list)
return fig_info_list_list
def _generate_eigenvalues_div(
fig_info_list_list: List[List[dict]], col_n: int = 2
) -> str:
graph_block_html_all = ""
for fig_info_list in fig_info_list_list:
num = fig_info_list[0]["num"]
graph_block_html = f"<h5>N={num}</h5>"
        graph_block_html += _generate_figs_div(fig_info_list, col_n=col_n)
graph_block_html_all += graph_block_html
return graph_block_html_all
def _generate_eigenvalues_div_3loop(
fig_info_list3: List[List[List[dict]]], col_n: int = None
) -> str:
graph_block_html_all = ""
fig_n = fig_info_list3[0][0]
if col_n is None:
col_n = len(fig_n) if len(fig_n) <= 4 else 4
css_class = f"box_col{col_n}"
for fig_info_list2 in fig_info_list3: # num_data
num = fig_info_list2[0][0]["num"]
graph_block_html = f"<h5>N={num}</h5>"
for fig_info_list in fig_info_list2: # measurement
x_i = fig_info_list[0]["x"]
div_lines = []
div_line = ""
for i, fig_info in enumerate(fig_info_list):
div_line += (
f"<div class='{css_class}'><img src={fig_info['image_path']}></div>"
)
if i % col_n == col_n - 1:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
div_line = ""
else:
if div_line:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
graph_block_html += f"<h6>x={x_i}</h6>" + "".join(div_lines)
graph_block_html_all += graph_block_html
return graph_block_html_all
def _generate_graph_eigenvalues_seq_3loop(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
true_object: "QOperation",
) -> list:
fig_info_list3 = []
for num_data_index in range(len(num_data)):
fig_list_list = physicality_violation_check.make_graphs_eigenvalues(
estimation_results,
true_object,
num_data=num_data,
num_data_index=num_data_index,
)
fig_info_list2 = []
for x_i, fig_list in enumerate(fig_list_list):
fig_info_list = []
for i, fig in enumerate(fig_list):
fig_name = (
f"case={case_id}_eigenvalues_num={num_data_index}_x={x_i}_i={i}"
)
fig.update_layout(width=_col2_fig_width, height=_col2_fig_height)
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info = dict(
image_path=path,
fig=fig,
fig_name=fig_name,
num=num_data[num_data_index],
x=x_i,
i=i,
)
fig_info_list.append(fig_info)
fig_info_list2.append(fig_info_list)
fig_info_list3.append(fig_info_list2)
return fig_info_list3
def generate_eigenvalues_div(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
true_object: "QOperation",
):
if type(true_object) == State:
fig_info_list_list = _generate_graph_eigenvalues_seq(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
vals = true_object.calc_eigenvalues()
col_n = 2 if len(vals) <= 2 else 4
div_html = _generate_eigenvalues_div(fig_info_list_list, col_n=col_n)
elif type(true_object) == Povm:
fig_info_list3 = _generate_graph_eigenvalues_seq_3loop(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
vals = true_object.calc_eigenvalues()
col_n = 2 if len(vals[0]) <= 2 else 4
div_html = _generate_eigenvalues_div_3loop(fig_info_list3, col_n=col_n)
elif type(true_object) == Gate:
fig_info_list_list = _generate_graph_eigenvalues_seq(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
v, _ = np.linalg.eig(true_object.to_choi_matrix())
col_n = 2 if len(v) <= 2 else 4
div_html = _generate_eigenvalues_div(fig_info_list_list, col_n=col_n)
elif type(true_object) == MProcess:
fig_info_list3 = _generate_graph_eigenvalues_seq_3loop(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
v, _ = np.linalg.eig(true_object.to_choi_matrix(0))
col_n = 2 if len(v) <= 2 else 4
div_html = _generate_eigenvalues_div_3loop(fig_info_list3, col_n=col_n)
else:
        error_message = f"The type of true_object must be State, Povm, Gate, or MProcess, not {type(true_object)}."
raise TypeError(error_message)
return div_html
def _generate_graph_sum_eigenvalues_seq(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
true_object,
) -> List[List[dict]]:
fig_info_list_list = []
for num_data_index in range(len(num_data)):
fig_list = physicality_violation_check.make_graphs_sum_unphysical_eigenvalues(
estimation_results,
num_data=num_data,
num_data_index=num_data_index,
)
n_unphysical = physicality_violation_check.calc_unphysical_qobjects_n(
estimation_results, num_data_index=num_data_index
)
fig_info_list = []
for i, fig in enumerate(fig_list):
fig_name = f"case={case_id}_sum-unphysical-eigenvalues_num={num_data_index}_type={i}"
fig.update_layout(width=_col2_fig_width, height=_col2_fig_height)
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info_list.append(
dict(
image_path=path,
fig=fig,
fig_name=fig_name,
num=num_data[num_data_index],
n_unphysical=n_unphysical,
)
)
fig_info_list_list.append(fig_info_list)
return fig_info_list_list
def _generate_sum_eigenvalues_div(fig_info_list_list: List[List[dict]]) -> str:
graph_block_html_all = ""
col_n = len(fig_info_list_list[0])
css_class = "box" if col_n <= 2 else "box_col4"
for fig_info_list in fig_info_list_list:
num = fig_info_list[0]["num"]
n_unphysical = fig_info_list[0]["n_unphysical"]
graph_block_html = (
f"<h5>N={num}<br>Number of unphysical estimates={n_unphysical}</h5>"
)
for fig_info in fig_info_list:
graph_subblock = (
f"<div class='{css_class}'><img src={fig_info['image_path']}></div>"
)
graph_block_html += graph_subblock
graph_block_html_all += f"<div class='div_line'>{graph_block_html}</div>"
return graph_block_html_all
def generate_sum_eigenvalues_div(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
true_object,
):
fig_info_list_list = _generate_graph_sum_eigenvalues_seq(
estimation_results, num_data=num_data, case_id=case_id, true_object=true_object
)
div_html = _generate_sum_eigenvalues_div(fig_info_list_list)
return div_html
def _calc_legend_y(num_legend):
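    # Push the legend further below the plot area as the number of legend entries grows.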
return -0.07 * num_legend - 0.1
def generate_mse_analytical_div(
estimation_results_list: List[List[EstimationResult]],
true_object: "QOperation",
estimator_list: list,
num_data: List[int],
qtomography_list,
) -> str:
figs = data_analysis.make_mses_graph_analytical(
estimation_results_list=estimation_results_list,
true_object=true_object,
estimator_list=estimator_list,
num_data=num_data,
qtomography_list=qtomography_list,
)
mse_div_list = []
for i, fig in enumerate(figs):
fig_name = f"mse_analytical_{i}"
fig.update_layout(width=600, height=600)
num_legend = len(fig.data)
legend_y = _calc_legend_y(num_legend)
fig.update_layout(
legend=dict(yanchor="bottom", y=legend_y, xanchor="left", x=0)
)
path = _save_fig_to_tmp_dir(fig, fig_name)
if i % 2 == 0:
mse_div_list.append("<div>")
mse_div = f"<div class='box'><img src='{path}'></div>"
mse_div_list.append(mse_div)
if i % 2 == 1 or i + 1 == len(figs):
mse_div_list.append("</div>")
return "".join(mse_div_list)
def generate_empi_dist_mse_div(
simulation_result: SimulationResult,
true_object: "QOperation",
) -> str:
# fig = data_analysis.make_empi_dists_mse_graph(
# estimation_results_list[0], true_object
# )
fig = data_analysis.make_empi_dists_mse_graph(simulation_result, true_object)
    fig_name = "empi_dists_mse"
path = _save_fig_to_tmp_dir(fig, fig_name)
div = f"<img src='{path}'>"
return div
def _convert_object_to_datafrane(qoperation: "QOperation") -> pd.DataFrame:
values = []
max_line_width = 120
for value in qoperation._info().values():
if type(value) == list and type(value[0]) == np.ndarray:
lines_list = []
for a_array in value:
lines_list.append(np.array_str(a_array, max_line_width=max_line_width))
text = "\n,\n".join(lines_list).replace("\n", "<br>")
elif type(value) == np.ndarray:
text = np.array_str(value, max_line_width=max_line_width).replace(
"\n", "<br>"
)
else:
text = value.__str__().replace("\n", "<br>")
values.append(text)
item_names = qoperation._info().keys()
df = pd.DataFrame(values, item_names).rename(columns={0: "value"})
return df
def _convert_objects_to_multiindex_dataframe(
qoperations: List["QOperation"],
) -> pd.DataFrame:
df_dict = {}
for i in range(len(qoperations)):
df_dict[i] = _convert_object_to_datafrane(qoperations[i])
objects_df_multiindex = pd.concat(df_dict, axis=0)
return objects_df_multiindex
def _generate_physicality_violation_test_div_for_state(
estimation_results_list: List[List["EstimationResult"]],
num_data: List[int],
case_name_list: List[str],
true_object: State,
):
test_eq_const_divs = ""
test_ineq_const_eigenvalues_divs = ""
test_ineq_const_sum_eigenvalues_divs = ""
for case_id, case_name in enumerate(case_name_list):
estimation_results = estimation_results_list[case_id]
# Test of equality constraint violation
div = generate_trace_div(estimation_results, num_data=num_data, case_id=case_id)
# <h5> is dummy
test_eq_const_divs += f"""
<h4>Case {case_id}: {case_name}<h4>
<h5></h5>
{div}
"""
# Test of inequality constraint violation
div = generate_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}<h4>
{div}
"""
div = generate_sum_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_sum_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}<h4>
{div}
"""
eq_all_div = f"""
<h2>Test of equality constraint violation</h2>
{test_eq_const_divs}
"""
ineq_all_div = f"""
<h2>Test of inequality constraint violation</h2>
<h3>Eigenvalue</h3>
{test_ineq_const_eigenvalues_divs}
<h3>Sum of unphysical eigenvalues </h3>
{test_ineq_const_sum_eigenvalues_divs}
"""
return eq_all_div, ineq_all_div
def _generate_physicality_violation_test_div_for_povm(
estimation_results_list: List[List["EstimationResult"]],
num_data: List[int],
case_name_list: List[str],
true_object: Povm,
):
test_eq_const_divs = ""
test_ineq_const_eigenvalues_divs = ""
test_ineq_const_sum_eigenvalues_divs = ""
for case_id, case_name in enumerate(case_name_list):
estimation_results = estimation_results_list[case_id]
# Test of equality constraint violation
div = generate_sum_vecs_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
col_n=4,
)
# <h5> is dummy
test_eq_const_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
<h5></h5>
{div}
"""
# Test of inequality constraint violation
div = generate_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
{div}
"""
div = generate_sum_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_sum_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
{div}
"""
eq_all_div = f"""
<h2>Test of equality constraint violation</h2>
{test_eq_const_divs}
"""
ineq_all_div = f"""
<h2>Test of inequality constraint violation</h2>
<h3>Eigenvalue</h3>
{test_ineq_const_eigenvalues_divs}
<h3>Sum of unphysical eigenvalues </h3>
{test_ineq_const_sum_eigenvalues_divs}
"""
return eq_all_div, ineq_all_div
def _generate_physicality_violation_test_div_for_gate(
estimation_results_list: List[List["EstimationResult"]],
num_data: List[int],
case_name_list: List[str],
true_object: State,
):
test_eq_const_divs = ""
test_eq_const_error_sum_divs = ""
test_ineq_const_eigenvalues_divs = ""
test_ineq_const_sum_eigenvalues_divs = ""
for case_id, case_name in enumerate(case_name_list):
estimation_results = estimation_results_list[case_id]
# Test of equality constraint violation
div = generate_fig_list_list_div(
estimation_results=estimation_results,
case_id=case_id,
fig_type="physicality-violation-eq-trace-error",
make_graphs_func=physicality_violation_check.make_graphs_trace_error,
col_n=4,
num_data=num_data,
)
# <h5> is dummy
test_eq_const_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
<h5></h5>
{div}
"""
num_data_len = len(num_data)
col_n = num_data_len if num_data_len <= 4 else 4
div = generate_figs_div(
func=_make_fig_info_list,
estimation_results=estimation_results,
case_id=case_id,
fig_type="physicality-violation-eq-trace-sum-error",
size=(_col2_fig_width, _col2_fig_height),
make_graphs_func=physicality_violation_check.make_graphs_trace_error_sum,
col_n=col_n,
num_data=num_data,
)
test_eq_const_error_sum_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
<h5></h5>
{div}
"""
# Test of inequality constraint violation
div = generate_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
{div}
"""
div = generate_sum_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_sum_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
{div}
"""
eq_all_div = f"""
<h2>Test of equality constraint violation</h2>
<h3>Error</h3>
{test_eq_const_divs}
<h3>Root Squared of Error</h3>
{test_eq_const_error_sum_divs}
"""
ineq_all_div = f"""
<h2>Test of inequality constraint violation</h2>
<h3>Eigenvalue</h3>
{test_ineq_const_eigenvalues_divs}
<h3>Sum of unphysical eigenvalues </h3>
{test_ineq_const_sum_eigenvalues_divs}
"""
return eq_all_div, ineq_all_div
def _generate_physicality_violation_test_div_for_mprocess(
estimation_results_list: List[List["EstimationResult"]],
num_data: List[int],
case_name_list: List[str],
true_object: State,
):
test_eq_const_divs = ""
test_eq_const_error_sum_divs = ""
test_ineq_const_eigenvalues_divs = ""
test_ineq_const_sum_eigenvalues_divs = ""
for case_id, case_name in enumerate(case_name_list):
estimation_results = estimation_results_list[case_id]
# Test of equality constraint violation
div = generate_fig_list_list_div(
estimation_results=estimation_results,
case_id=case_id,
fig_type="physicality-violation-eq-trace-error",
make_graphs_func=physicality_violation_check.make_graphs_trace_error,
col_n=4,
num_data=num_data,
)
# <h5> is dummy
test_eq_const_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
<h5></h5>
{div}
"""
num_data_len = len(num_data)
col_n = num_data_len if num_data_len <= 4 else 4
div = generate_figs_div(
func=_make_fig_info_list,
estimation_results=estimation_results,
case_id=case_id,
fig_type="physicality-violation-eq-trace-sum-error",
size=(_col2_fig_width, _col2_fig_height),
make_graphs_func=physicality_violation_check.make_graphs_trace_error_sum,
col_n=col_n,
num_data=num_data,
)
test_eq_const_error_sum_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
<h5></h5>
{div}
"""
# Test of inequality constraint violation
div = generate_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
{div}
"""
div = generate_sum_eigenvalues_div(
estimation_results,
num_data=num_data,
case_id=case_id,
true_object=true_object,
)
test_ineq_const_sum_eigenvalues_divs += f"""
<h4>Case {case_id}: {case_name}</h4>
{div}
"""
eq_all_div = f"""
<h2>Test of equality constraint violation</h2>
<h3>Error</h3>
{test_eq_const_divs}
<h3>Root Squared of Error</h3>
{test_eq_const_error_sum_divs}
"""
ineq_all_div = f"""
<h2>Test of inequality constraint violation</h2>
<h3>Eigenvalue</h3>
{test_ineq_const_eigenvalues_divs}
<h3>Sum of unphysical eigenvalues </h3>
{test_ineq_const_sum_eigenvalues_divs}
"""
return eq_all_div, ineq_all_div
def generate_physicality_violation_test_div(
estimation_results_list: List[List["EstimationResult"]],
case_name_list: List[str],
true_object: "QOperation",
num_data: List[int],
):
if type(true_object) == State:
(
true_all_div,
false_all_div,
) = _generate_physicality_violation_test_div_for_state(
estimation_results_list, num_data, case_name_list, true_object
)
elif type(true_object) == Povm:
(
true_all_div,
false_all_div,
) = _generate_physicality_violation_test_div_for_povm(
estimation_results_list, num_data, case_name_list, true_object
)
elif type(true_object) == Gate:
(
true_all_div,
false_all_div,
) = _generate_physicality_violation_test_div_for_gate(
estimation_results_list, num_data, case_name_list, true_object
)
elif type(true_object) == MProcess:
(
true_all_div,
false_all_div,
) = _generate_physicality_violation_test_div_for_mprocess(
estimation_results_list, num_data, case_name_list, true_object
)
else:
message = f"true_object must be State, Povm, Gate, or MProcess, not {type(true_object)}"
raise TypeError(message)
physicality_violation_test_div = f"""
{true_all_div}
{false_all_div}
"""
return physicality_violation_test_div
def generate_case_table(
case_name_list: List["str"],
qtomography_list: List["QTomography"],
estimator_list: List["Estimator"],
):
para_list = [qtomo.on_para_eq_constraint for qtomo in qtomography_list]
case_dict = dict(
Name=case_name_list,
Param=para_list,
Tomography=[t.__class__.__name__ for t in qtomography_list],
Estimator=[
e.__class__.__name__.replace("Estimator", "") for e in estimator_list
],
)
case_df = pd.DataFrame(case_dict)
styles = [
dict(selector=".col0", props=[("width", "400px")]),
dict(selector=".col1", props=[("width", "180px")]),
dict(selector=".col2", props=[("width", "200px")]),
]
case_table = case_df.style.set_table_styles(styles).render()
return case_table
def generate_condition_table(
qtomography_list: List["QTomography"],
n_rep: int,
num_data: List[int],
seed: Optional[int],
) -> str:
type_tomography_values = list(
set([qt.__class__.__name__ for qt in qtomography_list])
)
info = {
"Type of tomography": type_tomography_values,
"Nrep": [n_rep],
"N": [num_data],
"RNG seed": [seed],
}
condition_df = pd.DataFrame(info).T
condition_table = condition_df.to_html(
classes="condition_table", escape=False, header=False
)
return condition_table
def generate_consistency_check_table(simulation_results: List[SimulationResult]):
check_results = []
simulation_settings = []
for sim_result in simulation_results:
check_results.append(sim_result.check_result)
simulation_settings.append(sim_result.simulation_setting)
qtomography_list = [sim_result.qtomography for sim_result in simulation_results]
result_list = []
para_list = [qtomo.on_para_eq_constraint for qtomo in qtomography_list]
if len(check_results) == len(simulation_results):
# Use the results of pre-run checks
def _extract_consistency_check_results(check_result: "CheckResult") -> dict:
for r in check_result["results"]:
if r["name"] == "Consistency":
return r["detail"]
result_list = [_extract_consistency_check_results(cr) for cr in check_results]
else:
# Execute Consistency Check
for sim_result in simulation_results:
sim_check = standard_qtomography_simulation_check.StandardQTomographySimulationCheck(
sim_result
)
result_dict = sim_check.execute_consistency_check(show_detail=False)
result_list.append(result_dict)
def _insert_white_space(text: str) -> str:
# If a character is uppercase, insert a half-width space before it.
# Before: LossMinimization
# After: Loss Minimization
# Line breaks cannot occur where there is no half-width space, so this keeps long names wrappable.
converted = text[0]
for char in text[1:]:
if char.isupper():
converted += f" {char}"
else:
converted += char
return converted
type_tomography_values = [
_insert_white_space(qt.__class__.__name__) for qt in qtomography_list
]
type_estimator_values = [
_insert_white_space(s.estimator.__class__.__name__.replace("Estimator", ""))
for s in simulation_settings
]
type_loss_values = [
_insert_white_space(s.loss.__class__.__name__) if s.loss else "None"
for s in simulation_settings
]
type_algo_values = [
_insert_white_space(s.algo.__class__.__name__) if s.algo else "None"
for s in simulation_settings
]
result_dict = {
"Name": [s.name for s in simulation_settings],
"Type of tomography": type_tomography_values,
"Param": para_list,
"Estimator": type_estimator_values,
"Loss": type_loss_values,
"Algo": type_algo_values,
"Squared Error to True": [
f"{r['squared_error_to_true']:.2e}" for r in result_list
],
"Possibly OK": [f"{'OK' if r['possibly_ok'] else 'NG'}" for r in result_list],
"To be checked": [
f"{'need debug' if r['to_be_checked'] else 'not need debug'}"
for r in result_list
],
}
styles = [
dict(selector=".col0", props=[("width", "400px"), ("font-size", "10px")]),
dict(selector=".col1", props=[("width", "250px"), ("font-size", "10px")]),
dict(selector=".col2", props=[("width", "120px"), ("font-size", "10px")]),
dict(selector=".col3", props=[("width", "200px"), ("font-size", "10px")]),
dict(selector=".col4", props=[("width", "300px"), ("font-size", "10px")]),
dict(selector=".col5", props=[("width", "300px"), ("font-size", "10px")]),
dict(selector=".col6", props=[("width", "150px"), ("font-size", "10px")]),
dict(selector=".col7", props=[("width", "150px"), ("font-size", "10px")]),
dict(selector=".col8", props=[("width", "150px"), ("font-size", "10px")]),
]
table_df = pd.DataFrame(result_dict)
consistency_check_table = table_df.style.set_table_styles(styles).render()
return consistency_check_table
def generate_computation_time_table(
estimation_results_list: List[List["EstimationResult"]],
) -> str:
total_time = 0
for results in estimation_results_list:
total_time += sum([sum(r.computation_times) for r in results])
computation_time_text = "{0}".format(total_time / 60) + "min."
info = {
"Total": [computation_time_text],
}
computation_time_table = pd.DataFrame(info).T.to_html(
classes="computation_time_table", escape=False, header=False
)
return computation_time_table
def generate_tolerance_table_div() -> str:
data = [
[
physicality_violation_check.get_eq_const_eps(True),
physicality_violation_check.get_eq_const_eps(False),
],
[physicality_violation_check.get_ineq_const_eps()] * 2,
]
first_index = "Tolerance at physicality violation test"
index = [[first_index] * 2, ["equality constraint", "inequality constraint"]]
columns = ["True", "False"]
df = pd.DataFrame(data, index=index, columns=columns)
df = df.applymap(lambda x: f"{x:.2e}")
styles = [
dict(selector=".col0", props=[("width", "100px")]),
dict(selector=".col1", props=[("width", "100px")]),
]
tolerance_table = df.style.set_table_styles(styles).render()
tolerance_table_div = f"""
<h1>Tolerance of physicality constraint violation</h1>
<div>
{tolerance_table}
</div>
"""
return tolerance_table_div
def _make_graphs_mses(make_graphs_func, mse_type: "str", **kwargs) -> list:
figs = make_graphs_func(**kwargs)
fig_info_list = []
for i, fig in enumerate(figs):
fig_name = f"mse_type={mse_type}_{i}"
fig.update_layout(width=600, height=600)
num_legend = len(fig.data)
legend_y = _calc_legend_y(num_legend)
fig.update_layout(
legend=dict(yanchor="bottom", y=legend_y, xanchor="left", x=0)
)
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info_list.append(dict(image_path=path, fig=fig, fig_name=fig_name))
return fig_info_list
def _make_fig_info_list(
make_graphs_func, fig_type: "str", case_id: int = None, size=(600, 600), **kwargs
) -> list:
arg_names = make_graphs_func.__code__.co_varnames[
: make_graphs_func.__code__.co_argcount
]
new_kwargs = {k: v for k, v in kwargs.items() if k in arg_names}
figs = make_graphs_func(**new_kwargs)
fig_info_list = []
if type(figs) != list:
figs = [figs]
for i, fig in enumerate(figs):
fig_name = f"fig_type={fig_type}_{i}"
if case_id is not None:
fig_name = f"case={case_id}_{fig_name}"
fig.update_layout(width=size[0], height=size[1])
fig.update_layout(legend=dict(yanchor="bottom", y=-0.5, xanchor="left", x=0))
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info_list.append(dict(image_path=path, fig=fig, fig_name=fig_name))
return fig_info_list
def _make_fig_info_list_list(
estimation_results: List["EstimationResult"],
num_data: List[int],
case_id: int,
fig_type: str,
make_graphs_func,
**kwargs,
) -> List[List["Figure"]]:
fig_info_list_list = []
for num_data_index, num in enumerate(num_data):
func_parameter_names = make_graphs_func.__code__.co_varnames[
: make_graphs_func.__code__.co_argcount
]
if "num_data" in func_parameter_names:
figs = make_graphs_func(
estimation_results=estimation_results,
num_data=num_data,
num_data_index=num_data_index,
**kwargs,
)
else:
figs = make_graphs_func(
estimation_results=estimation_results,
num_data_index=num_data_index,
**kwargs,
)
if type(figs) != list:
figs = [figs]
fig_info_list = []
for alpha, fig in enumerate(figs):
fig_name = f"case={case_id}_{fig_type}_num={num}_alpha={alpha}"
fig.update_layout(width=_col2_fig_width, height=_col2_fig_height)
path = _save_fig_to_tmp_dir(fig, fig_name)
fig_info_list.append(
dict(image_path=path, fig=fig, fig_name=fig_name, num=num, alpha=alpha)
)
fig_info_list_list.append(fig_info_list)
return fig_info_list_list
def _generate_figs_div(fig_info_list: List[dict], col_n: int = 2) -> str:
graph_block_html = ""
subblock_list = []
css_class = "box" if col_n <= 2 else "box_col4"
for fig_info in fig_info_list:
graph_subblock = (
f"<div class='{css_class}'><img src={fig_info['image_path']}></div>"
)
subblock_list.append(graph_subblock)
div_line = ""
div_lines = []
for i, block in enumerate(subblock_list):
div_line += block
if i % col_n == col_n - 1:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
div_line = ""
else:
if div_line:
div_lines.append(f"<div class='div_line'>{div_line}</div>")
graph_block_html = "".join(div_lines)
return graph_block_html
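# Illustrative sketch (added; not in the original source): _generate_figs_div lays
# the figure <div>s out in rows of col_n boxes, so for example
#   _generate_figs_div([{"image_path": "a.png"}, {"image_path": "b.png"},
#                       {"image_path": "c.png"}], col_n=2)
# produces two "div_line" rows: one holding a.png and b.png, and one holding c.png.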
def generate_fig_list_list_div(
estimation_results: List["EstimationResult"],
num_data,
case_id,
fig_type: str,
make_graphs_func,
col_n: int = 2,
**kwargs,
):
fig_info_list_list = _make_fig_info_list_list(
estimation_results, num_data, case_id, fig_type, make_graphs_func, **kwargs
)
div_html = _generate_fig_info_list_list_div(fig_info_list_list, col_n=col_n)
return div_html
def generate_figs_div(func, **kwargs):
fig_info_list = func(**kwargs)
if "col_n" in kwargs:
col_n = kwargs["col_n"]
div_html = _generate_figs_div(fig_info_list, col_n=col_n)
else:
div_html = _generate_figs_div(fig_info_list)
return div_html
def generate_computation_time_of_estimators_table(
estimation_results_list, simulation_settings, unit: str = "sec"
) -> str:
def _generate_computation_time_df(
estimation_results: list, name, unit
) -> pd.DataFrame:
n_rep = len(estimation_results)
if unit == "min":
time_unit = 60
elif unit == "sec":
time_unit = 1
else:
raise ValueError("'unit' must be 'sec' or 'min'.")
num_list = []
mean_list = []
std_list = []
num_data = simulation_settings[0].num_data
for i, num in enumerate(num_data):
comp_times = [result.computation_times[i] for result in estimation_results]
num_list.append(num)
mean_list.append(np.mean(comp_times) / time_unit)
std_list.append(np.std(comp_times) / time_unit)
data_dict = {
"Name": [name] + [' " '] * (len(num_list) - 1),
"N": num_list,
"Nrep": [n_rep] * len(num_list),
f"Mean ({unit})": mean_list,
f"Std ({unit})": std_list,
}
time_df =
|
pd.DataFrame(data_dict)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pickle
import warnings
from tqdm import tqdm
import time
from collections import defaultdict
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
train = pd.read_csv('../IA2-train.csv')
dev =
|
pd.read_csv('../IA2-dev.csv')
|
pandas.read_csv
|
import os.path
from typing import Tuple, Dict
import pandas
from django.conf import settings
from django.contrib.admin import ModelAdmin, register
from django.forms import ModelForm, FileField, FileInput
from django.shortcuts import redirect
from faker import Faker
from common.models import Example, Client, IdUpload, Template
from common.ocr import ocr
class BaseModelAdmin(ModelAdmin):
list_filter: Tuple = ("created", "modified")
readonly_fields: Tuple = ("created", "modified")
class SlugableModelAdmin(ModelAdmin):
prepopulated_fields: Dict[str, Tuple] = {"slug": ("name",)}
CREATED_MODIFIED = (
"Created / Modified",
{
"fields": ("created", "modified"),
"description": "Info about the time this entry was added here or updated",
},
)
@register(Example)
class ExampleAdmin(BaseModelAdmin):
fieldsets = (
(None, {"fields": ("name", "status", "status_changed", "published_at")}),
CREATED_MODIFIED,
)
list_display = ("name", "status", "status_changed", "published_at")
list_editable = ("status",)
readonly_fields = BaseModelAdmin.readonly_fields + (
"status_changed",
"published_at",
)
@register(Client)
class ClientsAdmin(BaseModelAdmin):
fieldsets = (
(
"Date client",
{
"fields": (
"first_name",
"last_name",
"cnp",
"residence",
"birthday",
"id_series",
"id_number",
"id_emitted_by",
"id_emitted_at",
"registration_number",
"face",
"back",
"template",
"generated_doc",
)
},
),
(
"Chitanță",
{
"fields": (
"cost",
"tax",
"receipt_series",
"receipt_number",
"receipt",
)
},
),
CREATED_MODIFIED,
)
readonly_fields = ("generated_doc", "receipt") + BaseModelAdmin.readonly_fields
list_display = ("__str__", "template", "generated_doc", "receipt")
@register(IdUpload)
class IdUploadAdmin(ModelAdmin):
def has_change_permission(self, request, obj=None) -> bool:
return False
def has_view_permission(self, request, obj=None) -> bool:
return False
def save_model(self, request, obj: IdUpload, form, change: bool) -> None:
client = Client()
fake = Faker()
result = ocr(
os.path.join(settings.MEDIA_ROOT, obj.face.name),
os.path.join(settings.BASE_DIR, "templates", "template.jpeg"),
)
client.first_name = result["first_name"]
client.last_name = result["last_name"]
client.birthday = pandas.to_datetime(result["birthday"]).date()
client.id_emitted_at =
|
pandas.to_datetime(result["id_emitted_at"])
|
pandas.to_datetime
|
import pandas as pd
from scripts.python.routines.manifest import get_manifest
import numpy as np
from sklearn.linear_model import ElasticNetCV
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
from scipy.stats import mannwhitneyu
import statsmodels.formula.api as smf
import plotly.graph_objects as go
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.scatter import add_scatter_trace
from scripts.python.routines.plot.violin import add_violin_trace
from scripts.python.routines.plot.layout import add_layout
import os
from scripts.python.routines.filter.pheno import filter_pheno
platform = "GPL13534"
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_train = ["GSE84727", "GSE147221", "GSE125105", "GSE111629", "GSE128235", "GSE72774", "GSE53740", "GSE144858"]
datasets_test = ["GSE147221", "GSE84727", "GSE125105", "GSE111629", "GSE128235", "GSE72774", "GSE53740", "GSE144858", "GSE42861", "GSE87648", "GSE106648"]
dnam_acc_type = 'DNAmGrimAgeAcc'
target = f"Age_Status"
path_save = f"{path}/{platform}/combo/EWAS/meta/{target}"
if not os.path.exists(f"{path_save}/clock"):
os.makedirs(f"{path_save}/clock")
manifest = get_manifest(platform)
pheno_all = pd.DataFrame(columns=['Age', 'Status'])
pheno_all.index.name = 'subject_id'
for d_id, dataset in enumerate(datasets_train):
print(dataset)
status_col = get_column_name(dataset, 'Status').replace(' ', '_')
age_col = get_column_name(dataset, 'Age').replace(' ', '_')
sex_col = get_column_name(dataset, 'Sex').replace(' ', '_')
status_dict = get_status_dict(dataset)
status_vals = sorted(list(status_dict.values()))
status_names_dict = get_status_names_dict(dataset)
sex_dict = get_sex_dict(dataset)
continuous_vars = {'Age': age_col}
categorical_vars = {status_col: status_dict, sex_col: sex_dict}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
betas = pd.read_pickle(f"{path}/{platform}/{dataset}/betas.pkl")
na_cols = betas.columns[betas.isna().any()].tolist()
if len(na_cols) > 0:
print(f"CpGs with NaNs in {dataset}: {na_cols}")
s = betas.stack(dropna=False)
na_pairs = [list(x) for x in s.index[s.isna()]]
print(*na_pairs, sep='\n')
betas.dropna(axis='columns', how='any', inplace=True)
df = pd.merge(pheno, betas, left_index=True, right_index=True)
pheno = df[[age_col, status_col]]
status_dict_inverse = dict((v, k) for k, v in status_dict.items())
pheno.loc[:, status_col] = pheno[status_col].map(status_dict_inverse)
pheno.rename(columns={age_col: 'Age', status_col: 'Status'}, inplace=True)
pheno_all = pheno_all.append(pheno, verify_integrity=True)
cpgs = betas.columns.values
betas = df[cpgs].T
if d_id == 0:
betas_all = betas
else:
betas_all = betas_all.merge(betas, how='inner', left_index=True, right_index=True)
betas_all = betas_all.T
betas_all.index.name = "subject_id"
df_all = pd.merge(pheno_all, betas_all, left_index=True, right_index=True)
with open(f"cpgs.txt") as f:
cpgs_target = f.read().splitlines()
cpgs_target = set.intersection(set(betas_all.columns.values), set(cpgs_target))
X_target = df_all.loc[df_all['Status'] == 'Control', cpgs_target].to_numpy()
y_target = df_all.loc[df_all['Status'] == 'Control', 'Age'].to_numpy()
X_all = df_all.loc[:, cpgs_target].to_numpy()
y_all = df_all.loc[:, 'Age'].to_numpy()
cv = RepeatedKFold(n_splits=5, n_repeats=5, random_state=1337)
model = ElasticNetCV(n_alphas=20, cv=cv, n_jobs=2, verbose=1)
model.fit(X_target, y_target)
model_dict = {'feature': ['Intercept'], 'coef': [model.intercept_]}
num_features = 0
for cpg_id, cpg in enumerate(cpgs_target):
coef = model.coef_[cpg_id]
if abs(coef) > 0:
model_dict['feature'].append(cpg)
model_dict['coef'].append(coef)
num_features += 1
model_df = pd.DataFrame(model_dict)
if not os.path.exists(f"{path_save}/clock/{num_features}"):
os.makedirs(f"{path_save}/clock/{num_features}")
model_df.to_excel(f"{path_save}/clock/{num_features}/clock.xlsx", index=False)
metrics_dict = {'alpha': model.alpha_, 'l1_ratio': model.l1_ratio_, 'num_features': num_features}
y_target_pred = model.predict(X_target)
metrics_dict['R2_Control'] = model.score(X_target, y_target)
metrics_dict['RMSE_Control'] = np.sqrt(mean_squared_error(y_target_pred, y_target))
metrics_dict['MAE_Control'] = mean_absolute_error(y_target_pred, y_target)
y_all_pred = model.predict(X_all)
metrics_dict['R2_All'] = model.score(X_all, y_all)
metrics_dict['RMSE_All'] = np.sqrt(mean_squared_error(y_all_pred, y_all))
metrics_dict['MAE_All'] = mean_absolute_error(y_all_pred, y_all)
metrics_df = pd.DataFrame(metrics_dict, index=[0])
metrics_df.to_excel(f"{path_save}/clock/{num_features}/metrics.xlsx", index=False)
pheno_all[f'AgeEST'] = y_all_pred
formula = f"AgeEST ~ Age"
reg = smf.ols(formula=formula, data=pheno_all.loc[pheno_all['Status'] == 'Control', :]).fit()
res_dict = {'R2': reg.rsquared, 'R2_adj': reg.rsquared_adj}
res_dict['RMSE'] = np.sqrt(mean_squared_error(reg.fittedvalues.values, pheno_all.loc[pheno_all['Status'] == 'Control', 'Age'].values))
res_dict['MAE'] = mean_absolute_error(reg.fittedvalues.values, pheno_all.loc[pheno_all['Status'] == 'Control', 'Age'].values)
pheno_all['Acceleration'] = pheno_all[f'AgeEST'] - reg.predict(pheno_all)
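# Clarifying comment (added; not in the original script): 'Acceleration' is the
# residual of the estimated epigenetic age against the control-only
# AgeEST ~ Age regression, so positive values indicate samples whose clock
# prediction exceeds the control trend at the same chronological age.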
pheno_all.to_excel(f"{path_save}/clock/{num_features}/pheno.xlsx", index=True)
scatter = go.Figure()
add_scatter_trace(scatter, pheno_all.loc[pheno_all['Status'] == 'Case', 'Age'].values, pheno_all.loc[pheno_all['Status'] == 'Case', 'AgeEST'].values, 'Case')
add_scatter_trace(scatter, pheno_all.loc[pheno_all['Status'] == 'Control', 'Age'].values, pheno_all.loc[pheno_all['Status'] == 'Control', 'AgeEST'].values, 'Control')
add_scatter_trace(scatter, pheno_all.loc[pheno_all['Status'] == 'Control', 'Age'].values, reg.fittedvalues.values, "", "lines")
add_layout(scatter, "Age", "AgeEST", f"Control: R2: {res_dict['R2']:0.2f}, RMSE: {res_dict['RMSE']:0.2f}, MAE: {res_dict['MAE']:0.2f}")
scatter.update_layout({'colorway': ['red', 'blue', 'blue']})
save_figure(scatter, f"{path_save}/clock/{num_features}/scatter_Age_AgeEST")
statistic, pvalue = mannwhitneyu(pheno_all.loc[pheno_all['Status'] == 'Control', 'Acceleration'].values, pheno_all.loc[pheno_all['Status'] == 'Case', 'Acceleration'].values)
res_dict['MW_statistic'] = statistic
res_dict['MW_pvalue'] = pvalue
res_df = pd.DataFrame(res_dict, index=[0])
res_df.to_excel(f"{path_save}/clock/{num_features}/res.xlsx", index=False)
box = go.Figure()
add_violin_trace(box, pheno_all.loc[pheno_all['Status'] == 'Control', 'Acceleration'].values, 'Control')
add_violin_trace(box, pheno_all.loc[pheno_all['Status'] == 'Case', 'Acceleration'].values, 'Case')
add_layout(box, "", 'Acceleration', f"p-val = {pvalue:0.4e}")
box.update_layout({'colorway': ['blue', 'red']})
save_figure(box, f"{path_save}/clock/{num_features}/box_Acceleration")
for d_id, dataset in enumerate(datasets_test):
print(dataset)
status_col = get_column_name(dataset, 'Status').replace(' ', '_')
age_col = get_column_name(dataset, 'Age').replace(' ', '_')
sex_col = get_column_name(dataset, 'Sex').replace(' ', '_')
status_dict = get_status_dict(dataset)
status_vals = sorted(list(status_dict.values()))
status_names_dict = get_status_names_dict(dataset)
sex_dict = get_sex_dict(dataset)
continuous_vars = {'Age': age_col}
categorical_vars = {status_col: status_dict, sex_col: sex_dict}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
betas =
|
pd.read_pickle(f"{path}/{platform}/{dataset}/betas.pkl")
|
pandas.read_pickle
|
import pandas as pd
import datetime
import subprocess
import localModuleForMinpou
#----
def atting_program(row):
recorder_for_minpou_command_line = 'python '+localModuleForMinpou.RECORDER_FOR_MINPOU+' "{0}" "{1}" "{2}" "{3}" "{4}" "{5}" "{6}"'.format(
row.station_id,
int((row.air_time + datetime.timedelta(seconds=localModuleForMinpou.MARGIN_SECOND*2)).total_seconds()),
row.start_time.strftime('%Y'),
row.station_name,
row.title,
row.image_url,
row.start_time
)
at_launch_time = row.start_time - datetime.timedelta(seconds=localModuleForMinpou.MARGIN_SECOND)
command_line = "echo 'sleep {0}; {1}' | at -t {2}".format(
at_launch_time.strftime('%S'),
recorder_for_minpou_command_line,
at_launch_time.strftime('%Y%m%d%H%M'),
)
res = subprocess.check_output(command_line, shell=True)
# print(command_line)
#----
table = pd.read_csv(localModuleForMinpou.TABLE_FILE)
table['start_time'] = pd.to_datetime(table['start_time'])
table['end_time'] = pd.to_datetime(table['end_time'])
table['air_time'] =
|
pd.to_timedelta(table['air_time'])
|
pandas.to_timedelta
|
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables to set track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_table_mixed_dtypes(setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_calendar_roundtrip_issue(setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_remove(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_same_name_scoping(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(20, 2), index=date_range("20130101", periods=20))
store.put("df", df, format="table")
expected = df[df.index > Timestamp("20130105")]
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
# changes what 'datetime' points to in the namespace where
# 'select' does the lookup
from datetime import datetime # noqa:F401
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_store_index_name(setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(table_format, setup_path):
# GH #13492
idx = Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@pytest.mark.filterwarnings("ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning")
def test_overwrite_node(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts =
|
tm.makeTimeSeries()
|
pandas._testing.makeTimeSeries
|
from math import sqrt
import numpy as np
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import Classes.Configurations as cfg
import os
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
def partial_leasts_square_regression(x, y, train_split_percentage, file_name):
# Define the X and Y matrices after cleaning by PCA and Mahalanobis distance
x_df = pd.DataFrame(x)
y_df = pd.DataFrame(y)
# split data to train and test
x_test, x_train, y_test, y_train = train_test_split(x_df, y_df, test_size=train_split_percentage, random_state=0)
# Train one PLS model for each Y parameter
parameters = len(y_df.columns)
models = []
rmsec = []
r2cal = []
rmsecv = []
r2cv = []
for i in range(parameters):
if cfg.sigma_detection:
x_sigma, y_sigma = do_sigma_pls(x_df, y_df.iloc[:, i], train_split_percentage)
if cfg.polarization_test:
x_sigma_r, y_sigma_r = polarization_reducer_by_amplitude_groups(x_sigma, y_sigma)
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_sigma_r, y_sigma_r, train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
med_x_pred_polarized(x_sigma_r, y_sigma_r, i_rmsec, i_r2c, i_rmsecv, i_r2cv, i_model,
file_name + '\Figures\sigma_' + y_df.columns[i], i + 200, y_df.columns[i])
sigma_data_to_excel(file_name + '\SigmaReport_' + y_df.columns[i],
pd.concat([x_sigma_r, y_sigma_r], axis=1, sort=False))
else:
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_sigma, y_sigma, train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
med_x_pred_sigma(x_sigma, y_sigma, i_rmsec, i_r2c, i_rmsecv, i_r2cv, i_model, file_name + '\Figures\sigma_' + y_df.columns[i], i + 200, y_df.columns[i])
sigma_data_to_excel(file_name + '\SigmaReport_' + y_df.columns[i], pd.concat([x_sigma, y_sigma], axis=1, sort=False))
else:
if cfg.polarization_test:
x_df_r, y_df_r = polarization_reducer_by_amplitude_groups(x_df, y_df.iloc[:, i])
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_df_r, y_df_r, train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
med_x_pred_polarized(x_df_r, y_df_r, i_rmsec, i_r2c, i_rmsecv, i_r2cv, i_model,
file_name + '\Figures\polarized_' + y_df.columns[i], i + 200, y_df.columns[i])
else:
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_df, y_df.iloc[:, i], train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
df_models_summary = pd.DataFrame(
pd.concat([pd.DataFrame(list(y_df.columns)), pd.DataFrame(rmsec), pd.DataFrame(r2cal), pd.DataFrame(rmsecv), pd.DataFrame(r2cv)], axis=1))
s = pd.Series(['Parameter', 'RMSEC', 'R2CAL', 'RMSECV', 'R2CV'])
df_models_summary = df_models_summary.transpose().set_index(s)
df_y_resume = pd.DataFrame(y_df.describe().dropna())
df_indexes = pd.DataFrame(pd.concat([df_y_resume, df_models_summary], axis=0))
return df_indexes, df_models_summary, df_y_resume, models, x_train, y_train, x_test, y_test
def do_pls(data_x, data_y, train_split_percentage):
latent_variables = []
x_test, x_train, y_test, y_train = train_test_split(data_x, data_y, test_size=train_split_percentage, random_state=0)
for i in range(20):
pls = PLSRegression(n_components=(i + 1), scale=True)
pls.fit(x_train, y_train)
predicted_cv_y = pls.predict(x_test)
mean_squared_error_cv = sqrt(mean_squared_error(y_test, predicted_cv_y))
latent_variables.append(mean_squared_error_cv)
best_factor = np.argmin(latent_variables)
pls2 = PLSRegression(n_components=(best_factor + 1), scale=True)
pls2.fit(x_train, y_train)
predicted_cal = pls2.predict(x_train)
rmsec = sqrt(mean_squared_error(y_train, predicted_cal))
r2c = pls2.score(x_train, y_train)
predicted_cv_y = pls2.predict(x_test)
rmsecv = sqrt(mean_squared_error(y_test, predicted_cv_y))
r2v = pls2.score(x_test, y_test)
plsfinal = PLSRegression(n_components=(best_factor + 1), scale=True)
plsfinal.fit(data_x, data_y)
return plsfinal, rmsec, r2c, rmsecv, r2v
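# Note (added for clarity; not in the original script): do_pls scans 1-20 latent
# variables, picks the component count with the smallest held-out RMSE, reports
# calibration (rmsec, r2c) and cross-validation (rmsecv, r2v) metrics from that
# split, and then refits the selected model on the full data before returning it.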
def do_sigma_pls(data_x, data_y, train_split_percentage):
latent_variables = []
x_test, x_train, y_test, y_train = train_test_split(data_x, data_y, test_size=train_split_percentage, random_state=0)
for i in range(20):
pls = PLSRegression(n_components=(i + 1), scale=True)
pls.fit(x_train, y_train)
predicted_cv_y = pls.predict(x_test)
mean_squared_error_cv = sqrt(mean_squared_error(y_test, predicted_cv_y))
latent_variables.append(mean_squared_error_cv)
best_factor = np.argmin(latent_variables)
pls_sigma = PLSRegression(n_components=(best_factor + 1), scale=True)
pls_sigma.fit(data_x, data_y)
predicted_cv_y_sigma = pd.DataFrame(pls_sigma.predict(data_x))
data_labels = pd.DataFrame(data_y.index)
data_x = pd.DataFrame(data_x).reset_index(drop=True)
data_y = pd.DataFrame(data_y).reset_index(drop=True)
if cfg.sigma_percentage:
percentual_error = pd.DataFrame(abs(data_y.iloc[:, 0] - predicted_cv_y_sigma.iloc[:, 0]))
percentual_error = pd.DataFrame((percentual_error.iloc[:, 0] * 100) / data_y.iloc[:, 0])
df_x = pd.DataFrame(pd.DataFrame(pd.concat([data_x, percentual_error], axis=1)))
df_x = df_x.drop(df_x[df_x.iloc[:, -1] > cfg.sigma_confidence].index)
df_x.drop(df_x.columns[len(df_x.columns) - 1], axis=1, inplace=True)
df_y = pd.DataFrame(pd.DataFrame(pd.concat([data_y, data_labels, percentual_error], axis=1)))
df_y = df_y.drop(df_y[df_y.iloc[:, -1] > cfg.sigma_confidence].index)
df_x.set_index(df_y.iloc[:, 1], inplace=True)
df_y.set_index(df_x.index, inplace=True)
df_y.drop(df_y.columns[len(df_y.columns) - 1], axis=1, inplace=True)
return df_x, df_y
else:
abs_error = pd.DataFrame(abs(data_y.iloc[:, 0] - predicted_cv_y_sigma.iloc[:, 0]))
df_x = pd.DataFrame(pd.DataFrame(pd.concat([data_x, abs_error], axis=1)))
df_x = df_x.drop(df_x[df_x.iloc[:, -1] > cfg.sigma_confidence].index)
df_x.drop(df_x.columns[len(df_x.columns) - 1], axis=1, inplace=True)
df_y = pd.DataFrame(pd.DataFrame(
|
pd.concat([data_y, abs_error], axis=1)
|
pandas.concat
|
from pathlib import Path
import pandas
import pytest
from muller import dataio
from muller.inheritance import areascore, polygon
from tests import filenames
FOLDER_DATA = Path(__file__).parent.parent / "data"
@pytest.fixture
def trajectory_table() -> pandas.DataFrame:
filename_table = filenames.fake_tables['generic.model.area']
return dataio.import_table(filename_table, sheet_name = "data", index = 'Trajectory')
@pytest.mark.parametrize(
"key, expected",
[
('A', [(0, 0), (1, .1), (2, .1), (3, .2), (4, .2), (5, .3), (6, .3), (6, 0)]),
('B', [(0, 0.0)] + [(i, .1) for i in range(7)] + [(6, 0.0)]),
('C', [(0, 0), (0, 1), (1, .9), (2, .8), (3, .7), (4, .6), (5, .5), (6, .4), (6, 0)]),
('D', [(0, 0), (0, .1), (1, .2), (2, .3), (3, .4), (4, .5), (5, .6), (6, .7), (6, 0)]),
('E', [(0, 0.0), (1, 0.0001), (2, 0.2), (3, .1), (4, 0.0001), (5, 0.0001), (6, 0.0)]),
('F', [(0, 0.0), (1, 0.0001), (2, 0.0001), (3, 0.0001), (4, 0.0001), (5, 0.0001), (6, .1), (6, 0.0)])
]
)
def test_get_points(trajectory_table, key, expected):
series = trajectory_table.loc[key]
result = polygon.get_vertices(series)
assert result == expected
@pytest.mark.parametrize(
"data, expected",
[
([0, 1, 0, 0, .1, 0], [(0.0, 0), (1, 1.0), (2, polygon.MINIMUM), (3, polygon.MINIMUM), (4, 0.1), (5, 0)])
]
)
def test_get_vertices_again(data, expected):
result = polygon.get_vertices(data)
assert result == expected
def test_decompose_correct_split_series():
series = pandas.Series([0.97, 0.0, 0.97, 0.97, 0.97, 0.87, 0.97])
expected = [0.97, 0.001, 0.97, 0.97, 0.97, 0.87, 0.97]
result = polygon._decompose_correct_split_series(series)
assert result.tolist() == pytest.approx(expected)
@pytest.mark.parametrize(
"timepoint, previous, expected",
[
("A", True, "A"),
("B", True, "A"),
("C", True, "B"),
("C", False, "D"),
("F", False, "G"),
("G", False, "G")
]
)
def test_get_neighbor(timepoint, previous, expected):
index = pandas.Index(list("ABCDEFG"))
result = polygon.get_neighbor(index, timepoint, previous)
assert result == expected
@pytest.mark.parametrize(
"key, expected",
[
("A", 1.05), ("B", 0.6), ("C", 4.2), ("D", 2.4), ("E", .3), ("F", 0.05), ("G", 1.1)
]
)
def test_shoelace(trajectory_table, key, expected):
# Keep this for now to make sure the area is being calculated correctly.
series = trajectory_table.loc[key]
result = areascore.area_of_series(series)
assert pytest.approx(result, abs = 0.01) == expected
@pytest.mark.parametrize(
"data, expected",
[
(
[(0, 0), (1, 1), (2, 0), (3, 0), (4, 0), (5, .1), (6, 0)],
[[(0, 0), (1, 1), (2, 0)], [(4, 0), (5, .1), (6, 0)]]
),
(
[(0, 0), (1, 1), (2, 0)],
[[(0, 0), (1, 1), (2, 0)]]
),
([], []),
(
[(0, 0), (0, .1), (1, .2), (2, .3), (3, .4), (4, .5), (5, .6), (6, .7), (6, 0)],
[[(0, 0), (0, .1), (1, .2), (2, .3), (3, .4), (4, .5), (5, .6), (6, .7), (6, 0)]],
),
]
)
def test_separate(data, expected):
result = polygon.isolate_polygons(data)
assert result == expected
# data: The y-values for a specific series.
# expected: The expected polygons generated from the sequence, where y=0 deliminates polygon vertices.
@pytest.mark.parametrize(
"data, expected,",
[
(
[0, 1, 0, 0, .1, 0],
[
# The polygon 0-1-0 is a triangle and is separate from the next polygon.
[(0, 0.0), (1, 1.0), (2, polygon.MINIMUM)],
[(3, polygon.MINIMUM), (4, .1), (5, 0.0)]
]
),
(
[0, 1, 0, 0, .1, 0],
[
[(0, 0.0), (1, 1.0), (2, polygon.MINIMUM)],
[(3, polygon.MINIMUM), (4, 0.1), (5, 0.0)]
]
),
]
)
def test_separate_again(data, expected):
series = pandas.Series(data)
points = polygon.get_vertices(series)
result = polygon.isolate_polygons(points)
# Note: polygon.isolate_polygons returns a single polygon for each series. This is to prevent
# a TopologyError from shapely.
# This is basically a list with another list as the only element.
expected = expected[0] + expected[1]
assert result[0] == expected
@pytest.mark.parametrize(
"data, expected",
[
([0, 1, 0, 0, .1, 0], [(0, 0.0), (1, 1), (2, 0.0001), (3, 0.0001), (4, .1), (5, 0.0)]),
([0, 1, 0, 0, 0, .1, 0], [(0, 0.0), (1, 1), (2, 0.0001), (3, 0.0001), (4, 0.0001), (5, 0.1), (6, 0.0)]),
]
)
def test_decompose(data, expected):
series =
|
pandas.Series(data)
|
pandas.Series
|
from textwrap import dedent
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from .. import params
@pytest.fixture
def initial_params_dict():
return dict(alpha=1.0, beta=2.0)
@pytest.fixture
def initial_params_series():
return
|
pd.Series([1.0, 2.0], ["alpha", "beta"])
|
pandas.Series
|
## this tool is for snv analysis
## author: taozhou
## email: <EMAIL>
import pandas as pd
from glob import glob
import numpy as np
import sh
import os
import re
import datetime
from genecast_package.core import make_result_folder
class MethodException(Exception):
pass
class FileNoExist(Exception):
pass
def replace(file, indel):
base_name = file.split("/")[-1].replace("snp", indel)
return "/".join(file.split("/")[:-1]) + "/" + base_name
def merge_snp_indel(file, args=None):
title = ['Chr', 'Start', 'End', 'Ref', 'Alt', 'Func.refGene', 'Gene.refGene',
'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',
'ljb2_pp2hdiv', 'ljb2_pp2hvar', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR',
'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH', 'ExAC_SAS',"CLINSIG","CLNDBN","CLNACC","CLNDSDB","CLNDSDBID",
"gnomAD_exome_ALL", "gnomAD_exome_AFR", "gnomAD_exome_AMR", "gnomAD_exome_ASJ", "gnomAD_exome_EAS",
"gnomAD_exome_FIN", "gnomAD_exome_NFE", "gnomAD_exome_OTH", "gnomAD_exome_SAS",
'Otherinfo', ".", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "ratio"]
if args.somatic.upper() == "Y":
title = title
else:
title = title[:-2] + ["ratio"]
if args.data_type == "snp":
return pd.read_table(file, skiprows=1, names=title)
elif args.data_type == "indel":
indel_file = replace(file, "indel")
return pd.read_table(indel_file, skiprows=1, names=title)
else:
data_snp = pd.read_table(file, skiprows=1, names=title)
indel_file = replace(file, "indel")
data_indel = pd.read_table(indel_file, skiprows=1, names=title)
data_snp_indel = pd.concat([data_snp, data_indel])
#data_snp_indel.to_csv(replace(file, "snp_indel"), index=False, sep="\t")
return data_snp_indel
def filter(data, file_name, args=None):
target_ExonicFunc = ["frameshift", "nonframeshift", "nonsynonymous", "stopgain", "stoploss"]
data = data.loc[data["Func.refGene"] == "exonic"]
data["ExonicFunc"] = [i.split(" ")[0] for i in data["ExonicFunc.refGene"]]
data = data.loc[data["ExonicFunc"].isin(target_ExonicFunc)]
B = []
for ljb2_pp2hdiv, ljb2_pp2hvar in zip(data["ljb2_pp2hdiv"], data["ljb2_pp2hvar"]):
if "B" in ljb2_pp2hdiv and "B" in ljb2_pp2hvar:
B.append("B")
else:
B.append("ok")
data["B"] = B
try:
data = data.loc[data["B"] != args.ljb2_pp2hdiv]
except TypeError:
raise FileNoExist("after filter this file: %s no result" % file_name)
ExAC_columns = [i for i in data.columns if "gnomAD" in i]
data["gnomAD_max"] = data[ExAC_columns].max(1)
if args.somatic.upper() == "Y":
n = 5; s = ","
else:
n = 6; s = ":"
ratio = []
strand_filter = []
for i in data["ratio"]:
ratio.append(i.split(":")[n])
if int(i.split(s)[-2]) + int(i.split(s)[-1]) >= args.two_strand:
if int(i.split(s)[-2]) >= args.one_strand and int(i.split(s)[-1]) >= args.one_strand:
strand_filter.append(True)
else:
strand_filter.append(False)
else:
strand_filter.append(False)
data["ratio"] = ratio
data["max"] = [False if i != "." and float(i) >= args.gnomAD_max else True for i in data["gnomAD_max"]]
data["ratio"] = [float(i.rstrip("%")) for i in data["ratio"]]
data = data.loc[(strand_filter) & (data["ratio"] >= args.ratio) & (data["max"] == True)]
if args.subcommand == "maf":
data = data[['Chr', 'Start', 'End', 'Ref', 'Alt', "ratio"]]
data.columns = ["chr", "start", "end", "ref_allele", "alt_allele", "i_TumorVAF_WU"]
data["Tumor_Sample_Barcode"] = [file_name] * len(data)
return data
if args.locus:
data = data[['Chr', 'Start', 'End', 'Ref', 'Alt', "ratio"]]
data.columns = ['Chr', 'Start', 'End', 'Ref', 'Alt', file_name]
data["Start"] = data["Start"].astype(float)
data["End"] = data["End"].astype(float)
return data
# p_p = re.compile(r'p.(.*?),')
# data_site = {"gene": [], file_name:[]}
# for gene, aa, ratio in zip(data["Gene.refGene"], data["AAChange.refGene"], data["ratio"]):
# for a in p_p.findall(aa) + [aa.split(".")[-1]]:
# data_site["gene"].append(gene + "_" + a); data_site[file_name].append(ratio)
# return pd.DataFrame(data_site).drop_duplicates()
if args.circos:
data = data[['Chr', 'Start', 'End', 'Gene.refGene', "ratio"]]
return data
else:
data = data[["Gene.refGene", "ratio"]]
groups = data.groupby(data["Gene.refGene"])
data = pd.merge(groups.count(), groups.mean(), left_index=True, right_index=True, how="inner")
data.columns = ["num", "mean"]
data = pd.DataFrame({file_name:data[args.cal_type]}, index=data.index)
return data
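
# Illustrative usage sketch (not part of the original tool): `filter` expects an
# argparse-style namespace; every attribute value below is an assumed placeholder,
# as is the input file name.
def _usage_sketch_filter():
    from argparse import Namespace
    args = Namespace(data_type="snp", somatic="Y", ljb2_pp2hdiv="B",
                     gnomAD_max=0.01, two_strand=8, one_strand=2, ratio=1.0,
                     subcommand="snv", locus=False, circos=False, cal_type="mean")
    data = merge_snp_indel("sample1.snp.annovar.txt", args=args)  # assumed file name
    return filter(data, "sample1", args=args)
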
def get_host_gene(args=None):
if len(pd.read_table(args.host_gene).T) == 1:
try:
gene_list = pd.read_table(args.host_gene, usecols=["gene"]).drop_duplicates()
except Exception:
            raise MethodException('the format of your target file is wrong, '
                                  'please make sure it contains only one column titled "gene", '
                                  'or supply a panel bed file instead')
elif args.subcommand == "snv" and args.locus:
gene_list = pd.read_table(args.host_gene, usecols=["GeneSymbol", 'Chr', 'Start', 'End', 'Ref', 'Alt', "gene"]).drop_duplicates()
gene_list["Start"] = gene_list["Start"].astype(float)
gene_list["End"] = gene_list["End"].astype(float)
else:
try:
gene_list =
|
pd.read_table(args.host_gene, names=["chr", "start", "end", "gene", "trans"])
|
pandas.read_table
|
#
# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# We're going to explicitly use a local installation of Pyserini (as opposed to a pip-installed one).
# Comment these lines out to use a pip-installed one instead.
sys.path.insert(0, './')
import argparse
import json
import multiprocessing
import os
import pickle
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
from pyserini.ltr import *
"""
Running prediction on candidates
"""
def dev_data_loader(file, format, top=100):
if format == 'tsv':
dev = pd.read_csv(file, sep="\t",
names=['qid', 'pid', 'rank'],
dtype={'qid': 'S','pid': 'S', 'rank':'i',})
elif format == 'trec':
dev = pd.read_csv(file, sep="\s+",
names=['qid', 'q0', 'pid', 'rank', 'score', 'tag'],
usecols=['qid', 'pid', 'rank'],
dtype={'qid': 'S','pid': 'S', 'rank':'i',})
else:
raise Exception('unknown parameters')
assert dev['qid'].dtype == np.object
assert dev['pid'].dtype == np.object
assert dev['rank'].dtype == np.int32
dev = dev[dev['rank']<=top]
dev_qrel = pd.read_csv('./collections/msmarco-passage/qrels.dev.small.tsv', sep="\t",
names=["qid", "q0", "pid", "rel"], usecols=['qid', 'pid', 'rel'],
dtype={'qid': 'S','pid': 'S', 'rel':'i'})
assert dev['qid'].dtype == np.object
assert dev['pid'].dtype == np.object
assert dev['rank'].dtype == np.int32
dev = dev.merge(dev_qrel, left_on=['qid', 'pid'], right_on=['qid', 'pid'], how='left')
dev['rel'] = dev['rel'].fillna(0).astype(np.int32)
dev = dev.sort_values(['qid', 'pid']).set_index(['qid', 'pid'])
print(dev.shape)
print(dev.index.get_level_values('qid').drop_duplicates().shape)
print(dev.groupby('qid').count().mean())
print(dev.head(10))
print(dev.info())
dev_rel_num = dev_qrel[dev_qrel['rel'] > 0].groupby('qid').count()['rel']
recall_point = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
recall_curve = {k: [] for k in recall_point}
for qid, group in tqdm(dev.groupby('qid')):
group = group.reset_index()
assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))
total_rel = dev_rel_num.loc[qid]
query_recall = [0 for k in recall_point]
for t in group.sort_values('rank').itertuples():
if t.rel > 0:
for i, p in enumerate(recall_point):
if t.rank <= p:
query_recall[i] += 1
for i, p in enumerate(recall_point):
if total_rel > 0:
recall_curve[p].append(query_recall[i] / total_rel)
else:
recall_curve[p].append(0.)
for k, v in recall_curve.items():
avg = np.mean(v)
print(f'recall@{k}:{avg}')
return dev, dev_qrel
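
# Usage sketch (assumed run-file path; the format argument must match the file
# produced by the candidate retrieval step):
def _usage_sketch_dev_data_loader():
    dev, dev_qrel = dev_data_loader("runs/run.msmarco-passage.dev.small.tsv",
                                    format="tsv", top=100)
    return dev, dev_qrel
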
def query_loader():
queries = {}
with open('collections/msmarco-ltr-passage/queries.train.json') as f:
for line in f:
query = json.loads(line)
qid = query.pop('id')
query['analyzed'] = query['analyzed'].split(" ")
query['text'] = query['text_unlemm'].split(" ")
query['text_unlemm'] = query['text_unlemm'].split(" ")
query['text_bert_tok'] = query['text_bert_tok'].split(" ")
queries[qid] = query
with open('collections/msmarco-ltr-passage/queries.dev.small.json') as f:
for line in f:
query = json.loads(line)
qid = query.pop('id')
query['analyzed'] = query['analyzed'].split(" ")
query['text'] = query['text_unlemm'].split(" ")
query['text_unlemm'] = query['text_unlemm'].split(" ")
query['text_bert_tok'] = query['text_bert_tok'].split(" ")
queries[qid] = query
with open('collections/msmarco-ltr-passage/queries.eval.small.json') as f:
for line in f:
query = json.loads(line)
qid = query.pop('id')
query['analyzed'] = query['analyzed'].split(" ")
query['text'] = query['text_unlemm'].split(" ")
query['text_unlemm'] = query['text_unlemm'].split(" ")
query['text_bert_tok'] = query['text_bert_tok'].split(" ")
queries[qid] = query
return queries
def batch_extract(df, queries, fe):
tasks = []
task_infos = []
group_lst = []
for qid, group in tqdm(df.groupby('qid')):
task = {
"qid": qid,
"docIds": [],
"rels": [],
"query_dict": queries[qid]
}
for t in group.reset_index().itertuples():
task["docIds"].append(t.pid)
task_infos.append((qid, t.pid, t.rel))
tasks.append(task)
group_lst.append((qid, len(task['docIds'])))
if len(tasks) == 1000:
features = fe.batch_extract(tasks)
task_infos = pd.DataFrame(task_infos, columns=['qid', 'pid', 'rel'])
group = pd.DataFrame(group_lst, columns=['qid', 'count'])
print(features.shape)
print(task_infos.qid.drop_duplicates().shape)
print(group.mean())
print(features.head(10))
print(features.info())
yield task_infos, features, group
tasks = []
task_infos = []
group_lst = []
# deal with rest
if len(tasks) > 0:
features = fe.batch_extract(tasks)
task_infos =
|
pd.DataFrame(task_infos, columns=['qid', 'pid', 'rel'])
|
pandas.DataFrame
|
"""Helper functions for random forest classification and regression
Author <NAME>"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
#from . import ml_sets as sets
from result_analysis import ml_analysis as ml_an
from result_analysis import photoz_analysis as pz_an
#to standardize scaling
from sklearn.preprocessing import RobustScaler
#clean up memory
import gc
import math
def build_matrices(df, features,label, drop_nans = True):
"""This routines returns the feature matrix X to use for the classification
and the label vector y based on the input DataFrame. The label column must
be df.label and the features must be valid column names of the DataFrame
Input:
df (DataFrame)
features (list) list of label names to be considered
Output:
X (Numpy Array, 2D) feature matrix
y (Numpy Array, 1D) label vector
"""
if drop_nans:
df.dropna(axis=0,how='any',subset=features,inplace=True)
X = np.array(df[features])
y = np.array(df[label])
return X,y
def build_matrix(df, features,drop_nans = False):
"""This routines returns the feature matrix X to use for the classification.
The features must be valid column names of the DataFrame.
Input:
df (DataFrame)
features (list) list of label names to be considered
Output:
X (Numpy Array, 2D) feature matrix
"""
if drop_nans:
df.dropna(axis=0,how='any',subset=features,inplace=True)
X = np.array(df[features])
return X
def rf_class_grid_search(df_train,df_pred, features, label, param_grid, rand_state, scores, name):
"""This routine calculates the random forest classification on a grid of
hyper-parameters for the random forest method to test the best
hyper-parameters. The analysis results of the test will be written out and
saved.
Parameters:
df : pandas dataframe
The dataframe containing the features and the label for the
regression.
features : list of strings
List of features
label : string
The label for the regression
param_grid : dictionary-like structure
Parameter grid of input parameters for the grid search
rand_state : integer
Setting the random state variables to ensure reproducibility
scores : list of strings
Setting the score by which the grid search should be evaluated
name : strings
Setting the name of the output file for the grid search which
contains all information about the grid
"""
X_train, y_train = build_matrices(df_train, features,label=label)
X_test, y_test = build_matrices(df_pred, features,label=label)
print ("Trainingset: ", X_train.shape)
print(pd.Series(y_train).value_counts())
print("Testset:", X_test.shape)
print(pd.Series(y_test).value_counts())
for score in scores:
print(("# Tuning hyper-parameters for %s" % score))
print()
clf = GridSearchCV(RandomForestClassifier(random_state=rand_state),
param_grid, cv=5, scoring='%s' % score, n_jobs = 15, return_train_score=True)
clf.fit(X_train, y_train)
print("Detailed classification report:")
print("")
print("The model is trained on the training set.")
print("The scores are computed on the test set.")
print("")
y_true, y_pred = y_test, clf.predict(X_test)
y_true = y_true.astype('str')
y_pred = y_pred.astype('str')
print((classification_report(y_true, y_pred)))
print()
print("Best parameters set found on training set:")
print()
print((clf.best_params_))
print()
print("Grid scores on training set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print(("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params)))
print()
df = pd.DataFrame(clf.cv_results_)
df.to_hdf('data/'+name+'_'+score+'.hdf5','data')
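# Minimal usage sketch: the feature names, label, grid values and output name below
# are illustrative assumptions, not this project's actual configuration.
def _usage_sketch_rf_class_grid_search(df_train, df_pred):
    param_grid = {"n_estimators": [100, 300], "max_depth": [10, 20]}
    rf_class_grid_search(df_train, df_pred,
                         features=["ug", "gr", "ri", "iz"], label="mult_class_true",
                         param_grid=param_grid, rand_state=1,
                         scores=["f1_weighted"], name="rf_class_grid")
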
def rf_class_validation_curve(df, features, label, params, param_name, param_range):
"""This routine calculates the validation curve for one hyper-parameter of
the random forest classification method.
Input:
df (DataFrame) The database to draw from
features (list) list of features in the DataFrame
label : string
The label for the regression
param_name (string) name of the hyper parameter
param_range (list) list of parameter values to use
Output:
None
"""
print("THIS FUNCTION IS DEPRECATED")
X,y = build_matrices(df, features,label)
# Standardizing the data
# X = preprocessing.robust_scale(X)
clf = RandomForestClassifier(**params)
title = "Validation curve / Random Forest Classifier"
ml_an.plot_validation_curve(clf, param_name, param_range, title, X, y,
ylim=(0.0, 1.1), cv=None, n_jobs=4)
plt.show()
def rf_class_create(df_train, features, label, params,rand_state, save=False, save_filename=None):
"""This routine creates a random forest classifier.
It is aimed at multi-class classification and used for my pipeline.
Parameters:
df : pandas dataframe
The dataframe containing the features and the label for the
regression.
features : list of strings
List of features
label : string
The label for the regression
params : dictionary
List of input parameters for the regression
rand_state : integer
Setting the random state variables to ensure reproducibility
Return :
clf : scikit-learn Classifier
The Classifier trained on the training set
"""
print("training on "+ str(len(df_train.index))+" entries")
X_train, y_train = build_matrices(df_train, features,label=label)
# Standardizing the data
# X_train = preprocessing.robust_scale(X_train)
# X_pred = preprocessing.robust_scale(X_pred)
clf = RandomForestClassifier(**params)
clf.fit(X_train,y_train)
if(save):
from sklearn.externals import joblib
joblib.dump(clf, save_filename+'.pkl')
feat_importances = clf.feature_importances_
print("Feature Importance ")
for i in range(len(features)):
print(str(features[i])+": "+str(feat_importances[i]))
print("\n")
return clf
def rf_class_predict(clf, df_pred, features, prefix):
"""This routine takes a random forest classifier and applies it to data
It is aimed at multi-class classification and used for my pipeline.
Parameters:
clf : RandomForestClassifier
Classifier from sklearn.
df_pred : pandas dataframe
Contains the data to classify
features : list of strings
List of features
prefix : string
            Prefix added to all new columns so that multiple
            classifiers can be used.
Return :
df_pred : pandas dataframe
The dataframe containing the features for prediction (given as argument to the function)
and the classification in the pred_label named column.
"""
X_pred = build_matrix(df_pred, features)
# Standardizing the data
# X_train = preprocessing.robust_scale(X_train)
# X_pred = preprocessing.robust_scale(X_pred)
y_pred = clf.predict(X_pred)
# Predicting the probabilities for the classes
y_prob = clf.predict_proba(X_pred)
df_prob = pd.DataFrame(y_prob)
df_prob.columns = clf.classes_
df_prob.index = df_pred.index #not sure how this works
df_prob['qso_prob'] = df_prob.highz + df_prob.midz + df_prob.lowz + df_prob.vlowz
#df_prob['qso_prob'] = 0
#for i in [x for x in df_prob.columns if "z" in x]:
# df_prob['qso_prob'] = df_prob['qso_prob'] + df_prob[i]
df_prob['pred_class'] = y_pred
#add prefix for the classifier (if using multiple)
df_prob = df_prob.add_prefix(prefix)
df_pred = pd.concat([df_pred, df_prob], axis=1)
del df_prob,X_pred,y_pred,y_prob
gc.collect()
return df_pred
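
# Usage sketch: train a classifier and apply it to new photometry.  Feature names,
# label and hyper-parameters are assumed placeholders; note that rf_class_predict
# expects the quasar classes highz/midz/lowz/vlowz to be present in the label.
def _usage_sketch_rf_classification(df_train, df_new):
    params = {"n_estimators": 200, "random_state": 1, "n_jobs": 4}
    clf = rf_class_create(df_train, features=["ug", "gr", "ri", "iz"],
                          label="mult_class_true", params=params, rand_state=1)
    return rf_class_predict(clf, df_new, features=["ug", "gr", "ri", "iz"],
                            prefix="rf_")
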
def rf_class_example(df_train, df_pred, features, label, params, rand_state, save=False, save_filename=None, display=True):
"""This routine calculates an example of the random forest classification
method. It is aimed at multi-class classification.
It prints the classification report and feature importances and shows the
confusion matrix for all classes.
Parameters:
df : pandas dataframe
The dataframe containing the features and the label for the
regression.
features : list of strings
List of features
label : string
The label for the regression
params : dictionary
List of input parameters for the regression
rand_state : integer
Setting the random state variables to ensure reproducibility
"""
#clf, y_pred, y_prob = rf_class_predict(df_train,df_pred, features, label,
# params, rand_state)
X_train, y_train = build_matrices(df_train, features,label=label)
clf = RandomForestClassifier(**params)
clf.fit(X_train,y_train)
X_pred, y_true = build_matrices(df_pred, features,label=label)
# y_true = y_true.astype('string')
# y_pred = y_pred.astype('string')
y_pred = clf.predict(X_pred)
y_prob = clf.predict_proba(X_pred)
df_prob = pd.DataFrame(y_prob)
df_prob.columns = clf.classes_
df_prob.index = df_pred.index
#df_prob['qso_prob'] = df_prob.highz + df_prob.midz + df_prob.lowz + df_prob.vlowz
df_prob['qso_prob'] = 0
for i in [x for x in df_prob.columns if "z" in x]:
df_prob['qso_prob'] = df_prob['qso_prob'] + df_prob[i]
df_prob['true_class'] = y_true
df_prob['pred_class'] = y_pred
if(save):
from sklearn.externals import joblib
joblib.dump(clf, save_filename+'.pkl')
feat_importances = clf.feature_importances_
print("Classification Report ")
print((classification_report(y_true, y_pred)))
print("\n")
print("Feature Importance ")
for i in range(len(features)):
print(str(features[i])+": "+str(feat_importances[i]))
print("\n")
# Confusion matrix
if (display):
all_class_names = clf.classes_
class_names = ["A", "F", "G", "K", "M", "L", "T", "highz", "midz", "lowz", "vlowz"]
#for name
cnf_matrix = confusion_matrix(y_true, y_pred, labels=None, sample_weight=None)
# ml_an.plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
# title='Confusion matrix, with normalization')
# ml_an.plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=False,
# title='Confusion matrix, without normalization')
print(class_names)
ml_an.my_confusion_matrix(cnf_matrix, classes=class_names)
plt.show()
return y_true, y_pred, df_prob
def rf_reg_grid_search(df,features,label,param_grid,rand_state,scores,name):
"""This routine calculates the random forest regression on a grid of
hyper-parameters for the random forest method to test the best
hyper-parameters. The analysis results of the test will be written out and
saved.
Parameters:
df : pandas dataframe
The dataframe containing the features and the label for the
regression.
features : list of strings
List of features
label : string
The label for the regression
param_grid : dictionary-like structure
Parameter grid of input parameters for the grid search
rand_state : integer
Setting the random state variables to ensure reproducibility
scores : list of strings
Setting the score by which the grid search should be evaluated
name : strings
Setting the name of the output file for the grid search which
contains all information about the grid
"""
X,y = build_matrices(df, features,label)
# Standardizing the data
X = preprocessing.robust_scale(X)
X_train, X_test, y_train, y_test = train_test_split(
X,y, test_size=0.2,random_state=rand_state)
print("Training sample size: ", X_train.shape)
print("Evaluation sample size: ", X_test.shape)
for score in scores:
print(("# Tuning hyper-parameters for %s" % score))
print()
reg = GridSearchCV(RandomForestRegressor(random_state=rand_state), \
param_grid,scoring='%s' % score,cv=5,n_jobs=15, return_train_score=True)
reg.fit(X_train, y_train)
print("Best parameters set found on training set:")
print()
print((reg.best_params_))
print()
print("Grid scores on training set:")
print()
means = reg.cv_results_['mean_test_score']
stds = reg.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, reg.cv_results_['params']):
print(("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params)))
print()
df = pd.DataFrame(reg.cv_results_)
#df.to_hdf('RF_GS_'+name+'_'+score+'.hdf5','data')
print()
print("The model is trained on the full development set (80%).")
print("The scores are computed on the full evaluation set (20%).")
print()
y_true, y_pred = y_test, reg.predict(X_test)
ml_an.evaluate_regression(y_test,y_pred)
pz_an.evaluate_photoz(y_test,y_pred)
print()
def rf_reg_validation_curve(df,features,label,params,val_param,val_range):
"""This routine calculates the validation curve for random forest
regression.
Parameters:
df : pandas dataframe
The dataframe containing the features and the label for the
regression.
features : list of strings
List of features
label : string
The label for the regression
params : dictionary
List of input parameters for the regression
val_param : string
Name of the validation parameter
val_range : array-like
List of parameter values for the validation curve
"""
print("THIS FUNCTION IS DEPRECATED")
X,y = build_matrices(df, features,label)
# Random Forest Regression
reg = RandomForestRegressor(**params)
#Calculate and plot validation curve
pz_an.plot_validation_curve(reg, val_param, val_range, X, y,
ylim=(0.0, 1.1), cv=None, n_jobs=4)
plt.show()
def rf_reg_predict(reg, scaler, df, features, pred_label):
"""This function predicts the regression values for pred_set based on the
features specified in the train_set
Parameters:
reg : trained regressor
The regressor trained on the data
df : pandas dataframe
The dataframe containing the features for prediction
features : list of strings
List of features
pred_label : string
Name of the new label in the df dataframe in which the
predicted values are written
Output:
df : pandas dataframe
The dataframe containing the features for prediction and the
regression values in the pred_label named column.
"""
#df = df.copy()
# Building test and training sample
X = build_matrix(df, features)
# Standardizing the data
X = scaler.transform(X)
#predicting the redshift
redshift = reg.predict(X)
#redshift= pd.DataFrame(redshift)
#redshift.index = df.index #not sure how this works
#df.loc[:,pred_label]=redshift
df[pred_label]=redshift
#df = df.assign(pred_label=redshift)
del X
gc.collect()
return df
def rf_create_reg(df,features,label,params,rand_state,save=False,save_filename=None):
"""This routinecreates the random forest regression tuned
to photometric redshift estimation. This is the method used by the pipeling to
create the regressor
Parameters:
df : pandas dataframe
The dataframe containing the features and the label for the
regression.
features : list of strings
List of features
label : string
The label for the regression
params : dictionary
List of input parameters for the regression
rand_state : integer
Setting the random state variables to ensure reproducibility
save : Boolean
specifies if the result should be saved
save_filename : string
The Filename as which the regressor should be saved
The scaler is also saved, with the appendix _scaler
"""
# Building test and training sample
X_train,y_train = build_matrices(df, features, label)
# Standardizing the data
scaler = RobustScaler().fit(X_train)
X_train = scaler.transform(X_train)
# Save scale
if(save):
if save_filename:
from sklearn.externals import joblib
joblib.dump(scaler, save_filename+'_scaler.pkl')
else:
print("Error: No Filename supplied!")
#X_train, X_test, y_train, y_test = train_test_split(
# X,y, test_size=0.2, random_state=rand_state)
# Random Forest Regression
reg = RandomForestRegressor(**params)
reg.fit(X_train,y_train)
#y_pred = reg.predict(X_test)
# Save regressor
if(save):
if save_filename:
from sklearn.externals import joblib
joblib.dump(reg, save_filename+'.pkl')
else:
print("Error: No Filename supplied!")
feat_importances = reg.feature_importances_
# Evaluate regression method
print("Feature Importances ")
for i in range(len(features)):
print(str(features[i])+": "+str(feat_importances[i]))
print("\n")
def rf_reg_example(df,features,label,params,rand_state,save=False,save_filename=None, display=True):
"""This routine calculates an example of the random forest regression tuned
to photometric redshift estimation. The results will be analyzed with the
    analysis routines/functions provided in ml_eval.py and photoz_analysis.py
Parameters:
df : pandas dataframe
The dataframe containing the features and the label for the
regression.
features : list of strings
List of features
label : string
The label for the regression
params : dictionary
List of input parameters for the regression
rand_state : integer
Setting the random state variables to ensure reproducibility
returns the fitted regressor
"""
# Building test and training sample
X,y = build_matrices(df, features, label)
# Standardizing the data
scaler = RobustScaler().fit(X)
X = scaler.transform(X)
# Save scale
if(save):
if save_filename:
from sklearn.externals import joblib
joblib.dump(scaler, save_filename+'_scaler.pkl')
else:
print("Error: No Filename supplied!")
X_train, X_test, y_train, y_test = train_test_split(
X,y, test_size=0.2, random_state=rand_state)
# Leftover from trying out weights
# w_train = X_train[:,-1]
# X_train = X_train[:,:-1]
# w_test = X_test[:,-1]
# X_test = X_test[:,:-1]
# Random Forest Regression
reg = RandomForestRegressor(**params)
reg.fit(X_train,y_train)
y_pred = reg.predict(X_test)
feat_importances = reg.feature_importances_
# Save regressor
if(save):
if save_filename:
from sklearn.externals import joblib
joblib.dump(reg, save_filename+'.pkl')
else:
print("Error: No Filename supplied!")
# Evaluate regression method
print("Feature Importances ")
for i in range(len(features)):
print(str(features[i])+": "+str(feat_importances[i]))
print("\n")
if(display):
ml_an.evaluate_regression(y_test,y_pred)
pz_an.plot_redshifts(y_test,y_pred)
pz_an.plot_error_hist(y_test,y_pred)
plt.show()
pz_an.plot_error_hist(y_test[np.where(y_test > 4.7)],y_pred[np.where(y_test > 4.7)])
plt.title('error histogram only for quasars with z bigger 4.7')
plt.show()
pz_an.plot_error_hist(y_test[np.where(y_test > 5.4)],y_pred[np.where(y_test > 5.4)])
plt.title('error histogram only for quasars with z bigger 5.4')
plt.show()
return reg, scaler
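
# Usage sketch: fit the photo-z regressor and apply it to unlabelled sources.
# Feature names, label and parameters below are illustrative assumptions.
def _usage_sketch_rf_regression(df_train, df_new):
    params = {"n_estimators": 300, "random_state": 1, "n_jobs": 4}
    reg, scaler = rf_reg_example(df_train, features=["ug", "gr", "ri", "iz"],
                                 label="z", params=params, rand_state=1,
                                 display=False)
    return rf_reg_predict(reg, scaler, df_new,
                          features=["ug", "gr", "ri", "iz"], pred_label="z_pred")
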
def make_train_pred_set(df_stars, df_qsos, test_ratio ,rand_state,
save_prefix = 'default', concat=True, save = False):
""" This routine combines the already labelled quasar and star flurx ratio
catalogs and creates a training and test set from them with the
train_test_split function of scikit-learn.
Parameters:
        df_stars : pandas dataframe
Star flux ratio catalog
df_qsos : pandas dataframe
Quasar flux ratio catalog
test_ratio : float
Ratio of the test set with respect to the total combined catalogs.
The value ranges between 0.0 and 1.0.
rand_state: integer
Integer that sets the random state variable for reproducibility
save : boolean
Boolean to select whether the test and training sets are saved
concat : boolean
            Boolean to select whether the samples are returned concatenated or
            separately.
Returns:
df_train : pandas dataframe
The new combined training set
df_test : pandas dataframe
The new combined test set
"""
stars_train, stars_test = train_test_split(df_stars, test_size=test_ratio,
random_state=rand_state)
qsos_train, qsos_test = train_test_split(df_qsos, test_size=test_ratio,
random_state=rand_state)
df_train =
|
pd.concat([stars_train,qsos_train], sort=False)
|
pandas.concat
|
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# util.py
#
# Internal utility functions for reading ibm_cloud outputs
# not exposed in the public API.
import warnings
import pandas as pd
import pyarrow as pa
from text_extensions_for_pandas import SpanArray
def schema_to_names(schema):
return [col for col, t in schema]
def apply_schema(df, schema, std_schema_on):
# TODO: Apply the dtype information in schema, not just the names
# TODO: Document what this mysterious "std_schema_on" argument does.
columns = [n for n in schema_to_names(schema) if std_schema_on or n in df.columns]
return df.reindex(columns=columns)
def find_column(table, column_endswith):
for name in table.column_names:
if name.lower().endswith(column_endswith):
return table.column(name), name
raise ValueError("Expected {} column but got {}".format(column_endswith, table.column_names))
def flatten_struct(struct_array, parent_name=None):
arrays = struct_array.flatten()
fields = [f for f in struct_array.type]
for array, field in zip(arrays, fields):
name = field.name if parent_name is None else parent_name + "." + field.name
if pa.types.is_struct(array.type):
for child_array, child_name in flatten_struct(array, name):
yield child_array, child_name
elif pa.types.is_list(array.type) and pa.types.is_struct(array.type.value_type):
struct = array.flatten()
for child_array, child_name in flatten_struct(struct, name):
list_array = pa.ListArray.from_arrays(array.offsets, child_array)
yield list_array, child_name
else:
yield array, name
def make_table(records):
arr = pa.array(records)
assert pa.types.is_struct(arr.type)
arrays, names = zip(*flatten_struct(arr))
return pa.Table.from_arrays(arrays, names)
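
# Toy illustration (records invented for this sketch, not real ibm_cloud output):
# nested structs are flattened into dotted column names by flatten_struct/make_table.
def _usage_sketch_make_table():
    records = [{"score": 0.9, "span": {"begin": 0, "end": 4}}]
    tbl = make_table(records)
    # tbl.column_names -> ["score", "span.begin", "span.end"]
    return tbl
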
def make_dataframe(records):
if len(records) == 0:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Copyright [2020] [<NAME> (<EMAIL>)]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---
Main library file for CredPy package (https://github.com/seslak/CredPy)
Created on Fri Feb 7 00:29:32 2020
@author: <NAME>
"""
# These functions have to be on top of module due to pickling for multiprocessing support. Do not move them within class functions.
from support_functions import weight
from support_functions import crappend
from ratios import getratio
from scoring import scores
#
class company:
"""
    Companies can have balance positions for the current period and for previous periods. In this version of CredPy only three consecutive periods are supported.
    The first dataset in the input data is considered the current one, while each following dataset is considered one period older.
"""
def __init__(self, *args):
"""
Building balance sheet positions and profit and loss
        The initial dataset forwarded to the class is appended to the corresponding
        balance sheet positions. Data in the dataset has to be arranged per column in
        the following order (in brackets are the names of the columns in the class'
        dataset, this is important for later):
- Balance sheet
Cash and Cash equivalents [cash]
Receivables [receivables]
Inventory [inventory]
Other short-term assets [otherstassets]
Equipment [equipment]
Buildings and machinery [buildings]
Land [land]
Other long-term assets [otherltassets]
            Deferred Taxes [defferedtax]
Loss above equity level [lossaboveq]
Payables [payables]
            Short-term loans [stloans]
Long-term loans part maturing within a year [ltloansyear]
Other short-term obligations [otherstobl]
Long-term loans [ltloans]
Other long-term obligations [otherltobl]
Paid in capital [paidincap]
Retained earnings [retainedear]
Other capital [othcap]
- Profit and loss
Total revenues [revenues]
Costs of goods sold [cogs]
General and administration costs [gna]
Total salaries [salaries]
Amortization [amortization]
Other operating expenses [othopexp]
            Interest expenses [interest]
Other revenues [othrev]
Other expenses [othexp]
Taxes [taxes]
Other P&L Changes [othchg]
"""
self.dataset = [None] * len(args)
import os
if os.name == 'nt':
"""
            System check and branching for Windows-based systems. Multiprocessing in CredPy has not been properly implemented and tested on Windows machines,
            so on Windows the function falls back to a single-process algorithm, which may hurt performance dramatically
            on large datasets.
"""
for c in range(len(args[:])):
self.dataset[c] = crappend(args[c])
else:
"""
Multiprocessing for other systems beside Windows.
"""
self.dataset = []
import concurrent.futures
with concurrent.futures.ProcessPoolExecutor() as executor:
self.results = [executor.submit(crappend, args[c]) for c in range(len(args[:]))]
for f in concurrent.futures.as_completed(self.results):
self.dataset.append(f.result())
    def position(self, pos, n=0): # Getting a specific balance sheet position
"""
        The position method retrieves any accounting position from a company's statements,
        either for a single company or for all companies in the dataset.
Example:
x.position('equity')[3]
            will return the equity position for the fourth company in the dataset;
            removing the index at the end will retrieve the
            entire 'equity' column.
List of available positions:
+ Previously stated balance sheet and P&L positions
Total long-term assets [tlta]
Total short-term assets [tsta]
Total assets [ta]
Total short-term obligations [tso]
Equity [equity]
Total costs [totalcosts]
EBITDAR [ebitdar]
EBITDA [ebitda]
EBIT [ebit]
EBT [ebt]
Net Income [netincome]
"""
return self.dataset[n][pos]
# Weights function
def weights(self, *args):
"""
        The weights function calculates weights over the dataset.
        It needs a target position (any single column of the dataset, with a threshold value)
        and one or more weighted positions (any columns of the dataset).
Example:
x.weights('inventory', 80000, 'equity', 'ta', 'cash')
            Retrieves weights for equity, total assets, and cash for companies whose
            inventory is over 80,000.
"""
import os
if os.name == 'nt':
"""
Single processing for Windows systems.
"""
result = [None] * len(self.dataset)
self.modeldataset = [None] * len(self.dataset)
for c in range(len(self.dataset)):
self.modeldataset[c] = self.dataset[c].loc[(self.dataset[c][args[0]] > args[1]), args[2:]].reset_index(drop=True)
x = [None] * len(args[2:])
for i in args[2:]:
x[args.index(i)-2] = weight(args, i, self.dataset[c], self.modeldataset[c])
result[c] = x
return result
else:
"""
Multiprocessing for other systems beside Windows.
"""
import concurrent.futures
result = [None] * len(self.dataset)
self.modeldataset = [None] * len(self.dataset)
x = []
for c in range(len(self.dataset)):
self.modeldataset[c] = self.dataset[c].loc[(self.dataset[c][args[0]] > args[1]), args[2:]].reset_index(drop=True)
with concurrent.futures.ProcessPoolExecutor() as executor:
self.results = [executor.submit(weight, args, i, self.dataset[c], self.modeldataset[c]) for i in args[2:]]
for f in concurrent.futures.as_completed(self.results):
x.append(f.result())
result = x
return result
# Ratios function
def ratio(self, ratiotype, n='all', days=365):
"""
Ratios function is used for applying ratio calculations on the appended dataset
Function is called from separated file ratios.py
Example:
x.ratio("dayssales", days=360)
List of available ratios:
Current ratio [current]
Quick ratio [quick]
Cash ratio [cashr]
Net-working capital [nwr]
Cash to total assets ratio [cashta]
Sales to receivables (or turnover ratio) [salestor]
Days sales outstanding [dayssales] {'days' is optional variable which can be defined, default is 365}
Cost of sales [costsales]
Cash turnover [ctr]
Debt to equity ratio [debtequ]
Debt ratio [debt]
Fixed-assets to net-worth [fatonw]
Interest coverage [ebitint]
Retained earnings ratio compared to equity [earnings]
Equity ratio [equityr]
Inventory turnover [invtr]
Inventory holding period [invhp]
Inventory to assets ratio [invta]
Accounts receivable turnover [acctr]
Accounts receivable collection period [acccp]
Days payable outstanding [dpo]
"""
self.ratiotype = ratiotype
self.days = days
import os
if os.name == 'nt':
"""
Single processing for Windows systems.
"""
x = [None] * len(self.dataset)
for c in range(len(self.dataset)):
x[c] = getratio(self.dataset[c], self.ratiotype, c, self.days)
if n == 'all':
return x
else:
return x[n]
else:
"""
Multiprocessing for other systems beside Windows.
"""
import concurrent.futures
result = []
with concurrent.futures.ProcessPoolExecutor() as executor:
self.results = [executor.submit(getratio, self.dataset[c], self.ratiotype, c, self.days) for c in range(len(self.dataset[:]))]
for f in concurrent.futures.as_completed(self.results):
result.append(f.result())
if n == 'all':
return result
else:
return result[n]
# Scoring function
"""
    The score method applies scoring models to the appended dataset.
    It is implemented in the separate file scoring.py.
Example:
x.score("altman", "revised")
You can also edit weights in scoring models. Example:
x.score("fulmer", 1.1, 2.2, 0.5, 0.9, 5, 2.8)
List of available scoring models:
Altman's z-score [altman]
Original (default model if not defined) [altman, original]
Updated [altman, updated]
Revised [altman, revised]
Taffler's and Tisshaw's [altman, tntmodel]
Non-manufacturing [altman, non-man]
Emerging markets [emerging]
Bathory model [bathory]
Springate model [springate]
Zmijewski model [zmijewski]
Kralicek DF indicator [kralicek]
Grover model [grover]
Fulmer model [fulmer]
"""
def score(self, model, modeltype="original", n='all', **kwargs):
import os
if os.name == 'nt':
"""
Single processing for Windows systems.
"""
x = [None] * len(self.dataset)
for c in range(len(self.dataset)): # This should be optimized to run only for the requested time series
x[c] = scores(self.dataset[c], model, modeltype, kwargs)
if n == 'all':
return x
else:
return x[n]
else:
"""
Multiprocessing for other systems beside Windows.
"""
import concurrent.futures
result = []
with concurrent.futures.ProcessPoolExecutor() as executor:
self.results = [executor.submit(scores, self.dataset[c], model, modeltype, kwargs) for c in range(len(self.dataset[:]))] # Same here, can be optimized
for f in concurrent.futures.as_completed(self.results):
result.append(f.result())
if n == 'all':
return result
else:
return result[n]
# Machine learning framework
"""
    Machine learning methods are meant to make it easy to apply popular statistical libraries and frameworks to financial datasets.
    This is mainly because CredPy uses a somewhat specific data structure, so these wrappers make the work easier for the end user.
    Recently added; they should be rewritten soon as separate functions and with more comments.
"""
def ml(self, model, xtestset, setnumber, targetvalue, *args, **kwargs):
# Linear regression
if (model[0] == "linreg"):
import pandas as pd
self.xtestset = crappend(xtestset)
X_train = []
X_test = []
y_train = self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(targetvalue)].values
for c in range(len(args)):
X_train.append(self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(args[c])].values)
X_test.append(self.xtestset.iloc[:, self.xtestset.columns.get_loc(args[c])].values)
X_train = pd.DataFrame(X_train)
X_train = X_train.transpose()
y_train = pd.DataFrame(y_train)
X_test = pd.DataFrame(X_test)
X_test = X_test.transpose()
if 'fit_intercept' not in kwargs:
fit_intercept = True;
else:
fit_intercept = kwargs['fit_intercept']
if 'normalize' not in kwargs:
normalize = False;
else:
normalize = kwargs['normalize']
if 'copy_X' not in kwargs:
copy_X = True;
else:
copy_X = kwargs['copy_X']
if 'n_jobs' not in kwargs:
n_jobs = None;
else:
n_jobs = kwargs['n_jobs']
from sklearn.linear_model import LinearRegression
regressor = LinearRegression(fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, n_jobs=n_jobs)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
return y_pred
# Polynomial regression
if (model[0] == "polyreg"):
import pandas as pd
self.xtestset = crappend(xtestset)
X_train = []
X_test = []
y_train = self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(targetvalue)].values
for c in range(len(args)):
X_train.append(self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(args[c])].values)
X_test.append(self.xtestset.iloc[:, self.xtestset.columns.get_loc(args[c])].values)
X_train = pd.DataFrame(X_train)
X_train = X_train.transpose()
y_train = pd.DataFrame(y_train)
X_test = pd.DataFrame(X_test)
X_test = X_test.transpose()
if 'degree' not in kwargs:
degree = 2;
else:
degree = kwargs['degree']
if 'interaction_only' not in kwargs:
interaction_only = False;
else:
interaction_only = kwargs['interaction_only']
if 'include_bias' not in kwargs:
include_bias = True;
else:
include_bias = kwargs['include_bias']
if 'order' not in kwargs:
order = 'C';
else:
order = kwargs['order']
if 'fit_intercept' not in kwargs:
fit_intercept = True;
else:
fit_intercept = kwargs['fit_intercept']
if 'normalize' not in kwargs:
normalize = False;
else:
normalize = kwargs['normalize']
if 'copy_X' not in kwargs:
copy_X = True;
else:
copy_X = kwargs['copy_X']
if 'n_jobs' not in kwargs:
n_jobs = None;
else:
n_jobs = kwargs['n_jobs']
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
poly_reg = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=include_bias, order=order)
X_train = poly_reg.fit_transform(X_train)
regressor = LinearRegression(fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, n_jobs=n_jobs)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(poly_reg.fit_transform(X_test))
return y_pred
# Support Vector Regression (SVR)
if (model[0] == "SVR"):
import pandas as pd
self.xtestset = crappend(xtestset)
X_train = []
X_test = []
y_train = self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(targetvalue)].values
for c in range(len(args)):
X_train.append(self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(args[c])].values)
X_test.append(self.xtestset.iloc[:, self.xtestset.columns.get_loc(args[c])].values)
X_train = pd.DataFrame(X_train)
X_train = X_train.transpose()
y_train = pd.DataFrame(y_train)
X_test = pd.DataFrame(X_test)
X_test = X_test.transpose()
if 'kernel' not in kwargs:
kernel = 'rbf';
else:
kernel = kwargs['kernel']
if 'degree' not in kwargs:
degree = 3;
else:
degree = kwargs['degree']
if 'gamma' not in kwargs:
gamma = 'scale';
else:
gamma = kwargs['gamma']
if 'coef0' not in kwargs:
coef0 = 0.0;
else:
coef0 = kwargs['coef0']
if 'tol' not in kwargs:
tol = 0.001;
else:
tol = kwargs['tol']
if 'C' not in kwargs:
C = 1.0;
else:
C = kwargs['C']
if 'epsilon' not in kwargs:
epsilon = 0.1;
else:
epsilon = kwargs['epsilon']
            #### STOPPED HERE
if 'shrinking' not in kwargs:
shrinking = True;
else:
shrinking = kwargs['shrinking']
if 'cache_size' not in kwargs:
cache_size = 200;
else:
cache_size = kwargs['cache_size']
if 'verbose' not in kwargs:
verbose = False;
else:
verbose = kwargs['verbose']
if 'max_iter' not in kwargs:
max_iter = -1;
else:
max_iter = kwargs['max_iter']
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X_train)
y = sc_y.fit_transform(y_train)
from sklearn.svm import SVR
regressor = SVR(kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=C, epsilon=epsilon, shrinking=shrinking, cache_size=cache_size, verbose=verbose, max_iter=max_iter)
regressor.fit(X, y)
y_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(X_test)))
return y_pred
# Decision Tree Regression
if (model[0] == "decision_tree_reg"):
import pandas as pd
self.xtestset = crappend(xtestset)
X_train = []
X_test = []
y_train = self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(targetvalue)].values
for c in range(len(args)):
X_train.append(self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(args[c])].values)
X_test.append(self.xtestset.iloc[:, self.xtestset.columns.get_loc(args[c])].values)
X_train = pd.DataFrame(X_train)
X_train = X_train.transpose()
y_train = pd.DataFrame(y_train)
X_test = pd.DataFrame(X_test)
X_test = X_test.transpose()
if 'criterion' not in kwargs:
criterion = 'mse';
else:
criterion = kwargs['criterion']
if 'splitter' not in kwargs:
splitter = 'best';
else:
splitter = kwargs['splitter']
if 'max_depth' not in kwargs:
max_depth = None;
else:
max_depth = kwargs['max_depth']
if 'min_samples_split' not in kwargs:
min_samples_split = 2;
else:
min_samples_split = kwargs['min_samples_split']
if 'min_samples_leaf' not in kwargs:
min_samples_leaf = 1;
else:
min_samples_leaf = kwargs['min_samples_leaf']
if 'min_weight_fraction_leaf' not in kwargs:
min_weight_fraction_leaf = 0.0;
else:
min_weight_fraction_leaf = kwargs['min_weight_fraction_leaf']
if 'max_features' not in kwargs:
max_features = None;
else:
max_features = kwargs['max_features']
if 'random_state' not in kwargs:
random_state = None;
else:
random_state = kwargs['random_state']
if 'max_leaf_nodes' not in kwargs:
max_leaf_nodes = None;
else:
max_leaf_nodes = kwargs['max_leaf_nodes']
if 'min_impurity_decrease' not in kwargs:
min_impurity_decrease = 0.0;
else:
min_impurity_decrease = kwargs['min_impurity_decrease']
if 'min_impurity_split' not in kwargs:
min_impurity_split = 0;
else:
min_impurity_split = kwargs['min_impurity_split']
if 'ccp_alpha' not in kwargs:
ccp_alpha = 0.0;
else:
ccp_alpha = kwargs['ccp_alpha']
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, random_state=random_state, max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, ccp_alpha=ccp_alpha)
regressor.fit(X_train, y_train)
# Predicting a new result
y_pred = regressor.predict(X_test)
return y_pred
# Random Forest Regression
if (model[0] == "random_forest_reg"):
import pandas as pd
self.xtestset = crappend(xtestset)
X_train = []
X_test = []
y_train = self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(targetvalue)].values
for c in range(len(args)):
X_train.append(self.dataset[setnumber].iloc[:, self.dataset[setnumber].columns.get_loc(args[c])].values)
X_test.append(self.xtestset.iloc[:, self.xtestset.columns.get_loc(args[c])].values)
X_train = pd.DataFrame(X_train)
X_train = X_train.transpose()
y_train = pd.DataFrame(y_train)
X_test =
|
pd.DataFrame(X_test)
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH#24128
df = DataFrame(
[1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = DataFrame(
[0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(1, fill_value=0)
tm.assert_frame_equal(result, exp)
exp = DataFrame(
[0, 0, 1, 2, 3], index=
|
date_range("1/1/2000", periods=5, freq="H")
|
pandas.date_range
|
import numpy as np
import pandas as pd
import pytest
from estimagic.parameters.tree_registry import get_registry
from pandas.testing import assert_frame_equal
from pybaum import leaf_names
from pybaum import tree_flatten
from pybaum import tree_unflatten
@pytest.fixture
def value_df():
df = pd.DataFrame(
np.arange(6).reshape(3, 2),
columns=["a", "value"],
index=["alpha", "beta", "gamma"],
)
return df
@pytest.fixture
def other_df():
df =
|
pd.DataFrame(index=["alpha", "beta", "gamma"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
DataSets
=================================
DataSet from `The role of heterogeneity in contact timing and duration in network models of influenza spread in schools`.
Source: http://rsif.royalsocietypublishing.org/content/12/108/20150279
"""
# <NAME> <<EMAIL>>
# All rights reserved.
# MIT license.
import inspect
import os
import pandas as pd
import datetime
class base_dataset(object):
""" Base Dataset class. Handles attributes and methods used for the Salanthé dataset."""
BASE_URL = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
def get_contact_sequence(self):
"""Returns a DataFrame with the Contact Sequence
Returns: pd.DataFrame
"""
return self.dfC
def get_metadata(self):
"""Returns a DataFrame with the dataset Metadata, if available.
Returns: pd.DataFrame
"""
return self.dfM
def __str__(self):
return "<Dataset(name='%s', size='%d')>" % (self.name, len(self.dfC))
class elementary_school(base_dataset):
""" Salanthé High School Dataset
Args:
date (string): Either date '2013-01-31' (day 1) or '2013-02-01 (day 2) of measured contacts.
distance (int): Either None (no distance qualification) or 1 (one-meter contact data)
Note:
The value of 19.53125 comes from the paper methods.
"""
name = 'Toth Elementary School'
def __init__(self, date, distance=None):
columns = ['i', 'j', 'created_time', 'length']
if date == 'all':
dates = ['2013-01-31', '2013-02-01']
else:
dates = [date]
if distance is None:
if date == '2013-01-31':
contact_file_names = ['D3 - Elem1 day 1 contact data.txt']
elif date == '2013-02-01':
contact_file_names = ['D4 - Elem1 day 2 contact data.txt']
elif date == 'all':
contact_file_names = ['D3 - Elem1 day 1 contact data.txt', 'D4 - Elem1 day 2 contact data.txt']
else:
raise ValueError("Date must be either '2013-01-31' or '2013-02-01'")
elif distance == 1:
if date == '2013-01-31':
contact_file_names = ['D9 - Elem1 day 1 one-meter contact data.txt']
elif date == '2013-02-01':
contact_file_names = ['D10 - Elem1 day 2 one-meter contact data.txt']
elif date == 'all':
contact_file_names = ['D9 - Elem1 day 1 one-meter contact data.txt', 'D10 - Elem1 day 2 one-meter contact data.txt']
else:
raise ValueError("Date must be either '2013-01-31' or '2013-02-01'")
else:
raise ValueError("Distance must be either None or 1 (one-meter)")
dfCs = []
for date, contact_file_name in zip(dates, contact_file_names):
dfC = pd.read_csv(self.BASE_URL + '/elementary-school/' + contact_file_name, sep=' ', header=0, names=columns, encoding='utf-8')
            # Records have a contact length; expand them into one record per 20-second contact interval
records = dfC.to_records(index=False)
expanded = [(i, j, created_time + x) for i, j, created_time, length in records for x in range(length)]
dfC = pd.DataFrame.from_records(expanded, columns=['i', 'j', 'created_time'])
# the timedelta between contacts
timedelta = pd.to_timedelta(dfC['created_time'] * 19.53125, unit='s')
dfC['created_time_fmt'] = pd.to_datetime(datetime.datetime.strptime(date, '%Y-%m-%d') + timedelta).dt.round('S')
dfCs.append(dfC)
self.dfC = pd.concat(dfCs, axis=0)
# Metadata
self.dfM = pd.read_csv(self.BASE_URL + '/elementary-school/D6 - Elem1 student data.txt', sep=' ', header=0, index_col=0, names=['i', 'grade', 'class', 'gender'])
# Update metadata
self.dfM['gender'].replace(to_replace={0: 'Male', 1: 'Female'}, inplace=True)
self.dfM['grade'].replace(to_replace={0: 'Kindergarden', -1: 'Unknown'}, inplace=True)
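
# Usage sketch: load one day of one-meter contact data and inspect the expanded
# 20-second contact records (the date and distance values are just the documented options).
def _usage_sketch_elementary_school():
    ds = elementary_school(date='2013-01-31', distance=1)
    return ds.get_contact_sequence().head(), ds.get_metadata().head()
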
class middle_school(base_dataset):
""" Salanthé Middle School Dataset
Args:
date (string): Either date '2012-11-28' (day 1) or '2012-11-29 (day 2) of measured contacts.
distance (int): Either None (no distance qualification) or 1 (one-meter contact data)
Note:
The value of 19.53125 comes from the paper methods.
"""
    name = 'Toth Middle School'
def __init__(self, date, distance=None):
columns = ['i', 'j', 'created_time', 'length']
if date == 'all':
dates = ['2012-11-28', '2012-11-29']
else:
dates = [date]
if distance is None:
if date == '2012-11-28':
contact_file_names = ['D1 - Mid1 day 1 contact data.txt']
elif date == '2012-11-29':
contact_file_names = ['D2 - Mid1 day 2 contact data.txt']
elif date == 'all':
contact_file_names = ['D2 - Mid1 day 2 contact data.txt', 'D1 - Mid1 day 1 contact data.txt']
else:
raise ValueError("Date must be either '2012-11-28' or '2012-11-29'")
elif distance == 1:
if date == '2012-11-28':
contact_file_names = ['D7 - Mid1 day 1 one-meter contact data.txt']
elif date == '2012-11-29':
contact_file_names = ['D8 - Mid1 day 2 one-meter contact data.txt']
elif date == 'all':
contact_file_names = ['D7 - Mid1 day 1 one-meter contact data.txt', 'D8 - Mid1 day 2 one-meter contact data.txt']
else:
raise ValueError("Date must be either '2012-11-28' or '2012-11-29'")
else:
raise ValueError("Distance must be either None or 1 (one-meter)")
dfCs = []
for date, contact_file_name in zip(dates, contact_file_names):
dfC =
|
pd.read_csv(self.BASE_URL + '/middle-school/' + contact_file_name, sep=' ', header=0, names=columns, encoding='utf-8')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import math
from statistics import mean
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
from metrics_ import PPTS,mean_absolute_percentage_error
def read_two_stage(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
test_pred=test_pred.reset_index(drop=True)
predictions = pd.concat([predictions,test_pred],axis=1)
predictions = predictions.mean(axis=1)
records = records.values.flatten()
predictions = predictions.values.flatten()
r2=r2_score(y_true=records,y_pred=predictions)
nrmse=math.sqrt(mean_squared_error(y_true=records,y_pred=predictions))/(sum(records)/len(predictions))
mae=mean_absolute_error(y_true=records,y_pred=predictions)
mape=mean_absolute_percentage_error(y_true=records,y_pred=predictions)
ppts=PPTS(y_true=records,y_pred=predictions,gamma=5)
time_cost=mean(time_cost)
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
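
# Usage sketch: the station, decomposer and prediction-pattern strings below are
# assumed placeholders and must match the project's actual folder layout.
def _usage_sketch_read_two_stage():
    records, preds, r2, nrmse, mae, mape, ppts, cost = read_two_stage(
        station="Huaxian", decomposer="vmd",
        predict_pattern="one_step_1_month_forecast")
    print(r2, nrmse, mae, mape, ppts, cost)
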
def read_two_stage_traindev_test(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
test_predss = pd.DataFrame()
dev_predss = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
test_y = data['test_y'][0:120]
dev_y = data['dev_y'][0:120]
dev_pred=data['dev_pred'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
dev_pred=dev_pred.reset_index(drop=True)
test_pred=test_pred.reset_index(drop=True)
test_predss = pd.concat([test_predss,test_pred],axis=1)
dev_predss = pd.concat([dev_predss,dev_pred],axis=1)
test_predss = test_predss.mean(axis=1)
dev_predss = dev_predss.mean(axis=1)
test_y = test_y.values.flatten()
dev_y = dev_y.values.flatten()
test_predss = test_predss.values.flatten()
dev_predss = dev_predss.values.flatten()
test_nse=r2_score(y_true=test_y,y_pred=test_predss)
test_nrmse=math.sqrt(mean_squared_error(y_true=test_y,y_pred=test_predss))/(sum(test_y)/len(test_predss))
test_mae=mean_absolute_error(y_true=test_y,y_pred=test_predss)
test_mape=mean_absolute_percentage_error(y_true=test_y,y_pred=test_predss)
test_ppts=PPTS(y_true=test_y,y_pred=test_predss,gamma=5)
dev_nse=r2_score(y_true=dev_y,y_pred=dev_predss)
dev_nrmse=math.sqrt(mean_squared_error(y_true=dev_y,y_pred=dev_predss))/(sum(dev_y)/len(dev_predss))
dev_mae=mean_absolute_error(y_true=dev_y,y_pred=dev_predss)
dev_mape=mean_absolute_percentage_error(y_true=dev_y,y_pred=dev_predss)
dev_ppts=PPTS(y_true=dev_y,y_pred=dev_predss,gamma=5)
metrics_dict={
"dev_nse":dev_nse,
"dev_nrmse":dev_nrmse,
"dev_mae":dev_mae,
"dev_mape":dev_mape,
"dev_ppts":dev_ppts,
"test_nse":test_nse,
"test_nrmse":test_nrmse,
"test_mae":test_mae,
"test_mape":test_mape,
"test_ppts":test_ppts,
"time_cost":time_cost,
}
time_cost=mean(time_cost)
return dev_y,dev_predss,test_y,test_predss,metrics_dict
def read_two_stage_max(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
r2list=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
r2list.append(data['test_r2'][0])
print("one-month NSE LIST:{}".format(r2list))
max_id = r2list.index(max(r2list))
print("one-month max id:{}".format(max_id))
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(max_id+1)+".csv"
data =
|
pd.read_csv(model_path+model_name)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import logging
from abc import abstractmethod
from datetime import datetime
from enum import Enum
from typing import List, Optional, Tuple, Union
import dateutil.parser
import numpy as np
import pandas as pd
import pytz
from measurement.measures import Distance
from measurement.utils import guess
from rapidfuzz import fuzz, process
from wetterdienst.core.core import Core
from wetterdienst.core.scalar.result import StationsResult
from wetterdienst.exceptions import InvalidEnumeration, StartDateEndDateError
from wetterdienst.metadata.columns import Columns
from wetterdienst.metadata.datarange import DataRange
from wetterdienst.metadata.kind import Kind
from wetterdienst.metadata.period import Period, PeriodType
from wetterdienst.metadata.provider import Provider
from wetterdienst.metadata.resolution import Frequency, Resolution, ResolutionType
from wetterdienst.util.enumeration import parse_enumeration_from_template
from wetterdienst.util.geo import Coordinates, derive_nearest_neighbours
log = logging.getLogger(__name__)
EARTH_RADIUS_KM = 6371
class ScalarRequestCore(Core):
""" Core for stations information of a source """
@property
@abstractmethod
def provider(self) -> Provider:
""" Optional enumeration for multiple resolutions """
pass
@property
@abstractmethod
def kind(self) -> Kind:
""" Optional enumeration for multiple resolutions """
pass
@property
@abstractmethod
def _resolution_base(self) -> Optional[Resolution]:
""" Optional enumeration for multiple resolutions """
pass
@property
@abstractmethod
def _resolution_type(self) -> ResolutionType:
""" Resolution type, multi, fixed, ..."""
pass
@property
def frequency(self) -> Frequency:
"""Frequency for the given resolution, used to create a full date range for
mering"""
return Frequency[self.resolution.name]
@property
@abstractmethod
def _period_type(self) -> PeriodType:
""" Period type, fixed, multi, ..."""
pass
@property
@abstractmethod
def _period_base(self) -> Optional[Period]:
""" Period base enumeration from which a period string can be parsed """
pass
@property
# @abstractmethod
def _parameter_base(self) -> Enum:
"""parameter base enumeration from which parameters can be parsed e.g.
DWDObservationParameter"""
if self._has_datasets:
if not self._unique_dataset:
raise NotImplementedError(
"implement _parameter_base enumeration that "
"all parameters of one resolution stored together"
)
return
@property
@abstractmethod
def _data_range(self) -> DataRange:
"""State whether data from this provider is given in fixed data chunks
or has to be defined over start and end date"""
pass
@property
@abstractmethod
def _has_datasets(self) -> bool:
"""Boolean if weather service has datasets (when multiple parameters are stored
in one table/file"""
pass
@property
def _dataset_base(self) -> Optional[Enum]:
""" Dataset base that is used to differ between different datasets """
if self._has_datasets:
raise NotImplementedError(
"implement _dataset_base enumeration that contains available datasets"
)
return
@property
def _dataset_tree(self) -> Optional[object]:
""" Detailed dataset tree with all parameters per dataset """
if self._has_datasets:
raise NotImplementedError(
"implement _dataset_tree class that contains available datasets "
"and their parameters"
)
return None
@property
def _unique_dataset(self) -> bool:
"""If ALL parameters are stored in one dataset e.g. all daily data is stored in
one file"""
if self._has_datasets:
raise NotImplementedError("define if only one big dataset is available")
return False
@property
def _dataset_accessor(self) -> str:
"""Accessor for dataset, by default the resolution is used as we expect
datasets to be divided in resolutions"""
return self.resolution.name
@property
def _parameter_to_dataset_mapping(self) -> dict:
""" Mapping to go from a (flat) parameter to dataset """
if not self._unique_dataset:
raise NotImplementedError(
"for non unique datasets implement a mapping from parameter to dataset"
)
return {}
@property
@abstractmethod
def _unit_tree(self):
pass
@property
def datasets(self):
datasets = self._dataset_tree[self._dataset_accessor].__dict__.keys()
datasets = list(filter(lambda x: x not in ("__module__", "__doc__"), datasets))
return datasets
@property
@abstractmethod
def _values(self):
""" Class to get the values for a request """
pass
# Columns that should be contained within any stations information
_base_columns = (
Columns.STATION_ID.value,
Columns.FROM_DATE.value,
Columns.TO_DATE.value,
Columns.HEIGHT.value,
Columns.LATITUDE.value,
Columns.LONGITUDE.value,
Columns.NAME.value,
Columns.STATE.value,
)
# TODO: eventually this can be matched with the type coercion of station data to get
# similar types of floats and strings
# Dtype mapping for stations
_dtype_mapping = {
Columns.STATION_ID.value: str,
Columns.HEIGHT.value: float,
Columns.LATITUDE.value: float,
Columns.LONGITUDE.value: float,
Columns.NAME.value: str,
Columns.STATE.value: str,
}
def _parse_period(self, period: Period) -> Optional[List[Period]]:
"""
Method to parse period(s)
:param period:
:return:
"""
if not period:
return None
elif self._period_type == PeriodType.FIXED:
return [period]
else:
return (
pd.Series(period)
.apply(
parse_enumeration_from_template, args=(self._period_base, Period)
)
.sort_values()
.tolist()
)
def _parse_parameter(
self, parameter: List[Union[str, Enum]]
) -> List[Tuple[Enum, Enum]]:
"""
Method to parse parameters, either from string or enum. Case independent for
strings.
:param parameter: parameters as strings or enumerations
:return: list of parameter enumerations of type self._parameter_base
"""
# TODO: refactor this!
# for logging
enums = []
if self._dataset_base:
enums.append(self._dataset_base)
enums.append(self._parameter_base)
parameters = []
for parameter in pd.Series(parameter):
# Each parameter can either be
# - a dataset : gets all data from the dataset
# - a parameter : gets prefixed parameter from a resolution e.g.
# precipitation height of daily values is taken from climate summary
# - a tuple of parameter -> dataset : to decide from which dataset
# the parameter is taken
try:
parameter, dataset = pd.Series(parameter)
except (ValueError, TypeError):
parameter, dataset = parameter, parameter
# Prefix return values
parameter_, dataset_ = None, None
# Try to parse dataset
try:
dataset_ = parse_enumeration_from_template(dataset, self._dataset_base)
except InvalidEnumeration:
pass
if parameter == dataset and dataset_:
parameters.append((dataset_, dataset_))
continue
try:
# First parse parameter
parameter_ = parse_enumeration_from_template(
parameter, self._parameter_base[self._dataset_accessor]
)
except (InvalidEnumeration, TypeError):
pass
else:
if self._unique_dataset:
# If unique dataset the dataset is given by the accessor
# and the parameter is not a subset of a dataset
dataset_ = self._dataset_tree[self._dataset_accessor]
elif not dataset_:
# If there's multiple datasets the mapping defines which one
# is taken for the given parameter
dataset_ = self._parameter_to_dataset_mapping[self.resolution][
parameter_
]
if not self._unique_dataset:
# Parameter then has to be taken from the datasets definition
parameter_ = self._dataset_tree[self._dataset_accessor][
dataset_.name
][parameter_.name]
parameters.append((parameter_, dataset_))
if not parameter_:
log.info(f"parameter {parameter} could not be parsed from ({enums})")
return parameters
@staticmethod
def _parse_station_id(series: pd.Series) -> pd.Series:
"""
Dedicated method for parsing station ids, by default uses the same method as
parse_strings but could be modified by the implementation class
:param series:
:return:
"""
return series.astype(str)
def __eq__(self, other) -> bool:
""" Equal method of request object """
return (
self.parameter == other.parameter
and self.resolution == other.resolution
and self.period == other.period
and self.start_date == other.start_date
and self.end_date == other.end_date
and self.humanize == other.humanize
and self.tidy == other.tidy
)
def __init__(
self,
parameter: Tuple[Union[str, Enum]],
resolution: Resolution,
period: Period,
start_date: Optional[Union[str, datetime, pd.Timestamp]] = None,
end_date: Optional[Union[str, datetime, pd.Timestamp]] = None,
humanize: bool = True,
tidy: bool = True,
si_units: bool = True,
) -> None:
"""
:param parameter: requested parameter(s)
:param resolution: requested resolution
:param period: requested period(s)
:param start_date: Start date for filtering stations for their available data
:param end_date: End date for filtering stations for their available data
:param humanize: boolean if parameters should be humanized
:param tidy: boolean if data should be tidied
:param si_units: boolean if values should be converted to si units
"""
super().__init__()
self.resolution = parse_enumeration_from_template(
resolution, self._resolution_base, Resolution
)
self.period = self._parse_period(period)
self.start_date, self.end_date = self.convert_timestamps(start_date, end_date)
self.parameter = self._parse_parameter(parameter)
self.humanize = humanize
tidy = tidy
if self._has_datasets:
tidy = tidy or any(
[
parameter not in self._dataset_base
for parameter, dataset in self.parameter
]
)
self.tidy = tidy
self.si_units = si_units
log.info(
f"Processing request for "
f"provider={self.provider}, "
f"parameter={self.parameter}, "
f"resolution={self.resolution}, "
f"period={self.period}, "
f"start_date={self.start_date}, "
f"end_date={self.end_date}, "
f"humanize={self.humanize}, "
f"tidy={self.tidy}, "
f"si_units={self.si_units}"
)
@staticmethod
def convert_timestamps(
start_date: Optional[Union[str, datetime, pd.Timestamp]] = None,
end_date: Optional[Union[str, datetime, pd.Timestamp]] = None,
) -> Union[Tuple[None, None], Tuple[pd.Timestamp, pd.Timestamp]]:
"""
Sort out start_date vs. end_date, parse strings to datetime
objects and finally convert both to pd.Timestamp types.
:param start_date: Start date for filtering stations for their available data
:param end_date: End date for filtering stations for their available data
:return: pd.Timestamp objects tuple of (start_date, end_date)
"""
if start_date is None and end_date is None:
return None, None
if start_date:
if isinstance(start_date, str):
start_date = dateutil.parser.isoparse(start_date)
if not start_date.tzinfo:
start_date = start_date.replace(tzinfo=pytz.UTC)
if end_date:
if isinstance(end_date, str):
end_date = dateutil.parser.isoparse(end_date)
if not end_date.tzinfo:
end_date = end_date.replace(tzinfo=pytz.UTC)
# If only one date given, set the other one to equal.
if not start_date:
start_date = end_date
if not end_date:
end_date = start_date
# TODO: replace this with a response + logging
if not start_date <= end_date:
raise StartDateEndDateError(
"Error: 'start_date' must be smaller or equal to 'end_date'."
)
return
|
pd.Timestamp(start_date)
|
pandas.Timestamp
|
import os
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Draw import IPythonConsole
from IPython.display import display
IPythonConsole.ipython_useSVG = True
# Define directories
HERE = os.path.abspath(os.path.dirname(__file__))
TGT_DIR = os.path.join(HERE, 'Targets')
targets = pd.read_csv(os.path.join(TGT_DIR, 'full targets', 'targets_MASTER.csv'))
def abc_replace(df: pd.DataFrame, match: str):
base_mol: dict = n[n['pentamer'] == df.at[i, match]].to_dict(orient='records')[0]
df.at[i, 'a'] = base_mol['a']
df.at[i, 'ab'] = base_mol['ab']
df.at[i, 'b'] = base_mol['b']
df.at[i, 'c'] = base_mol['c']
# Define patterns for matching
f_patt = Chem.MolFromSmarts('cF')
b_patt = Chem.MolFromSmiles('c12n(c3nccnc3)ccc1cccc2')
s_patt = Chem.MolFromSmiles('Cn(c1c2cccc1)c3c2cccc3')
# Define substructures for replacement
cf = Chem.MolFromSmiles('CF')
pyr = Chem.MolFromSmiles('Nc1nccnc1')
nh = Chem.MolFromSmiles('N[H]')
nboc = Chem.MolFromSmiles('NC(OC(C)(C)C)=O')
# Define starting materials to add
carbazole = 'c1ccc2c(c1)[nH]c1ccccc12'
BHA_halide = 'Brc1nccnc1'
# Split 'targets' dataframe into different route types by substructure matching
bs = pd.DataFrame()
sb = pd.DataFrame()
s =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
from concurrent.futures import ProcessPoolExecutor
import itertools
import re
import argparse
import yaml
import sys
import copy
import numpy as np
import pandas as pd
from lib.constants import *
from lib.utils import *
TOP_N = 15
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
parser = argparse.ArgumentParser()
parser.add_argument('--config_file','-c',
default="config.yaml",
type=str,
help="Configuration file.")
args = parser.parse_args()
f = open(args.config_file)
config = yaml.load(f,Loader=loader)
to_search = {
'pheromony_policies': {'AntSystem':{"rho": [0.3,0.5,0.7],
"Q": [75, 100, 125]}},
"selection":{"beta": [3,5,7]},
'parameters':{
# "instance_name": ['lau15','sgb128'],
"eid": list(range(1,NUM_EXECUTIONS+1))},
}
# parameters_names=['rho','Q','betas','eid']
keys_to_value, combinations=utils.get_names_combinations(config,to_search)
result_df = pd.DataFrame(columns=
[keys[-1] for keys in keys_to_value])
parameters_names = [i[-1] for i in keys_to_value]
i = 0
for combination in combinations:
for keys, v in zip(keys_to_value,combination):
tmp = config
for k in keys[:-1]:
tmp = tmp[k]
tmp[keys[-1]] = v
result_df.loc[i,keys[-1]] = v
ac = AntColony(pheromony_kwargs=config['pheromony_policies'][config['parameters']['pheromony_policy']],
selection_policy_kwargs=config['selection'],
**config['parameters'])
df = ac.load_results()
result_df.loc[i,parameters_names] = combination
result_df.loc[i,'Best fitness global'] = df.iloc[-1]['Best fitness global']
result_df.loc[i,'Best fitness'] = df.iloc[-1]['Best fitness']
result_df.loc[i,'Mean fitness'] = df.iloc[-1]['Mean fitness']
result_df.loc[i,'Median fitness'] = df.iloc[-1]['Median fitness']
result_df.loc[i,'Worst fitness'] = df.iloc[-1]['Worst fitness']
i += 1
result_df['eid']=pd.to_numeric(result_df['eid'])
# print('Top best fitness')
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(result_df)
|
pd.set_option('display.expand_frame_repr', False)
|
pandas.set_option
|
"""
Check Triplet performance
"""
import glob
import pandas as pd
import numpy as np
import pickle
def read_triplets(seed_candidates):
"""
Read the input seed candidates
"""
if "pickle" in seed_candidates:
if "*" in seed_candidates:
all_files = glob.glob(seed_candidates)
new_data = []
for file_name in all_files:
with open(file_name, 'rb') as f:
data = pickle.load(f)
for dd in data:
new_data.append((dd[0], dd[1], dd[2], dd[3]))
df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)
else:
with open(seed_candidates, 'rb') as f:
data = pickle.load(f)
new_data = []
for dd in data:
new_data.append((dd[0], dd[1], dd[2], dd[3]))
# idx = int(dd[0][10:])
# new_data.append((idx, dd[1], dd[2], dd[3]))
df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)
else:
column_names = ['evtid', 'h1', 'h2', 'h3']
if "*" in seed_candidates:
all_files = glob.glob(seed_candidates)
new_data = []
for file_name in all_files:
df_seed_tmp = pd.read_csv(file_name, header=None, names=column_names,)
new_data.append(df_seed_tmp)
df_seed = pd.concat(new_data)
else:
df_seed = pd.read_csv(seed_candidates, header=None,
names=column_names)
return df_seed
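# read_triplets accepts either pickled lists of (evtid, h1, h2, h3) tuples or plain CSV files
# with the same four columns; a "*" glob pattern loads and concatenates multiple files.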
def evaluate_evt(event, seed_candidates, min_hits=5, layers=None, verbose=False):
hits = event.hits
evtid = hits.evtid.values[0]
all_particles = np.unique(hits.particle_id).shape[0]
all_hits = hits.shape[0]
if verbose:
print("Total particles: {}, with {} hits".format(all_particles, all_hits))
aa = hits.groupby(['particle_id'])['hit_id'].count()
total_particles = aa[aa > min_hits].index
total_particles = total_particles[total_particles != 0]
n_total_particles = total_particles.shape[0]
if verbose:
print("Event {} has {} particles with minimum of {} hits".format(
evtid, n_total_particles, min_hits))
df = seed_candidates
if verbose:
print("Event {} has {} seed candidates".format(evtid, df.shape[0]))
if layers is not None:
## now select the hits in specified layers
hits = hits[hits.layer.isin(layers)]
# particles leaving 3 hits in three layers
bb = hits.groupby(['particle_id'])['layer'].count()
good_particles = bb[(bb > 2) & (aa > min_hits)].index
good_particles = good_particles[good_particles != 0]
if verbose:
print("Event {} has {} particles leaving hits at inner 3 layers".format(
evtid, good_particles.shape[0]))
good_particles_pT = np.unique(hits[hits.particle_id.isin(good_particles) \
& (hits.pt.abs() >= 1)].particle_id)
print("Event {} has {} particles leaving hits at inner 3 layers, with pT > 1 GeV".format(
evtid, good_particles_pT.shape[0]))
df1 = df.merge(hits, left_on='h1', right_on='hit_id', how='left')
df2 = df.merge(hits, left_on='h2', right_on='hit_id', how='left')
df3 = df.merge(hits, left_on='h3', right_on='hit_id', how='left')
p1 = df1.particle_id.values.astype('int64')
p2 = df2.particle_id.values.astype('int64')
p3 = df3.particle_id.values.astype('int64')
n_total_seeds = df.shape[0]
true_seeds_dup = p1[(p1 != 0) & (p1==p2) & (p2==p3)]
n_true_seeds_dup = true_seeds_dup.shape[0]
true_seeds = p1[(p1 != 0) & (p1==p2) & (p2==p3) \
& (df1.layer != df2.layer)\
& (df1.layer != df3.layer) & (df2.layer != df3.layer)]
n_true_seeds = true_seeds.shape[0]
# unique true seeds should be part of good particles
dup_mask = np.isin(true_seeds, good_particles)
unique_true_seeds = np.unique(true_seeds[dup_mask])
n_unique_true_seeds = unique_true_seeds.shape[0]
if verbose:
print("{} particles matched".format(n_unique_true_seeds))
print("Fraction of duplicated seeds: {:.2f}%".format(100 - n_unique_true_seeds*100/n_true_seeds))
print("Purity: {:.2f}%".format(n_true_seeds*100./n_total_seeds))
print("Efficiency: {:.2f}%".format(n_unique_true_seeds*100./good_particles.shape[0]))
summary = {
"evtid": evtid,
'n_hits': hits.shape[0],
'n_particles': good_particles.shape[0],
'n_matched_particles': unique_true_seeds.shape[0],
'n_seeds': n_total_seeds,
'n_true_seeds_dup': true_seeds_dup.shape[0],
'n_true_seeds': true_seeds.shape[0]
}
df_unique_true_seeds =
|
pd.DataFrame(unique_true_seeds, columns=['particle_id'])
|
pandas.DataFrame
|
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other =
|
DataFrame([[45, 45]], index=[0], columns=["A", "B"])
|
pandas.DataFrame
|
"""
Script that prepares data for later usage
"""
from functools import reduce
from logging import FATAL
from operator import add, index
from pathlib import Path
from typing import Callable, Dict, List
from enum import Enum
import bagpy
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi as PI
from numpy.lib.function_base import angle
from pyquaternion import Quaternion
import pandas as pd
from scipy.spatial.transform.rotation import Rotation
import seaborn as sea
from pandas import DataFrame
from scipy.interpolate.interpolate import interp1d
from sklearn import preprocessing
from numba import jit, njit
from helper import (
ETA_EULER_DOFS,
Jq,
NU_DOFS,
TAU_DOFS,
get_eta,
get_nu,
get_tau,
make_df,
profile,
DFKeys,
ORIENTATIONS_EULER,
ORIENTATIONS_QUAT,
POSITIONS,
LINEAR_VELOCITIES,
ANGULAR_VELOCITIES,
ETA_DOFS,
rotation,
)
def recreate_sampling_times(
data: DataFrame,
step_length: float,
start_time: float,
end_time: float,
plot_col=None,
) -> DataFrame:
"""
Function that transforms measurement data with samples taken at any (possibly irregular)
sample rate and outputs the same measurements evenly spaced according to a given step length.
data: dataframe with numeric values that includes a 'Time' column
step_length: desired time between each sample timestep
start_time, end_time: interval of time covered by the measurements in data
plot_col: name of column that should be plotted before and after (for verification purposes)
"""
first_time_in_df = data[DFKeys.TIME.value].iloc[0]
if start_time < first_time_in_df:
raise ValueError("start time cannot precede first time in df")
get_shifted_time = lambda row: row[DFKeys.TIME.value] - start_time
shifted_timestamps = data.apply(get_shifted_time, axis=1).rename(
DFKeys.TIME.value, axis=1
)
duration = end_time - start_time
timesteps = np.arange(0, duration, step_length)
new_columns = [pd.Series(timesteps, name=DFKeys.TIME.value)]
columns_except_time = data.columns.difference(
[
DFKeys.TIME.value,
"child_frame_id",
"header.frame_id",
"header.seq",
"header.stamp.nsecs",
"header.stamp.secs",
"pose.covariance",
"twist.covariance",
"pins_0",
"pins_1",
"pins_2",
"pins_3",
"pins_4",
"pins_5",
"pins_6",
"pins_7",
]
)
for col_name in columns_except_time:
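# Linearly interpolate every remaining column onto the common, evenly spaced time grid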
f = interp1d(shifted_timestamps.values, data[col_name].values)
new_columns.append(pd.Series(f(timesteps), name=col_name))
data_new = pd.concat(new_columns, axis=1)
if plot_col in data.columns:
SAVEDIR = Path("results/interpolation")
sea.set_style("white")
# plt.figure(figsize=(5, 2.5))
sea.lineplot(x=shifted_timestamps.values, y=data[plot_col], label="original")
sea.lineplot(
x=DFKeys.TIME.value, y=plot_col, data=data_new, label="interpolated"
)
# plt.ylabel("Velocity")
# plt.savefig(SAVEDIR.joinpath("%s.pdf" % plot_col))
plt.show()
return data_new
def bag_to_dataframes(bagpath: Path, topics: List[str]) -> Dict[str, DataFrame]:
bag = bagpy.bagreader(str(bagpath))
dataframes = dict()
for topic in topics:
topic_msgs: str = bag.message_by_topic(topic)
dataframes[topic] = pd.read_csv(topic_msgs)
columns_except_time = dataframes[topic].columns.difference([DFKeys.TIME.value])
return dataframes
def bag_to_dataframe(
bagpath: Path, topics: List[str], step_length: float, plot_col=None
) -> DataFrame:
"""Function for converting messages on topics in a bag into a single
dataframe with equal timesteps. Uses 1d interpolation to synchronize
topics.
Args:
bagpath (Path): path to bag file
topics (List[str]): list of topics that should be converted
step_length (float): length between timesteps in the new dataframe
Returns:
DataFrame: dataframe containing the desired topics
"""
# convert bag to dataframes
dataframes = bag_to_dataframes(bagpath, topics)
# find global start and end times
start_times = list()
end_times = list()
for topic in topics:
df = dataframes[topic]
start_times.append(df[DFKeys.TIME.value].iloc[0])
end_times.append(df[DFKeys.TIME.value].iloc[-1])
start_time = max(start_times)
end_time = min(end_times)
# give all dataframes equal timesteps
synchronized_dataframes = []
for topic in topics:
df = recreate_sampling_times(
dataframes[topic], step_length, start_time, end_time, plot_col=plot_col
)
synchronized_dataframes.append(df)
# merge dataframes
df_merged = reduce(
lambda left, right: pd.merge(left, right, on=[DFKeys.TIME.value], how="outer"),
synchronized_dataframes,
)
return df_merged
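# Minimal usage sketch (the bag path and topic names are made up for illustration):
# df = bag_to_dataframe(Path("data/run01.bag"), ["/odometry/ned", "/thruster/forces"], step_length=0.1)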
def transform_to_NED(df: DataFrame) -> DataFrame:
invert_columns = [
DFKeys.FORCE_Y.value,
DFKeys.FORCE_Z.value,
DFKeys.TORQUE_Y.value,
DFKeys.TORQUE_Z.value,
DFKeys.POSITION_Y.value,
DFKeys.POSITION_Z.value,
DFKeys.SWAY.value,
DFKeys.HEAVE.value,
DFKeys.YAW_VEL.value,
DFKeys.ROLL_VEL.value,
]
df[invert_columns] = df[invert_columns].apply(lambda x: x * (-1))
def rotate_quat(row):
w = row.orientation_w
x = row.orientation_x
y = row.orientation_y
z = row.orientation_z
q = Quaternion(w, x, y, z)
q_unit = q.normalised
q_rot = Quaternion(axis=[1.0, 0.0, 0.0], degrees=180)
q_res: Quaternion = q_rot.rotate(q_unit)
return pd.Series(
{
DFKeys.ORIENTATION_W.value: q_res.w,
DFKeys.ORIENTATION_X.value: q_res.x,
DFKeys.ORIENTATION_Y.value: q_res.y,
DFKeys.ORIENTATION_Z.value: q_res.z,
}
)
df[ORIENTATIONS_QUAT] = df[ORIENTATIONS_QUAT].apply(rotate_quat, axis=1)
return df
def remove_orientation_flip(df: DataFrame, print_flips=False) -> DataFrame:
def new_flip(row: pd.Series, prev_row: pd.Series) -> bool:
for i in range(len(row)):
if abs(row[i] - prev_row[i]) > 1:
return True
return False
new_orientations = np.zeros(df[ORIENTATIONS_QUAT].shape)
prev_row = df[ORIENTATIONS_QUAT].loc[0]
flipped = False
num_flips = 0
for item in df[ORIENTATIONS_QUAT].iterrows():
i = item[0]
row = item[1]
if new_flip(row, prev_row):
flipped = not flipped
num_flips += 1
if flipped:
new_orientations[i] = -row.values
else:
new_orientations[i] = row.values
prev_row = row
df[ORIENTATIONS_QUAT] =
|
pd.DataFrame(new_orientations)
|
pandas.DataFrame
|
"""
glucoseDataFrame.py
Creates a dataframe of glucose related statistics
in diabetics for predictive analysis.
"""
import sys
import os
import math
from datetime import *
from dateutil.parser import parse
import pandas as pd
import numpy as np
sys.path.append("..") # proper file path for importing local modules
from pythonScripts.jsonToCsv import convertToCsv
#-------CONSTANTS-------------
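#1 mmol/L of glucose ≈ 18.016 mg/dL (molar mass of glucose ≈ 180.16 g/mol, divided by 10)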
CONVERSION_FACTOR = 18.01559
#-------Dicts----------
#basal rates (unit/hour)
BASAL = {
"0" : .625,
"2" : .650, #if hour equals 2, then also minute = 30 cause (2:30)
"4" : .800,
"8" : .725,
"12" : .700,
"14" : .250,
"19" : .650
}
#insulin sensitivity (mg/dL/unit)
SENSITIVITY = {
"0" : 60,
"6" : 70,
"9" : 60,
"12" : 60,
"15" : 60
}
#carb ratio (grams/unit)
CARB_RATIO = {
"0" : 10,
"6" : 5,
"11" : 5.5, #if hour equals 11, then also minute = 30 cause (11:30)
"14" : 6,
"18" : 7,
"21" : 9
}
#----------------------
#-----------------------------
def convert_glucose(glucose_levels):
"""Do conversion across entire dataset
conversion mmol/L to mg/dL"""
value_row = glucose_levels.loc[:, 'value']
convert_row = value_row.mul(CONVERSION_FACTOR)
round_conversion = convert_row.round(2)
return round_conversion
def divide_timestamp(time_row):
"""Seperates timestamp into individual
months, days, weekdays, hours, and minutes"""
month_list = []
day_list = []
weekday_list = []
hour_list = []
minutes_list = []
time_str = time_row.astype(str).values.tolist()
for i in time_str:
#for months
month = parse(i).month
month_list.append(month)
#for days
day = parse(i).day
day_list.append(day)
#for weekdays
weekday = parse(i).weekday()
weekday_list.append(weekday)
#for hours
hour = parse(i).hour
hour_list.append(hour)
#for minutes
minute = parse(i).minute
minutes_list.append(minute)
return month_list, day_list, weekday_list, hour_list, minutes_list
def create_dataframe():
"""Creates dataframe for glucose analysis"""
#---get correct path to csv input file-----------
path_to_input_csv = convertToCsv()
current_file = os.path.basename(path_to_input_csv)
print(f"Currently Reading File: {current_file}")
care_link_file = input("\nEnter Medtronic File: ")
#------------------------------------------------
#----------Create data frame-------------------
#get all data from csv
gluc_level_data = pd.read_csv(path_to_input_csv)
# remove rows that are NaN for value
gluc_level_data = gluc_level_data[pd.notnull(gluc_level_data["value"])]
#----------------------------------------------
#---------------conversion mmol/L to mg/dL-----------------
glu = convert_glucose(gluc_level_data)
#----------------------------------------------------------
#--------Save month, day, weekday, hour, minutes---------------
timestamp = gluc_level_data.loc[:, 'time']
saved_index = timestamp.index # save the index from this dataframe as variable index
month_list, day_list, weekday_list, hour_list, minutes_list = divide_timestamp(timestamp)
#convert the lists to dataframes while ensuring the index corresponds to the other dataframes
monthdf = pd.DataFrame(np.array(month_list), index=saved_index)
daydf = pd.DataFrame(np.array(day_list), index=saved_index)
weekdaydf = pd.DataFrame(np.array(weekday_list), index=saved_index)
hourdf = pd.DataFrame(np.array(hour_list), index=saved_index)
minutesdf = pd.DataFrame(np.array(minutes_list), index=saved_index)
#--------------------------------------------------------------
#---------BOLUS OUTPUT---------------------------
path_to_care_link = os.path.join(os.getcwd(), "csvData", "csvInData")
bolus_carb_csv = pd.read_csv(os.path.join(path_to_care_link, care_link_file), skiprows=6)
bolus = bolus_carb_csv.loc[:, 'Bolus Volume Delivered (U)']
date = bolus_carb_csv.loc[:, 'Date']
time = bolus_carb_csv.loc[:, 'Time']
carb = bolus_carb_csv.loc[:, 'BWZ Carb Input (grams)']
bolus_carb_data = pd.concat([date, time, bolus, carb], axis=1, ignore_index=True)
#remove column if NaN value in both columns 2&3
bolus_carb_data = bolus_carb_data.dropna(subset=[2, 3], how='all')
#get rid of last header row
bolus_carb_data = bolus_carb_data.drop(bolus_carb_data.index[len(bolus_carb_data)-1])
bolus_carb_data.columns = ["Date", "Time", "Bolus (U)", "Carb Input (grams)"]
#-------------------------------------------------------------------------
#--------Save month, day, weekday, hour, minutes---------------
month_list_b = []
day_list_b = []
hour_list_b = []
minutes_list_b = []
date = bolus_carb_data.loc[:, 'Date']
time = bolus_carb_data.loc[:, 'Time']
index_bolus = date.index # save the index from this dataframe as variable index
day_str = date.astype(str).values.tolist()
time_str_b = time.astype(str).values.tolist()
for j in time_str_b:
time_whole = datetime.strptime(j, '%H:%M:%S')
#for hours
hour_list_b.append(time_whole.hour)
#for minutes
minutes_list_b.append(time_whole.minute)
for k in day_str:
date_whole = datetime.strptime(k, '%Y/%m/%d')
#for months
month_list_b.append(date_whole.month)
#for days
day_list_b.append(date_whole.day)
day_list_b.append(date_whole.day)
#convert the lists to dataframes while ensuring the index corresponds to the other dataframes
monthdf_bolus = pd.DataFrame(np.array(month_list_b), index=index_bolus)
daydf_bolus = pd.DataFrame(np.array(day_list_b), index=index_bolus)
hourdf_bolus = pd.DataFrame(np.array(hour_list_b), index=index_bolus)
minutesdf_bolus = pd.DataFrame(np.array(minutes_list_b), index=index_bolus)
#concatenate all of these
bolus_carb_final = pd.concat([bolus_carb_data, monthdf_bolus, daydf_bolus, hourdf_bolus, minutesdf_bolus], axis=1, ignore_index=True)
bolus_carb_final.columns = ["Date", "Time", "Bolus (U)", "Carb Input (grams)", "Month", "Day", "Hour", "Minutes"]
#--------------------------------------------------------------
#--------Concatenate all of the dataframes into one dataframe----------------------------
final = pd.concat([timestamp, glu, monthdf, daydf, weekdaydf, hourdf, minutesdf], axis=1, ignore_index=True) #concatenate the dataframe together
#give columns names
final.columns = ["TimeStamp", "Glucose (mg/dL)", "Month", "Day", "Weekday", "Hour", "Minutes"]
#----------------------------------------------------------------------------------------
#MERGE MEDTRONIC DATA WITH DEXCOM
#----------------------------------------------------------------------------------------
#make dataframe of NaN filled bolus and carb columns with indexes matching tidepool
bolus_carbdf =
|
pd.DataFrame(np.nan, index=saved_index, columns=["Bolus (U)", "Carb Input (grams)"])
|
pandas.DataFrame
|
import os
import sys
import hashlib
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from glmnet import LogitNet
from itertools import product
from sklearn.metrics import roc_auc_score, average_precision_score
import hetnet_ml.src.graph_tools as gt
from hetnet_ml.src.extractor import MatrixFormattedGraph
from hetnet_ml.src.processing import DegreeTransform, DWPCTransform
## Set arguments to run the script
parser = argparse.ArgumentParser(description='Run Machine Learning on Time-Based Wikidata Network')
parser.add_argument('data_dir', help="The directory of the source files for machine learning", type=str)
parser.add_argument('-g', '--gs_treat', help='Replace the TREATS edges in the network with those from the Gold Standard',
action='store_true')
parser.add_argument('-a', '--alpha', help="Set the alpha value for the ElasticNet Regression", type=float, default=0.1)
parser.add_argument('-w', '--weight', help="Set the damping exponent for DWPC extraction", type=float, default=0.4)
parser.add_argument('-m', '--multiplier', help="Multiplier for selecting the number of negatives for training. Will"+
" use this factor of the number of positives", type=int, default=10)
parser.add_argument('-s', '--scoring', help='Scoring metric to use for ElasticNet regression', type=str,
default='recall')
parser.add_argument('-d', '--degree_features', help='Use Degree Features in the model', action='store_true')
parser.add_argument('-l', '--max_length', help='The maximum Lenth for a metapath to be extracted', type=int, default=4)
parser.add_argument('-r', '--remove_similarity_mps', help='Remove Metapaths with CxXxCtD and CtDxXxD pattern.',
action='store_true')
args = parser.parse_args()
## Define variables that will govern the network analysis
#remove_mps = ['CrCRrCtD', 'CtDmsMSmsD']
remove_mps = []
#remove_edges = ['CtD']
remove_edges = []
# Test params will be prepended to any output files if they differ from defaults
test_params = '' #'remove_all_sim'
## Unpack command line variables
target_edge_abv = 'CtD'
data_dir = args.data_dir
gs_treat = args.gs_treat
alpha = args.alpha
w = args.weight
negative_multiplier = args.multiplier
scoring = args.scoring
include_degree_features = args.degree_features
max_length = args.max_length
remove_similarity_mps = args.remove_similarity_mps
if scoring.lower() == 'none':
scoring = None
# Convert load dir into an integer for a consistent random seed
ini_seed = int(hashlib.sha1(data_dir.encode()).hexdigest(), 16) % 2**16
# Out dir is based on this filename
out_dir = os.path.join('../2_pipeline', sys.argv[0].split('.')[0], 'out')
# Convert non-default parameters to output directory
for k in sorted(list(vars(args).keys())):
if k == 'data_dir':
dirname = os.path.split(vars(args)[k])
dirname = dirname[-1] if dirname[-1] else dirname[-2]
test_params += dirname+'.'
continue
v = vars(args)[k]
if v != parser.get_default(k):
if type(v) == bool:
test_params += '{}.'.format(k)
else:
test_params += '{}-{}.'.format(k, v)
test_params = test_params.rstrip('.')
print('Non-default testing params: {}'.format(test_params))
n_jobs = 32
# Make sure the save directory exists, if not, make it
try:
os.stat(os.path.join(out_dir, test_params))
except:
os.makedirs(os.path.join(out_dir, test_params))
# Read input files
nodes = gt.remove_colons(pd.read_csv(os.path.join(data_dir, 'nodes.csv')))
edges = gt.remove_colons(pd.read_csv(os.path.join(data_dir, 'edges.csv')))
comp_ids = set(nodes.query('label == "Compound"')['id'])
dis_ids = set(nodes.query('label == "Disease"')['id'])
# We will use the TREATS edges within the graph as the training for the model
gs_edges = edges.query('type == "TREATS_CtD"').reset_index(drop=True)
# Just look at compounds and diseases in the gold standard
compounds = gs_edges['start_id'].unique().tolist()
diseases = gs_edges['end_id'].unique().tolist()
print('Based soley on gold standard...')
print('{:,} Compounds * {:,} Diseases = {:,} CD Pairs'.format(len(compounds), len(diseases),
len(compounds)*len(diseases)))
# Add in some other edges... anything with a degree > 1... or a CtD edge
compounds = set(compounds)
diseases = set(diseases)
print('Adding some more compounds and diseases....')
# Do some magic to find nodes with degree > 1
frac = 0.15
mg = MatrixFormattedGraph(nodes, edges)
first_comp = nodes.query('label == "Compound"')['id'].iloc[0]
first_disease = nodes.query('label == "Disease"')['id'].iloc[0]
comp_degrees = mg.extract_degrees(end_nodes=[first_disease])
comp_degrees = comp_degrees.loc[:, ['compound_id']+[c for c in comp_degrees.columns if c.startswith('C')]]
comp_degrees['total'] = comp_degrees[[c for c in comp_degrees.columns if c.startswith('C')]].sum(axis=1)
dis_degrees = mg.extract_degrees(start_nodes=[first_comp])
dis_degrees = dis_degrees.loc[:, ['disease_id']+[c for c in dis_degrees.columns if c.startswith('D')]]
dis_degrees['total'] = dis_degrees[[c for c in dis_degrees.columns if c.startswith('D')]].sum(axis=1)
compounds.update(set(comp_degrees.query('total > 1').sample(frac=frac)['compound_id']))
diseases.update(set(dis_degrees.query('total > 1').sample(frac=frac)['disease_id']))
compounds = list(compounds)
diseases = list(diseases)
print('Now comps and diseases')
print('{:,} Compounds * {:,} Diseases = {:,} CD Pairs'.format(len(compounds), len(diseases),
len(compounds)*len(diseases)))
# Ensure all the compounds and diseases actually are of the correct node type and in the network
node_kind = nodes.set_index('id')['label'].to_dict()
compounds = [c for c in compounds if c in comp_ids]
diseases = [d for d in diseases if d in dis_ids]
# Currently no functionality... but TODO: may want to combine this with the ability to load different gold standards.
if not gs_treat:
print('Using the original TREATS edge from Wikidata')
else:
print('Removing Wikidata TREATS edges and replacing with those from Gold Standard')
def drop_edges_from_list(df, drop_list):
idx = df.query('type in @drop_list').index
df.drop(idx, inplace=True)
# Filter out any compounds and diseases wrongly classified
gs_edges = gs_edges.query('start_id in @compounds and end_id in @diseases')
# Remove the TREATs edge form edges
drop_edges_from_list(edges, ['TREATS_CtD'])
gs_edges['type'] = 'TREATS_CtD'
column_order = edges.columns
edges = pd.concat([edges, gs_edges], sort=False)[column_order].reset_index(drop=True)
print('{:,} Nodes'.format(len(nodes)))
print('{:,} Edges'.format(len(edges)))
print('{:,} Compounds * {:,} Diseases = {:,} CD Pairs'.format(len(compounds),
len(diseases), len(compounds)*len(diseases)))
def remove_edges_from_gold_standard(to_remove, gs_edges):
"""
Remove edges from the gold standard
"""
remove_pairs = set([(tup.c_id, tup.d_id) for tup in to_remove.itertuples()])
gs_tups = set([(tup.start_id, tup.end_id) for tup in gs_edges.itertuples()])
remaining_edges = gs_tups - remove_pairs
return pd.DataFrame({'start_id': [tup[0] for tup in remaining_edges],
'end_id': [tup[1] for tup in remaining_edges],
'type': 'TREATS_CtD'})
def add_percentile_column(in_df, group_col, new_col, cdst_col='prediction'):
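# Adds, within each group_col group, the percentile rank (0-1] of cdst_col as a new column new_col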
grpd = in_df.groupby(group_col)
predict_dfs = []
for grp, df1 in grpd:
df = df1.copy()
total = df.shape[0]
df.sort_values(cdst_col, inplace=True)
order = np.array(df.reset_index(drop=True).index)
percentile = (order+1) / total
df[new_col] = percentile
predict_dfs.append(df)
return pd.concat(predict_dfs, sort=False)
def find_drug_or_disease_similarity(mg):
""" Finds paths with CxXxCtD or CtDxXxD pattern..."""
remove_paths = []
sk = mg.start_kind
ek = mg.end_kind
for mp, info in mg.metapaths.items():
if info['length'] != 3:
continue
else:
# CxXxCtD pattern:
if (info['edges'][0].split(' - ')[0] == sk and
info['edges'][1].split(' - ')[-1] == sk and
info['standard_edge_abbreviations'][2] == 'CtD') \
or (info['standard_edge_abbreviations'][0] == 'CtD' and # CtDxXxD pattern
info['edges'][1].split(' - ')[0] == ek and
info['edges'][2].split(' - ')[-1] == ek):
remove_paths.append(mp)
return remove_paths
def glmnet_coefs(glmnet_obj, X, f_names):
"""Helper Function to quickly return the model coefs and correspoding fetaure names"""
l = glmnet_obj.lambda_best_[0]
coef = glmnet_obj.coef_[0]
coef = np.insert(coef, 0, glmnet_obj.intercept_)
names = np.insert(f_names, 0, 'intercept')
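# Standardized ("z-scale") coefficients: rescale by the feature means/standard deviations so magnitudes are comparable across features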
z_intercept = coef[0] + sum(coef[1:] * X.mean(axis=0))
z_coef = coef[1:] * X.values.std(axis=0)
z_coef = np.insert(z_coef, 0, z_intercept)
return
|
pd.DataFrame([names, coef, z_coef])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from operator import itemgetter
from datetime import datetime
from keras_preprocessing.sequence import pad_sequences
def format_x(input_format: np.array) -> np.array:
for i in range(input_format.shape[0]):
input_format[i] = np.array(np.nan_to_num(input_format[i].astype(np.float32)))
input_format = pad_sequences(input_format, dtype='float32', padding='pre', value=0.0)
print(np.shape(input_format))
return input_format
def add_missing_timestamp_values(df: pd.DataFrame) -> pd.DataFrame:
frame2time = df.groupby(['gameId', 'time', 'frameId'])
prev_frame_id = 0
coef = 1
prev_timestamp = pd.Timestamp('2018-01-01T00:00:00.000Z')
timestamp_delta = pd.Timedelta(np.timedelta64(10, 'ms'))
print('Start record processing. Differentiate timestamps that have multiple records...')
print(f'Time: {datetime.now().strftime("%H:%M:%S")}')
for name, group in frame2time:
game_id, timestamp, frame_id = name
timestamp = pd.Timestamp(timestamp)
if frame_id >= prev_frame_id and timestamp == prev_timestamp:
prev_frame_id = frame_id
new_timestamp = timestamp + (timestamp_delta * coef)
coef += 1
df['time'].mask(
(df['gameId'] == game_id) &
(df['time'] == timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z') &
(df['frameId'] == frame_id),
new_timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z',
inplace=True
)
else:
# new play
coef = 1
prev_frame_id = frame_id
prev_timestamp = timestamp
print(f'End record processing: {datetime.now().strftime("%H:%M:%S")}')
return df
def normalize_play_direction(df: pd.DataFrame) -> pd.DataFrame:
# normalize coordinates
def normalize_x(row):
if row.playDirection == 'left':
return 120 - row.x
return row.x
def normalize_y(row):
if row.playDirection == 'left':
return 160 / 3 - row.y
return row.y
df.x = df.apply(lambda row: normalize_x(row), axis=1)
df.y = df.apply(lambda row: normalize_y(row), axis=1)
return df
SPEED_MAX_THRESHOLD = 43
ACCELERATION_MAX_THRESHOLD = 71
DISTANCE_MAX_THRESHOLD = 13
def normalize_and_discard(df: pd.DataFrame) -> pd.DataFrame:
"""
Normalize numeric values between 0 and 1 and discard records that are out of bounds.
"""
# ## 2. Discard values out of range of x and y
df_cleaned = df[(df.x >= 0) & (df.x <= 120) & (df.y >= 0) & (df.y <= (160 / 3))]
print(f'Shape difference {df.shape[0] - df_cleaned.shape[0]}')
# ## 3. Normalize x, y , s, a, dis, o, dir on scale 0-1
# thresholds are determined by examining data from all weeks
df_cleaned.x = df_cleaned.x / df.x.max()
df_cleaned.y = df_cleaned.y / df.y.max()
df_cleaned.s = df_cleaned.s / SPEED_MAX_THRESHOLD
df_cleaned.a = df_cleaned.a / ACCELERATION_MAX_THRESHOLD
df_cleaned.dis = df_cleaned.dis / DISTANCE_MAX_THRESHOLD
df_cleaned.o = df_cleaned.o / 360
df_cleaned.dir = df_cleaned.dir / 360
df_n2 = df_cleaned[[
'time', 'x', 'y', 's', 'a', 'dis', 'o', 'dir', 'event', 'frameId', 'team', 'gameId',
'playId', 'quarter', 'homeHasPossession',
'down', 'playType', 'defendersInTheBox',
'numberOfPassRushers', 'passResult', 'isDefensivePI'
]]
df_n2.quarter /= 5.0 # max quarters
df_n2.down /= 4.0 # max downs
df_n2.defendersInTheBox /= 11.0
df_n2.numberOfPassRushers /= 11.0
return df_n2
def convert_df_rows2columns(df: pd.DataFrame) -> pd.DataFrame:
print('Transforming rows to cols...')
def preprocess_df_columns() -> pd.Series:
occurrence_list = df.sort_values(['gameId', 'time', 'frameId']).groupby(
['gameId', 'time', 'frameId', 'team']).team.cumcount().add(1)
labels_list = df.sort_values(['gameId', 'time', 'frameId']).groupby(
['gameId', 'time', 'frameId']).team.apply(list)
flat_list = np.array([item for sublist in labels_list for item in sublist])
merge_list = np.column_stack((occurrence_list, flat_list))
col_list = [f'{row[1]}_{row[0]}' for row in merge_list]
return
|
pd.Series(col_list)
|
pandas.Series
|
""" Simulation of online news consumption including recommendations.
A simulation framework for the visualization and analysis of the effects of different recommenders systems.
This simulation draws mainly on the work of Fleder and Hosanagar (2017). To account for the specificities
of news consumption, it includes both user preferences and editorial priming as they interact in a
news-webpage context. The simulation models two main components: users (preferences and behavior)
and items (article content, publishing habits). Users interact with items and are recommended items
based on their interaction.
Example:
$ python3 simulation.py
"""
from __future__ import division
import numpy as np
from scipy import spatial
from scipy import stats
from scipy.stats import norm
from scipy.stats import chi2
import random
import pandas as pd
import pickle
from sklearn.mixture import GaussianMixture
import os
import sys, getopt
import copy
import json
import metrics
import matplotlib
import bisect
import time
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
import traceback, sys
from PyQt5.QtCore import QDateTime, Qt, QTimer
from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit,
QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy,
QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit,
QVBoxLayout, QWidget)
from PyQt5.QtWidgets import *
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
__author__ = '<NAME>'
def cdf(weights):
""" Cummulative density function.
Used to convert topic weights into probabilities.
Args:
weights (list): An array of floats corresponding to weights
"""
total = sum(weights)
result = []
cumsum = 0
for w in weights:
cumsum += w
result.append(cumsum / total)
return result
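# Example: cdf([1, 1, 2]) returns [0.25, 0.5, 1.0]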
def selectClassFromDistribution(population, weights):
""" Given a list of classes and corresponding weights randomly select a class.
Args:
population (list): A list of class names e.g. business, politics etc
weights (list): Corresponding float weights for each class.
"""
assert len(population) == len(weights)
cdf_vals = cdf(weights)
x = random.random()
idx = bisect.bisect(cdf_vals, x)
return population[idx]
def standardize(num, precision = 2):
""" Convert number to certain precision.
Args:
num (float): Number to be converted
precision (int): Precision either 2 or 4 for now
"""
if precision == 2:
return float("%.2f"%(num))
if precision == 4:
return float("%.4f"%(num))
def euclideanDistance(A,B):
""" Compute the pairwise distance between arrays of (x,y) points.
We use a numpy version which is C++ based for the sake of efficiency.
"""
#spatial.distance.cdist(A, B, metric = 'euclidean')
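# Broadcasting builds a |B| x |A| array of squared coordinate differences; the final .T returns shape (|A|, |B|)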
return np.sqrt(np.sum((np.array(A)[None, :] - np.array(B)[:, None])**2, -1)).T
class Users(object):
""" The class for modeling the user preferences (users) and user behavior.
The users object can be passed from simulation to simulation, allowing for
different recommendation algorithms to be applied on. The default attributes
correspond to findings reported on online news behavior (mostly Mitchell et
al 2017,'How Americans encounter, recall and act upon digital news').
Todo:
* Allow export of the data for analysis.
"""
def __init__(self):
""" The initialization simply sets the default attributes.
"""
self.seed = 1
self.totalNumberOfUsers = 200 # Total number of users
self.percentageOfActiveUsersPI = 1.0 # Percentage of active users per iterations
self.m = 0.05 # Percentage of the distance_ij covered when a user_i drifts towards an item_j
# Choice attributes
self.k = 20
self.delta = 5
self.beta = 0.9
self.meanSessionSize = 6
# Awareness attributes
self.theta = 0.07 # Proximity decay
self.thetaDot = 0.5 # Prominence decay
self.Lambda = 0.6 # Awareness balance between items in proximity and prominent items
self.w = 40 # Maximum awareness pool size
self.Awareness = [] # User-item awareness matrix
self.Users = [] # User preferences, (x,y) position of users on the attribute space
self.UsersClass = [] # Users be assigned a class (center at attribute space)
self.userVarietySeeking = [] # Users' willingness to drift
self.X = False # Tracking the X,Y position of users throught the simulation
self.Y = False
def generatePopulation(self):
""" Genererating a population of users (user preferences and variety seeking).
"""
random.seed(self.seed)
np.random.seed(self.seed)
# Position on the attribute space. Uniform, bounded by 1-radius circle
self.Users = np.random.uniform(-1,1,(self.totalNumberOfUsers,2))
for i, user in enumerate(self.Users):
while euclideanDistance([user], [[0,0]])[0][0]>1.1:
user = np.random.uniform(-1,1,(1,2))[0]
self.Users[i] = user
# Variety seeking, willingness to drift. Arbitrary defined
lower, upper = 0, 1
mu, sigma = 0.1, 0.03
X = stats.truncnorm( (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
self.userVarietySeeking = X.rvs(self.totalNumberOfUsers, random_state = self.seed)
# Users can be assigned a class (most proxiamte attribute center), not currently used.
#self.UsersClass = [gmm.predict([self.Users[i]*55])[0] for i in range(self.totalNumberOfUsers)]
self.X = {i:[self.Users[i,0]] for i in range(self.totalNumberOfUsers)}
self.Y = {i:[self.Users[i,1]] for i in range(self.totalNumberOfUsers)}
def sessionSize(self):
""" Draw the session size (amount of items to purchase) of each user at each iteration from a normal distribution.
Returns:
int: the session size
"""
return int(np.random.normal(self.meanSessionSize, 2))
def subsetOfAvailableUsers(self):
""" Randomly select a subset of the users.
"""
self.activeUserIndeces = np.arange(self.totalNumberOfUsers).tolist()
random.shuffle(self.activeUserIndeces)
self.activeUserIndeces = self.activeUserIndeces[:int(len(self.activeUserIndeces)*self.percentageOfActiveUsersPI)]
self.nonActiveUserIndeces = [ i for i in np.arange(self.totalNumberOfUsers) if i not in self.activeUserIndeces]
def computeAwarenessMatrix(self, Dij, ItemProminence, activeItemIndeces):
""" Compute awareness from proximity and prominence (not considering availability, recommendations, history).
Args:
Dij (nparray): |Users| x |Items| distance matrix
ItemProminence (nparray): |Items|-sized prominence vector
"""
totalNumberOfItems = ItemProminence.shape[0]
W = np.zeros([self.totalNumberOfUsers,totalNumberOfItems])
W2 = W.copy() # for analysis purposes
W3 = W.copy() # for analysis purposes
for a in self.activeUserIndeces:
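# Awareness mixes a prominence term (weight Lambda) with a proximity term (weight 1-Lambda); W2 and W3 keep the two terms separately for analysis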
W[a,activeItemIndeces] = self.Lambda*(-self.thetaDot*np.log(1-ItemProminence[activeItemIndeces])) + (1-self.Lambda)*np.exp(-(np.power(Dij[a,activeItemIndeces],2))/self.theta)
W2[a,activeItemIndeces] = self.Lambda*(-self.thetaDot*np.log(1-ItemProminence[activeItemIndeces]))
W3[a,activeItemIndeces] = (1-self.Lambda)*np.exp(-(np.power(Dij[a,activeItemIndeces],2))/self.theta)
R = np.random.rand(W.shape[0],W.shape[1])
W = R<W
self.Awareness, self.AwarenessOnlyPopular, self.AwarenessProximity = W, W2, W3
def choiceModule(self, Rec, w, distanceToItems, sessionSize, control = False):
""" Selecting items to purchase for a single user.
Args:
Rec (list): List of items recommended to the user
w (nparray): 1 x |Items| awareness of the user
distanceToItems (nparray): 1 x |Items| distance of the user to the items
sessionSize (int): number of items that the user will purchase
Returns:
param1 (list): List of items that were selected including the stochastic component
param2 (list): List of items that were selected not including the stochastic component
"""
Similarity = -self.k*np.log(distanceToItems)
V = Similarity.copy()
if not control:
# exponential ranking discount, from Vargas
for k, r in enumerate(Rec):
V[r] = Similarity[r] + self.delta*np.power(self.beta,k)
# Introduce the stochastic component
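        # -log(-log(U)) with U ~ Uniform(0,1) is standard Gumbel noise, so the
        # argsort below implements a multinomial-logit-style stochastic choice.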
E = -np.log(-np.log([random.random() for v in range(len(V))]))
U = V + E
sel = np.where(w==1)[0]
# with stochastic
selected = np.argsort(U[sel])[::-1]
# without stochastic
selectedW = np.argsort(V[sel])[::-1]
return sel[selected[:sessionSize]],sel[selectedW[:sessionSize]]
def computeNewPositionOfUser(self, user, ChosenItems):
""" Compute new position of a user given their purchased item(s).
Args:
user (int): Index of specific user.
ChosenItems (list): (x,y) position array of items selected by the user.
"""
for itemPosition in ChosenItems:
dist = euclideanDistance([self.Users[user]], [itemPosition])[0]
p = np.exp(-(np.power(dist,2))/(self.userVarietySeeking[user])) # based on the awareness formula
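            # The user moves a fraction m of the way towards the item with probability p;
            # p shrinks with distance and grows with the user's variety seeking.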
B = np.array(self.Users[user])
P = np.array(itemPosition)
BP = P - B
x,y = B + self.m*(random.random()<p)*BP
self.Users[user] = [x,y]
self.X[user].append(x)
self.Y[user].append(y)
class Items(object):
""" The class for modeling the items' content (items) and prominence.
The items object can be passed from simulation to simulation, allowing for
    different recommendation algorithms to be applied to it. The default attributes
    correspond to findings reported on online news behavior (mostly Mitchell et
    al. 2017, 'How Americans encounter, recall and act upon digital news').
Todo:
* Allow export of the data for analysis.
"""
def __init__(self):
""" The initialization simply sets the default attributes.
"""
self.seed = 1
self.numberOfNewItemsPI = 100 # The number of new items added per iteration
self.totalNumberOfItems = False # The total number of items (relates to the number of iterations)
self.percentageOfActiveItems = False
# Topics, frequency weights and prominence weights. We use topics instead of "classes" here.
self.topics = ["entertainment","business","politics","sport","tech"]
self.topicsProminence = [] #[0.05,0.07,0.03,0.85,0.01]
self.topicsFrequency = [] #[0.2, 0.2, 0.2, 0.2, 0.2]
self.p = 0.1 # Slope of salience decrease function
self.Items = [] # The items' content (x,y) position on the attribute space
self.ItemsClass = [] # The items' class corresponds to the most prominent topic
self.ItemsFeatures = False # The items' feature vector
self.ItemsDistances = False # |Items|x|Items| distance matrix
self.ItemsOrderOfAppearance = False # Random order of appearance at each iteration
self.ItemProminence = False # Item's prominence
self.ItemLifespan = False # Items' age (in iterations)
self.hasBeenRecommended = False # Binary matrix holding whether each items has been recommended
def generatePopulation(self, totalNumberOfIterations):
""" Genererating a population of items (items' content and initial prominence).
"""
random.seed(self.seed)
np.random.seed(self.seed)
# Compute number of total items in the simulation
self.totalNumberOfItems = totalNumberOfIterations*self.numberOfNewItemsPI
self.percentageOfActiveItems = self.numberOfNewItemsPI/self.totalNumberOfItems
# Apply GMM on items/articles from the BBC data
R, S = [5,1,6,7], [5,2,28,28]
r = int(random.random()*4)
(X,labels,topicClasses) = pickle.load(open('BBC data/t-SNE-projection'+str(R[r])+'.pkl','rb'))
gmm = GaussianMixture(n_components=5, random_state=S[r]).fit(X)
        # Normalize topic weights so they sum to 1 (CBF)
self.topicsFrequency = [np.round(i,decimals=1) for i in self.topicsFrequency/np.sum(self.topicsFrequency)]
# Generate items/articles from the BBC data projection
samples_, classes_ = gmm.sample(self.totalNumberOfItems*10)
for c, category in enumerate(self.topics):
selection = samples_[np.where(classes_ == c)][:int(self.topicsFrequency[c]*self.totalNumberOfItems)]
if len(self.Items) == 0:
self.Items = np.array(selection)
else:
self.Items = np.append(self.Items, selection, axis=0)
self.ItemsClass+=[c for i in range(len(selection))]
self.ItemsClass = np.array(self.ItemsClass)
self.ItemsFeatures = gmm.predict_proba(self.Items)
self.Items = self.Items/55 # Scale down to -1, 1 range
# Cosine distance between item features
self.ItemsDistances = spatial.distance.cdist(self.ItemsFeatures, self.ItemsFeatures, metric='cosine')
# Generate a random order of item availability
self.ItemsOrderOfAppearance = np.arange(self.totalNumberOfItems).tolist()
random.shuffle(self.ItemsOrderOfAppearance)
# Initial prominence
self.initialProminceZ0()
self.ItemProminence = self.ItemsInitialProminence.copy()
# Lifespan, item age
self.ItemLifespan = np.ones(self.totalNumberOfItems)
# Has been recommended before
self.hasBeenRecommended = np.zeros(self.totalNumberOfItems)
def prominenceFunction(self, initialProminence, life):
""" Decrease of item prominence, linear function.
Args:
initialProminence (float): The initial prominence of the item
life (int): The item's age (in iterations)
Returns:
param1 (float): New prominence value
"""
x = life
y = (-self.p*(x-1)+1)*initialProminence
return max([y, 0])
def subsetOfAvailableItems(self,iteration):
""" Randomly select a subset of the items.
The random order of appearance has already been defined in ItemsOrderOfAppearance. The function simply
extends the size of the activeItemIndeces array.
Args:
iteration (int): the current simulation iteration
"""
self.activeItemIndeces =[j for j in self.ItemsOrderOfAppearance[:(iteration+1)*int(self.totalNumberOfItems*self.percentageOfActiveItems)] if self.ItemProminence[j]>0]
self.nonActiveItemIndeces = [ i for i in np.arange(self.totalNumberOfItems) if i not in self.activeItemIndeces]
def updateLifespanAndProminence(self):
""" Update the lifespan and promince of the items.
"""
self.ItemLifespan[self.activeItemIndeces] = self.ItemLifespan[self.activeItemIndeces]+1
for a in self.activeItemIndeces:
self.ItemProminence[a] = self.prominenceFunction(self.ItemsInitialProminence[a],self.ItemLifespan[a])
def initialProminceZ0(self):
""" Generate initial item prominence based on the topic weights and topic prominence.
"""
self.topicsProminence = [np.round(i,decimals=2) for i in self.topicsProminence/np.sum(self.topicsProminence)]
counts = dict(zip(self.topics, [len(np.where(self.ItemsClass==i)[0]) for i,c in enumerate(self.topics) ]))
items = len(self.ItemsClass)
population = self.topics
        # Chi-square distribution with two degrees of freedom. Other skewed distributions can be used.
df = 2
mean, var, skew, kurt = chi2.stats(df, moments='mvsk')
x = np.linspace(chi2.ppf(0.01, df), chi2.ppf(0.99, df), items)
rv = chi2(df)
Z = {}
for c in self.topics: Z.update({c:[]})
        # Assign topics to the z prominence values without replacement
for i in rv.pdf(x):
c = selectClassFromDistribution(population, self.topicsProminence)
while counts[c]<=0:
c = selectClassFromDistribution(population, self.topicsProminence)
counts[c]-=1
Z[c].append(i/0.5)
self.ItemsInitialProminence = np.zeros(self.totalNumberOfItems)
for c, category in enumerate(self.topics):
indeces = np.where(self.ItemsClass==c)[0]
self.ItemsInitialProminence[indeces] = Z[category]
class Recommendations(object):
def __init__(self):
self.outfolder = ""
self.SalesHistory = []
self.U = []
self.I = []
self.algorithm = False
self.n = 5
def setData(self, U, I, algorithm, SalesHistory):
self.U, self.I, self.algorithm, self.SalesHistory = U, I, algorithm, SalesHistory
def exportToMMLdocuments(self):
""" Export users' features, items' content and user-item purchase history for MyMediaLite.
        MyMediaLite has a specific binary input format for user and item attributes: an attribute
        either belongs or does not belong to an item or user. To accommodate that, we take some
        liberties and convert each user's and each item's feature vector into
a binary format.
"""
np.savetxt(self.outfolder + "/users.csv", np.array([i for i in range(self.U.totalNumberOfUsers)]), delimiter=",", fmt='%d')
F = []
for user in range(self.SalesHistory.shape[0]):
purchases = self.SalesHistory[user,:]
items = np.where(purchases==1)[0]
userf = self.I.ItemsFeatures[items]
userfm = np.mean(userf,axis=0)
userfm = userfm/np.max(userfm)
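            # Binarize the user's mean item-feature profile: keep only features whose
            # normalized weight exceeds the 0.33 threshold used below.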
feat = np.where(userfm>0.33)[0]
for f in feat: F.append([int(user),int(f)])
np.savetxt(self.outfolder + "/users_attributes.csv", np.array(F), delimiter=",", fmt='%d')
if self.I.activeItemIndeces:
p = np.where(self.SalesHistory>=1)
z = zip(p[0],p[1])
l = [[i,j] for i,j in z if j in self.I.activeItemIndeces]
np.savetxt(self.outfolder + "/positive_only_feedback.csv", np.array(l), delimiter=",", fmt='%d')
if not self.I.activeItemIndeces: self.I.activeItemIndeces = [i for i in range(self.I.totalNumberOfItems)]
d = []
for i in self.I.activeItemIndeces:
feat = np.where(self.I.ItemsFeatures[i]/np.max(self.I.ItemsFeatures[i])>0.33)[0]
for f in feat: d.append([int(i),int(f)])
np.savetxt(self.outfolder + "/items_attributes.csv", np.array(d), delimiter=",", fmt='%d')
def mmlRecommendation(self):
""" A wrapper around the MyMediaLite toolbox
Returns:
recommendations (dict): A {user:[recommended items]} dictionary
"""
command = "mono MyMediaLite/item_recommendation.exe --training-file=" + self.outfolder + "/positive_only_feedback.csv --item-attributes=" + self.outfolder + "/items_attributes.csv --recommender="+self.algorithm+" --predict-items-number="+str(self.n)+" --prediction-file=" + self.outfolder + "/output.txt --user-attributes=" + self.outfolder + "/users_attributes.csv" # --random-seed="+str(int(self.seed*random.random()))
os.system(command)
# Parse output
f = open( self.outfolder + "/output.txt","r").read()
f = f.split("\n")
recommendations = {}
for line in f[:-1]:
l = line.split("\t")
user_id = int(l[0])
l1 = l[1].replace("[","").replace("]","").split(",")
rec = [int(i.split(":")[0]) for i in l1]
recommendations.update({user_id:rec})
return recommendations
class SimulationGUI(QDialog):
""" The simulation class takes users and items and simulates their interaction.
The simulation can include recommendations (currently using a MyMediaLite wrapper).
Alternative toolboxes can be used. The simulation class also stores results for
analysis and computes diversity metrics (based on the PhD thesis of Vargas).
"""
"""
GUI functions
"""
def __init__(self, *args, **kwargs):
super(SimulationGUI, self).__init__(*args, **kwargs)
self.originalPalette = QApplication.palette()
self.createTopLeftGroupBox()
self.createTopMiddleGroupBox()
self.createTopRightGroupBox()
self.createControlsGroupBox()
self.createFigures()
# Top layout with labels
topLabel = QLabel("Recommender settings")
topLabel2 = QLabel("Article settings")
topLabel3 = QLabel("User settings")
# Main layout
mainLayout = QGridLayout()
mainLayout.setColumnStretch(0, 4)
mainLayout.setColumnStretch(1, 4)
mainLayout.setColumnStretch(2, 4)
mainLayout.addWidget(topLabel, 0, 0)
mainLayout.addWidget(topLabel2, 0, 1)
mainLayout.addWidget(topLabel3, 0, 2)
mainLayout.addWidget(self.topLeftGroupBox, 1, 0 )
mainLayout.addWidget(self.topMiddleGroupBox, 1, 1 )
mainLayout.addWidget(self.topRightGroupBox, 1, 2 )
mainLayout.addWidget(self.controlsGroupBox, 2, 0, 1, 3)
mainLayout.addWidget(self.figures, 3, 0, 2, 3)
self.setLayout(mainLayout)
self.left = 10
self.top = 10
self.width = 1200
self.height = 800
self.setGeometry(self.left, self.top, self.width, self.height)
self.setWindowTitle("SIREN")
self.threadpool = QThreadPool()
#print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
# Controls
def createControlsGroupBox(self):
self.controlsGroupBox = QGroupBox()
self.startButton = QPushButton("Start")
self.startButton.setDefault(True)
self.startButton.clicked.connect(self.onStartButtonClicked)
self.startButton.setToolTip('Click to start the simulation')
self.progressBar = QProgressBar()
self.progressBar.setRange(0, 100)
self.progressBar.setValue(0)
self.progressLabel = QLabel("&Progress:")
self.progressLabel.setBuddy(self.progressBar)
layout = QVBoxLayout()
layout.addWidget(self.startButton)
layout.addWidget(self.progressLabel)
layout.addWidget(self.progressBar)
layout.addStretch(1)
self.controlsGroupBox.setLayout(layout)
# Figures
def createFigures(self):
self.figures = QGroupBox("Figures")
dynamic_canvas1 = FigureCanvas(Figure(figsize=(5, 4),dpi = 100, tight_layout=False))
self._dynamic_ax1 = dynamic_canvas1.figure.subplots()
dynamic_canvas2 = FigureCanvas(Figure(figsize=(5, 4), dpi = 100, tight_layout=False))
self._dynamic_ax2 = dynamic_canvas2.figure.subplots()
dynamic_canvas3 = FigureCanvas(Figure(figsize=(5, 4), dpi = 100, tight_layout=False))
self._dynamic_ax3 = dynamic_canvas3.figure.subplots()
layout = QGridLayout()
# layout.setColumnStretch(0, 3)
# layout.setColumnStretch(1, 3)
layout.addWidget(dynamic_canvas1, 0, 0)
layout.addWidget(dynamic_canvas2, 0, 1)
layout.addWidget(dynamic_canvas3, 0, 2)
self.figures.setLayout(layout)
# Recommendation settings
def createTopLeftGroupBox(self):
self.topLeftGroupBox = QGroupBox()
self.comboBoxAlgorithms = QListWidget(self.topLeftGroupBox)
self.comboBoxAlgorithms.setSelectionMode(QAbstractItemView.MultiSelection)
comboBoxAlgorithmsLabel = QLabel("&Rec algorithms (scroll for more):")
comboBoxAlgorithmsLabel.setBuddy(self.comboBoxAlgorithms)
self.comboBoxAlgorithms.addItems(["BPRMF", "ItemAttributeKNN", "ItemKNN", "MostPopular",
"Random", "UserAttributeKNN","UserKNN","WRMF","MultiCoreBPRMF",
"SoftMarginRankingMF", "WeightedBPRMF", "MostPopularByAttributes",
"BPRSLIM","LeastSquareSLIM"])
comboBoxAlgorithmsLabel.setToolTip('The recommendation algorithms are part of the MyMediaLite toolbox')
self.comboBoxAlgorithms.setToolTip('The recommendation algorithms are part of the MyMediaLite toolbox')
self.spinBoxSalience = QSpinBox(self.topLeftGroupBox)
spinBoxSalienceLabel = QLabel("&Rec salience:")
spinBoxSalienceLabel.setBuddy(self.spinBoxSalience)
self.spinBoxSalience.setValue(5)
spinBoxSalienceLabel.setToolTip('Salience controls the extent to which users are susceptible to recommendations')
self.spinBoxDays = QSpinBox(self.topLeftGroupBox)
spinBoxDaysLabel = QLabel("&Days:")
spinBoxDaysLabel.setBuddy(self.spinBoxDays)
self.spinBoxDays.setValue(20)
spinBoxDaysLabel.setToolTip('The simulation iterations for which each recommendation algorithm will run')
self.spinBoxRecArticles = QSpinBox(self.topLeftGroupBox)
spinBoxRecArticlesLabel = QLabel("&Recommended articles per day:")
spinBoxRecArticlesLabel.setBuddy(self.spinBoxRecArticles)
self.spinBoxRecArticles.setValue(5)
spinBoxRecArticlesLabel.setToolTip('The amount of recommendations that each user receives per day')
layout = QVBoxLayout()
layout.addWidget(comboBoxAlgorithmsLabel)
layout.addWidget(self.comboBoxAlgorithms)
layout.addWidget(spinBoxSalienceLabel)
layout.addWidget(self.spinBoxSalience)
layout.addWidget(spinBoxDaysLabel)
layout.addWidget(self.spinBoxDays)
layout.addWidget(spinBoxRecArticlesLabel)
layout.addWidget(self.spinBoxRecArticles)
layout.addStretch(1)
self.topLeftGroupBox.setLayout(layout)
# Article settings
def createTopMiddleGroupBox(self):
self.topMiddleGroupBox = QTabWidget()
#self.topMiddleGroupBox.setSizePolicy(QSizePolicy.Preferred,QSizePolicy.Ignored)
#self.topMiddleGroupBox.heightForWidth()
self.sliderEnt = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderEntLabel = QLabel("&Entertainment:")
sliderEntLabel.setBuddy(self.sliderEnt)
self.sliderBus = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderBusLabel = QLabel("&Business:")
sliderBusLabel.setBuddy(self.sliderBus)
self.sliderPol = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderPolLabel = QLabel("&Politics:")
sliderPolLabel.setBuddy(self.sliderPol)
self.sliderSpo = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderSpoLabel = QLabel("&Sports:")
sliderSpoLabel.setBuddy(self.sliderSpo)
self.sliderTec = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderTecLabel = QLabel("&Tech:")
sliderTecLabel.setBuddy(self.sliderTec)
self.sliderPromEnt = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderPromEntLabel = QLabel("&Entertainment:")
sliderPromEntLabel.setBuddy(self.sliderPromEnt)
self.sliderPromBus = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderPromBusLabel = QLabel("&Business:")
sliderPromBusLabel.setBuddy(self.sliderPromBus)
self.sliderPromPol = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderPromPolLabel = QLabel("&Politics:")
sliderPromPolLabel.setBuddy(self.sliderPromPol)
self.sliderPromSpo = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderPromSpoLabel = QLabel("&Sports:")
sliderPromSpoLabel.setBuddy(self.sliderPromSpo)
self.sliderPromTec = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
sliderPromTecLabel = QLabel("&Tech:")
sliderPromTecLabel.setBuddy(self.sliderPromTec)
# Set values
for i,widget in enumerate([self.sliderPromEnt, self.sliderPromBus, self.sliderPromPol, self.sliderPromSpo, self.sliderPromTec]):
widget.setMinimum(1)
widget.setMaximum(10)
widget.setTickInterval(1)
widget.setSingleStep(1) # arrow-key step-size
widget.setPageStep(1) # mouse-wheel/page-key step-size
if i!=2: widget.setValue(1)
else: widget.setValue(9) # politics
for i,widget in enumerate([self.sliderEnt,self.sliderBus, self.sliderPol, self.sliderSpo, self.sliderTec]):
widget.setRange(10, 100)
widget.setValue(20)
# Add to layout
tab1 = QWidget()
tab1hbox = QVBoxLayout()
tab1hbox.setContentsMargins(5, 5, 5, 5)
tab1Text = QLabel("The likelihood of an article to belong\n to a topic (from 0.1 to 1)")
tab1hbox.addWidget(tab1Text)
for widget in [sliderEntLabel, self.sliderEnt, sliderBusLabel,
self.sliderBus, sliderPolLabel, self.sliderPol, sliderSpoLabel,
self.sliderSpo, sliderTecLabel, self.sliderTec]:
tab1hbox.addWidget(widget)
tab1hbox.addStretch(0)
tab1.setLayout(tab1hbox)
tab2 = QWidget()
tab2hbox = QVBoxLayout()
tab2hbox.setContentsMargins(5, 5, 5, 5)
tab2Text = QLabel("The likelihood of an article of a certain\n topic to appear in the news headlines (from 0.1 to 1)")
tab2hbox.addWidget(tab2Text)
for widget in [sliderPromEntLabel, self.sliderPromEnt, sliderPromBusLabel,
self.sliderPromBus, sliderPromPolLabel, self.sliderPromPol, sliderPromSpoLabel,
self.sliderPromSpo, sliderPromTecLabel, self.sliderPromTec]:
tab2hbox.addWidget(widget)
tab2hbox.addStretch(0)
tab2.setLayout(tab2hbox)
tab3 = QWidget()
tab3hbox = QVBoxLayout()
tab3hbox.setContentsMargins(5, 5, 5, 5)
self.spinBoxPubArticles = QSpinBox(self.topMiddleGroupBox)
self.spinBoxPubArticles.setRange(10,500)
self.spinBoxPubArticles.setValue(100)
spinBoxPubArticlesLabel = QLabel("&Published articles per day:")
spinBoxPubArticlesLabel.setBuddy(self.spinBoxPubArticles)
spinBoxPubArticlesLabel.setToolTip('The amount of new articles published per day')
tab3hbox.addWidget(spinBoxPubArticlesLabel)
tab3hbox.addWidget(self.spinBoxPubArticles)
tab3hbox.addStretch(0)
tab3.setLayout(tab3hbox)
self.topMiddleGroupBox.addTab(tab3, "&General")
self.topMiddleGroupBox.addTab(tab1, "&Topic weights")
self.topMiddleGroupBox.addTab(tab2, "&Topic prominence")
# User settings
def createTopRightGroupBox(self):
self.topRightGroupBox = QGroupBox()
self.spinBoxUsers = QSpinBox(self.topRightGroupBox)
self.spinBoxUsers.setRange(10,500)
self.spinBoxUsers.setValue(100)
spinBoxUsersLabel = QLabel("&Active users per day:")
spinBoxUsersLabel.setBuddy(self.spinBoxUsers)
spinBoxUsersLabel.setToolTip('The amount of users that actively read articles every day')
self.spinBoxUsersArticles = QSpinBox(self.topRightGroupBox)
self.spinBoxUsersArticles.setRange(1,50)
self.spinBoxUsersArticles.setValue(6)
spinBoxUsersArticlesLabel = QLabel("&Average read articles per day:")
spinBoxUsersArticlesLabel.setBuddy(self.spinBoxUsersArticles)
spinBoxUsersArticlesLabel.setToolTip('The average amount of articles that each user reads per day')
self.sliderFocus = QSlider(Qt.Horizontal, self.topMiddleGroupBox)
self.sliderFocus.setRange(5, 100)
self.sliderFocus.setValue(60)
sliderFocusLabel = QLabel("&Reading focus:")
sliderFocusLabel.setBuddy(self.sliderFocus)
sliderFocusLabel.setToolTip("Focus controls the extent to which users are susceptible \n to articles promoted by the editors vs. articles \n close to the user's preferences. \n 1 = focus on promoted articles")
layout = QVBoxLayout()
layout.addWidget(spinBoxUsersLabel)
layout.addWidget(self.spinBoxUsers)
layout.addWidget(spinBoxUsersArticlesLabel)
layout.addWidget(self.spinBoxUsersArticles)
layout.addWidget(sliderFocusLabel)
layout.addWidget(self.sliderFocus)
layout.addStretch(1)
self.topRightGroupBox.setLayout(layout)
def printj(self, text, comments=""):
json = {"action":text,"comments":comments}
print(json)
self.feedback = text+comments
self.progressLabel.setText(self.feedback)
"""
Multi-thread functions
"""
# Update function
def progress_fn(self, progress_):
curVal = self.progressBar.value()
maxVal = self.progressBar.maximum()
self.progressBar.setValue(progress_*100)
if self.algorithm!="Control":
# Load distribution metrics
df_dis = pd.read_pickle(self.outfolder + "/metrics distribution.pkl")
# Load diversity metrics data
df = pd.read_pickle(self.outfolder + "/metrics analysis.pkl")
# Font sizes
for axis in [self._dynamic_ax1, self._dynamic_ax2, self._dynamic_ax3]:
            axis.clear()
axis_font = {'fontname':'Arial', 'size':'8'}
for item in ([axis.title, axis.xaxis.label, axis.yaxis.label] + axis.get_xticklabels() + axis.get_yticklabels()):
item.set_fontsize(8)
axis.tick_params(direction='out', length=6, width=2, grid_color='r', grid_alpha=0.5, labelsize=6)
self._dynamic_ax1.set_xlabel("Days", **axis_font)
self._dynamic_ax1.set_ylabel("EPC", **axis_font)
self._dynamic_ax1.set_title("Long-tail diversity (Expected Popularity Complement)", **axis_font)
self._dynamic_ax2.set_xlabel("Days", **axis_font)
self._dynamic_ax2.set_ylabel("EPD", **axis_font)
self._dynamic_ax2.set_title("Unexpectedness diversity (Expected Profile Distance)", **axis_font)
self._dynamic_ax3.set_title("The overall distribution of read articles by topic", **axis_font)
# Parse data
print(self.algorithms)
for k, algorithm in enumerate(self.algorithms):
if algorithm=="Control":continue
y = np.array(df[algorithm]["EPC"])
x = np.array([i for i in range(len(y))])+k*0.1
yerror = df[algorithm]["EPCstd"]
self._dynamic_ax1.errorbar(x, y, yerr = yerror, label=algorithm)
y = df[algorithm]["EPD"]
x = np.array([i for i in range(len(y))])+k*0.1
yerror = df[algorithm]["EPDstd"]
self._dynamic_ax2.errorbar(x, y, yerr = yerror, label=algorithm)
ind = np.arange(len(self.algorithms)-1)
df_dis = df_dis.drop(columns = ["Control"])
columns = list(df_dis.columns.values)
for i,topic in enumerate(df_dis.index.values):
data = np.array(df_dis.loc[topic])
self._dynamic_ax3.bar(ind + i*0.1, data, 0.1, label=topic)
# Set x-limits for diversity measures
self._dynamic_ax1.set_xlim([0, self.spinBoxDays.value()])
self._dynamic_ax2.set_xlim([0, self.spinBoxDays.value()])
self._dynamic_ax3.set_xticks(ind)
self._dynamic_ax3.set_xticklabels(columns)
# Add legends and draw
for axis in [self._dynamic_ax1, self._dynamic_ax2, self._dynamic_ax3]:
axis.legend(prop={'size': 6})
axis.figure.canvas.draw()
axis.figure.canvas.draw_idle()
def print_output(self):
"""
Empty
"""
return False
def simulation_complete(self):
"""
Empty
"""
alert = QMessageBox()
alert.setText('Simulation complete!')
alert.exec_()
# Activate button
self.startButton.setEnabled(True)
self.startButton.setText("Start")
self.startButton.repaint()
self.printj(text="Raw data stored in folder: "+self.settings["outfolder"])
self.progressLabel.repaint()
return False
def onStartButtonClicked(self):
# If no recommenders have been selected
if len(self.comboBoxAlgorithms.selectedItems())==0:
alert = QMessageBox()
alert.setText('Please select at least one recommendation algorithm.')
alert.exec_()
return False
# Deactivate button
self.startButton.setEnabled(False)
self.startButton.setText("Running...")
self.startButton.repaint()
# Initialize the simulation
self.settings = {"Number of active users per day": self.spinBoxUsers.value(),
"Days" : self.spinBoxDays.value(),
"seed": int(1),
"Recommender salience": self.spinBoxSalience.value(),
"Number of published articles per day": self.spinBoxPubArticles.value(),
"outfolder": "output-"+str(time.time()),
"Number of recommended articles per day": self.spinBoxRecArticles.value(),
"Average read articles per day": self.spinBoxUsersArticles.value(),
"Reading focus": float(self.sliderFocus.value()/100),
"Recommender algorithms": [str(item.text()) for item in self.comboBoxAlgorithms.selectedItems()],
"Overall topic weights": [float(i.value()/100) for i in [self.sliderEnt, self.sliderBus, self.sliderPol, self.sliderSpo, self.sliderTec]],
"Overall topic prominence": [float(i.value()/10) for i in [self.sliderPromEnt, self.sliderPromBus, self.sliderPromPol, self.sliderPromSpo, self.sliderPromTec]]}
# Make outfolder
os.makedirs(self.settings["outfolder"])
# Initialize
self.initWithSettings()
# Pass the function to execute
worker = Worker(self.runSimulation) # Any other args, kwargs are passed to the run function
worker.signals.result.connect(self.print_output)
worker.signals.finished.connect(self.simulation_complete)
worker.signals.progress.connect(self.progress_fn)
# Execute
self.threadpool.start(worker)
"""
Main simulation functions
"""
def exportAnalysisDataAfterIteration(self):
""" Export data to dataframes
This is called at the end of each rec algorithm iteration. Certain data
        can be exported for further analysis, e.g. the SalesHistory. For this
version, we simply export the appropriate data for the figures provided
by the interface.
"""
# Metrics output
df = pd.DataFrame(self.data["Diversity"])
df.to_pickle(self.outfolder + "/metrics analysis.pkl")
# Topics distribution output
df =
|
pd.DataFrame(self.data["Distribution"])
|
pandas.DataFrame
|
from src.landscape import Peak, PeakCollection
import numpy as np
import pandas as pd
def test_Peak_peak_coverage_works_hamming():
test_peak = Peak(
center_seq='AAAAAAA',
seqs=['AAAAAAA', 'AAAABAA', 'AAABAAA', 'BAAAAAA', 'BBAABAA', 'BBBBBBB'],
name='test peak',
radius=3,
dist_type='hamming',
letterbook_size=2
)
assert test_peak.center_seq == 'AAAAAAA'
np.testing.assert_array_almost_equal(
test_peak.peak_coverage(max_radius=2).values[:, :-1],
np.array([[1, 1],
[7, 3],
[21, 0]])
)
def test_Peak_peak_coverage_works_edit():
test_peak = Peak(
center_seq='AAAAAAA',
seqs=['AAAAAAA', 'AAAAAA', 'AAABAAA', 'BAAAAAA', 'BBAABAA', 'BBBBBBB'],
name='test peak',
radius=3,
dist_type='edit',
letterbook_size=2
)
assert test_peak.center_seq == 'AAAAAAA'
np.testing.assert_array_almost_equal(
test_peak.peak_coverage(max_radius=2).values[:, :-1],
np.array([[1, 1],
[7, 3],
[21, 0]])
)
def test_Peak_peak_abun_works_edit():
test_peak = Peak(
center_seq='AAAAAAA',
seqs=['AAAAAAA', 'AAAAAAB', 'AAABAAA', 'BAAAAAA', 'BBAABAA', 'BBBBBBB'],
name='test peak',
radius=3,
dist_type='edit',
letterbook_size=2
)
peak_abun, peak_uniq_seq = test_peak.peak_abun(
max_radius=2,
table=pd.DataFrame(
index=['AAAAAAA', 'AAAAAAB', 'BBAABAA'],
columns=['sample1', 'sample2'],
data=[[5, 3],
[0, 2],
[1, 1]]
),
use_relative=False
)
peak_abun_expected = pd.DataFrame(
columns=['sample1', 'sample2'],
index=[0, 1, 2],
data=[[5, 3],
[0, 2],
[0, 0]]
)
peak_uniq_expected = pd.DataFrame(
columns=['sample1', 'sample2'],
index=[0, 1, 2],
data=[[1, 1],
[0, 1],
[0, 0]]
)
pd.testing.assert_frame_equal(peak_abun, peak_abun_expected)
pd.testing.assert_frame_equal(peak_uniq_seq, peak_uniq_expected)
def test_Peak_peak_abun_works_edit_relative():
test_peak = Peak(
center_seq='AAAAAAA',
seqs=['AAAAAAA', 'AAAAAAB', 'AAABAAA', 'BAAAAAA', 'BBAABAA', 'BBBBBBB'],
name='test peak',
radius=3,
dist_type='edit',
letterbook_size=2
)
peak_abun, peak_uniq_seq = test_peak.peak_abun(
max_radius=2,
table=pd.DataFrame(
index=['AAAAAAA', 'AAAAAAB', 'BBAABAA'],
columns=['sample1', 'sample2'],
data=[[5, 3],
[0, 2],
[1, 1]]
),
use_relative=True
)
peak_abun_expected = pd.DataFrame(
columns=['sample1', 'sample2'],
index=[0, 1, 2],
data=[[1, 1],
[0, 0.66666666666666],
[0, 0]]
)
peak_uniq_expected = pd.DataFrame(
columns=['sample1', 'sample2'],
index=[0, 1, 2],
data=[[1, 1],
[0, 1],
[0, 0]]
)
|
pd.testing.assert_frame_equal(peak_abun, peak_abun_expected, check_dtype=False)
|
pandas.testing.assert_frame_equal
|
import pandas as pd
import numpy as np
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import mean_absolute_error
import eli5
def parse_price(val):
return float(val.replace(" ", "").replace(",", "."))
def get_df(df_train, df_test):
df_train = df_train[ df_train.index != 106447 ].reset_index(drop=True)
df = pd.concat([df_train, df_test])
params = df["offer_params"].apply(pd.Series)
params = params.fillna(-1)
ru_params = ["Безаварийный", "Количество мест", "Страна происхождения",
"Объем", "Тип топлива", "Коробка передач",
"Кредит", "Первый владелец", "Предложение от",
"Количество дверей", "Пробег", "Мощность", "Металлик", "Тип", "Цвет",
"Модель автомобиля", "Состояние", "Категория", "Обслуживание в сервисном центре", "Привод",
"Лизинг", "Год выпуска", "Марка автомобиля", "Счет-фактура по НДС",
"Первая регистрация", "Зарегистрировано в Польше", "Маржа НДС",
"Версия", "VIN", "Перламутр", "Поврежденный", "Код двигателя",
"Сажевый фильтр", "Выбросы CO2", "Ежемесячный платеж",
"Количество оставшихся платежей", "Первоначальный платеж", "Сумма погашения",
"Акрил (неметаллический)", "Тюнинг", "Правый руль (английский)",
"Допуск грузовика"]
dict_params = {pl:ru for pl,ru in zip(params.columns, ru_params)}
params.rename(columns=dict_params, inplace=True)
df =
|
pd.concat([df, params], axis=1)
|
pandas.concat
|
import pandas as pd
df2 = pd.DataFrame()
for comuna in ['maipu','nunoa','pudahuel','estacion_central','providencia','santiago','la_granja','san_joaquin','quilicura','macul']:
geocode = comuna+'_geocode.csv'
comuna1 = comuna.upper()+'_padron.csv'
df1 = pd.read_csv(comuna1)
df0 = pd.read_csv(geocode)
df0['comuna'] = comuna.upper()
aux = pd.concat([df0,df1[['Mesa']]],axis=1)
df2 = df2.append(aux, ignore_index=True)
df2.comuna.replace({
'NUNOA':'ÑUÑOA',
'ESTACION_CENTRAL':'ESTACION CENTRAL',
'LA_GRANJA':'LA GRANJA',
'SAN_JOAQUIN':'SAN JOAQUIN'
},inplace=True)
df3 = pd.read_csv('Concejales 2016 TER 1_resultados_d10_d8.csv')
aux = pd.read_csv('Concejales 2016 TER 2_resultados_d10_d8.csv')
df3 = df3.append(aux, ignore_index=True)
df4 =
|
pd.read_csv('concejales_d10_2017.csv',header=None)
|
pandas.read_csv
|
import glob
import shutil
import os
import pandas as pd
import nrrd
import re
from sklearn.model_selection import train_test_split
import pickle
import numpy as np
from time import gmtime, strftime
from datetime import datetime
import timeit
#----------------------------------------------------------------------------------------
# training dataset
#----------------------------------------------------------------------------------------
def train_val_split(proj_dir, pmh_data_dir, chum_data_dir, chus_data_dir, pmh_label_csv,
chum_label_csv, chus_label_csv, train_img_dir, val_img_dir,
train_exclude):
## PMH labels
pmh_label = pd.read_csv(os.path.join(proj_dir, pmh_label_csv))
pmh_label['Contrast'] = pmh_label['Contrast'].map({'Yes': 1, 'No': 0})
labels = pmh_label['Contrast'].to_list()
#labels = to_categorical(labels)
#print(labels)
## PMH data
fns = [fn for fn in sorted(glob.glob(pmh_data_dir + '/*nrrd'))]
#print(fns)
IDs = []
for fn in fns:
ID = 'PMH' + fn.split('/')[-1].split('-')[1][2:5].strip()
IDs.append(ID)
df_pmh = pd.DataFrame({'ID': IDs, 'file': fns, 'label': labels})
pd.options.display.max_colwidth = 100
#print(df_pmh)
file = df_pmh['file'][0]
data, header = nrrd.read(file)
print(data.shape)
print('PMH data:', len(IDs))
print('PMH datset created!')
## CHUM labels
labels = []
chum_label = pd.read_csv(os.path.join(proj_dir, chum_label_csv))
chum_label['Contrast'] = chum_label['Contrast'].map({'Yes': 1, 'No': 0})
for i in range(chum_label.shape[0]):
file = chum_label['File ID'].iloc[i]
scan = file.split('_')[2].strip()
if scan == 'CT-SIM':
labels.append(chum_label['Contrast'].iloc[i])
elif scan == 'CT-PET':
continue
#print('labels:', len(labels))
fns = []
for fn in sorted(glob.glob(chum_data_dir + '/*nrrd')):
scan_ = fn.split('/')[-1].split('_')[2].strip()
if scan_ == 'CT-SIM':
fns.append(fn)
else:
continue
#print('file:', len(fns))
IDs = []
for fn in fns:
ID = 'CHUM' + fn.split('/')[-1].split('_')[1].split('-')[2].strip()
IDs.append(ID)
#print('ID:', len(IDs))
df_chum =
|
pd.DataFrame({'ID': IDs, 'file': fns, 'label': labels})
|
pandas.DataFrame
|
from __future__ import print_function, division
import os
import re
import datetime
import sys
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import yaml
import psycopg2 as db
from nilmtk.measurement import measurement_columns
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.datastore import Key
from nilm_metadata import convert_yaml_to_hdf5
from inspect import currentframe, getfile, getsourcefile
"""
MANUAL:
dataport is a large dataset hosted in a remote SQL database. This
file provides a function to download the dataset and save it to disk
as NILMTK-DF. Since downloading the entire dataset will likely take >
24 hours, this function provides some options to allow you to download
only a subset of the data.
For example, to only load house 26 for April 2014:
from nilmtk.dataset_converters.dataport.download_dataport import download_dataport
download_dataport(
'username',
'password',
'/path/output_filename.h5',
periods_to_load={26: ('2014-04-01', '2014-05-01')}
)
REQUIREMENTS:
On Ubuntu:
* sudo apt-get install libpq-dev
* sudo pip install psycopg2
TODO:
* intelligently handle queries that fail due to network
* integrate 'grid' (use - gen) and 'gen'
"""
feed_mapping = {
'use': {},
'air1': {'type': 'air conditioner'},
'air2': {'type': 'air conditioner'},
'air3': {'type': 'air conditioner'},
'airwindowunit1': {'type': 'air conditioner'},
'aquarium1': {'type': 'appliance'},
'bathroom1': {'type': 'sockets', 'room': 'bathroom'},
'bathroom2': {'type': 'sockets', 'room': 'bathroom'},
'bedroom1': {'type': 'sockets', 'room': 'bedroom'},
'bedroom2': {'type': 'sockets', 'room': 'bedroom'},
'bedroom3': {'type': 'sockets', 'room': 'bedroom'},
'bedroom4': {'type': 'sockets', 'room': 'bedroom'},
'bedroom5': {'type': 'sockets', 'room': 'bedroom'},
'car1': {'type': 'electric vehicle'},
'clotheswasher1': {'type': 'washing machine'},
'clotheswasher_dryg1': {'type': 'washer dryer'},
'diningroom1': {'type': 'sockets', 'room': 'dining room'},
'diningroom2': {'type': 'sockets', 'room': 'dining room'},
'dishwasher1': {'type': 'dish washer'},
'disposal1': {'type': 'waste disposal unit'},
'drye1': {'type': 'spin dryer'},
'dryg1': {'type': 'spin dryer'},
'freezer1': {'type': 'freezer'},
'furnace1': {'type': 'electric furnace'},
'furnace2': {'type': 'electric furnace'},
'garage1': {'type': 'sockets', 'room': 'dining room'},
'garage2': {'type': 'sockets', 'room': 'dining room'},
'gen': {},
'grid': {},
'heater1': {'type': 'electric space heater'},
'housefan1': {'type': 'electric space heater'},
'icemaker1': {'type': 'appliance'},
'jacuzzi1': {'type': 'electric hot tub heater'},
'kitchen1': {'type': 'sockets', 'room': 'kitchen'},
'kitchen2': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp1': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp2': {'type': 'sockets', 'room': 'kitchen'},
'lights_plugs1': {'type': 'light'},
'lights_plugs2': {'type': 'light'},
'lights_plugs3': {'type': 'light'},
'lights_plugs4': {'type': 'light'},
'lights_plugs5': {'type': 'light'},
'lights_plugs6': {'type': 'light'},
'livingroom1': {'type': 'sockets', 'room': 'living room'},
'livingroom2': {'type': 'sockets', 'room': 'living room'},
'microwave1': {'type': 'microwave'},
'office1': {'type': 'sockets', 'room': 'office'},
'outsidelights_plugs1': {'type': 'sockets', 'room': 'outside'},
'outsidelights_plugs2': {'type': 'sockets', 'room': 'outside'},
'oven1': {'type': 'oven'},
'oven2': {'type': 'oven'},
'pool1': {'type': 'electric swimming pool heater'},
'pool2': {'type': 'electric swimming pool heater'},
'poollight1': {'type': 'light'},
'poolpump1': {'type': 'electric swimming pool heater'},
'pump1': {'type': 'appliance'},
'range1': {'type': 'stove'},
'refrigerator1': {'type': 'fridge'},
'refrigerator2': {'type': 'fridge'},
'security1': {'type': 'security alarm'},
'shed1': {'type': 'sockets', 'room': 'shed'},
'sprinkler1': {'type': 'appliance'},
'unknown1': {'type': 'unknown'},
'unknown2': {'type': 'unknown'},
'unknown3': {'type': 'unknown'},
'unknown4': {'type': 'unknown'},
'utilityroom1': {'type': 'sockets', 'room': 'utility room'},
'venthood1': {'type': 'appliance'},
'waterheater1': {'type': 'electric water heating appliance'},
'waterheater2': {'type': 'electric water heating appliance'},
'winecooler1': {'type': 'appliance'},
}
feed_ignore = ['gen', 'grid']
def download_dataport(database_username, database_password,
hdf_filename, periods_to_load=None):
"""
Downloads data from dataport database into an HDF5 file.
Parameters
----------
hdf_filename : str
Output HDF filename. If file exists already then will be deleted.
database_username, database_password : str
periods_to_load : dict of tuples, optional
Key of dict is the building number (int).
Values are (<start date>, <end date>)
e.g. ("2013-04-01", None) or ("2013-04-01", "2013-08-01")
defaults to all buildings and all date ranges
"""
# dataport database settings
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
database_schema = 'university'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# set up a new HDF5 datastore (overwrites existing store)
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='zlib')
# remove existing building yaml files in module dir
for f in os.listdir(join(_get_module_directory(), 'metadata')):
if re.search('^building', f):
os.remove(join(_get_module_directory(), 'metadata', f))
"""
TODO:
The section below can be altered or removed, since the restructured Dataport
now has only one electricity_egauge_minutes table.
"""
# get tables in database schema
sql_query = ("SELECT table_name" +
" FROM information_schema.views" +
" WHERE table_schema ='" + database_schema + "'" +
" ORDER BY table_name")
database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()
database_tables = [t for t in database_tables if 'electricity_egauge_minutes' in t]
# if user has specified buildings
if periods_to_load:
buildings_to_load = list(periods_to_load.keys())
else:
# get buildings present in all tables
sql_query = ''
for table in database_tables:
sql_query = (sql_query + '(SELECT DISTINCT dataid' +
' FROM "' + database_schema + '".' + table +
') UNION ')
sql_query = sql_query[:-7]
sql_query = (sql_query + ' ORDER BY dataid')
buildings_to_load = pd.read_sql(sql_query, conn)['dataid'].tolist()
# for each user specified building or all buildings in database
for building_id in buildings_to_load:
print("Loading building {:d} @ {}"
.format(building_id, datetime.datetime.now()))
sys.stdout.flush()
# create new list of chunks for concatenating later
dataframe_list = []
# for each table of 1 month data
for database_table in database_tables:
print(" Loading table {:s}".format(database_table))
sys.stdout.flush()
# get buildings present in electricity_egauge_minutes table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
' WHERE egauge_min_time IS NOT NULL' +
' ORDER BY dataid')
buildings_in_table = pd.read_sql(sql_query, conn)['dataid'].tolist()
if building_id in buildings_in_table:
# get first and last timestamps for this house in electricity_egauge_minutes table
sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +
' MAX(egauge_max_time) AS maxlocalminute' +
' FROM university.metadata' +
' WHERE dataid=' + str(building_id))
range = pd.read_sql(sql_query, conn)
first_timestamp_in_table = range['minlocalminute'][0]
last_timestamp_in_table = range['maxlocalminute'][0]
# get requested start and end and localize them
requested_start = None
requested_end = None
database_timezone = 'US/Central'
if periods_to_load:
if periods_to_load[building_id][0]:
requested_start = pd.Timestamp(periods_to_load[building_id][0])
requested_start = requested_start.tz_localize(database_timezone)
if periods_to_load[building_id][1]:
requested_end =
|
pd.Timestamp(periods_to_load[building_id][1])
|
pandas.Timestamp
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import os
current_path = f"{os.path.dirname(os.path.abspath(__file__))}/"
'''----------------------------------------------------------------------------------------
Copyright© 2019 <NAME> All Rights Reserved
This script may not be copied, altered, distributed or processed by any rights,
unless granted by the owner (i.e. <NAME>)
-------------------------------------------------------------------------------------------
DatLabV2.py
A whole new way to extract DatLab results.
Requires:
- Data file:
- a folder containing all csv files or:
- a csv file exported from DatLab
- Time: in seconds
- [O2]
        - JO2 normalised per mass: if not, specify it in the parameter section;
            the script will retrieve the mass from the repository and correct JO2.
- Block T° in Celsius
- Events
- Fluo: optional
- Modify the extract_
Optional:
- Repository that contains (for each experiment):
- name of DLD folder
- sample number
- sample mass for each chamber "A_mass, B_mass"
- experimental condition if any
=> all info in repo will be appended to each chamber as columns
'''
#=================================== PARAMETERS ===========================================
# Data:
DATA_PATH = f"{current_path}Data/Raw/"
CSV_PATH = f"{current_path}CSV/"
# Experimental:
TEMPERATURES = [18,30]
PROTOCOLS={'ADP':['MgG', 'MgCl2_1','MgCl2_2', 'Bleb', 'Ouab', 'Heart', 'PMG', 'ADP1', 'ADP2', 'S', 'Oli', 'EGTA'],
'ATP':['MgG', 'MgCl2_1','MgCl2_2', 'Heart', 'PMG', 'ATP1', 'ATP2', 'S', 'Oli', 'Bleb', 'Ouab', 'EGTA']}
Mgfree_calib = {'MgG':1,
'EGTA':0,
'MgCl2_1':1.625,
'MgCl2_2':2.25}
ATP_calib = {
'PMG':0,
'ATP1':0.625,
'ATP2':1.25,
'ADP1':0.625,
'ADP2':1.25}
chamber_V = 2 #ml
# Graph:
graphing = False
to_plot = ['B_O2', 'B_JO2', 'B_ATP'] #
x_range = None # Can choose 'start' or as format: (start) / (start,end)
# Constants:
F = 96485.33212 #C mol−1
R = 8.314472 #J.K-1
Z = 1 #Valence of TMR(M/E) / Safr
#=========================================================================================
#////////////////////////////////////// SCRIPT \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from sklearn import linear_model
import itertools
class Graph():
def __init__(self):
pass
def set_line_color(self, y_label, color=None):
if color != None:
return color
elif "JO2" in y_label:
color = '#aa0032'
elif "O2" in y_label:
color = '#309BCD'
elif "DYm" in y_label:
color = "#8100c7"
elif "ATP" in y_label:
color = "#00db0f"
elif "ROS" in y_label:
color = "#00db0f"
else:
if color is None:
color = "#b0b0b0"
return color
def set_Theme(self, ax, y_label=None):
# A few options:
ax.grid(True)
ax.set_facecolor('#333744')
ax.set_autoscale_on
ax.spines['bottom'].set_color('#808595')
ax.spines['top'].set_color('#808595')
ax.spines['left'].set_color('#808595')
ax.spines['right'].set_color('#808595')
ax.tick_params(axis='both', direction='in',
color='#808595', labelcolor='#808595',
grid_color='#465063', grid_linestyle='--', grid_linewidth=0.5)
return ax
def set_label(self, label=None):
if ('JO2' in label):
return 'JO2 (pmolO2/(s*mg))'
elif ('DYm' in label):
return 'DYm (U)'
elif 'date' in label:
return 'Date - Time'
else: return 'N/A'
def set_ax(self, ax, x_y_s):
x=x_y_s[0]#.to_list()
x_label=self.set_label(str(x.name))
#--- sort y ---
for j in range(1,len(x_y_s)):
#color=self.set_line_color(y_label)[j]
if type(x_y_s[j]) is dict:
                # Define the label first
try:
if x_y_s[j]['label']:
label=x_y_s[j]['label']
else:label=x_y_s[j]['y'].name
except:label=None
y=x_y_s[j]['y'].to_list()
try: color=self.set_line_color(y_label='None', color=x_y_s[j]['color'])
except:pass
if x_y_s[j]['type']=='line':ax.plot(x,y,color=color,linewidth=1,label=label)
elif x_y_s[j]['type']=='scatter':ax.scatter(x,y,color=color,label=label)
elif x_y_s[j]['type']=='bar':ax.bar(x,y,color=color,label=label,width=0.5)
else:ax.plot(x , x_y_s[j].to_list(), color=color, label=x_y_s[j].name)
ax=self.set_Theme(ax)
ax.set_xlabel(x_label, labelpad=10, color='#808595', fontsize='small',fontweight='bold')
ax.set_ylabel(label, labelpad=10, color='#808595', fontsize='x-small',fontweight='bold')
ax.legend(loc='upper left', fontsize='xx-small')
return ax
def graph(self, x_y_s, x_range=None, title=None):
'''
Kwargs are passed to matplotlib plotting functions.
'''
if type(x_y_s[0]) is pd.core.series.Series:
x_y_s=[x_y_s]
subplot_nb=len(x_y_s)
fig, axs = plt.subplots(subplot_nb, 1, facecolor='#283036', sharex=True)
        #--- single plot (handled separately; otherwise indexing axs raises an error) ---
if subplot_nb==1:
x_y_s=x_y_s[0]
axs=self.set_ax(axs,x_y_s)
#--- >1 subplot ---
else:
for i in range(subplot_nb):
axs[i]=self.set_ax(axs[i],x_y_s[i])
plt.subplots_adjust(hspace=0)
if x_range:
plt.xlim(x_range)
if title:
plt.suptitle(title, fontsize=50, fontweight='heavy', color='#808595', alpha=0.2, x=0.5,y=0.5)
plt.show()
def del_reox(df, o2col='O2', jo2col= 'JO2', _mode='auto', window=[-5,300]):
'''
    Delete reoxygenation periods if asked (mode 'auto' or 'reox').
'''
#-- delete reox
# Smooth O2
df[o2col]=df[o2col].rolling(window=4).mean()
if _mode == 'auto':
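        # 'auto' keeps only samples where the (smoothed) O2 signal decreases, i.e. it
        # drops reoxygenation phases, then filters JO2 to the given window.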
df['O2diff']=df[o2col].diff()
df = df[df['O2diff']<0]
df=df.set_index('Time [min]'
).sort_index(
).reset_index()
df=df[(df[jo2col]>window[0])&(df[jo2col]<window[1])]
elif _mode == 'reox':
df = df[df[f"Event"]!='reox'].sort_index()
return df
def sort_events(df, additive=True):
"""
    Distributes event names and comments (text) to the respective chambers.
Takes the (yet) unprocessed df (i.e. exported csv from DatLab)
"""
try:
df['Event Name'] = df['Event Name'].fillna(method='ffill')
df['Chamber'] = df['Chamber'].fillna(method='ffill')
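        # Forward-fill so every row inherits the most recent event, then route each
        # event to chamber A (Left), chamber B (Right) or both.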
df[f'A:_Event'] = np.where((df['Chamber'] == 'Left') | (df['Chamber'] == 'Both'),df['Event Name'], float('nan'))
df[f'A:_Event_text'] = np.where((df['Chamber'] == 'Left') | (df['Chamber'] == 'Both'),df['Event Text'], float('nan'))
df[f'B:_Event'] = np.where((df['Chamber'] == 'Right') | (df['Chamber'] == 'Both'),df['Event Name'], float('nan'))
df[f'B:_Event_text'] = np.where((df['Chamber'] == 'Right') | (df['Chamber'] == 'Both'),df['Event Text'], float('nan'))
df[f'A:_Event'] = df[f'A:_Event'].fillna(method='ffill')
df[f'A:_Event_text'] = df[f'A:_Event_text'].fillna(method='ffill')
df[f'B:_Event'] = df[f'B:_Event'].fillna(method='ffill')
df[f'B:_Event_text'] = df[f'B:_Event_text'].fillna(method='ffill')
except Exception as e: print(f"ERROR: sort_events(): {e}")
return df
def split_chambers(df, rename_cols=True, _index='time', delete_reox='auto', get_weight_col=False, **kwargs):
"""
    Separate each chamber into an individual-like experiment.
    Expects raw csv from DatLab export.
    Overall process:
        - attributes events to each chamber to ease pandas' groupby() method
        - renames columns if True (instead of 1A:..., gets 'JO2' or a parameter e.g. DYm, ROS, ATP)
        - deletes reoxygenation events if True (default)
Each chamber is returned as a dict containing:
chamber = {
'chamber': chamber name,
'protocol': protocol (defined with PROTOCOLS variable),
'df': df
}
"""
chambers=[]
for C in ['A', 'B']:
time_col=[c for c in df.columns if 'Time [' in c][0]
cols = [time_col] + [c for c in df.columns if f"{C}:" in c]
cdf=df.loc[:,cols]
#- define columns
o2col=[c for c in cols if 'concentration' in c][0]
jo2col=[c for c in cols if any(w in c for w in ['per mass', 'O2 slope neg'])][0]
fluo=[c for c in cols if 'Amp' in c or 'pX' in c][0]
event_col=[c for c in cols if all([f"{C}:_Event" in c, "text" not in c])][0]
event_text_col=[c for c in cols if all([f"{C}:_Event" in c, "text" in c])][0]
# Define the protocol
protocol=None # Default
if 'A:' in fluo: protocol = 'ADP'
elif 'B:' in fluo: protocol = 'ATP'
#-- delete reox
if delete_reox != None:
if 'window' in kwargs:
window=kwargs['window']
else:window=[-5,300]
cdf=del_reox(cdf,
o2col=o2col,
jo2col=jo2col,
_mode=delete_reox,
window=window)
#-- rename col
if rename_cols is True:
coldict={
time_col: 'time',
o2col: "O2",
jo2col: "JO2",
fluo: protocol,
event_col: 'Event',
event_text_col: 'Event_text'
}
cdf=cdf.rename(columns=coldict)
cdf=cdf.set_index(_index
).sort_index()
#cols=['time', "JO2", "O2", protocol, "Event", "Event_text"]
chamber = {
'chamber': C,
'protocol': protocol,
'df': cdf
}
#-- retrieve weight
# Careful not to overwrite repo
if get_weight_col is not False:
weight_cols=[c for c in df.columns if get_weight_col in c]
weight_col=[c for c in weight_cols if C in c][0]
_mass=df[weight_col].iloc[0]
chamber.update({'mass': _mass})
chambers.append(chamber)
return chambers[0], chambers[1]
def mgfree_calib(chamber, calibration={'MgG':0}):
mgcalib={}
for calib, conc in calibration.items():
raw=chamber['df'].loc[chamber['df']['Event']==calib, chamber['protocol']].mean()
mgcalib.update({conc:raw})
plt.plot(*zip(*sorted(mgcalib.items())))
plt.show()
print(mgcalib)
exit()
def sc_ATP(df, adtp='ATP', ATP_calib=None, _mass=1, Mgcalib=True, MgGconc=1.1):
'''
Calibrate the ATP signal
Requires df with at least 'Event' and 'ADP' or 'ATP'
    If a mass is given: returns the slope corrected by mass.
'''
# Assign default
alpha=1
calibration={'MgG': alpha,
'sc': [[0],[0]],
'slope':float('nan'),
'r2': float('nan'),
'predict':[[0]]
}
if Mgcalib is True:
# Calibrate to MgG
# Needs MgG as event
# MgG concentration 1.1uM by default
try:
df=df.reset_index()
MgG_idx=df.loc[df['Event']=='MgG'].index[0]
bsln=df[adtp].iloc[(MgG_idx-6):(MgG_idx-2)].mean()
MgG=df[adtp].iloc[(MgG_idx+2):(MgG_idx+6)].mean()
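            # alpha scales the raw fluorescence step caused by the MgG addition
            # (known concentration MgGconc) into concentration units.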
alpha=MgGconc/(MgG-bsln)
except Exception as e:
print(f'MgG calibration ERROR: {e}')
    # Differentiate between ATP and ADP calibration
# At this stage, it returns the calibrated signal after
# the first calibration
# If no 'ADP' or 'ATP' event in place,
# returns the non calibrated signal
try:
# Lock portion of the trace to calibrate
        # Start with ADP1 / ATP1 etc.
evs = [k for k,v in ATP_calib.items() if adtp in k]
if evs:
caldf=df.loc[df['Event'].isin(evs)]
fst_idx=np.where(df.index==caldf.index[0])[0][0]-10
lst_idx=np.where(df.index==caldf.index[-1])[0][0]
            # Lock the portion to calibrate
caldf=df.iloc[fst_idx:lst_idx]
# Now do the calibration
# Append calibration points to dict
sc={} # as x:y
for p, pdf in caldf.groupby(caldf['Event']):
pct=int(len(pdf)*0.25)
y = pdf[adtp].iloc[pct:len(pdf)-pct].mean()
if p in evs: x=ATP_calib[p]
else: x=0
sc.update({x: y})
# Retrieve linear sc
x=[k for k,_ in sc.items()]
y=[v for _,v in sc.items()]
# Get standard curve
lm=linear_model.LinearRegression()
X=np.array(x).reshape(-1,1)
model=lm.fit(X,y)
slope=lm.coef_[0]/_mass
r2=lm.score(X,y)
# Do the prediction
Y=model.predict(X)
            sc=[x, Y] # store the standard curve
X=df[adtp].to_numpy().reshape(-1,1)
predict=model.predict(X)
# Keep calibration for future analysis
calibration={
'MgG': alpha,
'sc': sc,
'slope':slope,
'r2': r2,
'predict':predict
}
return calibration
else: pass
except Exception as e:
print(f'ATP Calib ERROR:{e}')
    # If anything failed above, return the default (uncalibrated) calibration dict
adtp_col=[c for c in df.columns if 'ADP' in c or 'ATP' in c][0]
return calibration
def calibrate_ATP(chambers, _saving=False):
'''
Requires chambers with ADP
'''
    calib_summary=[] # to store the final calibration summary
for A in ['ATP', 'ADP']:
calibs=[] # List to store A(DT)P calibrations
ap_chambers=[c for c in chambers if A in c['protocol']]
# Get only ADP or ATP chamber
for chamber in ap_chambers:
print(f"Calibrating: {chamber['Filename']}")
# Select only ATP signal and Event column
Amp_signal=chamber['df'].loc[:,[A,'Event']].copy()
# Note that calibrated signal is in 'predict'
cal=sc_ATP(Amp_signal,
adtp=A,
Mgcalib=False,
ATP_calib=ATP_calib)
cal.update({'temperature':chamber['temperature'],
'filename':chamber['Filename'],
'adenylate':A})
calibs.append(cal)
# print(f"{chamber['Filename']} => {chamber['temperature']}-{A} slope: {cal['slope']} - R2: {cal['r2']}")
temp_cal=[]
for temperature in TEMPERATURES:
temp_calib=[]
for cal in calibs:
if int(cal['temperature'])==temperature:
temp_calib.append(cal['slope'])
temp_calib=pd.DataFrame(temp_calib, index=[0]*len(temp_calib))
temp_cal.append(
{'temperature': temperature,
f'{A}_slope_mean': temp_calib[0].mean(),
f'{A}_slope_sem': temp_calib[0].sem()
})
temp_cal=pd.DataFrame(temp_cal).set_index('temperature')
calib_summary.append(temp_cal)
calib_summary=pd.concat(calib_summary, axis=1, ignore_index=False)
calib_summary['ratio']=calib_summary['ATP_slope_mean']/calib_summary['ADP_slope_mean']
if _saving is True:
calib_summary.to_csv('ATP_calibration_summary.csv')
return calib_summary
def average_state(chamber, parameter_s=['JO2'], protocol=None, _mode='time', **kwargs):
'''
Retrieves averages and stores in dataframe
Requires:
- chamber: df of the chamber with at least JO2 column and time index
events are meant to be sorted prior to this
- parameter_s: list of parameter(s) to analyse. So far only JO2, DYm and ATP are considered
- protocol: list of titrations
'''
# A few checks
if type(parameter_s) is str: parameter_s=[parameter_s]
if protocol is None: protocol=list(set(chamber['Event'].values))
    # If the checks pass, compute averages for the parameter(s)
    # Create an empty list used to build the final dataframe
favdf=[]
for parameter in parameter_s:
        # Create the row that will store the information
row=
|
pd.DataFrame(index=[parameter], columns=protocol)
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] =
|
pd.Timestamp('2011-01-01', tz=tz)
|
pandas.Timestamp
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
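# driver_core_backing_store=0 keeps the HDF5 file entirely in memory, so nothing is flushed to disk on close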
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
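# "expected" maps each visited group path to (set of sub-group names, set of pandas leaf names)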
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Build a list of all (complib, complevel) tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
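# stack() adds the column level as a third index level; drop it before appending to the store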
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure the default is to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
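# helper: assert the itemsize recorded for column "name" in the description of stored table "key"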
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
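    # Illustrative sketch (not part of the test suite): columns passed via
    # data_columns are stored as individually addressable columns, so they can be
    # referenced directly in a where clause:
    #
    #   store.append("demo", df_dc, data_columns=["B", "string"])
    #   store.select("demo", "B > 0 and string == 'foo'")
    #
    # Columns not listed stay packed in value blocks and are not directly queryable.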
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
            # fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
        # more chunksize tests for append
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
            # currently unsupported dtypes
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
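    # Illustrative note (not part of the test suite): timedelta64 data columns can
    # be compared in a where clause against either a Timedelta-like string or an
    # explicit pd.Timedelta, and both forms select the same rows:
    #
    #   store.select("df", "C < '-3D'")
    #   store.select("df", "C < pd.Timedelta('-3D')")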
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
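    # Sketch (illustrative only): names inside a where string are resolved against
    # the caller's scope, so local variables and imported modules (like datetime
    # above) can be referenced directly; `cutoff` below is an arbitrary local name:
    #
    #   cutoff = pd.Timestamp("20130105")
    #   store.select("df", "index > cutoff")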
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
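    # Illustrative sketch (not part of the test suite): a where term filters rows
    # while columns= trims the returned frame, and the two compose:
    #
    #   store.select("demo", where=["A > 0"], columns=["A", "B"])
    #
    # which mirrors df[df.A > 0].reindex(columns=["A", "B"]), as asserted above.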
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
            # a NaN that is not in the first position is fine too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
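    # Sketch (illustrative only): the right-hand side of a where term may be a
    # list, range or Index, written inline or bound to a local name:
    #
    #   wanted = ["a", "b", "c"]
    #   store.select("df", "users = wanted")    # same as "users = ['a','b','c']"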
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
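    # Illustrative sketch (not part of the test suite): iterator=True or
    # chunksize=n make select/read_hdf yield frames piecewise, keeping memory
    # bounded for large tables:
    #
    #   for chunk in store.select("df", chunksize=100):
    #       ...   # work on each 100-row chunk
    #
    # Both options require "table" format; fixed-format stores raise TypeError,
    # as checked above.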
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
            # select w/iterator and a where clause past the end of the range
            # (matches nothing)
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
            # the inclusive range spans exactly one chunk, so one frame is returned
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
            # select w/iterator and a where clause that selects *nothing*.
            #
            # To be consistent with Python idiom, this should return [];
            # e.g. ``for e in []: print(True)`` never prints anything.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
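    # Sketch (illustrative only): select_column returns a single indexable or data
    # column as a Series without reading the whole table:
    #
    #   idx = store.select_column("df", "index")              # the stored index
    #   s = store.select_column("df3", "string", start=2, stop=-2)
    #
    # Only indexables and data columns qualify; value-block columns raise ValueError.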
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
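    # Illustrative sketch (not part of the test suite): select_as_coordinates
    # returns the integer row locations matching a query, and those coordinates
    # can be reused as the where argument for later selects, including against a
    # second table with the same row layout:
    #
    #   coords = store.select_as_coordinates("df1", ["A>0", "B>0"])
    #   part1 = store.select("df1", where=coords)
    #   part2 = store.select("df2", where=coords)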
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df =
|
tm.makeDataFrame()
|
pandas.util.testing.makeDataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr =
|
pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
|
pandas.Series
|
"""Decorators and small standalone functions for api module"""
import logging
import urllib.parse
from functools import wraps
from typing import Sequence, Union, Iterable, Optional, List
from collections.abc import Mapping
import fnmatch
import pandas as pd
from iblutil.io import parquet
import numpy as np
import one.alf.exceptions as alferr
from one.alf.files import rel_path_parts, get_session_path
from one.alf.spec import FILE_SPEC, regex as alf_regex
import one.alf.io as alfio
logger = logging.getLogger(__name__)
def Listable(t):
"""Return a typing.Union if the input and sequence of input"""
return Union[t, Sequence[t]]
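# Illustrative example: Listable(str) evaluates to Union[str, Sequence[str]],
# i.e. an annotation that accepts either a single str or any sequence of str.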
def ses2records(ses: dict) -> [pd.Series, pd.DataFrame]:
"""Extract session cache record and datasets cache from a remote session data record.
TODO Fix for new tables; use to update caches from remote queries.
Parameters
----------
ses : dict
Session dictionary from Alyx REST endpoint
Returns
-------
pd.Series
Session record
pd.DataFrame
Datasets frame
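Examples
--------
A minimal sketch, assuming ``ses`` is a session record as returned by the Alyx
REST 'sessions' endpoint (the session id below is only a placeholder):

>>> ses = ONE().alyx.rest('sessions', 'read', id='aaaaaaaa-0000-0000-0000-000000000000')
>>> session, datasets = ses2records(ses)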
"""
# Extract session record
eid = parquet.str2np(ses['url'][-36:])
session_keys = ('subject', 'start_time', 'lab', 'number', 'task_protocol', 'project')
session_data = {k: v for k, v in ses.items() if k in session_keys}
# session_data['id_0'], session_data['id_1'] = eid.flatten().tolist()
session = (
(pd.Series(data=session_data, name=tuple(eid.flatten()))
.rename({'start_time': 'date'}, axis=1))
)
session['date'] = session['date'][:10]
# Extract datasets table
def _to_record(d):
rec = dict(file_size=d['file_size'], hash=d['hash'], exists=True)
rec['id_0'], rec['id_1'] = parquet.str2np(d['id']).flatten().tolist()
rec['eid_0'], rec['eid_1'] = session.name
file_path = urllib.parse.urlsplit(d['data_url'], allow_fragments=False).path.strip('/')
file_path = alfio.remove_uuid_file(file_path, dry=True).as_posix()
rec['session_path'] = get_session_path(file_path).as_posix()
rec['rel_path'] = file_path[len(rec['session_path']):].strip('/')
if 'default_revision' in d:
rec['default_revision'] = d['default_revision'] == 'True'
return rec
records = map(_to_record, ses['data_dataset_session_related'])
datasets = pd.DataFrame(records).set_index(['id_0', 'id_1']).sort_index()
return session, datasets
def datasets2records(datasets) -> pd.DataFrame:
"""Extract datasets DataFrame from one or more Alyx dataset records
Parameters
----------
datasets : dict, list
One or more records from the Alyx 'datasets' endpoint
Returns
-------
pd.DataFrame
Datasets frame
Examples
--------
>>> datasets = ONE().alyx.rest('datasets', 'list', subject='foobar')
>>> df = datasets2records(datasets)
"""
records = []
for d in ensure_list(datasets):
file_record = next((x for x in d['file_records'] if x['data_url'] and x['exists']), None)
if not file_record:
continue # Ignore files that are not accessible
rec = dict(file_size=d['file_size'], hash=d['hash'], exists=True)
rec['id_0'], rec['id_1'] = parquet.str2np(d['url'][-36:]).flatten().tolist()
rec['eid_0'], rec['eid_1'] = parquet.str2np(d['session'][-36:]).flatten().tolist()
data_url = urllib.parse.urlsplit(file_record['data_url'], allow_fragments=False)
file_path = data_url.path.strip('/')
file_path = alfio.remove_uuid_file(file_path, dry=True).as_posix()
rec['session_path'] = get_session_path(file_path).as_posix()
rec['rel_path'] = file_path[len(rec['session_path']):].strip('/')
rec['default_revision'] = d['default_dataset']
records.append(rec)
if not records:
keys = ('id_0', 'id_1', 'eid_0', 'eid_1', 'file_size', 'hash', 'session_path',
'rel_path', 'default_revision')
return pd.DataFrame(columns=keys).set_index(['id_0', 'id_1'])
return
|
pd.DataFrame(records)
|
pandas.DataFrame
|
'''Defines a pipeline step which merges training data.
'''
import os
from pandas.errors import EmptyDataError
import pandas as pd
import progressbar as pb
from src.step import Step
class Merge(Step):
'''Defines a pipeline step which merges training data.
'''
def __init__(self):
'''Initializes a new instance of the Merge object.
'''
super(Merge, self).__init__()
self.input = {
'app': 'data/raw/anything',
'game': 'data/raw/games',
'movie': 'data/raw/movie',
'music': 'data/raw/music',
'tv': 'data/raw/tv',
}
self.output = {
'path': 'data/interim/combined.csv',
}
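# Illustrative usage (a sketch; it assumes the surrounding pipeline simply
# instantiates the step and calls run(), and that the Step base class provides
# self.print); the consolidated output is configured above as
# data/interim/combined.csv:
#     Merge().run()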
def run(self):
'''Runs the pipeline step.
'''
consolidated = pd.DataFrame()
empty_files = []
for path in self.input.values():
for root, _, files in os.walk(path):
self.print('Consolidating {path}', path=root)
for file in pb.progressbar(files):
path = os.path.join(root, file)
try:
series =
|
pd.read_csv(path, sep='\t', squeeze=True)
|
pandas.read_csv
|