from selenium import webdriver
from time import sleep
import pandas as pd
from DreamsList import DreamList
def load_all_dreams(driver):
sleep(5)
def find_button(driver):
mt7 = driver.find_elements_by_xpath("//button")
button = None
for b in mt7:
divs = b.find_elements_by_xpath(".//div")
if divs:
button = b
return button
while True:
button = find_button(driver)
if button is None:
break
button.click()
sleep(1)
def find_all_dream_links(driver):
hrefs = driver.find_elements_by_xpath("//a[@href]")
hrefs = [e.get_attribute('href') for e in hrefs]
hrefs = [e for e in hrefs if e.startswith("https://kiezburn.dreams.wtf/kb21/60")]
return hrefs
def find_dreamers(driver):
els = driver.find_elements_by_xpath("//div[@class='mt-5 space-y-5']")
els = els[0]
k = els.find_elements_by_xpath(".//div[@User]")
users = [k1.get_attribute('alt') for k1 in k]
return users
def find_funded(driver):
paragraph = driver.find_elements_by_xpath("//p[text()='EUR']")
votes = None
for p in paragraph:
split = p.text.split()
split = combine_numbers(split)
if len(split) == 2:
votes = float(split[0])
return votes
def combine_numbers(split):
to_remove = []
if len(split) != 4:
for index, (n, one) in enumerate(zip(split[:-1], split[1:])):
try:
float(n)
float(one)
success = True
except (ValueError, TypeError):
success = False
if success:
new_number = n + one
split[index] = new_number
to_remove.append(one)
split = list(filter(lambda x: x not in to_remove, split))
return split
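# Illustrative example (added, not part of the original script): combine_numbers merges
# adjacent numeric tokens that appear when an amount such as "1 000 EUR" is split on
# whitespace, e.g.
#   combine_numbers(["1", "000", "EUR"])  ->  ["1000", "EUR"]
# Note the guard above: a 4-token split is returned unchanged.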
def calculate_budget(table):
cells = table.find_elements_by_xpath(".//td")
all_min = 0
all_max = 0
for cell in cells:
txt = cell.text
split = txt.split()
if any(x == "EUR" for x in split):
split = combine_numbers(split)
minimum = float(split[0])
try:
maximum = float(split[2])
except IndexError:
maximum = minimum
all_min += minimum
all_max += maximum
return all_min, all_max
def calculate_prefunding(table):
cells = table.find_elements_by_xpath(".//td")
prefund = 0
for cell in cells:
text = cell.text
split = text.split()
if any(x=="EUR" for x in split):
split = combine_numbers(split)
prefund += float(split[0])
return prefund
def find_budget(driver):
votes = find_funded(driver)
tables = driver.find_elements_by_xpath("//table")
if len(tables) == 1:
all_min, all_max = calculate_budget(tables[0])
return {"minimum_budget": all_min, "maximum_budget": all_max, "preexisting_funding": 0, "total_funding": votes}
elif len(tables) == 2:
all_min, all_max = calculate_budget(tables[0])
pre = calculate_prefunding(tables[1])
return {"minimum_budget": all_min, "maximum_budget": all_max, "preexisting_funding": pre, "total_funding": votes}
else:
return {"minimum_budget": 0, "maximum_budget": 0, "preexisting_funding": 0, "total_funding": 0}
def main(driver):
driver.get("https://kiezburn.dreams.wtf/kb21")
load_all_dreams(driver)
hrefs = find_all_dream_links(driver)
all_dreams = []
for h in hrefs:
driver.get(h)
sleep(1)
this_dream = {}
this_dream['link'] = h
this_dream['name'] = find_name(driver)
print(this_dream.get('name'))
this_dream['dreamers'] = find_dreamers(driver)
this_dream.update(find_budget(driver))
all_dreams.append(this_dream)
return all_dreams
def find_name(driver):
headers = driver.find_elements_by_xpath("//div/h1")
return headers[0].text
if __name__ == "__main__":
driver = webdriver.Chrome()
#url = "https://kiezburn.dreams.wtf/kb21/60c9aed9551867002ccd8fcb"
#driver.get(url)
#find_name(driver)
#dreamers = find_budget(driver)
#print(dreamers)
all_dreams = main(driver)
df = pd.DataFrame(all_dreams)
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from datetime import datetime
import numpy as np
from numpy.random import permutation
import pandas as pd
from pandas import to_datetime
def coalesce(*args):
return next(s for s in args if s is not None)
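# Illustrative example (added, not part of the original module):
#   coalesce(None, 2, 5)  ->  2   (the first non-None argument)
# Note that coalesce raises StopIteration if every argument is None.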
def group_survival_table_from_events(groups, durations, event_observed, min_observations, limit=-1):
"""
Joins multiple event series together into dataframes. A generalization of
`survival_table_from_events` to data with groups. Previously called `group_event_series` pre 0.2.3.
Parameters:
groups: a (n,) array of individuals' group ids.
durations: a (n,) array of durations of each individual
event_observed: a (n,) array of event observations, 1 if observed, 0 else.
min_observations: a (n,) array of times each individual entered the study. This is most applicable in
cases where there is left-truncation, i.e. an individual might enter the
study late. If not the case, normally set to all zeros.
Output:
- np.array of unique groups
- dataframe of removal count data at event_times for each group, column names are 'removed:<group name>'
- dataframe of observed count data at event_times for each group, column names are 'observed:<group name>'
- dataframe of censored count data at event_times for each group, column names are 'censored:<group name>'
Example:
#input
group_survival_table_from_events(waltonG, waltonT, np.ones_like(waltonT)) #data available in test_suite.py
#output
[
array(['control', 'miR-137'], dtype=object),
removed:control removed:miR-137
event_at
6 0 1
7 2 0
9 0 3
13 0 3
15 0 2
,
observed:control observed:miR-137
event_at
6 0 1
7 2 0
9 0 3
13 0 3
15 0 2
,
censored:control censored:miR-137
event_at
6 0 0
7 0 0
9 0 0
,
]
"""
n = max(groups.shape)
assert n == max(durations.shape) == max(event_observed.shape) == max(min_observations.shape), "inputs must be of the same length."
groups, durations, event_observed, min_observations = map(lambda x: pd.Series(np.reshape(x, (n,))), [groups, durations, event_observed, min_observations])
unique_groups = groups.unique()
# set first group
g = unique_groups[0]
ix = (groups == g)
T = durations[ix]
C = event_observed[ix]
B = min_observations[ix]
g_name = str(g)
data = survival_table_from_events(T, C, B,
columns=['removed:' + g_name, "observed:" + g_name, 'censored:' + g_name, 'entrance' + g_name])
for g in unique_groups[1:]:
ix = groups == g
T = durations[ix]
C = event_observed[ix]
B = min_observations[ix]
g_name = str(g)
data = data.join(survival_table_from_events(T, C, B,
columns=['removed:' + g_name, "observed:" + g_name, 'censored:' + g_name, 'entrance' + g_name]),
how='outer')
data = data.fillna(0)
# hmmm pandas its too bad I can't do data.ix[:limit] and leave out the if.
if int(limit) != -1:
data = data.ix[:limit]
return unique_groups, data.filter(like='removed:'), data.filter(like='observed:'), data.filter(like='censored:')
def survival_table_from_events(durations, event_observed, min_observations,
columns=["removed", "observed", "censored", 'entrance'], weights=None):
"""
Parameters:
durations: (n,1) array of event times (durations individual was observed for)
event_observed: (n,1) boolean array, 1 if observed event, 0 if censored event.
min_observations: used for left truncation data. Sometimes subjects will show
up late in the study. min_observations is a (n,1) array of positive numbers representing
when the subject was first observed. A subject's life is then [min observation + duration observed]
columns: a 4-length array naming, in order, the removed individuals, observed deaths,
censorships and entrances.
Returns:
Pandas DataFrame with index as the unique times in event_times. The columns named
'removed' refers to the number of individuals who were removed from the population
by the end of the period. The column 'observed' refers to the number of removed
individuals who were observed to have died (i.e. not censored.) The column
'censored' is defined as 'removed' - 'observed' (the number of individuals who
left the population due to censoring)
Example:
#input
survival_table_from_events( waltonT, np.ones_like(waltonT)) #available in test suite
#output
removed observed censored
event_at
6 1 1 0
7 2 2 0
9 3 3 0
13 3 3 0
15 2 2 0
"""
# deal with deaths and censorships
durations = np.asarray(durations) + min_observations
df = pd.DataFrame(durations, columns=["event_at"])
import logging
import os
from typing import List, Dict, Optional
import numpy as np
import pandas as pd
import shap
from sklearn.cluster import KMeans
from d3m import container, utils
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces import base
from d3m.primitive_interfaces.base import CallResult
from d3m.primitive_interfaces.supervised_learning import PrimitiveBase
from distil.modeling.forest import ForestCV
from distil.modeling.metrics import classification_metrics, regression_metrics
from distil.utils import CYTHON_DEP
import version
__all__ = ("EnsembleForest",)
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
metric = hyperparams.Enumeration[str](
values=classification_metrics + regression_metrics,
default="f1Macro",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The D3M scoring metric to use during the fit phase. This can be any of the regression, classification or "
+ "clustering metrics.",
)
shap_max_dataset_size = hyperparams.Hyperparameter[int](
default=1500,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The maximum dataset size on which to apply SHAP interpretation to each sample individually. Otherwise, this number of samples will be"
+ "drawn from the data distribution after clustering (to approximate the distribution) and interpretation will only be applied to these"
+ "samples",
)
n_estimators = hyperparams.UniformInt(
lower=1,
upper=2048,
default=32,
description="The number of trees in the forest.",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter",
"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter",
],
)
min_samples_leaf = hyperparams.UniformInt(
lower=1,
upper=31,
default=2,
description="Minimum number of samples to split leaf",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter",
"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter",
],
)
class_weight = hyperparams.Enumeration[str](
values=["None", "balanced", "balanced_subsample"],
default="None",
description="todo",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
)
estimator = hyperparams.Enumeration[str](
values=["ExtraTrees", "RandomForest"],
default="ExtraTrees",
description="todo",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
)
grid_search = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Runs an internal grid search to fit the primitive, ignoring caller supplied values for "
+ "n_estimators, min_samples_leaf, class_weight, estimator",
)
small_dataset_threshold = hyperparams.Hyperparameter[int](
default=2000,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, controls the application of the 'small_dataset_fits' and 'large_dataset_fits' "
+ "parameters - if the input dataset has fewer rows than the threshold value, 'small_dateset_fits' will be used when fitting. "
+ "Otherwise, 'num_large_fits' is used.",
)
small_dataset_fits = hyperparams.Hyperparameter[int](
default=5,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, the number of random forests to fit when using small datasets.",
)
large_dataset_fits = hyperparams.Hyperparameter[int](
default=1,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, the number of random forests to fit when using large datasets.",
)
compute_confidences = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Compute confidence values. Only valid when the task is classification.",
)
n_jobs = hyperparams.Hyperparameter[int](
default=64,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The value of the n_jobs parameter for the joblib library",
)
pos_label = hyperparams.Hyperparameter[Optional[str]](
default=None,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Name of the positive label in the binary case. If none is provided, second column is assumed to be positive",
)
class Params(params.Params):
model: ForestCV
target_cols: List[str]
label_map: Dict[int, str]
needs_fit: bool
binary: bool
input_hash: pd.Series
class EnsembleForestPrimitive(
PrimitiveBase[container.DataFrame, container.DataFrame, Params, Hyperparams]
):
"""
Generates an ensemble of random forests, with the number of internal models created controlled by the size of the
input dataframe. It accepts a dataframe as input, and returns a dataframe consisting of prediction values only as output.
Columns with string structural types are ignored.
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "e0ad06ce-b484-46b0-a478-c567e1ea7e02",
"version": version.__version__,
"name": "EnsembleForest",
"python_path": "d3m.primitives.learner.random_forest.DistilEnsembleForest",
"source": {
"name": "Distil",
"contact": "mailto:<EMAIL>",
"uris": [
"https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/ensemble_forest.py",
"https://github.com/uncharted-distil/distil-primitives",
],
},
"installation": [
CYTHON_DEP,
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
],
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.RANDOM_FOREST,
],
"primitive_family": metadata_base.PrimitiveFamily.LEARNER,
},
)
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
# hack to get around typing constraints.
if self.hyperparams["class_weight"] == "None":
class_weight = None
else:
class_weight = self.hyperparams["class_weight"]
grid_search = self.hyperparams["grid_search"]
if grid_search is True:
current_hyperparams = None
else:
current_hyperparams = {
"estimator": self.hyperparams["estimator"],
"n_estimators": self.hyperparams[
"n_estimators"
], # [32, 64, 128, 256, 512, 1024, 2048],
"min_samples_leaf": self.hyperparams[
"min_samples_leaf"
], # '[1, 2, 4, 8, 16, 32],
}
if self.hyperparams["metric"] in classification_metrics:
current_hyperparams.update({"class_weight": class_weight})
else: # regression
current_hyperparams.update({"bootstrap": True})
self._model = ForestCV(
self.hyperparams["metric"],
random_seed=self.random_seed,
hyperparams=current_hyperparams,
grid_search=grid_search,
n_jobs=self.hyperparams["n_jobs"],
)
self._needs_fit = True
self._label_map: Dict[int, str] = {}
self._target_cols: List[str] = []
self._binary = False
def _get_component_columns(
self, output_df: container.DataFrame, source_col_index: int
) -> List[int]:
# Component columns are all column which have as source the referenced
# column index. This includes the aforementioned column index.
component_cols = [source_col_index]
# get the column name
col_name = output_df.metadata.query(
(metadata_base.ALL_ELEMENTS, source_col_index)
)["name"]
# get all columns which have this column as source
for c in range(0, len(output_df.columns)):
src = output_df.metadata.query((metadata_base.ALL_ELEMENTS, c))
if "source_column" in src and src["source_column"] == col_name:
component_cols.append(c)
return component_cols
def set_training_data(
self, *, inputs: container.DataFrame, outputs: container.DataFrame
) -> None:
# At this point anything that needed to be imputed should have been, so we'll
# clear out any remaining NaN values as a last measure.
# if we are doing classification the outputs need to be integer classes.
# label map is used to convert these back on produce.
col = outputs.columns[0]
if self._model.mode == "classification":
factor = pd.factorize(outputs[col])
outputs = pd.DataFrame(factor[0], columns=[col])
self._label_map = {k: v for k, v in enumerate(factor[1])}
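# Illustrative note (added): pd.factorize maps each distinct label to an integer code,
# e.g. pd.factorize(pd.Series(["b", "a", "b"])) returns (array([0, 1, 0]), Index(["b", "a"])),
# so label_map here would be {0: "b", 1: "a"} and is used on produce to restore the
# original string labels.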
self._target_cols = list(outputs.columns)
# remove nans from outputs, apply changes to inputs as well to ensure alignment
self._input_hash = pd.util.hash_pandas_object(inputs)
self._outputs = outputs[
outputs[col] != ""
].dropna() # not in place because we don't want to modify passed input
self._binary = self._outputs.iloc[:, 0].nunique(dropna=True) <= 2
row_diff = outputs.shape[0] - self._outputs.shape[0]
if row_diff != 0:
logger.warn(f"Removed {row_diff} rows due to NaN values in target data.")
self._inputs = inputs.loc[self._outputs.index, :]
else:
self._inputs = inputs
# same in other direction
inputs_rows = self._inputs.shape[0]
inputs_cols = self._inputs.shape[1]
self._inputs = self._inputs.select_dtypes(include="number")
col_diff = inputs_cols - self._inputs.shape[1]
if col_diff != 0:
logger.warn(f"Removed {col_diff} unencoded columns from training data.")
self._inputs = (
self._inputs.dropna()
) # not in place because the selection above doesn't create a copy
row_diff = inputs_rows - self._inputs.shape[0]
if row_diff != 0:
logger.warn(f"Removed {row_diff} rows due to NaN values in training data.")
self._outputs = self._outputs.loc[self._inputs.index, :]
self._model.num_fits = (
self.hyperparams["large_dataset_fits"]
if self._inputs.shape[0] > self.hyperparams["small_dataset_threshold"]
else self.hyperparams["small_dataset_fits"]
)
self._needs_fit = True
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
logger.debug(f"Fitting {__name__}")
if self._needs_fit:
self._model.fit(self._inputs.values, self._outputs.values)
self._needs_fit = False
return CallResult(None)
def produce(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None,
) -> CallResult[container.DataFrame]:
logger.debug(f"Producing {__name__}")
# force a fit if it hasn't yet been done
if self._needs_fit:
self.fit()
# drop any non-numeric columns
num_cols = inputs.shape[1]
inputs = inputs.select_dtypes(include="number")
col_diff = num_cols - inputs.shape[1]
if col_diff > 0:
logger.warn(f"Removed {col_diff} unencoded columns from produce data.")
# create dataframe to hold the result
result = self._model.predict(inputs.values)
if len(self._target_cols) > 1:
result_df = container.DataFrame()
for i, c in enumerate(self._target_cols):
col = container.DataFrame({c: result[:, i]})
result_df = pd.concat([result_df, col], axis=1)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from nltk import word_tokenize
stopwords = stopwords.words('russian')
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
x["weekinmonday"] = x["monthday"] // 7
##################Added in set 1 - 0.01 Improvement
x['price_new'] = np.log1p(x.price) # log transform improves correlation with deal_price
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
x['symbol7_count'] = x['description'].str.count('\.') # escaped: str.count treats the pattern as a regex, so an unescaped '.' would match every character
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
test_df['deal_probability']=10.0
############################
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
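# Illustrative example (added): clean_text lower-cases, keeps only latin/cyrillic letters
# and digits, and collapses whitespace, e.g.
#   clean_text("Продаю iPhone 7, 32GB!")  ->  "продаю iphone 7 32gb"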
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
train_test = pd.concat((train_df, test_df), axis = 'rows')
## After cleaning => then find intersection
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
train_test["params_clean"]= list(train_test[["params"]].apply(lambda x: preprocess_data(x["params"]), axis=1))
train_test['count_common_words_title_desc'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
train_test['count_common_words_title_params'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['params_clean']).lower().split()))), axis=1)
train_test['count_common_words_params_desc'] = train_test.apply(lambda x: len(set(str(x['params_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
print("Cleaned texts..")
###################
# Count Nouns
import pymorphy2
morph = pymorphy2.MorphAnalyzer(result_type=None)
from fastcache import clru_cache as lru_cache
@lru_cache(maxsize=1000000)
def lemmatize_pos(word):
_, tag, norm_form, _, _ = morph.parse(word)[0]
return norm_form, tag.POS
def getPOS(x, pos1 = 'NOUN'):
lemmatized = []
x = clean_text(x)
#x = re.sub(u'[.]', ' ', x)
for s in x.split():
s, pos = lemmatize_pos(s)
if pos != None:
if pos1 in pos:
lemmatized.append(s)
return ' '.join(lemmatized)
train_test['get_nouns_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'NOUN'), axis=1))
train_test['get_nouns_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'NOUN'), axis=1))
train_test['get_adj_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'ADJ'), axis=1))
train_test['get_adj_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'ADJ'), axis=1))
train_test['get_verb_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'VERB'), axis=1))
train_test['get_verb_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'VERB'), axis=1))
# Count digits
def count_digit(x):
x = clean_text(x)
return len(re.findall(r'\b\d+\b', x))
train_test['count_of_digit_in_title'] = list(train_test.apply(lambda x: count_digit(x['title']), axis=1))
train_test['count_of_digit_in_desc'] = list(train_test.apply(lambda x: count_digit(x['description']), axis=1))
train_test['count_of_digit_in_params'] = list(train_test.apply(lambda x: count_digit(x['params']), axis=1))
## get unicode features
count_unicode = lambda x: len([c for c in x if ord(c) > 1105])
count_distunicode = lambda x: len({c for c in x if ord(c) > 1105})
train_test['count_of_unicode_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_unicode_in_desc'] = list(train_test.apply(lambda x: count_unicode(x['description']), axis=1))
train_test['count_of_distuni_in_title'] = list(train_test.apply(lambda x: count_distunicode(x['title']), axis=1))
train_test['count_of_distuni_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
###
count_caps = lambda x: len([c for c in x if c.isupper()])
train_test['count_caps_in_title'] = list(train_test.apply(lambda x: count_caps(x['title']), axis=1))
train_test['count_caps_in_desc'] = list(train_test.apply(lambda x: count_caps(x['description']), axis=1))
import string
count_punct = lambda x: len([c for c in x if c in string.punctuation])
train_test['count_punct_in_title'] = list(train_test.apply(lambda x: count_punct(x['title']), axis=1))
train_test['count_punct_in_desc'] = list(train_test.apply(lambda x: count_punct(x['description']), axis=1))
print("Computed POS Features and others..")
train_test['count_common_nouns'] = train_test.apply(lambda x: len(set(str(x['get_nouns_title']).lower().split()).intersection(set(str(x['get_nouns_desc']).lower().split()))), axis=1)
train_test['count_common_adj'] = train_test.apply(lambda x: len(set(str(x['get_adj_title']).lower().split()).intersection(set(str(x['get_adj_desc']).lower().split()))), axis=1)
train_test['ratio_of_unicode_in_title'] = train_test['count_of_unicode_in_title'] / train_test['len_title']
train_test['ratio_of_unicode_in_desc'] = train_test['count_of_unicode_in_desc'] / train_test['len_description']
train_test['ratio_of_punct_in_title'] = train_test['count_punct_in_title'] / train_test['len_title']
train_test['ratio_of_punct_in_desc'] = train_test['count_punct_in_desc'] / train_test['len_description']
train_test['ratio_of_cap_in_title'] = train_test['count_caps_in_title'] / train_test['len_title']
train_test['ratio_of_cap_in_desc'] = train_test['count_caps_in_desc'] / train_test['len_description']
train_test['count_nouns_in_title'] = train_test["get_nouns_title"].apply(lambda x: len(x.split()))
train_test['count_nouns_in_desc'] = train_test['get_nouns_desc'].apply(lambda x: len(x.split()))
train_test['count_adj_in_title'] = train_test["get_adj_title"].apply(lambda x: len(x.split()))
train_test['count_adj_in_desc'] = train_test['get_adj_desc'].apply(lambda x: len(x.split()))
train_test['count_verb_title'] = train_test['get_verb_title'].apply(lambda x: len(x.split()))
train_test['count_verb_desc'] = train_test['get_verb_desc'].apply(lambda x: len(x.split()))
train_test['ratio_nouns_in_title'] = train_test["count_nouns_in_title"] / train_test["title_nwords"]
train_test['ratio_nouns_in_desc'] = train_test["count_nouns_in_desc"] / train_test["desc_nwords"]
train_test['ratio_adj_in_title'] = train_test["count_adj_in_title"] / train_test["title_nwords"]
train_test['ratio_adj_in_desc'] = train_test["count_adj_in_desc"] / train_test["desc_nwords"]
train_test['ratio_vrb_in_title'] = train_test["count_verb_title"] / train_test["title_nwords"]
train_test['ratio_vrb_in_desc'] = train_test["count_verb_desc"] / train_test["desc_nwords"]
train_test["title"]= list(train_test[["title"]].apply(lambda x: clean_text(x["title"]), axis=1))
train_test["description"]= list(train_test[["description"]].apply(lambda x: clean_text(x["description"]), axis=1))
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
#######################
### Save
#######################
train_df = train_test.loc[train_test.deal_probability != 10].reset_index(drop = True)
test_df = train_test.loc[train_test.deal_probability == 10].reset_index(drop = True)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
train_df.to_feather('../train_basic_features.pkl')
test_df.to_feather('../test__basic_features.pkl')
#######################
### Label Enc
#######################
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
cat_vars = ["user_id", "region", "city", "parent_category_name", "category_name", "user_type", "param_1", "param_2", "param_3"]
for col in cat_vars:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
train_df.to_feather('../train_basic_features_lblencCats.pkl')
test_df.to_feather('../test__basic_features_lblencCats.pkl')
#######################
### One hots
#######################
train_df=pd.read_feather('../train_basic_features_lblencCats.pkl')
test_df=pd.read_feather('../test__basic_features_lblencCats.pkl')
from sklearn.externals import joblib
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.user_id.values.tolist() + test_df.user_id.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_id_onehot.pkl")
X = le.fit_transform(np.array(train_df.region.values.tolist() + test_df.region.values.tolist()).reshape(-1,1))
joblib.dump(X, "../region_onehot.pkl")
X = le.fit_transform(np.array(train_df.city.values.tolist() + test_df.city.values.tolist()).reshape(-1,1))
joblib.dump(X, "../city_onehot.pkl")
X = le.fit_transform(np.array(train_df.parent_category_name.values.tolist() + test_df.parent_category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../parent_category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.category_name.values.tolist() + test_df.category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.user_type.values.tolist() + test_df.user_type.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_type_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_1.values.tolist() + test_df.param_1.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_1_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_2.values.tolist() + test_df.param_2.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_2_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_3.values.tolist() + test_df.param_3.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_3_onehot.pkl")
train_df.drop(cat_vars, inplace = True, axis = 'columns')
test_df.drop(cat_vars, inplace = True, axis = 'columns')
train_df.to_feather('../train_basic_features_woCats.pkl')
test_df.to_feather('../test__basic_features_woCats.pkl')
#######################
### Tfidf
#######################
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf.pkl")
### TFIDF Vectorizer ###
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, #,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
### TFIDF Vectorizer ###
train_df['get_nouns_title'] = train_df['get_nouns_title'].fillna(' ')
test_df['get_nouns_title'] = test_df['get_nouns_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_title'].values.tolist() + test_df['get_nouns_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_title_tfidf.pkl")
del full_tfidf
print("TDIDF Title Noun..")
### TFIDF Vectorizer ###
train_df['get_nouns_desc'] = train_df['get_nouns_desc'].fillna(' ')
test_df['get_nouns_desc'] = test_df['get_nouns_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_desc'].values.tolist() + test_df['get_nouns_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Noun..")
### TFIDF Vectorizer ###
train_df['get_adj_title'] = train_df['get_adj_title'].fillna(' ')
test_df['get_adj_title'] = test_df['get_adj_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_title'].values.tolist() + test_df['get_adj_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Adj..")
### TFIDF Vectorizer ###
train_df['get_adj_desc'] = train_df['get_adj_desc'].fillna(' ')
test_df['get_adj_desc'] = test_df['get_adj_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_desc'].values.tolist() + test_df['get_adj_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Adj..")
### TFIDF Vectorizer ###
train_df['get_verb_title'] = train_df['get_verb_title'].fillna(' ')
test_df['get_verb_title'] = test_df['get_verb_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_title'].values.tolist() + test_df['get_verb_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Verb..")
### TFIDF Vectorizer ###
train_df['get_verb_desc'] = train_df['get_verb_desc'].fillna(' ')
test_df['get_verb_desc'] = test_df['get_verb_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_desc'].values.tolist() + test_df['get_verb_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Verb..")
###############################
# Sentence to seq
###############################
print('Generate Word Sequences')
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
df = pd.concat((train_df, test_df), axis = 'rows')
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['title'].tolist())
sequences = tokenizer.texts_to_sequences(df['title'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../titleSequences.pkl")
MAX_NUM_OF_WORDS = 10000
TIT_MAX_SEQUENCE_LENGTH = 20
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['params'].tolist())
sequences = tokenizer.texts_to_sequences(df['params'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../paramSequences.pkl")
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['description'].tolist())
sequences = tokenizer.texts_to_sequences(df['description'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../descSequences.pkl")
#######OHC WeekDay
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.activation_weekday.values.tolist() + test_df.activation_weekday.values.tolist()).reshape(-1,1))
################################################
# Cat encoding
################################################
train_df = pd.read_feather('../train_basic_features.pkl')
"""
A collection of Algos used to create Strategy logic.
"""
from __future__ import division
import abc
import random
import re
import numpy as np
import pandas as pd
import sklearn.covariance
from future.utils import iteritems
import bt
from bt.core import Algo, AlgoStack, SecurityBase, is_zero
def run_always(f):
"""
Run always decorator to be used with Algo
to ensure stack runs the decorated Algo
on each pass, regardless of failures in the stack.
"""
f.run_always = True
return f
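# Illustrative usage (added, assuming a plain callable used as an Algo):
#
#   @run_always
#   def log_date(target):
#       print(target.now)
#       return True
#
# An AlgoStack checks the run_always attribute so that decorated algos are executed on
# every pass even if an earlier algo in the stack returned False.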
class PrintDate(Algo):
"""
This Algo simply print's the current date.
Can be useful for debugging purposes.
"""
def __call__(self, target):
print(target.now)
return True
class PrintTempData(Algo):
"""
This Algo prints the temp data.
Useful for debugging.
Args:
* fmt_string (str): A string that will later be formatted with the
target's temp dict. Therefore, you should provide
what you want to examine within curly braces ( { } )
"""
def __init__(self, fmt_string=None):
super(PrintTempData, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
if self.fmt_string:
print(self.fmt_string.format(**target.temp))
else:
print(target.temp)
return True
class PrintInfo(Algo):
"""
Prints out info associated with the target strategy. Useful for debugging
purposes.
Args:
* fmt_string (str): A string that will later be formatted with the
target object's __dict__ attribute. Therefore, you should provide
what you want to examine within curly braces ( { } )
Ex:
PrintInfo('Strategy {name} : {now}')
This will print out the name and the date (now) on each call.
Basically, you provide a string that will be formatted with target.__dict__
"""
def __init__(self, fmt_string="{name} {now}"):
super(PrintInfo, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
print(self.fmt_string.format(**target.__dict__))
return True
class Debug(Algo):
"""
Utility Algo that calls pdb.set_trace when triggered.
In the debug session, 'target' is available and can be examined through the
StrategyBase interface.
"""
def __call__(self, target):
import pdb
pdb.set_trace()
return True
class RunOnce(Algo):
"""
Returns True on first run then returns False.
Args:
* run_on_first_call: bool which determines if it runs the first time the algo is called
As the name says, the algo only runs once. Useful in situations
where we want to run the logic once (buy and hold for example).
"""
def __init__(self):
super(RunOnce, self).__init__()
self.has_run = False
def __call__(self, target):
# if it hasn't run then we will
# run it and set flag
if not self.has_run:
self.has_run = True
return True
# return false to stop future execution
return False
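# Illustrative usage (added): RunOnce is typically placed at the top of a buy-and-hold
# strategy stack, e.g.
#   bt.Strategy('buy_and_hold', [RunOnce(), SelectAll(), WeighEqually(), Rebalance()])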
class RunPeriod(Algo):
def __init__(
self, run_on_first_date=True, run_on_end_of_period=False, run_on_last_date=False
):
super(RunPeriod, self).__init__()
self._run_on_first_date = run_on_first_date
self._run_on_end_of_period = run_on_end_of_period
self._run_on_last_date = run_on_last_date
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
# not a known date in our universe
if now not in target.data.index:
return False
# get index of the current date
index = target.data.index.get_loc(target.now)
result = False
# index 0 is a date added by the Backtest Constructor
if index == 0:
return False
# first date
if index == 1:
if self._run_on_first_date:
result = True
# last date
elif index == (len(target.data.index) - 1):
if self._run_on_last_date:
result = True
else:
# create pandas.Timestamp for useful .week,.quarter properties
now = pd.Timestamp(now)
index_offset = -1
if self._run_on_end_of_period:
index_offset = 1
date_to_compare = target.data.index[index + index_offset]
date_to_compare = pd.Timestamp(date_to_compare)
result = self.compare_dates(now, date_to_compare)
return result
@abc.abstractmethod
def compare_dates(self, now, date_to_compare):
raise (NotImplementedError("RunPeriod Algo is an abstract class!"))
class RunDaily(RunPeriod):
"""
Returns True on day change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's day has changed
compared to the last(or next if run_on_end_of_period) date, if not returns False.
Useful for daily rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.date() != date_to_compare.date():
return True
return False
class RunWeekly(RunPeriod):
"""
Returns True on week change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's week has changed
since relative to the last(or next) date, if not returns False. Useful for
weekly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.week != date_to_compare.week:
return True
return False
class RunMonthly(RunPeriod):
"""
Returns True on month change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's month has changed
since relative to the last(or next) date, if not returns False. Useful for
monthly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.month != date_to_compare.month:
return True
return False
class RunQuarterly(RunPeriod):
"""
Returns True on quarter change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's quarter has changed
since relative to the last(or next) date, if not returns False. Useful for
quarterly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.quarter != date_to_compare.quarter:
return True
return False
class RunYearly(RunPeriod):
"""
Returns True on year change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's year has changed
since relative to the last(or next) date, if not returns False. Useful for
yearly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year:
return True
return False
class RunOnDate(Algo):
"""
Returns True on a specific set of dates.
Args:
* dates (list): List of dates to run Algo on.
"""
def __init__(self, *dates):
"""
Args:
* dates (*args): A list of dates. Dates will be parsed
by pandas.to_datetime so pass anything that it can
parse. Typically, you will pass a string 'yyyy-mm-dd'.
"""
super(RunOnDate, self).__init__()
# parse dates and save
self.dates = [pd.to_datetime(d) for d in dates]
"""myStats - python code for the textbook examples"""
import matplotlib.pyplot as plt
import numpy as np
import scipy
from scipy.stats import norm, t, chi2, mode, binom
import numpy
import scipy.stats as stats
from IPython.display import display, Markdown, Latex
import math
import pandas as pd
class MyStats:
def __init__(self):
pass
def get_pop_sd(arr):
print("arr : ", arr)
print("std of arr : ", np.std(arr))
print("\nMore precision with float32")
print("std of arr : ", np.std(arr, dtype=np.float32))
print("\nMore accuracy with float64")
print("std of arr : ", np.std(arr, dtype=np.float64))
def get_sample_sd(arr, ddof=1):
print("arr : ", arr)
print("std of arr : ", np.std(arr))
print("\nMore precision with float32")
print("std of arr : ", np.std(arr, dtype=np.float32, ddof=1))
print("\nMore accuracy with float64")
print("std of arr : ", np.std(arr, dtype=np.float64, ddof=1))
def get_mean(arr):
mean = scipy.mean(arr)
print("mean of arr : ",mean)
return mean
def get_sum_of_squares(x):
return sum(map(lambda i: i * i, x))
def get_sum_of_product(x, y):
return round(sum([a * b for a, b in zip(x, y)]),2)
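# Illustrative examples (added):
#   MyStats.get_sum_of_squares([1, 2, 3])             ->  14
#   MyStats.get_sum_of_product([1, 2, 3], [4, 5, 6])  ->  32  (1*4 + 2*5 + 3*6)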
class MeasuresOfCentralTendency:
def __init__(self):
pass
def get_mode(values):
return mode(values)
def get_median(values):
return numpy.median(values)
def get_mean(values):
return numpy.mean(values)
def get_trim_mean(values, proportioncut):
return scipy.stats.trim_mean(values, proportioncut)
def get_range(values):
return max(values) - min(values)
def get_variance(values,type='sample'):
if(type == 'sample'):
return numpy.var(values, ddof=1)
else:
return numpy.var(values)
def get_standard_deviation(values,type='sample'):
if(type == 'sample'):
return numpy.std(values, ddof=1)
else:
return numpy.std(values)
def get_coefficient_of_variation(values, type='sample'):
if (type == 'sample'):
return round(MyStats.MeasuresOfCentralTendency.get_standard_deviation(values, 'sample'), 2) / round(MyStats.MeasuresOfCentralTendency.get_mean(values),2)
else:
return round(MyStats.MeasuresOfCentralTendency.get_standard_deviation(values, 'pop'), 2) / round(MyStats.MeasuresOfCentralTendency.get_mean(values),2)
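# Illustrative example (added): the coefficient of variation is the standard deviation
# divided by the mean, e.g. for [10, 20, 30] the sample standard deviation is 10.0 and
# the mean is 20.0, so
#   MyStats.MeasuresOfCentralTendency.get_coefficient_of_variation([10, 20, 30])  ->  0.5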
def description(array_values):
array_values.sort()
d = {'one': array_values}
df = pd.DataFrame(d)
from copy import deepcopy
from pathlib import Path
import pandas as pd
from pymongo import MongoClient
import projectconfig
from relnet.evaluation.file_paths import FilePaths
from relnet.state.network_generators import get_graph_ids_to_iterate
class EvaluationStorage:
MONGO_EXPERIMENT_COLLECTION = 'experiment_data'
MONGO_EVALUATION_COLLECTION = 'evaluation_data'
def __init__(self):
config = projectconfig.get_project_config()
self.mongo_client = MongoClient(config.BACKEND_URL)
self.db = self.mongo_client[config.MONGODB_DATABASE_NAME]
def find_latest_experiment_id(self):
result = self.db[self.MONGO_EXPERIMENT_COLLECTION].find().sort([("started_millis", -1)]).limit(1)[0]["experiment_id"]
return result
def get_hyperparameter_optimisation_data(self,
experiment_id,
model_seeds_to_skip,
train_individually):
latest_experiment = self.get_experiment_details(experiment_id)
file_paths = latest_experiment["file_paths"]
experiment_conditions = latest_experiment["experiment_conditions"]
hyperopt_data = []
network_generators = latest_experiment["network_generators"]
objective_functions = latest_experiment["objective_functions"]
agent_names = latest_experiment["agents"]
param_spaces = latest_experiment["parameter_search_spaces"]
for objective_function in objective_functions:
for agent_name in agent_names:
agent_grid = param_spaces[objective_function][agent_name]
search_space_keys = list(agent_grid.keys())
for hyperparams_id in search_space_keys:
for seed in experiment_conditions['experiment_params']['model_seeds']:
for network_generator in network_generators:
graph_ids_to_iterate = get_graph_ids_to_iterate(train_individually, network_generator, file_paths)
for graph_id in graph_ids_to_iterate:
setting = (network_generator, objective_function, agent_name, graph_id)
if setting in model_seeds_to_skip:
if seed in model_seeds_to_skip[setting]:
print(f"Skipping seed {seed} when computing optimal hyperparams.")
continue
model_prefix = FilePaths.construct_model_identifier_prefix(agent_name,
objective_function,
network_generator,
seed,
hyperparams_id,
graph_id=graph_id)
hyperopt_result_filename = FilePaths.construct_best_validation_file_name(model_prefix)
hyperopt_result_path = Path(file_paths['hyperopt_results_dir'], hyperopt_result_filename)
if hyperopt_result_path.exists():
with hyperopt_result_path.open('r') as f:
avg_eval_reward = float(f.readline())
hyperopt_data_row = {"network_generator": network_generator,
"objective_function": objective_function,
"agent_name": agent_name,
"hyperparams_id": hyperparams_id,
"avg_reward": avg_eval_reward,
"graph_id": graph_id}
hyperopt_data.append(hyperopt_data_row)
return param_spaces, pd.DataFrame(hyperopt_data)
import os
from collections import defaultdict, namedtuple
from itertools import chain, repeat, combinations, zip_longest
import re
from copy import copy, deepcopy
import pandas as pd
import numpy as np
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
class MoleculeType:
def __init__(self, data, top=None):
name, nrexcl = data.strip().split()
self.name = name
self.nrexcl = nrexcl
self.top = defaultdict(list) if top is None else top
def __repr__(self):
return f'MoleculeType("{self.name} {self.nrexcl}", {self.top})'
class CMapType:
def __init__(self, *args):
if len(args) == 1 and isinstance(args[0], str):
data = args[0].split()
else:
data = args
(
self.i,
self.j,
self.k,
self.l,
self.m,
self.func,
self.gridx,
self.gridy,
*self.values
) = data
assert int(self.gridx) * int(self.gridy) == len(self.values)
self.values = [float(v) for v in self.values]
def __repr__(self):
return (
f'<CMapType for atoms {self.i} {self.j} {self.k} {self.l} '
f'{self.m} on a {self.gridx}x{self.gridy} grid at {hex(id(self))}>'
)
def infer_dtypes(df):
new_df = pd.DataFrame()
for key, series in df.items():
try:
series_out = pd.to_numeric(series)
import bs4
from urllib.request import urlopen
#import datetime as dt
import pandas as pd
import re
import json
class Naver:
def date_format(self, d=''):
if d != '':
this_date = pd.to_datetime(d).date()
else:
this_date = pd.Timestamp.today().date() # use today's date
return (this_date)
def stock_price(self, historical_prices, stock_cd, start_date='', end_date='', page_n=1, last_page=0):
#nvr = self.NaverPrice()
start_date = self.date_format(start_date)
end_date = self.date_format(end_date)
naver_stock = 'http://finance.naver.com/item/sise_day.nhn?code=' + stock_cd + '&page=' + str(page_n)
source = urlopen(naver_stock).read()
source = bs4.BeautifulSoup(source, 'lxml')
dates = source.find_all('span', class_='tah p10 gray03') # collect the dates
prices = source.find_all('td', class_='num') # collect the closing prices
for n in range(len(dates)):
if len(dates) > 0:
# parse the date
this_date = dates[n].text
this_date = self.date_format(this_date)
if this_date <= end_date and this_date >= start_date:
# keep data between start_date and end_date
# parse the closing price
this_close = prices[n*6].text
this_close = this_close.replace(',', '')
this_close = float(this_close)
# store in the dictionary
historical_prices[this_date] = this_close
elif this_date < start_date:
# stop once the date is before start_date
return (historical_prices)
# page navigation
if last_page == 0:
last_page = source.find_all('table')[1].find('td', class_='pgRR').find('a')['href']
last_page = last_page.split('&')[1]
last_page = last_page.split('=')[1]
last_page = float(last_page)
# fetch the next page
if page_n < last_page:
page_n = page_n + 1
self.stock_price(historical_prices, stock_cd, start_date, end_date, page_n, last_page)
return (historical_prices)
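# Illustrative usage (added; '005930' is shown only as an example stock code):
#   prices = {}
#   Naver().stock_price(prices, '005930', start_date='2021-01-01', end_date='2021-12-31')
#   # prices is filled with {date: closing price} pairs scraped page by page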
def index_korea(self, historical_prices, index_cd, start_date='', end_date='', page_n=1, last_page=0):
start_date = self.date_format(start_date)
end_date = self.date_format(end_date)
naver_index = 'http://finance.naver.com/sise/sise_index_day.nhn?code=' + index_cd + '&page=' + str(page_n)
source = urlopen(naver_index).read() # read the HTML of the requested page
source = bs4.BeautifulSoup(source, 'lxml') # parse the markup with BeautifulSoup
dates = source.find_all('td', class_='date') # collect dates from <td class="date"> tags
prices = source.find_all('td', class_='number_1') # collect index values from <td class="number_1"> tags
for n in range(len(dates)):
if dates[n].text.split('.')[0].isdigit():
# parse the date
this_date = dates[n].text
this_date= self.date_format(this_date)
if this_date <= end_date and this_date >= start_date:
# keep data between start_date and end_date
# parse the closing index value
this_close = prices[n*4].text # the closing index values sit at positions 0, 4, 8, ...
this_close = this_close.replace(',', '')
this_close = float(this_close)
# store in the dictionary
historical_prices[this_date] = this_close
elif this_date < start_date:
# stop once the date is before start_date
return (historical_prices)
# page navigation
if last_page == 0:
last_page = source.find('td', class_='pgRR').find('a')['href']
# extract the URL of the last page
last_page = last_page.split('&')[1] # take the "page=..." part after '&'
last_page = last_page.split('=')[1] # keep only the page number after '='
last_page = int(last_page) # convert to an integer
# fetch the next page
if page_n < last_page:
page_n = page_n + 1
self.index_korea(historical_prices, index_cd, start_date, end_date, page_n, last_page)
return (historical_prices)
# basic info for constituent stocks
def stock_info(self, stock_cd):
url_float = 'http://companyinfo.stock.naver.com/v1/company/c1010001.aspx?cmp_cd=' + stock_cd
source = urlopen(url_float).read()
soup = bs4.BeautifulSoup(source, 'lxml')
tmp = soup.find(id='cTB11').find_all('tr')[6].td.text
tmp = tmp.replace('\r', '')
tmp = tmp.replace('\n', '')
tmp = tmp.replace('\t', '')
tmp = re.split('/', tmp)
outstanding = tmp[0].replace(',', '')
outstanding = outstanding.replace('주', '')
outstanding = outstanding.replace(' ', '')
outstanding = int(outstanding)
floating = tmp[1].replace(' ', '')
floating = floating.replace('%', '')
floating = float(floating)
name = soup.find(id='pArea').find('div').find('div').find('tr').find('td').find('span').text
#k10_outstanding[stock_cd] = outstanding
#k10_floating[stock_cd] = floating
#k10_name[stock_cd] = name
return (name, outstanding, floating)
def index_global(self, d, symbol, start_date='', end_date='', page=1):
end_date = self.date_format(end_date)
if start_date == '':
start_date = end_date - pd.DateOffset(years=1)
start_date = self.date_format(start_date)
url = 'https://finance.naver.com/world/worldDayListJson.nhn?symbol='+symbol+'&fdtc=0&page='+str(page)
raw = urlopen(url)
data = json.load(raw)
if len(data) > 0:
for n in range(len(data)):
date = pd.to_datetime(data[n]['xymd']).date()
if date <= end_date and date >= start_date:
                    # keep data between start_date and end_date
                    # parse the closing price
                    price = float(data[n]['clos'])
                    # store in the dictionary
d[date] = price
elif date < start_date:
                    # stop once the date falls before start_date
return (d)
if len(data) == 10:
page += 1
self.index_global(d, symbol, start_date, end_date, page)
return (d)
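# Hedged usage sketch (not part of the original module): pull about a month of
# KOSPI closing values into a dict keyed by date. The 'KOSPI' index code and the
# date range are illustrative assumptions, and a live connection to
# finance.naver.com is required for this to return data.
def _example_naver_index_korea():
    nvr = Naver()
    prices = {}
    nvr.index_korea(prices, 'KOSPI', start_date='2021-01-01', end_date='2021-01-31')
    return pd.Series(prices).sort_index()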
class NaverStockInfo:
def read_src(self, stock_cd):
url_float = 'http://companyinfo.stock.naver.com/v1/company/c1010001.aspx?cmp_cd=' + stock_cd
source = urlopen(url_float).read()
soup = bs4.BeautifulSoup(source, 'lxml')
return (soup)
def stock_info(self, stock_cd):
url_float = 'http://companyinfo.stock.naver.com/v1/company/c1010001.aspx?cmp_cd=' + stock_cd
source = urlopen(url_float).read()
soup = bs4.BeautifulSoup(source, 'lxml')
tmp = soup.find(id='cTB11').find_all('tr')[6].td.text
tmp = tmp.replace('\r', '')
tmp = tmp.replace('\n', '')
tmp = tmp.replace('\t', '')
tmp = re.split('/', tmp)
outstanding = tmp[0].replace(',', '')
outstanding = outstanding.replace('주', '')
outstanding = outstanding.replace(' ', '')
outstanding = int(outstanding)
floating = tmp[1].replace(' ', '')
floating = floating.replace('%', '')
floating = float(floating)
name = soup.find(id='pArea').find('div').find('div').find('tr').find('td').find('span').text
return (name, outstanding, floating)
def outstanding(self, stock_cd):
soup = self.read_src(stock_cd)
tmp = soup.find(id='cTB11').find_all('tr')[6].td.text
tmp = tmp.replace('\r', '')
tmp = tmp.replace('\n', '')
tmp = tmp.replace('\t', '')
tmp = re.split('/', tmp)
outstanding = tmp[0].replace(',', '')
outstanding = outstanding.replace('주', '')
outstanding = outstanding.replace(' ', '')
outstanding = int(outstanding)
return (outstanding)
def floating(self, stock_cd):
soup = self.read_src(stock_cd)
tmp = soup.find(id='cTB11').find_all('tr')[6].td.text
tmp = tmp.replace('\r', '')
tmp = tmp.replace('\n', '')
tmp = tmp.replace('\t', '')
tmp = re.split('/', tmp)
floating = tmp[1].replace(' ', '')
floating = floating.replace('%', '')
floating = float(floating)
return (floating)
def float_convert(self, s):
try:
            s = s.replace(' ', '')
            s = s.replace(',', '')
            m = 1  # default multiplier when no unit suffix is present
            if re.findall('억', s):
m = 100000000
s = s.replace('억', '')
elif re.findall('백만', s):
m = 1000000
s = s.replace('백만', '')
if re.findall('%', s):
m = 0.01
s = s.replace('%', '')
s = s.replace('원', '')
f = float(s) * m
except:
f = s
return (f)
def fundamentals(self, stock_cd, f):
factors = dict()
soup = self.read_src(stock_cd)
rows = len(soup.find_all('div', class_='fund fl_le')[0].find_all('tr'))
for r in range(1, rows, 1):
title = soup.find_all('div', class_='fund fl_le')[0].find_all('tr')[r].find_all('th')[0].text
value_current = soup.find_all('div', class_='fund fl_le')[0].find_all('tr')[r].find_all('td')[0].text
value_current = self.float_convert(value_current)
value_estimated = soup.find_all('div', class_='fund fl_le')[0].find_all('tr')[r].find_all('td')[1].text
value_estimated = self.float_convert(value_estimated)
factors[title] = [value_current, value_estimated]
print(title, value_current, value_estimated)
return (factors[f])
class DART:
def disclosure_search(self, auth_key, cd, base_date):
url_search = 'http://dart.fss.or.kr/api/search.json?auth='+auth_key+'&crp_cd='+cd+'&start_dt='+base_date+'&bsn_tp=A001'
raw = urlopen(url_search)
data = json.load(raw)
rcp_no = data['list'][0]['rcp_no']
return(rcp_no)
def view_doc(self, rcp_no):
url_doc = 'http://dart.fss.or.kr/dsaf001/main.do?rcpNo='+rcp_no
source = urlopen(url_doc).read()
soup = bs4.BeautifulSoup(source, 'lxml')
tmp = soup.find_all('script')[7].text
menu = tmp.split('new Tree.TreeNode')
i = 0
for m in menu:
if re.search(' 재무제표"', m):
num = i
i += 1
prop = menu[num].split('click: function() {viewDoc(')[1]
prop = prop.split(');}')[0]
prop = prop.replace("'", "")
prop = prop.replace(' ', '')
prop = prop.split(',')
return(prop)
def extract_fn_item(self, item, p0, p1, p2, p3, p4):
url_stmt = 'http://dart.fss.or.kr/report/viewer.do?rcpNo='+p0+'&dcmNo='+p1+'&eleId='+p2+'&offset='+p3+'&length='+p4+'&dtd=dart3.xsd'
#print(url_stmt)
source = urlopen(url_stmt).read()
soup = bs4.BeautifulSoup(source, 'lxml')
stmt = soup.find_all('tr')
i = 0
ss = []
for s in stmt:
if re.search(item, str(s)):
ss.append(i)
i += 1
titles = []
for s in ss:
itm = stmt[s]
itm = itm.find_all('td')
if len(itm)>=1:
itm_title = itm[0]
                # iterate to keep only the last stripped string as the item title
                for t in itm_title.stripped_strings:
                    pass
                itm_title = t
titles.append(itm_title)
if itm_title == item:
s_num = s
elif re.search(r'\(', itm_title):
if itm_title.split('(')[0] == item:
s_num = s
itm = stmt[s_num]
itm = itm.find_all('td')
itm_title = itm[0].find('p').text
itm_title = itm_title.replace('\u3000', '')
itm_figure = itm[1].find('p').text
itm_figure = itm_figure.replace(',', '')
itm_figure = float(itm_figure)
return(itm_title, itm_figure)
def extract_unit(self, p0, p1, p2, p3, p4):
url_stmt = 'http://dart.fss.or.kr/report/viewer.do?rcpNo='+p0+'&dcmNo='+p1+'&eleId='+p2+'&offset='+p3+'&length='+p4+'&dtd=dart3.xsd'
#print(url_stmt)
source = urlopen(url_stmt).read()
soup = bs4.BeautifulSoup(source, 'lxml')
stmt = soup.find_all('tr')
i = 0
for s in stmt:
if re.search('단위', str(s)):
u_num = i
i += 1
unit = stmt[u_num]
unit = unit.find_all('td')
if len(unit) > 1:
unit = unit[1].text
else:
unit = unit[0].text
unit = unit.split(':')
unit = unit[1]
try:
unit = unit.replace(')', '')
except:
pass
try:
unit = unit.replace(' ', '')
except:
pass
try:
unit = unit.replace('\n', '')
except:
pass
if unit == '백만원':
unit_num = 1000000
elif unit == '천원':
unit_num = 1000
else:
unit_num = 1
return(unit, unit_num)
def extract_fn_stmt(self, p0, p1, p2, p3, p4):
url_stmt = 'http://dart.fss.or.kr/report/viewer.do?rcpNo='+p0+'&dcmNo='+p1+'&eleId='+p2+'&offset='+p3+'&length='+p4+'&dtd=dart3.xsd'
source = urlopen(url_stmt).read()
soup = bs4.BeautifulSoup(source, 'lxml')
stmt = soup.find_all('tr')
fn_stmt_dict = {}
for r in range(len(stmt)):
try:
columns = []
for c in stmt[r].find_all('td'):
for t in c.stripped_strings:
t = t.replace(' \xa0', '')
t = t.replace(' ', '')
t = t.replace(',', '')
t = re.sub('\s', '', t)
                        # treat a bare '-' as zero
                        t = re.sub('^-$', '0', t)
                        # parenthesized values are negative
if re.match('\(', t):
t = t.replace('(', '')
t = t.replace(')', '')
try:
t = float(t)
except:
pass
t = t * -1
try:
t = float(t)
except:
pass
columns.append(t)
except:
pass
            # clean up the account item names
if len(columns) > 3:
col_len = len(columns)
col_iter = 1
while col_iter <= col_len:
                    columns[0] = re.sub('[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩXIVxiv\d]+\.', '', columns[0])  # strip roman-numeral prefixes
                    if not type(columns[1]) == float:  # merge split text cells
columns[0] += columns[1]
del columns[1]
col_len -= 1
col_iter += 1
# print(len(columns), columns)
if len(columns) == 4:
fn_stmt_dict[columns[0]] = columns[1:4]
fn_stmt = pd.DataFrame.from_dict(fn_stmt_dict, orient='index', columns=['당기', '전기', '전전기'])
return(fn_stmt)
class FnGuide:
def fn_stmt(self, cd, i):
rows = {}
url = 'https://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?pGB=1&gicode=A'+cd+'&cID=&MenuYn=Y&ReportGB=D&NewMenuID=103&stkGb=701'
source = urlopen(url).read()
soup = bs4.BeautifulSoup(source, 'lxml')
tbl = soup.find_all('table')[i]
tr = tbl.find_all('tr')
n_tr = len(tr)
for r in range(n_tr):
td = tr[r]
n_th = len(td.find_all('th'))
n_td = len(td.find_all('td'))
cols = []
for n in range(n_th):
t = td.find_all('th')[n].text
t = self.fin_stmt_refine(t)
cols.append(t)
for n in range(n_td):
t = td.find_all('td')[n].text
t = self.fin_stmt_refine(t)
cols.append(t)
rows[cols[0]] = cols[1:]
df = pd.DataFrame.from_dict(rows, orient='index')
df.columns = df.iloc[0]
df = df[1:]
return(df)
def fn_ratio(self, cd, i):
rows = {}
url_ratio = 'https://comp.fnguide.com/SVO2/ASP/SVD_FinanceRatio.asp?pGB=1&gicode=A'+cd+'&cID=&MenuYn=Y&ReportGB=&NewMenuID=104&stkGb=701'
source = urlopen(url_ratio).read()
soup = bs4.BeautifulSoup(source, 'lxml')
tbl = soup.find_all('table')[i]
tr = tbl.find_all('tr')
n_tr = len(tr)
for r in range(n_tr):
td = tr[r]
n_th = len(td.find_all('th'))
n_td = len(td.find_all('td'))
if n_th+n_td > 1 :
cols = []
for n in range(n_th):
t = td.find_all('th')[n].text
t = self.fin_stmt_refine(t)
                    try:  # clean up the text
t = re.sub('계산에참여한계정펼치기', '', t)
txt = re.split('\(', t)[0]
tmp = re.search(txt, t[1:]).span()[0]
t = t[:tmp+1]
except:
pass
cols.append(t)
for n in range(n_td):
t = td.find_all('td')[n].text
t = self.fin_stmt_refine(t)
cols.append(t)
rows[cols[0]] = cols[1:]
        df = pd.DataFrame.from_dict(rows, orient='index')
import logging
import pandas as pd
import requests
import go_utils.lc as lc
import go_utils.mhm as mhm
from go_utils.constants import (
end_date,
landcover_protocol,
mosquito_protocol,
start_date,
)
def parse_api_data(response_json):
    try:
        results = response_json["results"]
        df = pd.DataFrame(results)
    except KeyError:
        logging.error("Unable to parse API response: missing 'results' key.")
        raise
    return df
############################################################################################
#Copyright 2021 Google LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
import pdb
from sklearn.metrics import balanced_accuracy_score, classification_report
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error
from collections import defaultdict
import pandas as pd
import numpy as np
pd.set_option('display.max_columns',500)
import matplotlib.pyplot as plt
import copy
import warnings
warnings.filterwarnings(action='ignore')
import functools
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
################################################################################
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras import layers
from tensorflow import keras
from tensorflow.keras.layers.experimental.preprocessing import Normalization, StringLookup
from tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, CategoryEncoding
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import regularizers
from tensorflow.keras import layers
from tensorflow.keras.models import Model, load_model
################################################################################
from deep_autoviml.modeling.one_cycle import OneCycleScheduler
##### Suppress all TF2 and TF1.x warnings ###################
tf2logger = tf.get_logger()
tf2logger.warning('Silencing TF2.x warnings')
tf2logger.root.removeHandler(tf2logger.root.handlers)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
################################################################################
import os
def check_if_GPU_exists(verbose=0):
GPU_exists = False
gpus = tf.config.list_physical_devices('GPU')
logical_gpus = tf.config.list_logical_devices('GPU')
tpus = tf.config.list_logical_devices('TPU')
#### In some cases like Kaggle kernels, the GPU is not enabled. Hence this check.
if logical_gpus:
# Restrict TensorFlow to only use the first GPU
if verbose:
print("GPUs found in this device...: ")
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
if len(logical_gpus) > 1:
device = "gpus"
else:
device = "gpu"
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
elif tpus:
device = "tpu"
print("GPUs found in this device...: ")
if verbose:
print("Listing all TPU devices: ")
for tpu in tpus:
print(tpu)
else:
print('Only CPU found on this device')
device = "cpu"
#### Set Strategy ##########
if device == "tpu":
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
if verbose:
print('Setting TPU strategy using %d devices' %strategy.num_replicas_in_sync)
except:
if verbose:
print('Setting TPU strategy using Colab...')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
elif device == "gpu":
strategy = tf.distribute.MirroredStrategy()
if verbose:
print('Setting Mirrored GPU strategy using %d devices' %strategy.num_replicas_in_sync)
elif device == "gpus":
strategy = tf.distribute.MultiWorkerMirroredStrategy()
if verbose:
print('Setting Multiworker GPU strategy using %d devices' %strategy.num_replicas_in_sync)
else:
strategy = tf.distribute.OneDeviceStrategy(device='/device:CPU:0')
if verbose:
print('Setting CPU strategy using %d devices' %strategy.num_replicas_in_sync)
return strategy
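# Hedged usage sketch (not part of the original module): the helper above returns a
# tf.distribute strategy, and a Keras model should be built inside its scope. The
# tiny Dense model below is an illustrative assumption, not part of this package.
def _example_strategy_scope():
    strategy = check_if_GPU_exists(verbose=1)
    with strategy.scope():
        model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
        model.compile(optimizer='adam', loss='mse')
    return model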
######################################################################################
def print_one_row_from_tf_dataset(test_ds):
"""
No matter how big a dataset or batch size, this handy function will print the first row.
This way you can test what's in each row of a tensorflow dataset that you sent in as input
You need to provide at least one column in the dataset for it to check if it should print it.
Inputs:
-------
test_ds: tf.data.DataSet - this must be batched and num_epochs must be an integer.
- otherwise it won't print!
"""
try:
if isinstance(test_ds, tuple):
dict_row = list(test_ds.as_numpy_iterator())[0]
else:
dict_row = test_ds
print("Printing one batch from the dataset:")
preds = list(dict_row.element_spec[0].keys())
if dict_row.element_spec[0][preds[0]].shape[0] is None or isinstance(
dict_row.element_spec[0][preds[0]].shape[0], int):
for batch, head in dict_row.take(1):
for labels, value in batch.items():
print("{:40s}: {}".format(labels, value.numpy()[:4]))
except:
print(' Error printing. Continuing...')
#########################################################################################
def print_one_row_from_tf_label(test_label):
"""
No matter how big a dataset or batch size, this handy function will print the first row.
This way you can test what's in each row of a tensorflow dataset that you sent in as input
You need to provide at least one column in the dataset for it to check if it should print it.
Inputs:
-------
test_label: tf.data.DataSet - this must be batched and num_epochs must be an integer.
- otherwise it won't print!
"""
if isinstance(test_label, tuple):
dict_row = list(test_label.as_numpy_iterator())[0]
else:
dict_row = test_label
preds = list(dict_row.element_spec[0].keys())
try:
### This is for multilabel problems only ####
if len(dict_row.element_spec[1]) >= 1:
labels = list(dict_row.element_spec[1].keys())
for feats, labs in dict_row.take(1):
for each_label in labels:
print(' label = %s, samples: %s' %(each_label, labs[each_label]))
except:
### This is for single problems only ####
if dict_row.element_spec[0][preds[0]].shape[0] is None or isinstance(
dict_row.element_spec[0][preds[0]].shape[0], int):
for feats, labs in dict_row.take(1):
print(" samples from label: %s" %(labs.numpy().tolist()[:10]))
##########################################################################################
from sklearn.base import TransformerMixin
from collections import defaultdict
import pandas as pd
import numpy as np
class My_LabelEncoder(TransformerMixin):
"""
################################################################################################
###### This Label Encoder class works just like sklearn's Label Encoder! #####################
##### You can label encode any column in a data frame using this new class. But unlike sklearn,
the beauty of this function is that it can take care of NaN's and unknown (future) values.
It uses the same fit() and fit_transform() methods of sklearn's LabelEncoder class.
################################################################################################
Usage:
MLB = My_LabelEncoder()
train[column] = MLB.fit_transform(train[column])
test[column] = MLB.transform(test[column])
"""
def __init__(self):
self.transformer = defaultdict(str)
self.inverse_transformer = defaultdict(str)
def fit(self,testx):
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
else:
return testx
outs = np.unique(testx.factorize()[0])
ins = np.unique(testx.factorize()[1]).tolist()
if -1 in outs:
ins.insert(0,np.nan)
self.transformer = dict(zip(ins,outs.tolist()))
self.inverse_transformer = dict(zip(outs.tolist(),ins))
return self
def transform(self, testx):
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
else:
return testx
ins = np.unique(testx.factorize()[1]).tolist()
missing = [x for x in ins if x not in self.transformer.keys()]
if len(missing) > 0:
for each_missing in missing:
max_val = np.max(list(self.transformer.values())) + 1
self.transformer[each_missing] = max_val
self.inverse_transformer[max_val] = each_missing
### now convert the input to transformer dictionary values
outs = testx.map(self.transformer).values
return outs
def inverse_transform(self, testx):
### now convert the input to transformer dictionary values
if isinstance(testx, pd.Series):
outs = testx.map(self.inverse_transformer).values
elif isinstance(testx, np.ndarray):
outs = pd.Series(testx).map(self.inverse_transformer).values
else:
outs = testx[:]
return outs
#################################################################################
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, balanced_accuracy_score
#################################################################################
def plot_history(history, metric, targets):
if isinstance(targets, str):
#### This is for single label problems
fig = plt.figure(figsize=(15,6))
#### first metric is always the loss - just plot it!
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
ax1 = plt.subplot(1, 2, 1)
ax1.set_title('Model Training vs Validation Loss')
plot_one_history_metric(history, "loss", ax1)
ax2 = plt.subplot(1, 2, 2)
ax2.set_title('Model Training vs Validation %s' %metric)
##### Now let's plot the second metric ####
plot_one_history_metric(history, metric, ax2)
else:
### This is for Multi-Label problems
for each_target in targets:
fig = plt.figure(figsize=(15,6))
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
ax1 = plt.subplot(1, 2, 1)
ax1.set_title('Model Training vs Validation Loss')
plot_one_history_metric(history, each_target+"_loss", ax1)
ax2 = plt.subplot(1, 2, 2)
### Since we are using total loss, we must find another metric to show.
### This is how we do it - by collecting all metrics with target name
### and pick the first one. This may or may not always get the best answer, but we will see.
metric1 = [x for x in hist.columns.tolist() if (each_target in x) & ("loss" not in x) ]
metric2 = metric1[0]
### the next line is not a typo! it should always be target[0]
### since val_monitor is based on the first target's metric only!
#metric1 = metric.replace(targets[0],'')
#metric2 = each_target + metric1
ax2.set_title('Model Training vs Validation %s' %metric2)
plot_one_history_metric(history, metric2, ax2)
plt.show();
#######################################################################################
def plot_one_history_metric(history, metric, ax):
train_metrics = history.history[metric]
val_metrics = history.history['val_'+metric]
epochs = range(1, len(train_metrics) + 1)
ax.plot(epochs, train_metrics)
ax.plot(epochs, val_metrics)
ax.set_xlabel("Epochs")
ax.set_ylabel(metric)
ax.legend(["train_"+metric, 'val_'+metric])
####################################################################################
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import balanced_accuracy_score
from collections import OrderedDict
from collections import Counter
def print_classification_model_stats(y_test, y_preds):
"""
This will print both multi-label and multi-class metrics.
You must send in only actual and predicted labels. No probabilities!!
"""
try:
assert y_preds.shape[1]
for i in range(y_preds.shape[1]):
if y_preds.shape[1] == y_test.shape[1]:
print('Target label %s results:' %(i+1))
print_classification_model_metrics(y_test[:,i], y_preds[:,i])
else:
print('error printing: number of labels in actuals and predicted are different ')
except:
### This is a binary class only #######
print_classification_model_metrics(y_test, y_preds)
def print_classification_model_metrics(y_true, predicted):
"""
This prints classification metrics in a nice format only for binary classes
"""
#### Use this to Test Classification Problems Only ####
try:
y_pred = predicted.argmax(axis=1)
except:
y_pred = predicted
print('Balanced Accuracy = %0.2f%%' %(
100*balanced_accuracy_score(y_true, y_pred)))
print('Confusion Matrix:')
print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred))
print('#####################################################################')
return balanced_accuracy_score(y_true, y_pred)
###################################################################################
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import seaborn as sns
def plot_classification_results(y_true, y_pred, labels, target_names, title_string=""):
try:
fig, axes = plt.subplots(1,2,figsize=(15,6))
draw_confusion_matrix(y_true, y_pred, labels, target_names, '%s Confusion Matrix' %title_string, ax=axes[0])
try:
clf_report = classification_report(y_true,
y_pred,
labels=labels,
target_names=target_names,
output_dict=True)
except:
clf_report = classification_report(y_true,y_pred,labels=target_names,
target_names=labels,output_dict=True)
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True,ax=axes[1],fmt='0.2f');
axes[1].set_title('Classification Report')
except:
print('Error: could not plot classification results. Continuing...')
######################################################################################
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, f1_score
def draw_confusion_matrix(y_test,y_pred, labels, target_names, model_name='Model',ax=''):
"""
This plots a beautiful confusion matrix based on input: ground truths and predictions
"""
#Confusion Matrix
'''Plotting CONFUSION MATRIX'''
import seaborn as sns
sns.set_style('darkgrid')
'''Display'''
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
pd.options.display.float_format = '{:,.2f}'.format
#Get the confusion matrix and put it into a df
cm = confusion_matrix(y_test, y_pred)
cm_df = pd.DataFrame(cm,
index = labels,
columns = target_names,
)
sns.heatmap(cm_df,
center=0,
cmap=sns.diverging_palette(220, 15, as_cmap=True),
annot=True,
fmt='g',
ax=ax)
ax.set_title(' %s \nF1 Score(avg = micro): %0.2f \nF1 Score(avg = macro): %0.2f' %(
model_name,f1_score(y_test, y_pred, average='micro'),f1_score(y_test, y_pred, average='macro')),
fontsize = 13)
ax.set_ylabel('True label', fontsize = 13)
ax.set_xlabel('Predicted label', fontsize = 13)
################################################################################
def print_regression_model_stats(actuals, predicted, targets='', plot_name=''):
"""
This program prints and returns MAE, RMSE, MAPE.
If you like the MAE and RMSE to have a title or something, just give that
in the input as "title" and it will print that title on the MAE and RMSE as a
chart for that model. Returns MAE, MAE_as_percentage, and RMSE_as_percentage
"""
if isinstance(actuals,pd.Series) or isinstance(actuals,pd.DataFrame):
actuals = actuals.values
if isinstance(predicted,pd.Series) or isinstance(predicted,pd.DataFrame):
predicted = predicted.values
if len(actuals) != len(predicted):
print('Error: Number of actuals and predicted dont match. Continuing...')
if targets == "":
try:
### This is for Multi_Label Problems ###
assert actuals.shape[1]
multi_label = True
if isinstance(actuals,pd.Series):
cols = [actuals.name]
elif isinstance(actuals,pd.DataFrame):
cols = actuals.columns.tolist()
else:
cols = ['target_'+str(i) for i in range(actuals.shape[1])]
except:
#### THis is for Single Label problems #####
multi_label = False
if isinstance(actuals,pd.Series):
cols = [actuals.name]
elif isinstance(actuals,pd.DataFrame):
cols = actuals.columns.tolist()
else:
cols = ['target_1']
else:
cols = copy.deepcopy(targets)
if isinstance(targets, str):
cols = [targets]
if len(cols) == 1:
multi_label = False
else:
multi_label = True
try:
plot_regression_scatters(actuals,predicted,cols,plot_name=plot_name)
except:
print('Could not draw regression plot but continuing...')
if multi_label:
for i in range(actuals.shape[1]):
actuals_x = actuals[:,i]
try:
predicted_x = predicted[:,i]
except:
predicted_x = predicted.ravel()
print('Regression Metrics for Target=%s' %cols[i])
mae, mae_asp, rmse_asp = print_regression_metrics(actuals_x, predicted_x)
else:
mae, mae_asp, rmse_asp = print_regression_metrics(actuals, predicted)
return mae, mae_asp, rmse_asp
################################################################################
from sklearn.metrics import r2_score
def print_regression_metrics(actuals, predicted):
predicted = np.nan_to_num(predicted)
mae = mean_absolute_error(actuals, predicted)
mae_asp = (mean_absolute_error(actuals, predicted)/actuals.std())*100
rmse_asp = (np.sqrt(mean_squared_error(actuals,predicted))/actuals.std())*100
rmse = print_rmse(actuals, predicted)
    mape = print_mape(actuals, predicted)
print(' MAE = %0.4f' %mae)
print(" MAPE = %0.0f%%" %(mape))
print(' RMSE = %0.4f' %rmse)
print(' MAE as %% std dev of Actuals = %0.1f%%' %(mae/abs(actuals).std()*100))
# Normalized RMSE print('RMSE = {:,.Of}'.format(rmse))
print(' R-Squared (%% ) = %0.0f%%' %(100*r2_score(actuals,predicted)))
print(' Normalized RMSE (%% of Std Dev of Actuals) = %0.0f%%' %(100*rmse/actuals.std()))
return mae, mae_asp, rmse_asp
################################################################################
def print_static_rmse(actuals, predicted, start_from=0,verbose=0):
"""
this calculates the ratio of the rmse error to the standard deviation of the actuals.
This ratio should be below 1 for a model to be considered useful.
The comparison starts from the row indicated in the "start_from" variable.
"""
predicted = np.nan_to_num(predicted)
rmse = np.sqrt(mean_squared_error(actuals[start_from:],predicted[start_from:]))
std_dev = actuals[start_from:].std()
if verbose >= 1:
print(' RMSE = %0.2f' %rmse)
print(' Std Deviation of Actuals = %0.2f' %(std_dev))
print(' Normalized RMSE = %0.1f%%' %(rmse*100/std_dev))
print(' R-Squared (%% ) = %0.0f%%' %(100*r2_score(actuals,predicted)))
return rmse, rmse/std_dev
################################################################################
from sklearn.metrics import mean_squared_error,mean_absolute_error
def print_rmse(y, y_hat):
"""
Calculating Root Mean Square Error https://en.wikipedia.org/wiki/Root-mean-square_deviation
"""
mse = np.mean((y - y_hat)**2)
return np.sqrt(mse)
def print_mape(y, y_hat):
"""
Calculating Mean Absolute Percent Error https://en.wikipedia.org/wiki/Mean_absolute_percentage_error
"""
perc_err = (100*(y - y_hat))/y
return np.mean(abs(perc_err))
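# Hedged usage sketch of the two helpers above on a small, made-up array; the
# values in the comments follow directly from the RMSE and MAPE formulas.
def _example_rmse_mape():
    y = np.array([100.0, 200.0, 300.0])
    y_hat = np.array([110.0, 190.0, 330.0])
    rmse = print_rmse(y, y_hat)   # sqrt(mean([100, 100, 900])) ~= 19.15
    mape = print_mape(y, y_hat)   # mean(|[-10, 5, -10]|) ~= 8.33 (percent)
    return rmse, mape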
################################################################################
from sklearn import metrics
import matplotlib.pyplot as plt
import copy
def print_classification_header(num_classes, num_labels, target_name):
######## This is where you start printing metrics ###############
if isinstance(num_classes, list) :
if np.max(num_classes) > 2:
print('Multi Label (multi-output) Multi-Class Report: %s' %target_name)
print('#################################################################')
else:
print('Multi Label (multi-output) Binary Class Metrics Report: %s' %target_name)
print('#################################################################')
else:
if num_classes > 2:
print('Single Label (single-output), Multi-Class Report: %s' %target_name)
print('#################################################################')
else:
            print('Single Label, Binary Class Model Metrics Report: %s' %target_name)
print('#################################################################')
def print_classification_metrics(y_test, y_probs, proba_flag=True):
"""
####### Send in the actual_values and prediction_probabilities for binary classes
This will return back metrics and print them all in a neat format
"""
y_test = copy.deepcopy(y_test)
multi_label_flag = False
multi_class_flag = False
#### for some cases, you won't get proba, so check the proba_flag
if proba_flag:
y_preds = y_probs.argmax(axis=1)
else:
y_preds = copy.deepcopy(y_probs)
##### check if it is multi-class #####
if len(np.unique(y_test)) > 2 or max(np.unique(y_test)) >= 2:
multi_class_flag = True
elif len(np.unique(y_preds)) > 2 or max(np.unique(y_preds)) >= 2:
multi_class_flag = True
########### This is where we print the metrics ###################
try:
if not multi_class_flag and not multi_label_flag:
# Calculate comparison metrics for Binary classification results.
accuracy = metrics.accuracy_score(y_test, y_preds)
balanced_accuracy = metrics.balanced_accuracy_score(y_test, y_preds)
precision = metrics.precision_score(y_test, y_preds)
f1_score = metrics.f1_score(y_test, y_preds)
recall = metrics.recall_score(y_test, y_preds)
if type(np.mean((y_test==y_preds))) == pd.Series:
print(' Accuracy = %0.1f%%' %(np.mean(accuracy)*100))
else:
print(' Accuracy = %0.1f%%' %(accuracy*100))
print(' Balanced Accuracy = %0.1f%%' %(balanced_accuracy*100))
print(' Precision = %0.1f%%' %(precision*100))
if proba_flag:
average_precision = np.mean(metrics.precision_score(y_test, y_preds, average=None))
else:
average_precision = metrics.precision_score(y_test, y_preds, average='macro')
print(' Average Precision = %0.1f%%' %(average_precision*100))
print(' Recall = %0.1f%%' %(recall*100))
print(' F1 Score = %0.1f%%' %(f1_score*100))
if proba_flag:
roc_auc = metrics.roc_auc_score(y_test, y_probs[:,1])
#fpr, tpr, threshold = metrics.roc_curve(y_test, y_probs[:,1])
#roc_auc = metrics.auc(fpr, tpr)
print(' ROC AUC = %0.1f%%' %(roc_auc*100))
else:
roc_auc = 0
print('#####################################################')
return [accuracy, balanced_accuracy, precision, average_precision, f1_score, recall, roc_auc]
else:
# Calculate comparison metrics for Multi-Class classification results.
accuracy = np.mean((y_test==y_preds))
if multi_label_flag:
balanced_accuracy = np.mean(metrics.recall_score(y_test, y_preds, average=None))
precision = metrics.precision_score(y_test, y_preds, average=None)
average_precision = metrics.precision_score(y_test, y_preds, average='macro')
f1_score = metrics.f1_score(y_test, y_preds, average=None)
recall = metrics.recall_score(y_test, y_preds, average=None)
else:
balanced_accuracy = metrics.balanced_accuracy_score(y_test, y_preds)
precision = metrics.precision_score(y_test, y_preds, average = None)
average_precision = metrics.precision_score(y_test, y_preds,average='macro')
f1_score = metrics.f1_score(y_test, y_preds, average = None)
recall = metrics.recall_score(y_test, y_preds, average = None)
if type(np.mean((y_test==y_preds))) == pd.Series:
print(' Accuracy = %0.1f%%' %(np.mean(accuracy)*100))
else:
print(' Accuracy = %0.1f%%' %(accuracy*100))
print(' Balanced Accuracy (average recall) = %0.1f%%' %(balanced_accuracy*100))
print(' Average Precision (macro) = %0.1f%%' %(average_precision*100))
### these are basically one for each class #####
print(' Precisions by class:')
for precisions in precision:
print(' %0.1f%% ' %(precisions*100),end="")
print('\n Recall Scores by class:')
for recalls in recall:
print(' %0.1f%% ' %(recalls*100), end="")
print('\n F1 Scores by class:')
for f1_scores in f1_score:
print(' %0.1f%% ' %(f1_scores*100),end="")
# Return list of metrics to be added to a Dataframe to compare models.
except:
print(' print classification metrics erroring. Continuing...')
print('\n#####################################################')
return [accuracy, balanced_accuracy, precision, average_precision, f1_score, recall, 0]
##################################################################################################
def find_rare_class(classes, verbose=0):
######### Print the % count of each class in a Target variable #####
"""
Works on Multi Class too. Prints class percentages count of target variable.
It returns the name of the Rare class (the one with the minimum class member count).
This can also be helpful in using it as pos_label in Binary and Multi Class problems.
"""
counts = OrderedDict(Counter(classes))
total = sum(counts.values())
if verbose >= 1:
print(' Class -> Counts -> Percent')
sorted_keys = sorted(counts.keys())
for cls in sorted_keys:
print("%12s: % 7d -> % 5.1f%%" % (cls, counts[cls], counts[cls]/total*100))
if type(pd.Series(counts).idxmin())==str:
return pd.Series(counts).idxmin()
else:
return int(pd.Series(counts).idxmin())
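# Hedged usage sketch: the rare class returned above can serve as pos_label for
# imbalanced binary problems; the label list here is an illustrative assumption.
def _example_find_rare_class():
    labels = [0, 0, 0, 1, 0, 1, 0]
    rare_label = find_rare_class(labels, verbose=1)  # prints class -> count -> percent
    return rare_label  # -> 1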
###############################################################################
#####################################################################
##### REGRESSION CHARTS AND METRICS ARE PRINTED PLOTTED HERE
#####################################################################
import time
from itertools import cycle
def plot_regression_scatters(df, df2, num_vars, kind='scatter', plot_name=''):
"""
Great way to plot continuous variables fast. Just sent them in and it will take care of the rest!
"""
figsize = (10, 10)
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgk')
num_vars_len = len(num_vars)
col = 2
start_time = time.time()
row = len(num_vars)
fig, ax = plt.subplots(row, col)
if col < 2:
fig.set_size_inches(min(15,8),row*5)
fig.subplots_adjust(hspace=0.5) ### This controls the space betwen rows
fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
else:
fig.set_size_inches(min(col*10,20),row*5)
fig.subplots_adjust(hspace=0.3) ### This controls the space betwen rows
fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
fig.suptitle('Regression Metrics Plots for %s Model' %plot_name, fontsize=20)
counter = 0
if row == 1:
ax = ax.reshape(-1,1).T
for k in np.arange(row):
row_color = next(colors)
for l in np.arange(col):
try:
if col==1:
if row == 1:
x = df[:]
y = df2[:]
else:
x = df[:,k]
y = df2[:,k]
ax1 = ax[k][l]
lineStart = x.min()
lineEnd = x.max()
ax1.scatter(x, y, color=row_color)
ax1.plot([lineStart, lineEnd], [lineStart, lineEnd], 'k-', color=next(colors))
ax1.set_xlabel('Actuals')
ax1.set_ylabel('Predicted')
ax1.set_title('Predicted vs Actuals Plot for Target = %s' %num_vars[k])
else:
if row == 1:
x = df[:]
y = df2[:]
else:
x = df[:,k]
y = df2[:,k]
lineStart = x.min()
lineEnd = x.max()
if l == 0:
ax1 = ax[k][l]
ax1.scatter(x, y, color = row_color)
ax1.plot([lineStart, lineEnd], [lineStart, lineEnd], 'k-', color = next(colors))
ax1.set_xlabel('Actuals')
ax1.set_ylabel('Predicted')
ax1.set_title('Predicted vs Actuals Plot for Target = %s' %num_vars[k])
else:
ax1 = ax[k][l]
try:
assert y.shape[1]
ax1.hist((x-y.ravel()), density=True,color = row_color)
except:
ax1.hist((x-y), density=True,color = row_color)
ax1.axvline(linewidth=2, color='k')
ax1.set_title('Residuals Plot for Target = %s' %num_vars[k])
except:
if col == 1:
counter += 1
else:
ax[k][l].set_title('No Predicted vs Actuals Plot for plot as %s is not numeric' %num_vars[k])
counter += 1
print('Regression Plots completed in %0.3f seconds' %(time.time()-start_time))
################################################################################
def plot_regression_residuals(y_test, y_test_preds, target, project_name, num_labels):
"""
Another set of plots for continuous variables.
"""
try:
if isinstance(target, str):
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgk')
row_color = next(colors)
plt.figure(figsize=(15,6))
ax1 = plt.subplot(1, 2, 1)
residual = pd.Series((y_test - y_test_preds))
residual.plot(ax=ax1, color='b')
ax1.set_title('Residuals by each row in held-out set')
ax1.axhline(y=0.0, linewidth=2, color=next(colors))
pdf = save_valid_predictions(y_test, y_test_preds.ravel(), project_name, num_labels)
ax2 = plt.subplot(1, 2, 2)
pdf.plot(ax=ax2)
ax2.set_title('Actuals vs Predictions by each row in held-out set')
else:
pdf = save_valid_predictions(y_test, y_test_preds, project_name, num_labels)
plt.figure(figsize=(15,6))
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgk')
for i in range(num_labels):
row_color = next(colors)
ax1 = plt.subplot(1, num_labels, i+1)
residual = pd.Series((y_test[:,i] - y_test_preds[:,i]))
residual.plot(ax=ax1, color=row_color)
ax1.set_title(f"Actuals_{i} (x-axis) vs. Residuals_{i} (y-axis)")
ax1.axhline(y=0.0, linewidth=2, color=next(colors))
plt.figure(figsize=(15, 6))
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgk')
for j in range(num_labels):
row_color = next(colors)
pair_cols = ['actuals_'+str(j), 'predictions_'+str(j)]
ax2 = plt.subplot(1, num_labels, j+1)
pdf[pair_cols].plot(ax=ax2)
                ax2.set_title(f'Actuals_{j} vs Predictions_{j} for each row')
except:
print('Regression plots erroring. Continuing...')
#############################################################################################
import os
def save_valid_predictions(y_test, y_preds, project_name, num_labels):
if num_labels == 1:
        pdf = pd.DataFrame([y_test, y_preds])
"""util class for doing searches"""
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import Dict, List
from operator import attrgetter
import pandas as pd
import numpy as np
from dfcx_scrapi.core import scrapi_base
from dfcx_scrapi.core import intents
from dfcx_scrapi.core import flows
from dfcx_scrapi.core import pages
from dfcx_scrapi.core import entity_types
from dfcx_scrapi.core import transition_route_groups
from google.cloud.dialogflowcx_v3beta1 import types
from google.oauth2 import service_account
# logging config
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
class SearchUtil(scrapi_base.ScrapiBase):
"""class for searching items"""
def __init__(
self,
creds_path: str = None,
creds_dict: Dict[str, str] = None,
creds: service_account.Credentials = None,
scope=False,
agent_id: str = None,
):
super().__init__(
creds_path=creds_path,
creds_dict=creds_dict,
creds=creds,
scope=scope,
)
logging.info("create dfcx creds %s", creds_path)
self.intents = intents.Intents(
creds_path=creds_path, creds_dict=creds_dict
)
self.entities = entity_types.EntityTypes(
creds_path=creds_path, creds_dict=creds_dict
)
self.flows = flows.Flows(creds_path=creds_path, creds_dict=creds_dict)
self.pages = pages.Pages(creds_path=creds_path, creds_dict=creds_dict)
self.route_groups = transition_route_groups.TransitionRouteGroups(
creds_path, creds_dict
)
self.creds_path = creds_path
self.intents_map = None
if agent_id:
self.agent_id = agent_id
self.flow_map = self.flows.get_flows_map(
agent_id=agent_id, reverse=True
)
self.intents_map = self.intents.get_intents_map(agent_id)
self.client_options = self._set_region(agent_id)
@staticmethod
def get_route_df(page_df: pd.DataFrame, route_group_df: pd.DataFrame):
"""Gets a route dataframe from page- and route-group-dataframes.
Args:
page_df: dataframe with required columns flow_name, page_name,
routes (where routes are types.TransitionRoute) such as
from get_page_df().
route_group_df: dataframe woth required columns flow_name,
page_name, route_group_name, routes (where routes are
types.TransitionRoute) such as from get_route_group_df().
Returns:
routes dataframe with columns:
flow_name,
page_name,
route_group_name,
intent,
condition,
trigger_fulfillment
"""
routes_df = (
pd.concat(
[page_df[["flow_name", "page_name", "routes"]], route_group_df],
ignore_index=True,
)
.explode("routes", ignore_index=True)
.dropna(subset=["routes"], axis="index")
.assign(
intent=lambda df: df.routes.apply(attrgetter("intent")),
condition=lambda df: df.routes.apply(attrgetter("condition")),
trigger_fulfillment=lambda df: df.routes.apply(
attrgetter("trigger_fulfillment")
),
)
.drop(columns="routes")
)
return routes_df
@staticmethod
def get_param_df(page_df: pd.DataFrame):
"""Gets a parameter dataframe from an input page dataframe.
Args:
page_df: dataframe with minimum columns flow_name, page_name,
parameters (types.Form.Parameter), such as from get_page_df().
Returns:
dataframe with columns:
flow_name,
page_name,
parameter_name,
reprompt_event_handlers,
initial_prompt_fulfillment
"""
param_df = (
page_df[["flow_name", "page_name", "parameters"]]
.explode("parameters", ignore_index=True)
.dropna(subset=["parameters"], axis="index")
.assign(
parameter_name=lambda df: df.parameters.apply(
attrgetter("display_name")
),
reprompt_event_handlers=lambda df: df.parameters.apply(
attrgetter("fill_behavior.reprompt_event_handlers")
),
initial_prompt_fulfillment=lambda df: df.parameters.apply(
attrgetter("fill_behavior.initial_prompt_fulfillment")
),
)
.drop(columns="parameters")
)
return param_df
@staticmethod
def get_event_handler_df(page_df, param_reprompt_event_handler_df):
"""Gets an event handler dataframe from page- and parameter-dataframes.
Args:
page_df: dataframe with minimum columns flow_name, page_name,
event_handlers (types.EventHandler), such as from
get_page_df().
param_reprompt_event_handler_df: dataframe with minimum columns
flow_name, page_name, parameter_name, reprompt_event_handlers
(types.EventHandler), such as from get_param_df().
Returns:
dataframe with columns: flow_name, page_name, parameter_name, event,
trigger_fulfillment.
"""
event_handler_df = (
pd.concat(
[
page_df[["flow_name", "page_name", "event_handlers"]],
param_reprompt_event_handler_df.rename(
columns={"reprompt_event_handlers": "event_handlers"}
),
],
ignore_index=True,
)
.explode("event_handlers", ignore_index=True)
.dropna(subset=["event_handlers"], axis="index")
.assign(
event=lambda df: df.event_handlers.apply(attrgetter("event")),
trigger_fulfillment=lambda df: df.event_handlers.apply(
attrgetter("trigger_fulfillment")
),
)
.drop(columns="event_handlers")
)
return event_handler_df
@staticmethod
def _get_msg_type(message: types.ResponseMessage):
"""Gets the response message type for a message from a fulfillment.
Args:
message: message structure from a fulfillment.
Returns:
type in {np.nan, text, custom_payload, play_audio,
live_agent_handoff, conversation_success, output_audio_text}.
"""
if pd.isna(message):
value = np.nan
elif isinstance(message, types.ResponseMessage) and (
str(message) == ""
):
value = np.nan
elif "text" in message:
value = "text"
elif "payload" in message:
value = "custom_payload"
elif "play_audio" in message:
value = "play_audio"
elif "live_agent_handoff" in message:
value = "live_agent_handoff"
elif "conversation_success" in message:
value = "conversation_success"
elif "output_audio_text" in message:
value = "output_audio_text"
else:
value = "unexpected value"
return value
@staticmethod
def _gather_text_responses(text_message: types.ResponseMessage.Text):
"""Flattens a Dialogflow CX text structure.
Args:
text_message: text such as is inside types.ResponseMessage.
Returns:
flattened text in a string.
"""
flat_texts = "\n".join(text_message.text)
return flat_texts
def _format_response_message(
self, message: types.ResponseMessage, message_format: str
):
"""Conditionally unpacks message formats.
Args:
message: structure such as from a fulfillment.
message_format: 'dict' or 'human-readable'
Returns:
unpacked contents of message.
"""
if pd.isna(message):
contents = np.nan
elif isinstance(message, types.ResponseMessage) and (
str(message) == ""
):
contents = np.nan
elif "payload" in message:
c = self.recurse_proto_marshal_to_dict(message.payload)
contents = {"payload": c} if (message_format == "dict") else c
elif "play_audio" in message:
c = {"audio_uri": message.play_audio.audio_uri}
contents = {"play_audio": c} if (message_format == "dict") else c
elif "live_agent_handoff" in message:
c = self.recurse_proto_marshal_to_dict(
message.live_agent_handoff.metadata
)
contents = (
{"live_agent_handoff": c} if (message_format == "dict") else c
)
elif "conversation_success" in message:
c = self.recurse_proto_marshal_to_dict(
message.conversation_success.metadata
)
contents = (
{"conversation_success": c} if (message_format == "dict") else c
)
elif "output_audio_text" in message:
c = message.output_audio_text.text
contents = (
{"output_audio_text": c} if (message_format == "dict") else c
)
elif "text" in message:
c = SearchUtil._gather_text_responses(message.text)
contents = {"text": c} if (message_format == "dict") else c
else:
contents = message
return contents
def _find_true_routes_flow_level(self, flow_display_name, flow_map):
flow_id = flow_map[flow_display_name]
start_page = self.flows.get_flow(flow_id) # pylint: disable=W0612
other_pages = self.pages.list_pages(flow_id)
# Start page - no entry fulfillment
pages_dataframe = pd.DataFrame()
for page in other_pages:
display_name = page.display_name
webhook = False
if page.entry_fulfillment.webhook:
webhook = True
has_parameters = False
if page.form.parameters:
has_parameters = True
has_true_route = False
has_true_final_route = False
for route in page.transition_routes:
if route.condition == "true":
has_true_route = True
if route.condition == '$page.params.status = "FINAL" AND true':
has_true_final_route = True
page_dataframe = pd.DataFrame(
columns=[
"flow_display_name",
"page_display_name",
"webhook_entry_fullfillment",
"has_parameters",
"has_true_route",
"has_true_and_final_route",
],
data=[
[
flow_display_name,
display_name,
webhook,
has_parameters,
has_true_route,
has_true_final_route,
]
],
)
pages_dataframe = pages_dataframe.append(page_dataframe)
return pages_dataframe
# Flows - event handlers
def _flow_level_handlers(self):
flows_in_agent = self.flows.list_flows(self.agent_id)
flow_event_handler_data = pd.DataFrame()
for flow in flows_in_agent:
flow_level_event_handlers = flow.event_handlers
flow_level_event_handlers_dataframe = pd.DataFrame()
for handler in flow_level_event_handlers:
flow_level_event_handlers_dataframe = (
flow_level_event_handlers_dataframe.append(
pd.DataFrame(
columns=[
"flow",
"event",
"messages",
"transition_flow",
"transition_page",
],
data=[
[
flow.display_name,
handler.event,
handler.trigger_fulfillment.messages,
handler.target_flow,
handler.target_page,
]
],
)
)
)
flow_event_handler_data = flow_event_handler_data.append(
flow_level_event_handlers_dataframe
)
return flow_event_handler_data
# Pages - event handlers
def _page_level_handlers(self):
page_level_event_handlers_all_dataframe = pd.DataFrame()
flow_map = self.flows.get_flows_map(self.agent_id)
for flow_ in flow_map.keys():
pages_in_flow = self.pages.list_pages(flow_)
for page in pages_in_flow:
page_level_event_handlers = page.event_handlers
page_level_event_handlers_dataframe = pd.DataFrame()
for handler in page_level_event_handlers:
page_level_event_handlers_dataframe = (
page_level_event_handlers_dataframe.append(
pd.DataFrame(
columns=[
"flow",
"page",
"event",
"messages",
"transition_flow",
"transition_page",
],
data=[
[
flow_map[flow_],
page.display_name,
handler.event,
handler.trigger_fulfillment.messages,
handler.target_flow,
handler.target_page,
]
],
)
)
)
page_level_event_handlers_all_dataframe = (
page_level_event_handlers_all_dataframe.append(
page_level_event_handlers_dataframe
)
)
return page_level_event_handlers_all_dataframe
# Parameters - event handlers
def _parameter_level_handlers(self):
parameter_level_event_handlers_all_dataframe = pd.DataFrame()
flow_map = self.flows.get_flows_map(self.agent_id)
for flow_ in flow_map.keys():
pages_in_flow = self.pages.list_pages(flow_)
for page in pages_in_flow:
parameters = page.form.parameters
for parameter in parameters:
parameter_event_handlers = (
parameter.fill_behavior.reprompt_event_handlers
)
param_lvl_event_df = pd.DataFrame()
for handler in parameter_event_handlers:
param_lvl_event_df = param_lvl_event_df.append(
pd.DataFrame(
columns=[
"flow",
"page",
"parameter",
"event",
"messages",
"transition_flow",
"transition_page",
],
data=[
[
flow_map[flow_],
page.display_name,
parameter.display_name,
handler.event,
handler.trigger_fulfillment.messages,
handler.target_flow,
handler.target_page,
]
],
)
)
parameter_level_event_handlers_all_dataframe = (
parameter_level_event_handlers_all_dataframe.append(
param_lvl_event_df
)
)
return parameter_level_event_handlers_all_dataframe
def find_list_parameters(self, agent_id):
"""This method extracts Parameters set at a page level that are
designated as "lists".
Page level parameters are tied to Entity Types and can be returned
as String or List types. If the user selects "list" at the page
level, the Entity Type will be returned with "is_list: True". This
function will allow the user to provide an Agent ID and will return
all instances of parameters being used as lists on pages.
Args:
- agent_id, the Agent ID string in the following format:
projects/<project_id>/locations/<location_id>/agents/<agent_id>
Returns:
- params_map, a Dict of parameter names and Pages they belong to
"""
# entities = self.dfcx.list_entity_types(agent_id)
flows_map = self.flows.get_flows_map(agent_id)
params_list = []
for flow in flows_map.keys():
temp_pages = self.pages.list_pages(flow)
for page in temp_pages:
for param in page.form.parameters:
if param.is_list:
params_list.append(param.display_name)
return params_list
def search_conditionals_page(self, page_id, search):
"""search page for an exact string in conditional routes
Args:
- page_id, the formatted CX Page ID to use
- search, string to search
Returns:
- locator, dataframe of the results of where this string was found
"""
locator = pd.DataFrame()
page = self.pages.get_page(page_id=page_id)
i = 1
for route in page.transition_routes:
if search.lower() in route.condition.lower():
iter_frame = pd.DataFrame(
columns=["resource_id", "condition", "route_id"],
data=[[page_id, route.condition, i]],
)
locator = locator.append(iter_frame)
i += 1
return locator
def search_conditionals_flow(self, flow_id, search):
"""search flow for an exact string in conditional routes
Args:
- flow_id, the formatted CX Flow ID to use
- search, string to search
Returns:
- locator, dataframe of the results of where this string was found
"""
locator = pd.DataFrame()
flow = self.flows.get_flow(flow_id=flow_id)
i = 1
for route in flow.transition_routes:
if search.lower() in route.condition.lower():
iter_frame = pd.DataFrame(
columns=["resource_id", "condition", "route_id"],
data=[[flow_id, route.condition, i]],
)
locator = locator.append(iter_frame)
i += 1
return locator
def search_conditionals(
self,
search,
agent_id,
flow_name=None,
page_name=None,
flag_search_all=False,
):
"""This is the master function where a user can search across
all pages in a flow, an entire agent etc.
Search conditionals for an exact string in conditional routes.
Args:
- search, string to search
- agent_id, the formatted CX Agent ID to use
- flow_name, (optional) the display name of the flow to search
- page_name, (optional) the display name of the page to search
- flag_search_all, (optional)
When set to True:
-if just an agent_id then entire agent is searched
-if just an agent_id and flow_name are specified
an entire flow is searched
-if an agent_id, flow_name and page_name are specified
a page is searched
When set to False:
-if just an agent_id and flow_name are specified
only the start page of the flow is searched
-if an agent_id, flow_name and page_name are specified
a page is searched
Returns:
- locator, dataframe of the results of where this string was found
"""
if page_name:
try:
flows_map = self.flows.get_flows_map(
agent_id=agent_id, reverse=True
)
# check - maybe other error types here
except ValueError:
logging.error(
"%s is not a valid flow_name for agent %s",
flow_name,
agent_id,
)
try:
pages_map = self.pages.get_pages_map(
flow_id=flows_map[flow_name], reverse=True
)
return self.search_conditionals_page(
page_id=pages_map[page_name], search=search
)
except ValueError:
logging.error(
"%s is not a valid page_name for flow %s in agent %s",
page_name,
flow_name,
agent_id,
)
if flow_name:
locator = pd.DataFrame()
try:
flows_map = self.flows.get_flows_map(
agent_id=agent_id, reverse=True
)
flow_search = self.search_conditionals_flow(
flow_id=flows_map[flow_name], search=search
)
flow_search.insert(0, "resource_name", flow_name)
flow_search.insert(0, "resource_type", "flow")
locator = locator.append(flow_search)
except ValueError:
logging.error(
"%s is not a valid flow_name for agent %s",
flow_name,
agent_id,
)
if flag_search_all:
pages_map = self.pages.get_pages_map(
flow_id=flows_map[flow_name], reverse=True
)
for page in pages_map:
page_search = self.search_conditionals_page(
page_id=pages_map[page], search=search
)
time.sleep(0.5)
page_search.insert(0, "resource_name", page)
page_search.insert(0, "resource_type", "page")
locator = locator.append(page_search)
return locator
if flow_name is None and page_name is None and flag_search_all is True:
locator = pd.DataFrame()
flows_map = self.flows.get_flows_map(
agent_id=agent_id, reverse=True
)
for flow in flows_map:
flow_search = self.search_conditionals_flow(
flow_id=flows_map[flow], search=search
)
flow_search.insert(0, "resource_name", flow)
flow_search.insert(0, "resource_type", "flow")
locator = locator.append(flow_search)
pages_map = self.pages.get_pages_map(
flow_id=flows_map[flow], reverse=True
)
for page in pages_map:
page_search = self.search_conditionals_page(
page_id=pages_map[page], search=search
)
time.sleep(0.5)
page_search.insert(0, "resource_name", page)
page_search.insert(0, "resource_type", "page")
locator = locator.append(page_search)
return locator
# not found
return None
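    # Hypothetical usage sketch (placeholder IDs, not from the original source):
    # searching every flow and page of an agent for one string.
    #
    #   results = sb.search_conditionals(
    #       search="session.params",
    #       agent_id="projects/<p>/locations/<l>/agents/<a>",
    #       flag_search_all=True,
    #   )
    #   # results adds resource_type / resource_name columns to the route hits.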
def find_true_routes(self, agent_id: str = None):
"""This method extracts data to see if routes with no parameters have a
true route or pages with parameters have a true route +
page.params.status = "Final" route. Having these routes ensure a user
can escape this page no matter what.
Args:
- agent_id: The properly formatted CX Agent ID
Returns:
- agent_results: dataframe with:
            flow_display_name: display name of the associated flow
page_display_name: display name of the page with the associated
data
webhook_entry_fulfillments: True if a page has a webhook on entry
else False
has_parameters: True if a page has parameters else False
has_true_route: True if a page has a true route else False
has_true_and_final_route: True if a page has a route with true
+ page.params.status=Final else False
"""
if not agent_id:
agent_id = self.agent_id
agent_results = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
import math
import random
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Type
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from tsbench.analysis.utils import (
loocv_split,
num_fitting_processes,
run_parallel,
)
from tsbench.config import (
Config,
DATASET_REGISTRY,
DatasetConfig,
ModelConfig,
TrainConfig,
)
from tsbench.config.model.models import SeasonalNaiveModelConfig
from tsbench.constants import DEFAULT_DATA_PATH
from tsbench.evaluations.metrics.performance import Metric, Performance
from tsbench.evaluations.tracking import ModelTracker
from tsbench.forecasts import (
ensemble_forecasts,
EnsembleWeighting,
evaluate_forecasts,
Evaluation,
)
class EnsembleAnalyzer:
"""
The ensemble analyzer allows for evaluating the performance of ensembles across datasets. The
analysis is run in parallel and should, thus, not be used in a Jupyter notebook. Instead,
consider using the `tsbench` CLI.
"""
def __init__(
self,
tracker: ModelTracker,
ensemble_size: Optional[int] = 10,
ensemble_weighting: EnsembleWeighting = "uniform",
config_class: Optional[Type[ModelConfig]] = None,
):
"""
Args:
tracker: The tracker from which to obtain pretrained models and forecasts.
ensemble_size: The number of models to use when building an ensemble. If not provided,
uses as many models as possible.
ensemble_weighting: The type of ensemble weighting to use for averaging forecasts.
config_class: The class of models to ensemble. If this is provided, fewer models than
the given ensemble size might be selected.
"""
self.tracker = tracker
self.ensemble_size = ensemble_size
self.ensemble_weighting: EnsembleWeighting = ensemble_weighting
self.config_class = config_class
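    # Hypothetical usage sketch (not part of the original tsbench source):
    # given an already-constructed ModelTracker instance `tracker`,
    #
    #   analyzer = EnsembleAnalyzer(tracker, ensemble_size=10,
    #                               ensemble_weighting="uniform")
    #   metrics_df, members = analyzer.run()
    #   # metrics_df: one row of metrics per held-out dataset
    #   # members: mapping from dataset name to the chosen member configs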
def run(self) -> Tuple[pd.DataFrame, Dict[str, List[ModelConfig]]]:
"""
Runs the evaluation on the data provided via the tracker. The data obtained from the
tracker is partitioned by the dataset and we run "grouped LOOCV" to compute performance
metrics on datasets. Metrics on each dataset are then returned as data frame.
Returns:
The metrics on the individual datasets.
The model choices for each dataset.
"""
results = run_parallel(
self._run_on_dataset,
data=list(loocv_split(self.tracker)),
num_processes=num_fitting_processes(),
)
performances = [r[0] for r in results]
member_mapping = {k: v for r in results for k, v in r[1].items()}
df = | pd.concat(performances) | pandas.concat |
import pandas as pd
import numpy as np
import pybedtools
import csv
import gzip
## Deprecated: superseded by restrict_mutations_by_bed_efficient
def restrict_mutations_by_bed(df_mut, df_bed, unique=True, remove_X=True, replace_cols=False):
""" Restrict mutations to only those that overlap elements in a bed file.
"""
# df_mut = pd.read_table(f_mut, header=None, low_memory=False)
# df_bed = pd.read_table(f_bed, header=None, low_memory=False)
if remove_X:
df_mut = df_mut[df_mut.iloc[:, 0] != "X"]
df_bed = df_bed[df_bed.iloc[:, 0] != "X"]
bed_mut = pybedtools.BedTool.from_dataframe(df_mut)
bed_bed = pybedtools.BedTool.from_dataframe(df_bed)
bed_inter = bed_mut.intersect(bed_bed)
df_inter = bed_inter.to_dataframe()
if unique:
df_inter = df_inter.drop_duplicates()
if replace_cols:
# df_inter.columns = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE', 'GENE', 'ANNOT', 'MUT', 'CONTEXT']
df_inter.columns = df_mut.columns
return df_inter
def restrict_mutations_by_bed_efficient(f_mut, f_bed, bed12=False, drop_duplicates=False, drop_sex=False, replace_cols=False):
""" Restrict mutations to only those that overlap elements in a bed file.
"""
bed_mut = pybedtools.BedTool(f_mut)
bed_bed = pybedtools.BedTool(f_bed)
if bed12:
bed_bed = bed_bed.bed12tobed6()
bed_inter = bed_mut.intersect(bed_bed, wa=True)
df_mut = read_mutation_file(bed_inter.fn, drop_duplicates=drop_duplicates, drop_sex=drop_sex)
return df_mut
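# Hypothetical usage sketch (file names are placeholders, not from the source):
#
#   df_hits = restrict_mutations_by_bed_efficient(
#       "mutations.bed.gz", "elements.bed12", bed12=True,
#       drop_duplicates=True, drop_sex=True,
#   )
#   # df_hits keeps only the mutations that overlap the given elements.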
def read_mutation_file(path, drop_sex=True, drop_duplicates=False, unique_indels=True):
# cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE', 'ANNOT', 'MUT_TYPE', 'CONTEXT']
try:
with open(path) as f:
reader = csv.reader(f, delimiter='\t', skipinitialspace=True)
first_row = next(reader)
except UnicodeDecodeError: ## file is probably gzipped
with gzip.open(path, 'rt') as f:
reader = csv.reader(f, delimiter='\t', skipinitialspace=True)
first_row = next(reader)
num_cols = len(first_row)
if num_cols == 5:
cols = ['CHROM', 'POS', 'REF', 'ALT', 'SAMPLE']
dtype = dict(CHROM=str, POS=int, REF=str, ALT=str, SAMPLE=str)
elif num_cols == 6:
cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE']
dtype = dict(CHROM=str, START=int, END=int, REF=str, ALT=str, SAMPLE=str)
elif num_cols == 7:
cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE', 'ANNOT']
dtype = dict(CHROM=str, START=int, END=int, REF=str, ALT=str, SAMPLE=str, ANNOT=str)
elif num_cols == 8:
cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE', 'GENE', 'ANNOT']
dtype = dict(CHROM=str, START=int, END=int, REF=str, ALT=str, SAMPLE=str, GENE=str, ANNOT=str)
elif num_cols == 9:
cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE', 'ANNOT', 'MUT_TYPE', 'CONTEXT']
dtype = dict(CHROM=str, START=int, END=int, REF=str, ALT=str, SAMPLE=str, ANNOT=str, MUT_TYPE=str, CONTEXT=str)
elif num_cols == 10:
cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE', 'GENE', 'ANNOT', 'MUT_TYPE', 'CONTEXT']
dtype = dict(CHROM=str, START=int, END=int, REF=str, ALT=str, SAMPLE=str, GENE=str, ANNOT=str, MUT_TYPE=str, CONTEXT=str)
    elif num_cols == 11:
        cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE', 'GENE', 'ANNOT', 'MUT_TYPE', 'CONTEXT', 'STRAND']
        dtype = dict(CHROM=str, START=int, END=int, REF=str, ALT=str, SAMPLE=str, GENE=str, ANNOT=str, MUT_TYPE=str, CONTEXT=str, STRAND=str)
    else:
        raise ValueError("Unsupported number of columns ({}) in mutation file: {}".format(num_cols, path))
df = pd.read_csv(path, sep="\t", low_memory=False, names=cols, dtype=dtype)
# df.columns = cols
# df.CHROM = df.CHROM.astype(str)
if drop_sex:
if set(df.CHROM.unique()) - set([str(i) for i in range(1, 23)]):
print('Restricting to autosomes')
df = df[df.CHROM.isin([str(i) for i in range(1, 23)])]
df['CHROM'] = df.CHROM.astype(int)
# else:
# dtype['CHROM'] = str
# df = df.astype(dtype)
if drop_duplicates:
df = drop_duplicate_mutations(df)
if unique_indels:
df = get_unique_indels(df)
return df
def drop_duplicate_mutations(df_mut):
df_dedup = df_mut.drop_duplicates(['CHROM', 'START', 'END', 'REF', 'ALT', 'SAMPLE'])
return df_dedup
def get_unique_indels(df_mut):
df_indel = df_mut[df_mut.ANNOT == 'INDEL']
df_snv = df_mut[df_mut.ANNOT != 'INDEL']
df_indel = df_indel.drop_duplicates(subset=['CHROM', 'START', 'END', 'REF', 'ALT', 'GENE'])
return pd.concat([df_snv, df_indel])
def tabulate_nonc_mutations_split(f_nonc_bed, f_mut):
try:
# df_nonc = pd.read_csv(f_nonc_bed, sep= '\t', header=None, low_memory = False,
# names=None)
df_nonc = pd.read_table(f_nonc_bed, names=['CHROM', 'START', 'END', "ELT", "SCORE", "STRAND", 'thickStart', 'thickEnd', 'rgb', 'blockCount', 'blockSizes', 'blockStarts'], low_memory=False)
df_nonc.CHROM = df_nonc.CHROM.astype(str)
df_nonc = df_nonc[df_nonc.CHROM.isin([str(c) for c in range(1, 23)])]
df_nonc.CHROM = df_nonc.CHROM.astype(int)
except:
raise Exception("ERROR: failed to load {}. make sure the bed file is in the correct bed12 format".format(df_nonc))
df_mut = read_mutation_file(f_mut, drop_duplicates=True)
    assert ('GENE' in df_mut.columns and 'ANNOT' in df_mut.columns and 'MUT_TYPE' in df_mut.columns)
# try:
# df_mut = pd.read_csv(f_mut,sep='\t', low_memory=False, index_col=False,
# names=['CHROM', 'START', 'END', 'REF', 'ALT', 'ID', 'GENE', 'ANNOT', 'MUT','CONTEXT'])
# except:
# raise Exception("ERROR: failed to load {}. make sure mut file is properly processed".format(df_mut))
if 'chr' in str(df_nonc.CHROM[0]):
df_nonc.CHROM = df_nonc.CHROM.map(lambda x: x.lstrip('chr'))
bed_mut = pybedtools.BedTool.from_dataframe(df_mut)
bed_bed = pybedtools.BedTool.from_dataframe(df_nonc)
bed_split = bed_bed.bed6()
bed_inter = bed_split.intersect(bed_mut, wao=True)
df_inter = bed_inter.to_dataframe(header=None, names=np.arange(bed_inter.field_count()))
df_inter = df_inter.drop(columns = [4,6,7,8,9,10,12,13,14,15])
# df_split = df_inter.groupby([0,1,2,3,5]).agg({11:lambda x:len(set(x).difference(set('.'))), 16:np.sum}).reset_index()
# df_split.columns = ['CHROM', 'START', 'END', 'ELT', 'STRAND','OBS_SAMPLES','OBS_MUT']
df_whole = pd.pivot_table(df_inter, values = [1,2,11,16], index = [0,3,5], aggfunc={16:np.sum, 1:lambda x: sorted(set(x)), 2:lambda x: sorted(set(x)),11:lambda x:len(set(x).difference(set('.')))}).reset_index()
df_whole.columns = ['CHROM', 'ELT', 'STRAND','BLOCK_STARTS', 'BLOCK_ENDS', 'OBS_SAMPLES', 'OBS_MUT']
return None, df_whole
# return df_split, df_whole
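# Hypothetical usage sketch (placeholder paths, not from the original source):
#
#   _, df_whole = tabulate_nonc_mutations_split("elements.bed12", "mutations.bed")
#   # df_whole: one row per (CHROM, ELT, STRAND) with aggregated
#   # OBS_SAMPLES / OBS_MUT counts across the element's blocks.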
def tabulate_mutations_in_element(f_mut, f_elt_bed, bed12=False, drop_duplicates=False, all_elements=False,
max_muts_per_sample=1e9, max_muts_per_elt_per_sample=3e9, return_blacklist=False):
df_cnt = tabulate_muts_per_sample_per_element(f_mut, f_elt_bed, bed12=bed12, drop_duplicates=drop_duplicates)
df_cnt.rename({'SAMPLE': 'OBS_SAMPLES'}, axis=1, inplace=True)
# Remove hypermutated samples and cap total mutations from a sample in an element
# *if there are any samples
if len(df_cnt) > 0:
df_cnt_sample = df_cnt.pivot_table(index='OBS_SAMPLES', values='OBS_MUT', aggfunc=np.sum)
blacklist = df_cnt_sample[df_cnt_sample.OBS_MUT > max_muts_per_sample].index
df_cnt = df_cnt[~df_cnt.OBS_SAMPLES.isin(blacklist)]
else:
blacklist = []
df_cnt.loc[df_cnt.OBS_SNV > max_muts_per_elt_per_sample, 'OBS_SNV'] = max_muts_per_elt_per_sample
df_cnt.loc[df_cnt.OBS_INDEL > max_muts_per_elt_per_sample, 'OBS_INDEL'] = max_muts_per_elt_per_sample
df_summary = df_cnt.pivot_table(index='ELT',
values=['OBS_SAMPLES', 'OBS_SNV', 'OBS_INDEL'],
aggfunc={'OBS_SAMPLES': len, 'OBS_SNV': np.sum, 'OBS_INDEL': np.sum})
if len(df_summary) == 0:
df_summary = | pd.DataFrame({'OBS_SAMPLES':[], 'OBS_SNV':[], 'OBS_INDEL':[], 'ELT':[]}) | pandas.DataFrame |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
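# Illustrative sketch (not part of the original tests): the helper above turns
# point-in-time observations into a forward-filled expected frame, e.g.
#
#   create_expected_df_for_factor_compute(
#       start_date=pd.Timestamp("2015-01-05"),
#       sids=[0, 1],
#       tuples=[(0, 100.0, pd.Timestamp("2015-01-06"))],
#       end_date=pd.Timestamp("2015-01-08"),
#   )
#   # -> one column per sid, indexed by (at_date, knowledge_date), with the
#   #    estimate forward-filled from its knowledge date; sid 1 stays NaN.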
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the expected values when
    loading estimates for a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the expected values when
    loading estimates for a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
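    # For illustration (not from the original source): one generated sid might
    # see its two Q1 estimates on 2015-01-01 and 2015-01-04 and its two Q2
    # estimates on 2015-01-14 and 2015-01-17, while another sid sees a Q2
    # estimate arrive before its second Q1 estimate; the loaders must pick the
    # correct "time zero" quarter under every such ordering.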
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
| pd.Timestamp("2015-01-07") | pandas.Timestamp |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
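# Illustrative sketch (not from the original file): for a DataFrame `df`,
# _axify(df, [0, 1], 1) returns (slice(None), [0, 1]) -- the key is placed in
# the column slot, so df.iloc[_axify(df, [0, 1], 1)] selects the first two
# columns across all rows.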
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then it's ok; otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = | DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]}) | pandas.core.api.DataFrame |
#
# Set up Jupyter MAGIC commands "sql".
# %sql will return results from a DB2 select statement or execute a DB2 command
#
# IBM 2021: <NAME>
# Version 2021-11-26
#
from __future__ import print_function
import multiprocessing
from IPython.display import HTML as pHTML, Image as pImage, display as pdisplay, Javascript as Javascript
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic, needs_local_scope)
import ibm_db
import pandas
import ibm_db_dbi
import json
import getpass
import pickle
import time
import re
import warnings
import matplotlib
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
_settings = {
"maxrows" : 10,
"maxgrid" : 5,
"display" : "PANDAS",
"threads" : 0,
"database" : "",
"hostname" : "localhost",
"port" : "50000",
"protocol" : "TCPIP",
"uid" : "DB2INST1",
"pwd" : "password",
"ssl" : "",
"passthru" : ""
}
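# The settings above are defaults/placeholders only; the CONNECT command overrides them and
# load_settings()/save_settings() below persist them between sessions in db2connect.pickle.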
_environment = {
"jupyter" : True,
"qgrid" : True
}
_display = {
'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': False,
'defaultColumnWidth': 150,
'rowHeight': 28,
'enableColumnReorder': False,
'enableTextSelectionOnCells': True,
'editable': False,
'autoEdit': False,
'explicitInitialization': True,
'maxVisibleRows': 5,
'minVisibleRows': 5,
'sortable': True,
'filterable': False,
'highlightSelectedCell': False,
'highlightSelectedRow': True
}
# Db2 and Pandas data types
_db2types = ["unknown",
"string",
"smallint",
"int",
"bigint",
"real",
"float",
"decfloat16",
"decfloat34",
"decimal",
"boolean",
"clob",
"blob",
"xml",
"date",
"time",
"timestamp"]
_pdtypes = ["object",
"string",
"Int16",
"Int32",
"Int64",
"float32",
"float64",
"float64",
"float64",
"float64",
"boolean",
"string",
"object",
"string",
"string",
"string",
"datetime64"]
# Connection settings for statements
_connected = False
_hdbc = None
_hdbi = None
_stmt = []
_stmtID = []
_stmtSQL = []
_vars = {}
_macros = {}
_flags = []
_debug = False
# Db2 Error Messages and Codes
sqlcode = 0
sqlstate = "0"
sqlerror = ""
sqlelapsed = 0
# Check to see if QGrid is installed
try:
import qgrid
qgrid.set_defaults(grid_options=_display)
except:
_environment['qgrid'] = False
if (_environment['qgrid'] == False):
print("Warning: QGRID is unavailable for displaying results in scrollable windows.")
print(" Install QGRID if you want to enable scrolling of result sets.")
# Check if we are running in iPython or Jupyter
try:
if (get_ipython().config == {}):
_environment['jupyter'] = False
_environment['qgrid'] = False
else:
_environment['jupyter'] = True
except:
_environment['jupyter'] = False
_environment['qgrid'] = False
# Check if pandas supports data types in the data frame - Introduced in 1.3 of pandas
_pandas_dtype = False
try:
_vrm = pandas.__version__.split(".")
_version = 0
_release = 0
_modlevel = 0
if (len(_vrm) >= 1):
_version = int(_vrm[0])
if (len(_vrm) >= 2):
_release = int(_vrm[1])
if (len(_vrm) >= 3):
_modlevel = int(_vrm[2])
if (_version >= 1 and _release >= 3):
_pandas_dtype = True
else:
_pandas_dtype = False
except:
_pandas_dtype = False
if (_pandas_dtype == False):
print("Warning: PANDAS level does not support Db2 typing which will can increase memory usage.")
print(" Install PANDAS version 1.3+ for more efficient dataframe creation.")
# Check if we have parallism available
_parallel = False
try:
import multiprocessing as mp
from multiprocessing.sharedctypes import Value, Array
_parallel = True
except:
_parallel = False
if (_parallel == False):
print("Warning: Parallelism is unavailable and THREADS option will be ignored.")
print(" Install MULTIPROCESSING if you want allow multiple SQL threads to run in parallel.")
_settings["threads"] = 0
#
# Set Options for the Db2 Magic Commands
#
def setOptions(inSQL):
global _settings, _display
cParms = inSQL.split()
cnt = 0
if (len(cParms) == 1):
print("(MAXROWS) Maximum number of rows displayed: " + str(_settings.get("maxrows",10)))
print("(MAXGRID) Maximum grid display size: " + str(_settings.get("maxgrid",5)))
print("(DISPLAY) Use PANDAS or GRID display format for output: " + _settings.get("display","PANDAS"))
print("(THREADS) Maximum number of threads to use when running SQL: " + str(_settings.get("threads",0)))
return
while cnt < len(cParms):
if cParms[cnt][0] == "?":
print("%sql OPTION MAXROWS n MAXGRID n DISPLAY n THREADS n")
print("LIST - List the current option settings")
print("MAXROWS n - The maximum number of rows displayed when returning results")
print("MAXGRID n - Maximum size of a scrollable GRID window")
print("THREADS n - Maximum number of parallel threads to use when running SQL")
return
if cParms[cnt].upper() == 'MAXROWS':
if cnt+1 < len(cParms):
try:
_settings["maxrows"] = int(cParms[cnt+1])
if (_settings["maxrows"] > 100 or _settings["maxrows"] <= 0):
_settings["maxrows"] = 100
except Exception as err:
errormsg("Invalid MAXROWS value provided.")
pass
cnt = cnt + 1
else:
errormsg("No maximum rows specified for the MAXROWS option.")
return
elif cParms[cnt].upper() == 'MAXGRID':
if cnt+1 < len(cParms):
try:
maxgrid = int(cParms[cnt+1])
if (maxgrid <= 5): # Minimum window size is 5
maxgrid = 5
_display["maxVisibleRows"] = int(maxgrid)
try:
qgrid.set_defaults(grid_options=_display)
_settings["maxgrid"] = maxgrid
except:
_environment['qgrid'] = False
except Exception as err:
errormsg("Invalid MAXGRID value provided.")
pass
cnt = cnt + 1
else:
errormsg("No maximum rows specified for the MAXGRID option.")
return
elif cParms[cnt].upper() == 'DISPLAY':
if cnt+1 < len(cParms):
if (cParms[cnt+1].upper() == 'GRID'):
_settings["display"] = 'GRID'
elif (cParms[cnt+1].upper() == 'PANDAS'):
_settings["display"] = 'PANDAS'
else:
errormsg("Invalid DISPLAY value provided.")
cnt = cnt + 1
else:
errormsg("No value provided for the DISPLAY option.")
return
elif cParms[cnt].upper() == 'THREADS':
if cnt+1 < len(cParms):
try:
threads = int(cParms[cnt+1])
if (threads < 0):
threads = 0
elif (threads > 12):
threads = 12
else:
pass
_settings["threads"] = threads
except Exception as err:
errormsg("Invalid THREADS value provided.")
pass
cnt = cnt + 1
else:
errormsg("No thread count specified for the THREADS option.")
return
elif (cParms[cnt].upper() == 'LIST'):
print("(MAXROWS) Maximum number of rows displayed: " + str(_settings.get("maxrows",10)))
print("(MAXGRID) Maximum grid display size: " + str(_settings.get("maxgrid",5)))
print("(DISPLAY) Use PANDAS or GRID display format for output: " + _settings.get("display","PANDAS"))
print("(THREADS) Maximum number of threads to use when running SQL: " + str(_settings.get("threads",0)))
return
else:
cnt = cnt + 1
save_settings()
print("(MAXROWS) Maximum number of rows displayed: " + str(_settings.get("maxrows",10)))
print("(MAXGRID) Maximum grid display size: " + str(_settings.get("maxgrid",5)))
print("(DISPLAY) Use PANDAS or GRID display format for output: " + _settings.get("display","PANDAS"))
print("(THREADS) Maximum number of threads to use when running SQL: " + str(_settings.get("threads",0)))
return
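# Example usage from a notebook cell (illustrative only; assumes the %sql magic has been registered):
#   %sql OPTION MAXROWS 25 DISPLAY GRID THREADS 4
#   %sql OPTION LIST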
#
# Display help (link to documentation)
#
def sqlhelp():
global _environment
print("Db2 Magic Documentation: https://ibm.github.io/db2-jupyter/")
return
# Split port and IP addresses
def split_string(in_port,splitter=":"):
# Split input into an IP address and Port number
global _settings
checkports = in_port.split(splitter)
ip = checkports[0]
if (len(checkports) > 1):
port = checkports[1]
else:
port = None
return ip, port
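# For example, split_string("db2host.example.com:50001") returns ("db2host.example.com", "50001"),
# while split_string("db2host.example.com") returns ("db2host.example.com", None); the host name
# here is only an illustrative placeholder.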
# Parse the CONNECT statement and execute if possible
def parseConnect(inSQL,local_ns):
global _settings, _connected
_connected = False
cParms = inSQL.split()
cnt = 0
_settings["ssl"] = ""
while cnt < len(cParms):
if cParms[cnt].upper() == 'TO':
if cnt+1 < len(cParms):
_settings["database"] = cParms[cnt+1].upper()
cnt = cnt + 1
else:
errormsg("No database specified in the CONNECT statement")
return
elif cParms[cnt].upper() == "SSL":
if cnt+1 < len(cParms):
if (cParms[cnt+1].upper() in ("ON","TRUE")):
_settings["ssl"] = "SECURITY=SSL;"
elif (cParms[cnt+1].upper() in ("OFF","FALSE")):
_settings["ssl"] = ""
elif (cParms[cnt+1] != ""):
cert = cParms[cnt+1]
_settings["ssl"] = "Security=SSL;SSLServerCertificate={cert};"
cnt = cnt + 1
else:
errormsg("No setting provided for the SSL option (ON | OFF | TRUE | FALSE | certifcate)")
return
elif cParms[cnt].upper() == 'CREDENTIALS':
if cnt+1 < len(cParms):
credentials = cParms[cnt+1]
if (credentials in local_ns):
tempid = eval(credentials,local_ns)
if (isinstance(tempid,dict) == False):
errormsg("The CREDENTIALS variable (" + credentials + ") does not contain a valid Python dictionary (JSON object)")
return
else:
tempid = None
if (tempid == None):
fname = credentials + ".pickle"
try:
with open(fname,'rb') as f:
_id = pickle.load(f)
except:
errormsg("Unable to find credential variable or file.")
return
else:
_id = tempid
try:
_settings["database"] = _id.get("db","")
_settings["hostname"] = _id.get("hostname","")
_settings["port"] = _id.get("port","50000")
_settings["uid"] = _id.get("username","")
_settings["pwd"] = _id.get("password","")
try:
fname = credentials + ".pickle"
with open(fname,'wb') as f:
pickle.dump(_id,f)
except:
errormsg("Failed trying to write Db2 Credentials.")
return
except:
errormsg("Credentials file is missing information. db/hostname/port/username/password required.")
return
else:
errormsg("No Credentials name supplied")
return
cnt = cnt + 1
elif cParms[cnt].upper() == 'USER':
if cnt+1 < len(cParms):
_settings["uid"] = cParms[cnt+1]
cnt = cnt + 1
else:
errormsg("No userid specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'USING':
if cnt+1 < len(cParms):
_settings["pwd"] = cParms[cnt+1]
if (_settings.get("pwd","?") == '?'):
_settings["pwd"] = getpass.getpass("Password [password]: ") or "password"
cnt = cnt + 1
else:
errormsg("No password specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'HOST':
if cnt+1 < len(cParms):
hostport = cParms[cnt+1]
ip, port = split_string(hostport)
if (port == None): _settings["port"] = "50000"
_settings["hostname"] = ip
cnt = cnt + 1
else:
errormsg("No hostname specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'PORT':
if cnt+1 < len(cParms):
_settings["port"] = cParms[cnt+1]
cnt = cnt + 1
else:
errormsg("No port specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'PASSTHRU':
if cnt+1 < len(cParms):
_settings["passthru"] = cParms[cnt+1]
cnt = cnt + 1
else:
errormsg("No passthru parameters specified in the CONNECT statement")
return
elif cParms[cnt].upper() in ('CLOSE','RESET') :
try:
result = ibm_db.close(_hdbc)
_hdbi.close()
except:
pass
success("Connection closed.")
if cParms[cnt].upper() == 'RESET':
_settings["database"] = ''
return
else:
cnt = cnt + 1
_ = db2_doConnect()
def db2_doConnect():
global _hdbc, _hdbi, _connected
global _settings
if _connected == False:
if len(_settings.get("database","")) == 0:
return False
dsn = (
"DRIVER={{IBM DB2 ODBC DRIVER}};"
"DATABASE={0};"
"HOSTNAME={1};"
"PORT={2};"
"PROTOCOL=TCPIP;ConnectTimeout=15;"
"UID={3};"
"PWD={4};{5};{6}").format(_settings.get("database",""),
_settings.get("hostname",""),
_settings.get("port","50000"),
_settings.get("uid",""),
_settings.get("pwd",""),
_settings.get("ssl",""),
_settings.get("passthru",""))
# Get a database handle (hdbc) and a statement handle (hstmt) for subsequent access to DB2
try:
_hdbc = ibm_db.connect(dsn, "", "")
except Exception as err:
db2_error(False,True) # errormsg(str(err))
_connected = False
_settings["database"] = ''
return False
try:
_hdbi = ibm_db_dbi.Connection(_hdbc)
except Exception as err:
db2_error(False,True) # errormsg(str(err))
_connected = False
_settings["database"] = ''
return False
_connected = True
# Save the values for future use
save_settings()
success("Connection successful.")
return True
def load_settings():
# This routine will load the settings from the previous session if they exist
global _settings
fname = "db2connect.pickle"
try:
with open(fname,'rb') as f:
_settings = pickle.load(f)
_settings["maxgrid"] = 5
except:
pass
return
def save_settings():
# This routine will save the current settings if they exist
global _settings
fname = "db2connect.pickle"
try:
with open(fname,'wb') as f:
pickle.dump(_settings,f)
except:
errormsg("Failed trying to write Db2 Configuration Information.")
return
def db2_error(quiet,connect=False):
global sqlerror, sqlcode, sqlstate, _environment
try:
if (connect == False):
errmsg = ibm_db.stmt_errormsg().replace('\r',' ')
errmsg = errmsg[errmsg.rfind("]")+1:].strip()
else:
errmsg = ibm_db.conn_errormsg().replace('\r',' ')
errmsg = errmsg[errmsg.rfind("]")+1:].strip()
sqlerror = errmsg
msg_start = errmsg.find("SQLSTATE=")
if (msg_start != -1):
msg_end = errmsg.find(" ",msg_start)
if (msg_end == -1):
msg_end = len(errmsg)
sqlstate = errmsg[msg_start+9:msg_end]
else:
sqlstate = "0"
msg_start = errmsg.find("SQLCODE=")
if (msg_start != -1):
msg_end = errmsg.find(" ",msg_start)
if (msg_end == -1):
msg_end = len(errmsg)
sqlcode = errmsg[msg_start+8:msg_end]
try:
sqlcode = int(sqlcode)
except:
pass
else:
sqlcode = 0
except:
errmsg = "Unknown error."
sqlcode = -99999
sqlstate = "-99999"
sqlerror = errmsg
return
msg_start = errmsg.find("SQLSTATE=")
if (msg_start != -1):
msg_end = errmsg.find(" ",msg_start)
if (msg_end == -1):
msg_end = len(errmsg)
sqlstate = errmsg[msg_start+9:msg_end]
else:
sqlstate = "0"
msg_start = errmsg.find("SQLCODE=")
if (msg_start != -1):
msg_end = errmsg.find(" ",msg_start)
if (msg_end == -1):
msg_end = len(errmsg)
sqlcode = errmsg[msg_start+8:msg_end]
try:
sqlcode = int(sqlcode)
except:
pass
else:
sqlcode = 0
if quiet == True: return
if (errmsg == ""): return
html = '<p><p style="border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;">'
if (_environment["jupyter"] == True):
pdisplay(pHTML(html+errmsg+"</p>"))
else:
print(errmsg)
# Print out an error message
def errormsg(message):
global _environment
if (message != ""):
html = '<p><p style="border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;">'
if (_environment["jupyter"] == True):
pdisplay(pHTML(html + message + "</p>"))
else:
print(message)
def success(message):
if (message not in (None,"")):
print(message)
return
def debug(message,error=False):
global _environment
if (message in (None,"")):
return
if (_environment["jupyter"] == True):
spacer = "<br>" + " "
else:
spacer = "\n "
lines = message.split('\n')
msg = ""
indent = 0
for line in lines:
delta = line.count("(") - line.count(")")
if (msg == ""):
msg = line
indent = indent + delta
else:
if (delta < 0): indent = indent + delta
msg = msg + spacer * (indent*2) + line
if (delta > 0): indent = indent + delta
if (indent < 0): indent = 0
if (error == True):
html = '<p><pre style="font-family: monospace; border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;">'
else:
html = '<p><pre style="font-family: monospace; border:2px; border-style:solid; border-color:#008000; background-color:#e6ffe6; padding: 1em;">'
if (_environment["jupyter"] == True):
pdisplay(pHTML(html + msg + "</pre></p>"))
else:
print(msg)
return
def setMacro(inSQL,parms):
global _macros
names = parms.split()
if (len(names) < 2):
errormsg("No command name supplied.")
return None
macroName = names[1].upper()
_macros[macroName] = inSQL # inSQL.replace("\t"," ")
return
def checkMacro(in_sql):
global _macros
if (len(in_sql) == 0): return(in_sql) # Nothing to do
tokens = parseArgs(in_sql,None) # Take the string and reduce into tokens
macro_name = tokens[0].upper() # Uppercase the name of the token
if (macro_name not in _macros):
return(in_sql) # No macro by this name so just return the string
result = runMacro(_macros[macro_name],in_sql,tokens) # Execute the macro using the tokens we found
return(result) # Runmacro will either return the original SQL or the new one
def splitassign(arg):
var_name = "null"
var_value = "null"
arg = arg.strip()
eq = arg.find("=")
if (eq != -1):
var_name = arg[:eq].strip()
temp_value = arg[eq+1:].strip()
if (temp_value != ""):
ch = temp_value[0]
if (ch in ["'",'"']):
if (temp_value[-1:] == ch):
var_value = temp_value[1:-1]
else:
var_value = temp_value
else:
var_value = temp_value
else:
var_value = arg
return var_name, var_value
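# Illustrative behaviour: splitassign("x='abc'") returns ("x", "abc"), while an argument without
# an "=" such as splitassign("abc") returns ("null", "abc").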
def parseArgs(argin,_vars):
quoteChar = ""
blockChar = ""
inQuote = False
inBlock = False
inArg = True
args = []
arg = ''
for ch in argin.lstrip():
if (inBlock == True):
if (ch == ")"):
inBlock = False
arg = arg + ch
else:
arg = arg + ch
elif (inQuote == True):
if (ch == quoteChar):
inQuote = False
arg = arg + ch #z
else:
arg = arg + ch
elif (ch == "("): # Do we have a block
arg = arg + ch
inBlock = True
elif (ch == "\"" or ch == "\'"): # Do we have a quote
quoteChar = ch
arg = arg + ch #z
inQuote = True
elif (ch == " "):
if (arg != ""):
arg = subvars(arg,_vars)
args.append(arg)
else:
args.append("null")
arg = ""
else:
arg = arg + ch
if (arg != ""):
arg = subvars(arg,_vars)
args.append(arg)
return(args)
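# Rough example (assuming _vars maps "name" to "DB2"): parseArgs("select {name} (a,b)", _vars)
# yields ["select", "DB2", "(a,b)"] -- quoted strings and (...) blocks are kept as single tokens.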
def runMacro(script,in_sql,tokens):
result = ""
runIT = True
code = script.split("\n")
level = 0
runlevel = [True,False,False,False,False,False,False,False,False,False]
ifcount = 0
flags = ""
_vars = {}
for i in range(0,len(tokens)):
vstr = str(i)
_vars[vstr] = tokens[i]
if (len(tokens) == 0):
_vars["argc"] = "0"
else:
_vars["argc"] = str(len(tokens)-1)
for line in code:
line = line.strip()
if (line == "" or line == "\n"): continue
if (line[0] == "#"): continue # A comment line starts with a # in the first position of the line
args = parseArgs(line,_vars) # Get all of the arguments
if (args[0] == "if"):
ifcount = ifcount + 1
if (runlevel[level] == False): # You can't execute this statement
continue
level = level + 1
if (len(args) < 4):
print("Macro: Incorrect number of arguments for the if clause.")
return in_sql
arg1 = args[1]
arg2 = args[3]
if (len(arg2) > 2):
ch1 = arg2[0]
ch2 = arg2[-1:]
if (ch1 in ['"',"'"] and ch1 == ch2):
arg2 = arg2[1:-1].strip()
op = args[2]
if (op in ["=","=="]):
if (arg1 == arg2):
runlevel[level] = True
else:
runlevel[level] = False
elif (op in ["<=","=<"]):
if (arg1 <= arg2):
runlevel[level] = True
else:
runlevel[level] = False
elif (op in [">=","=>"]):
if (arg1 >= arg2):
runlevel[level] = True
else:
runlevel[level] = False
elif (op in ["<>","!="]):
if (arg1 != arg2):
runlevel[level] = True
else:
runlevel[level] = False
elif (op in ["<"]):
if (arg1 < arg2):
runlevel[level] = True
else:
runlevel[level] = False
elif (op in [">"]):
if (arg1 > arg2):
runlevel[level] = True
else:
runlevel[level] = False
else:
print("Macro: Unknown comparison operator in the if statement:" + op)
continue
elif (args[0] in ["exit","echo"] and runlevel[level] == True):
msg = ""
for msgline in args[1:]:
if (msg == ""):
msg = subvars(msgline,_vars)
else:
msg = msg + " " + subvars(msgline,_vars)
if (msg != ""):
if (args[0] == "echo"):
debug(msg,error=False)
else:
debug(msg,error=True)
if (args[0] == "exit"): return ''
elif (args[0] == "pass" and runlevel[level] == True):
pass
elif (args[0] == "flags" and runlevel[level] == True):
if (len(args) > 1):
for i in range(1,len(args)):
flags = flags + " " + args[i]
flags = flags.strip()
elif (args[0] == "var" and runlevel[level] == True):
value = ""
for val in args[2:]:
if (value == ""):
value = subvars(val,_vars)
else:
value = value + " " + subvars(val,_vars)
value.strip()
_vars[args[1]] = value
elif (args[0] == 'else'):
if (ifcount == level):
runlevel[level] = not runlevel[level]
elif (args[0] == 'return' and runlevel[level] == True):
return(f"{flags} {result}")
elif (args[0] == "endif"):
ifcount = ifcount - 1
if (ifcount < level):
level = level - 1
if (level < 0):
print("Macro: Unmatched if/endif pairs.")
return ''
else:
if (runlevel[level] == True):
if (result == ""):
result = subvars(line,_vars)
else:
result = result + "\n" + subvars(line,_vars)
return(f"{flags} {result}")
def subvars(script,_vars):
if (_vars == None): return script
remainder = script
result = ""
done = False
while done == False:
bv = remainder.find("{")
if (bv == -1):
done = True
continue
ev = remainder.find("}")
if (ev == -1):
done = True
continue
result = result + remainder[:bv]
vvar = remainder[bv+1:ev].strip()
remainder = remainder[ev+1:]
modifier = ""
if (len(vvar) == 0):
errormsg(f"No variable name supplied in the braces {{}}")
return script
upper = False
allvars = False
concat = " "
if (len(vvar) > 1):
modifier = vvar[0]
if (modifier == "^"):
upper = True
vvar = vvar[1:]
elif (modifier == "*"):
vvar = vvar[1:]
allvars = True
concat = " "
elif (vvar[0] == ","):
vvar = vvar[1:]
allvars = True
concat = ","
else:
pass
if (vvar in _vars):
if (upper == True):
items = _vars[vvar].upper()
elif (allvars == True):
try:
iVar = int(vvar)
except:
return(script)
items = ""
sVar = str(iVar)
while sVar in _vars:
if (items == ""):
items = _vars[sVar]
else:
items = items + concat + _vars[sVar]
iVar = iVar + 1
sVar = str(iVar)
else:
items = _vars[vvar]
else:
if (allvars == True):
items = ""
else:
items = "null"
result = result + items
if (remainder != ""):
result = result + remainder
return(result)
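# Illustrative substitutions, assuming _vars = {"1": "alpha", "2": "beta"}:
#   subvars("{1}", _vars)  -> "alpha"
#   subvars("{^1}", _vars) -> "ALPHA"          (^ upper-cases the value)
#   subvars("{*1}", _vars) -> "alpha beta"     (* concatenates numbered values from there on)
#   subvars("{,1}", _vars) -> "alpha,beta"     (, concatenates them with commas)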
def splitargs(arguments):
import types
# Strip the string and remove the ( and ) characters if they are at the beginning and end of the string
results = []
step1 = arguments.strip()
if (len(step1) == 0): return(results) # Not much to do here - no args found
if (step1[0] == '('):
if (step1[-1:] == ')'):
step2 = step1[1:-1]
step2 = step2.strip()
else:
step2 = step1
else:
step2 = step1
# Now we have a string without brackets. Start scanning for commas
quoteCH = ""
pos = 0
arg = ""
args = []
while pos < len(step2):
ch = step2[pos]
if (quoteCH == ""): # Are we in a quote?
if (ch in ('"',"'")): # Check to see if we are starting a quote
quoteCH = ch
arg = arg + ch
pos += 1
elif (ch == ","): # Are we at the end of a parameter?
arg = arg.strip()
args.append(arg)
arg = ""
inarg = False
pos += 1
else: # Continue collecting the string
arg = arg + ch
pos += 1
else:
if (ch == quoteCH): # Are we at the end of a quote?
arg = arg + ch # Add the quote to the string
pos += 1 # Increment past the quote
quoteCH = "" # Stop quote checking (maybe!)
else:
pos += 1
arg = arg + ch
if (quoteCH != ""): # So we didn't end our string
arg = arg.strip()
args.append(arg)
elif (arg != ""): # Something left over as an argument
arg = arg.strip()
args.append(arg)
else:
pass
results = []
for arg in args:
result = []
if (len(arg) > 0):
if (arg[0] in ('"',"'")):
value = arg[1:-1]
isString = True
isNumber = False
else:
isString = False
isNumber = False
try:
value = eval(arg)
if (type(value) == int):
isNumber = True
elif (isinstance(value,float) == True):
isNumber = True
else:
value = arg
except:
value = arg
else:
value = ""
isString = False
isNumber = False
result = [value,isString,isNumber]
results.append(result)
return results
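# Sketch of the output format: splitargs("('abc',123)") evaluates to
# [["abc", True, False], [123, False, True]], i.e. one [value, isString, isNumber] entry per argument.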
def createDF(hdbc,hdbi,sqlin,local_ns):
import datetime
import ibm_db
global sqlcode, _settings, _parallel
NoDF = False
YesDF = True
if (hdbc == None or hdbi == None):
errormsg("You need to connect to a database before issuing this command.")
return NoDF, None
# Strip apart the command into tokens based on spaces
tokens = sqlin.split()
token_count = len(tokens)
if (token_count < 5): # Not enough parameters
errormsg("Insufficient arguments for USING command")
return NoDF, None
keyword_command = tokens[0].upper()
dfName = tokens[1]
keyword_create = tokens[2].upper()
keyword_table = tokens[3].upper()
table = tokens[4]
if (dfName not in local_ns):
errormsg("The variable ({dfName}) does not exist in the local variable list.")
return NoDF, None
try:
dfValue = eval(dfName,None,local_ns) # globals()[varName] # eval(varName)
except:
errormsg("The variable ({dfName}) does not contain a value.")
return NoDF, None
if (keyword_create in ("SELECT","WITH")):
if (_parallel == False):
errormsg("Parallelism is not availble on this system.")
return NoDF, None
thread_count = _settings.get("threads",0)
if (thread_count in (0,1)):
errormsg("The THREADS option is currently set to 0 or 1 which disables parallelism.")
return NoDF, None
ok, df = dfSQL(hdbc,hdbi,sqlin,dfName,dfValue,thread_count)
if (ok == False):
return NoDF, None
else:
return YesDF, df
if (isinstance(dfValue,pandas.DataFrame) == False): # Not a Pandas dataframe
errormsg("The variable ({dfName}) is not a Pandas dataframe.")
return NoDF, None
if (keyword_create not in ("CREATE","REPLACE","APPEND") or keyword_table != "TABLE"):
errormsg("Incorrect syntax: %sql using <df> create table <name> [options]")
return NoDF, None
if (token_count % 2 != 1):
errormsg("Insufficient arguments for USING command.")
return NoDF, None
flag_withdata = False
flag_asis = False
flag_float = False
flag_integer = False
limit = -1
for token_idx in range(5,token_count,2):
option_key = tokens[token_idx].upper()
option_val = tokens[token_idx+1].upper()
if (option_key == "WITH" and option_val == "DATA"):
flag_withdata = True
elif (option_key == "COLUMNS" and option_val == "ASIS"):
flag_asis = True
elif (option_key == "KEEP" and option_val == "FLOAT64"):
flag_float = True
elif (option_key == "KEEP" and option_val == "INT64"):
flag_integer = True
elif (option_key == "LIMIT"):
if (option_val.isnumeric() == False):
errormsg("The LIMIT must be a valid number from -1 (unlimited) to the maximun number of rows to insert")
return NoDF, None
limit = int(option_val)
else:
errormsg("Invalid options. Must be either WITH DATA | COLUMNS ASIS | KEEP FLOAT64 | KEEP FLOAT INT64")
return NoDF, None
if (keyword_create == "REPLACE"):
sql = f"DROP TABLE {table}"
ok = execSQL(hdbc,sql,quiet=True)
sql = []
columns = dict(dfValue.dtypes)
sql.append(f'CREATE TABLE {table} (')
datatypes = []
comma = ""
for column in columns:
datatype = str(columns[column])
datatype = datatype.upper()
if (datatype == "OBJECT"):
datapoint = dfValue[column][0]
if (isinstance(datapoint,datetime.datetime)):
type = "TIMESTAMP"
elif (isinstance(datapoint,datetime.time)):
type = "TIME"
elif (isinstance(datapoint,datetime.date)):
type = "DATE"
elif (isinstance(datapoint,float)):
if (flag_float == True):
type = "FLOAT"
else:
type = "DECFLOAT"
elif (isinstance(datapoint,int)):
if (flag_integer == True):
type = "BIGINT"
else:
type = "INTEGER"
elif (isinstance(datapoint,str)):
maxlength = dfValue[column].apply(str).apply(len).max()
type = f"VARCHAR({maxlength})"
else:
type = "CLOB"
elif (datatype == "INT64"):
type = "BIGINT"
elif (datatype == "INT32"):
type = "INT"
elif (datatype == "INT16"):
type = "SMALLINT"
elif (datatype == "FLOAT64"):
if (flag_float == True):
type = "FLOAT"
else:
type = "DECFLOAT"
elif (datatype == "FLOAT32"):
if (flag_float == True):
type = "REAL"
else:
type = "DECFLOAT"
elif ("DATETIME64" in datatype):
type = "TIMESTAMP"
elif (datatype == "BOOLEAN"):
type = "BINARY"
elif (datatype == "STRING"):
maxlength = dfValue[column].apply(str).apply(len).max()
type = f"VARCHAR({maxlength})"
else:
type = "CLOB"
datatypes.append(type)
if (flag_asis == False):
if (isinstance(column,str) == False):
column = str(column)
identifier = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
column_name = column.strip().upper()
new_name = ""
for ch in column_name:
if (ch not in identifier):
new_name = new_name + "_"
else:
new_name = new_name + ch
new_name = new_name.lstrip('_').rstrip('_')
if (new_name == "" or new_name[0] not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
new_name = f'"{column}"'
else:
new_name = f'"{column}"'
sql.append(f" {new_name} {type}")
sql.append(")")
sqlcmd = ""
for i in range(0,len(sql)):
if (i > 0 and i < len(sql)-2):
comma = ","
else:
comma = ""
sqlcmd = "{}\n{}{}".format(sqlcmd,sql[i],comma)
if (keyword_create != "APPEND"):
print(sqlcmd)
ok = execSQL(hdbc,sqlcmd,quiet=False)
if (ok == False):
return NoDF, None
if (flag_withdata == True or keyword_create == "APPEND"):
autocommit = ibm_db.autocommit(hdbc)
ibm_db.autocommit(hdbc,False)
row_count = 0
insert_sql = ""
rows, cols = dfValue.shape
for row in range(0,rows):
insert_row = ""
for col in range(0, cols):
value = dfValue.iloc[row][col]
value = str(value)
if (value.upper() in ("NAN","<NA>","NAT")):
value = "NULL"
else:
addquotes_flag = False
if (datatypes[col] == "CLOB" or "VARCHAR" in datatypes[col]):
addquotes_flag = True
elif (datatypes[col] in ("TIME","DATE","TIMESTAMP")):
addquotes_flag = True
elif (datatypes[col] in ("INTEGER","INT","SMALLINT","BIGINT","DECFLOAT","FLOAT","BINARY","REAL")):
addquotes_flag = False
else:
addquotes_flag = True
if (addquotes_flag == True):
value = addquotes(value,True)
if (insert_row == ""):
insert_row = f"{value}"
else:
insert_row = f"{insert_row},{value}"
if (insert_sql == ""):
insert_sql = f"INSERT INTO {table} VALUES ({insert_row})"
else:
insert_sql = f"{insert_sql},({insert_row})"
row_count += 1
if (row_count % 1000 == 0 or row_count == limit):
try:
result = ibm_db.exec_immediate(hdbc, insert_sql) # Run it
except:
db2_error(False)
return NoDF, None
ibm_db.commit(hdbc)
print(f"\r{row_count} of {rows} rows inserted.",end="")
insert_sql = ""
if (row_count == limit):
break
if (insert_sql != ""):
try:
result = ibm_db.exec_immediate(hdbc, insert_sql) # Run it
except:
db2_error(False)
return NoDF, None
ibm_db.commit(hdbc)
ibm_db.autocommit(hdbc,autocommit)
print("\nInsert completed.")
return NoDF, None
def sqlParser(sqlin,local_ns):
sql_cmd = ""
encoded_sql = sqlin
firstCommand = "(?:^\s*)([a-zA-Z]+)(?:\s+.*|$)"
findFirst = re.match(firstCommand,sqlin)
if (findFirst == None): # We did not find a match so we just return the empty string
return sql_cmd, encoded_sql
cmd = findFirst.group(1)
sql_cmd = cmd.upper()
#
# Scan the input string looking for variables in the format :var. If no : is found just return.
# Var must be alpha+number+_ to be valid
#
if (':' not in sqlin): # A quick check to see if parameters are in here, but not fool-proof!
return sql_cmd, encoded_sql
inVar = False
inQuote = ""
varName = ""
encoded_sql = ""
STRING = 0
NUMBER = 1
LIST = 2
RAW = 3
PANDAS = 5
for ch in sqlin:
if (inVar == True): # We are collecting the name of a variable
if (ch.upper() in "@_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789[]"):
varName = varName + ch
continue
else:
if (varName == ""):
encoded_sql = encoded_sql + ":"
elif (varName[0] in ('[',']')):
encoded_sql = encoded_sql + ":" + varName
else:
if (ch == '.'): # If the variable name is stopped by a period, assume no quotes are used
flag_quotes = False
else:
flag_quotes = True
varValue, varType = getContents(varName,flag_quotes,local_ns)
if (varType != PANDAS and varValue == None):
encoded_sql = encoded_sql + ":" + varName
else:
if (varType == STRING):
encoded_sql = encoded_sql + varValue
elif (varType == NUMBER):
encoded_sql = encoded_sql + str(varValue)
elif (varType == RAW):
encoded_sql = encoded_sql + varValue
elif (varType == PANDAS):
insertsql = ""
coltypes = varValue.dtypes
rows, cols = varValue.shape
for row in range(0,rows):
insertrow = ""
for col in range(0, cols):
value = varValue.iloc[row][col]
if (coltypes[col] == "object"):
value = str(value)
value = addquotes(value,True)
else:
strvalue = str(value)
if ("NAN" in strvalue.upper()):
value = "NULL"
if (insertrow == ""):
insertrow = f"{value}"
else:
insertrow = f"{insertrow},{value}"
if (insertsql == ""):
insertsql = f"({insertrow})"
else:
insertsql = f"{insertsql},({insertrow})"
encoded_sql = encoded_sql + insertsql
elif (varType == LIST):
start = True
for v in varValue:
if (start == False):
encoded_sql = encoded_sql + ","
if (isinstance(v,int) == True): # Integer value
encoded_sql = encoded_sql + str(v)
elif (isinstance(v,float) == True):
encoded_sql = encoded_sql + str(v)
else:
flag_quotes = True
try:
if (v.find('0x') == 0): # Just guessing this is a hex value at beginning
encoded_sql = encoded_sql + v
else:
encoded_sql = encoded_sql + addquotes(v,flag_quotes) # String
except:
encoded_sql = encoded_sql + addquotes(str(v),flag_quotes)
start = False
encoded_sql = encoded_sql + ch
varName = ""
inVar = False
elif (inQuote != ""):
encoded_sql = encoded_sql + ch
if (ch == inQuote): inQuote = ""
elif (ch in ("'",'"')):
encoded_sql = encoded_sql + ch
inQuote = ch
elif (ch == ":"): # This might be a variable
varName = ""
inVar = True
else:
encoded_sql = encoded_sql + ch
if (inVar == True):
varValue, varType = getContents(varName,True,local_ns) # We assume the end of a line is quoted
if (varType != PANDAS and varValue == None):
encoded_sql = encoded_sql + ":" + varName
else:
if (varType == STRING):
encoded_sql = encoded_sql + varValue
elif (varType == RAW):
encoded_sql = encoded_sql + varValue
elif (varType == NUMBER):
encoded_sql = encoded_sql + str(varValue)
elif (varType == PANDAS):
insertsql = ""
coltypes = varValue.dtypes
rows, cols = varValue.shape
for row in range(0,rows):
insertrow = ""
for col in range(0, cols):
value = varValue.iloc[row][col]
if (coltypes[col] == "object"):
value = str(value)
value = addquotes(value,True)
else:
strvalue = str(value)
if ("NAN" in strvalue.upper()):
value = "NULL"
if (insertrow == ""):
insertrow = f"{value}"
else:
insertrow = f"{insertrow},{value}"
if (insertsql == ""):
insertsql = f"({insertrow})"
else:
insertsql = f"{insertsql},({insertrow})"
encoded_sql = encoded_sql + insertsql
elif (varType == LIST):
flag_quotes = True
start = True
for v in varValue:
if (start == False):
encoded_sql = encoded_sql + ","
if (isinstance(v,int) == True): # Integer value
encoded_sql = encoded_sql + str(v)
elif (isinstance(v,float) == True):
encoded_sql = encoded_sql + str(v)
else:
try:
if (v.find('0x') == 0): # Just guessing this is a hex value
encoded_sql = encoded_sql + v
else:
encoded_sql = encoded_sql + addquotes(v,flag_quotes) # String
except:
encoded_sql = encoded_sql + addquotes(str(v),flag_quotes)
start = False
return sql_cmd, encoded_sql
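# Illustrative example (assuming the notebook scope defines empno = 10):
#   sqlParser("SELECT * FROM employee WHERE empno = :empno", local_ns)
# would return ("SELECT", "SELECT * FROM employee WHERE empno = 10").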
def plotData(hdbi, sql):
try:
df = | pandas.read_sql(sql,hdbi) | pandas.read_sql |
# encoding: UTF-8
"""
An ATR-RSI combined trading strategy, suited to 1-minute and 5-minute bars on stock index futures.
Notes:
1. The author makes no guarantee of trading profit; the strategy code is for reference only.
2. This strategy requires talib; users who have not installed it should follow the tutorial at www.vnpy.org first.
3. After importing IF0000_1min.csv into MongoDB with ctaHistoryData.py, run this file directly to backtest the strategy.
"""
import talib
import numpy as np
import pandas as pd
from retry import retry
from vnpy.trader.app.ctaStrategy.ctaBase import ENGINETYPE_TRADING
from vnpy.trader.vtObject import VtBarData
from vnpy.trader.vtConstant import EMPTY_STRING
from vnpy.trader.app.ctaStrategy.ctaTemplate import CtaTemplate
########################################################################
class cn_wtiStrategy(CtaTemplate):
"""结合ATR和RSI指标的一个分钟线交易策略"""
className = 'cn_wtiStrategy'
author = u'用Python的交易员'
# Parameter list: names of the strategy parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'atrLength',
'fastMaLength',
'slowMaLength',
'rsiLength',
'rsiEntry',
'trailingPercent']
# Variable list: names of the strategy variables
varList = ['inited',
'trading',
'pos',
'atrValue',
'atrMa',
'rsiValue',
'rsiBuy',
'rsiSell']
def __init__(self, ctaEngine, setting):
self.contract_size = 0
self.slowMaLength = 0
self.fastMaLength = 0
self.margin_percent = 0
self.stop_profit = 0
"""Constructor"""
super(cn_wtiStrategy, self).__init__(ctaEngine, setting)
# Note: mutable object attributes of a strategy class (typically list, dict, etc.) must be
# re-created when the strategy is initialized, otherwise data may be shared between multiple
# strategy instances, which risks subtle strategy-logic errors. Declaring these mutable
# attributes in the class body is optional (they could all go under __init__); it is done
# mainly to make the strategy easier to read (more a matter of coding habit).
self.last_entry_price = 0
self.pos = 0
self.inited = False
self.trading = False
self.init_data_loaded = False
# Strategy variables
self.bar = None # bar (K-line) object
self.barMinute = EMPTY_STRING # current minute of the bar being built
self.bufferSize = 100 # size of the data buffer to keep
self.bufferCount = 0 # number of bars cached so far
self.highArray = np.zeros(self.bufferSize) # array of bar high prices
self.lowArray = np.zeros(self.bufferSize) # array of bar low prices
self.closeArray = np.zeros(self.bufferSize) # array of bar close prices
self.atrCount = 0 # number of ATR values cached so far
self.atrArray = np.zeros(self.bufferSize) # array of ATR indicator values
self.atrValue = 0 # latest ATR value
self.atrMa = 0 # moving average of the ATR
self.no_data = 0
self.rsiValue = 0 # RSI indicator value
self.rsiBuy = 0 # RSI threshold for opening a long position
self.rsiSell = 0 # RSI threshold for opening a short position
self.intraTradeHigh = 0 # highest price while holding a position, for the trailing stop
self.intraTradeLow = 0 # lowest price while holding a position, for the trailing stop
# self.orderList = [] # list that stores order IDs
self.initCapital = 10000.0
self.initDays = 2
self.barHour = -1
self.stop = 0 # used to check whether the previous trade was a take-profit exit
self.closeList = []
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' % self.name)
# 初始化RSI入场阈值
# self.rsiBuy = 50 + self.rsiEntry
# self.rsiSell = 50 - self.rsiEntry
# Load historical data and initialize the strategy values by replaying it
# position = self.ctaEngine.query_position()
# print('qry postion: {0}'.format(position))
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.init_data_loaded = True
self.putEvent()
def onStart(self):
"""启动策略(必须由用户继承实现)"""
# self.ctaEngine.test()
self.writeCtaLog(u'%s策略启动' % self.name)
self.putEvent()
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' % self.name)
self.putEvent()
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 计算K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.onBar(self.bar)
bar = VtBarData()
bar.vtSymbol = 'cn_wti.cnus'
bar.symbol = 'cn_wti'
bar.exchange = 'cnus'
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
bar.datetime = tick.datetime # the bar time is set to the time of its first tick
self.bar = bar # written this way to save one attribute lookup and speed things up
self.barMinute = tickMinute # update the current minute
else: # otherwise keep accumulating into the current bar
bar = self.bar # same shortcut to speed things up
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
def onBar(self, bar):
# print '--------------bar.datetime-----------------', bar.datetime
# if self.barHour != int(bar.datetime.day):
# self.barHour = int(bar.datetime.day)
# self.bar = VtBarData()
# self.bar.open = bar.open
# self.bar.high = bar.high
# self.bar.low = bar.low
# self.bar.close = bar.close
# self.bar.datetime = bar.datetime
# k_line_new = True
# else:
# self.bar.high = max(bar.high, self.bar.high)
# self.bar.low = min(bar.low, self.bar.low)
# self.bar.close = bar.close
# k_line_new = False
# # return
#
# # Save the bar data
# if k_line_new:
# if bar.close == self.closeArray[-1]:
# self.no_data += 1
# else:
# self.no_data = 0
#
# if self.no_data > 10: # filter out when too many periods carry no new data
# return
# self.closeArray[0:self.bufferSize - 1] = self.closeArray[1:self.bufferSize]
# self.bufferCount += 1
#
# self.closeArray[-1] = bar.close
# if not self.init_data_loaded or self.bufferCount < self.slowMaLength:
# return
# slow_arr = talib.MA(self.closeArray, self.slowMaLength)
self.closeList.append(bar.close)
# print(self.closeArray)
if self.bufferCount < 120:
self.bufferCount += 1
return
if len(self.closeList) > self.fastMaLength:
self.closeList = self.closeList[-120:]
df = | pd.Series(self.closeList) | pandas.Series |
import numpy as np
import pandas as pd
import unittest
from featurewiz._timeseries import _percent_rank, pctrank, apply
class TimeSeriesTestCase(unittest.TestCase):
def test__percent_rank(self):
self.assertEqual(100, _percent_rank([1, 2, 3, 4, 5, 6, 7, 8]))
self.assertEqual(0, _percent_rank([1, 2, 3, 4, 5, 6, 7, 0]))
self.assertEqual(4/7*100, _percent_rank([1, 2, 3, 4, 5, 6, 7, 4]))
self.assertTrue(np.isclose(np.nan, _percent_rank([1, 2, 3, 4, 5, 6, 7, np.nan]), equal_nan=True))
def test_pctrank_category(self):
a = pd.Series([1, 2, 3, 4, 5, 6, 7, 2, 5])
cat = pd.Series([1, 0, 1, 0, 1, 0, 1, 0, 1])
res = pctrank(a, 4, category=cat)
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 100, 1/3*100, 2/3*100]),
res.values, equal_nan=True))
def test_pctrank(self):
res = pctrank(pd.Series([1, 2, 3, 4, 5, 6, 7, 0]), 3)
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 100, 100, 100, 100, 0]), res.values, equal_nan=True))
def test_pctrank_categorize(self):
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 2]),
pctrank(pd.Series([1, 2, 3, 4]), 3, categorize_by=3),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 1]),
pctrank(pd.Series([1, 2, 3, 2]), 3, categorize_by=3),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 0]),
pctrank(pd.Series([1, 2, 3, 0]), 3, categorize_by=3),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 0]),
pctrank(pd.Series([1, 2, 3, 0]), 3, categorize_by=[0, 33, 100]),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 1]),
pctrank(pd.Series([1, 2, 3, 2.5]), 3, categorize_by=[0, 33, 100]),
equal_nan=True))
def test_apply(self):
a = pd.Series([1, 2, 3, 4, 5, 6, 7])
res_exp = pd.Series([np.nan, np.nan, 6, 9, 12, 15, 18])
self.assertTrue(np.allclose(res_exp, apply(a, 3, np.sum), equal_nan=True))
def test_apply_categorical(self):
a = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
cat = pd.Series([1, 0, 1, 0, 1, 0, 1, 0, 1])
res_exp = pd.Series([np.nan, np.nan, np.nan, np.nan,
1+3+5,
2+4+6,
3+5+7,
4+6+8,
5+7+9,
])
self.assertTrue(np.allclose(res_exp, apply(a, 3, np.sum, category=cat), equal_nan=True))
def test_apply_categorical_return_as_cat(self):
a = | pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]) | pandas.Series |
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import warnings
warnings.filterwarnings('ignore')
# Importing the training set
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
dataset_train['Date'] = pd.to_datetime(dataset_train['Date'], infer_datetime_format=True)
time_new = dataset_train['Date'].iloc[0]
dataset_train['Date'] = dataset_train['Date'].apply(lambda time_new: time_new.date())
plt.figure(figsize=(14, 5), dpi=100)
plt.plot(dataset_train['Date'], dataset_train['Open'], label='Google_Stock_Price_Train')
plt.vlines(datetime.date(2016,4, 20), 0, 800, linestyles='--', colors='gray', label='Train/Test data cut-off')
plt.xlabel('Date')
plt.ylabel('USD')
plt.title('Google stock price')
plt.legend()
plt.show()
def get_technical_indicators(feature):
# Create 7 and 21 days Moving Average
dataset_train['ma7'] = dataset_train[feature].rolling(window=7).mean()
dataset_train['ma21'] = dataset_train[feature].rolling(window=21).mean()
# Create MACD
dataset_train['26ema'] = | pd.DataFrame.ewm(dataset_train[feature], span=26) | pandas.DataFrame.ewm |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import datetime
from reda.importers.eit_version_2010 import _average_swapped_current_injections
def _extract_adc_data(mat, **kwargs):
"""Extract adc-channel related data (i.e., data that is captured for all 48
channels of the 40-channel medusa system)
"""
md = mat['MD'].squeeze()
frequencies = mat['MP']['fm'].take(0)
# it seems that there exist different file formats under this same official
# version.
if md['fm'].size == frequencies.size:
use_v = 0
else:
use_v = 1
# print('@@@')
# import IPython
# IPython.embed()
# exit()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
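# LabVIEW stores timestamps as seconds since 1904-01-01; a raw value of roughly 3.7e9 seconds,
# for example, maps to a date in 2021 (illustrative only).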
dfl = []
# loop over frequencies
for f_id in range(0, frequencies.size):
frequency = frequencies[f_id]
if use_v == 0:
def get_field(key):
return md[key][f_id]
elif use_v == 1:
def get_field(key):
indices = np.where(
md['fm'].take(0) == frequencies[f_id])
return md[key].take(0)[indices]
# def get_field(key):
# indices = np.where(md['fm'].take(f_id) == frequencies[f_id])
# return md[key].take(f_id)[indices]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in get_field('Time')]
).T.squeeze()
column_names = ['ch{:02}'.format(i) for i in range(48)]
ab = get_field('cni')
index_pairs = [
(channel, 'Ug3_{}'.format(i)) for channel in column_names
for i in range(3)
]
Ug3 = get_field('Ug3')
ug3_reshaped = Ug3.reshape([Ug3.shape[0], Ug3.shape[1] * 3])
df = pd.DataFrame(
ug3_reshaped,
index=pd.MultiIndex.from_arrays(
[
ab[:, 0],
ab[:, 1],
np.ones(ab.shape[0]) * frequency,
timestamp
],
names=['a', 'b', 'frequency', 'datetime']
),
columns=pd.MultiIndex.from_tuples(
index_pairs, names=['channel', 'parameter'])
)
dfl.append(df)
dfl = pd.concat(dfl)
dfl.sort_index(axis=0, inplace=True)
dfl.sort_index(axis=1, inplace=True)
return dfl
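# The frame returned above is indexed by (a, b, frequency, datetime) and carries a
# (channel, parameter) column MultiIndex, i.e. the Ug3_0..Ug3_2 readings for each of the 48 channels.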
def _extract_md(mat, **kwargs):
"""Note that the md struct for this version is structured differently than
the others...
"""
md = mat['MD'].squeeze()
frequencies = mat['MP']['fm'].take(0)
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, frequencies.size):
def get_field(key):
indices = np.where(
md['fm'].take(0) == frequencies[f_id])
return md[key].take(0)[indices]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in get_field('Time')]
).T.squeeze()
df = pd.DataFrame()
df['datetime'] = timestamp
ab = get_field('cni')
df['a'] = ab[:, 0]
df['b'] = ab[:, 1]
df['U0'] = get_field('U0')
Is3 = get_field('Is3')
df['Is1'] = Is3[:, 0]
df['Is2'] = Is3[:, 1]
df['Is3'] = Is3[:, 2]
df['Is'] = np.mean(Is3, axis=1)
# [mA]
df['Iab'] = df['Is'] * 1000
Il3 = get_field('Il3')
df['Il1'] = Il3[:, 0]
df['Il2'] = Il3[:, 1]
df['Il3'] = Il3[:, 2]
df['Il'] = np.mean(Il3, axis=1)
# [mA]
df['Ileakage'] = df['Il'] * 1000
df['frequency'] = frequencies[f_id]
dfl.append(df)
df = pd.concat(dfl)
return df
def _extract_emd(mat, **kwargs):
emd = mat['EMD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, emd.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = emd[f_id]
# fdata_md = md[f_id]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['ni'],
fdata['nu'][:, np.newaxis],
fdata['Z3'],
fdata['Is3'],
fdata['Il3'],
fdata['Zg3'],
)),
)
df.columns = (
'datetime',
'a',
'b',
'p',
'Z1',
'Z2',
'Z3',
'Is1',
'Is2',
'Is3',
'Il1',
'Il2',
'Il3',
'Zg1',
'Zg2',
'Zg3',
)
df['frequency'] = np.ones(df.shape[0]) * fdata['fm']
# cast to correct type
df['datetime'] = pd.to_datetime(df['datetime'])
df['a'] = df['a'].astype(int)
df['b'] = df['b'].astype(int)
df['p'] = df['p'].astype(int)
df['Z1'] = df['Z1'].astype(complex)
df['Z2'] = df['Z2'].astype(complex)
df['Z3'] = df['Z3'].astype(complex)
df['Zg1'] = df['Zg1'].astype(complex)
df['Zg2'] = df['Zg2'].astype(complex)
df['Zg3'] = df['Zg3'].astype(complex)
df['Is1'] = df['Is1'].astype(complex)
df['Is2'] = df['Is2'].astype(complex)
df['Is3'] = df['Is3'].astype(complex)
df['Il1'] = df['Il1'].astype(complex)
df['Il2'] = df['Il2'].astype(complex)
df['Il3'] = df['Il3'].astype(complex)
dfl.append(df)
if len(dfl) == 0:
return None
df = | pd.concat(dfl) | pandas.concat |
""" Indexer for raman data files """
import hashlib
from typing import List
# get_directory_paths_for_run_mode
# from .index_selection import index_selection
import logging
import sys
from pathlib import Path
import pandas as pd
from raman_fitting.config.filepath_helper import get_directory_paths_for_run_mode
# parse_filepath_to_sid_and_pos
from raman_fitting.indexing.filename_parser import index_dtypes_collection
from raman_fitting.indexing.filename_parser_collector import make_collection
# from raman_fitting.utils._dev_sqlite_db import df_to_db_sqlalchemy
# from .. import __package_name__
logger = logging.getLogger(__name__)
logger.propagate = False
__all__ = ["MakeRamanFilesIndex"]
#%%
class MakeRamanFilesIndex:
"""
Finds the RAMAN files in the data folder from config, collects the list of files it finds,
and creates an overview on the attribute .index.
"""
# index_file_sample_cols = {'FileStem': 'string',
# 'SampleID': 'string',
# 'SamplePos': 'int64',
# 'SampleGroup': 'string',
# 'FilePath': 'string')
# index_file_stat_cols = ('FileCreationDate' , 'FileCreation','FileModDate', 'FileMod', 'FileHash')
# INDEX_FILE_NAME = 'index.csv'
debug = False
table_name = "ramanfiles"
# RESULTS_DIR = config.RESULTS_DIR,
# DATASET_DIR = config.DATASET_DIR,
# INDEX_FILE = config.INDEX_FILE,
def __init__(
self, force_reload=True, run_mode="normal", dataset_dirs=None, **kwargs
):
self._cqnm = self.__class__.__qualname__
self._kwargs = kwargs
self.force_reload = force_reload
self.run_mode = run_mode
if not dataset_dirs:
dataset_dirs = get_directory_paths_for_run_mode(run_mode=self.run_mode)
self.dataset_dirs = dataset_dirs
for k, val in self.dataset_dirs.items():
if isinstance(val, Path):
setattr(self, k, val)
# if val.is_dir() or val.is_file():
self.raman_files = self.find_files(data_dir=self.DATASET_DIR)
self.index = pd.DataFrame()
self._error_parse_filenames = []
if "normal" in run_mode and not self.debug and not self.force_reload:
self.index = self.load_index()
else:
self.index = self.reload_index()
self.index_selection = self.index_selection(self.index, **self._kwargs)
@staticmethod
def find_files(data_dir: Path = Path()) -> List:
"""
Creates a list of all raman type files found in the DATASET_DIR which are used in the creation of the index.
"""
if not isinstance(data_dir, Path):
logger.warning(f"find_files warning: arg is not Path.")
return []
raman_files_raw = []
if data_dir.exists():
RFs = data_dir.rglob("*txt")
if RFs:
raman_files_raw = [
i
for i in RFs
if not "fail" in i.stem and not "Labjournal" in str(i)
]
logger.info(
f"find_files {len(raman_files_raw)} files were found in the chosen data dir:\n\t{data_dir}"
)
else:
logger.warning(
f"find_files warning: the chose data file dir was empty.\n{data_dir}\mPlease choose another directory which contains your data files."
)
else:
logger.warning(
f"find_files warning: the chosen data file dir does not exists.\n{data_dir}\nPlease choose an existing directory which contains your data files."
)
return raman_files_raw
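# Only *.txt files are picked up, and anything with "fail" in the stem or "Labjournal" in the
# path is skipped; an empty list is returned when the directory is missing or empty.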
def make_index(self):
"""loops over the files and scrapes the index data from each file"""
raman_files = self.raman_files
pp_collection = make_collection(raman_files, **self._kwargs)
index = | pd.DataFrame([i.parse_result for i in pp_collection]) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import (
SparseArray,
SparseDtype,
)
arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
arr = SparseArray(arr_data)
class TestGetitem:
def test_getitem(self):
dense = arr.to_dense()
for i in range(len(arr)):
tm.assert_almost_equal(arr[i], dense[i])
tm.assert_almost_equal(arr[-i], dense[-i])
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"slc",
[
np.s_[:],
np.s_[1:10],
np.s_[1:100],
np.s_[10:1],
np.s_[:-3],
np.s_[-5:-4],
np.s_[:-12],
np.s_[-12:],
np.s_[2:],
np.s_[2::3],
np.s_[::2],
np.s_[::-1],
np.s_[::-2],
np.s_[1:6:2],
np.s_[:-6:-2],
],
)
@pytest.mark.parametrize(
"as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []]
)
def test_getslice(self, slc, as_dense):
as_dense = np.array(as_dense)
arr = SparseArray(as_dense)
result = arr[slc]
expected = SparseArray(as_dense[slc])
tm.assert_sp_array_equal(result, expected)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:], fill_value=0)
tm.assert_sp_array_equal(res, exp)
msg = "too many indices for array"
with pytest.raises(IndexError, match=msg):
sparse[4:, :]
with pytest.raises(IndexError, match=msg):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
def test_getitem_bool_sparse_array(self):
# GH 23122
spar_bool = SparseArray([False, True] * 5, dtype=np.bool8, fill_value=True)
exp = SparseArray([np.nan, 2, np.nan, 5, 6])
tm.assert_sp_array_equal(arr[spar_bool], exp)
spar_bool = ~spar_bool
res = arr[spar_bool]
exp = SparseArray([np.nan, 1, 3, 4, np.nan])
tm.assert_sp_array_equal(res, exp)
spar_bool = SparseArray(
[False, True, np.nan] * 3, dtype=np.bool8, fill_value=np.nan
)
res = arr[spar_bool]
exp = SparseArray([np.nan, 3, 5])
tm.assert_sp_array_equal(res, exp)
def test_getitem_bool_sparse_array_as_comparison(self):
# GH 45110
arr = SparseArray([1, 2, 3, 4, np.nan, np.nan], fill_value=np.nan)
res = arr[arr > 2]
exp = SparseArray([3.0, 4.0], fill_value=np.nan)
tm.assert_sp_array_equal(res, exp)
def test_get_item(self):
zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
assert np.isnan(arr[1])
assert arr[2] == 1
assert arr[7] == 5
assert zarr[0] == 0
assert zarr[2] == 1
assert zarr[7] == 5
errmsg = "must be an integer between -10 and 10"
with pytest.raises(IndexError, match=errmsg):
arr[11]
with pytest.raises(IndexError, match=errmsg):
arr[-11]
assert arr[-1] == arr[len(arr) - 1]
class TestSetitem:
def test_set_item(self):
arr = SparseArray(arr_data).copy()
def setitem():
arr[5] = 3
def setslice():
arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
class TestTake:
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
arr.take(2)
def test_take(self):
exp = SparseArray(np.take(arr_data, [2, 3]))
tm.assert_sp_array_equal(arr.take([2, 3]), exp)
exp = SparseArray(np.take(arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(arr.take([0, 1, 2]), exp)
def test_take_all_empty(self):
a = pd.array([0, 0], dtype=SparseDtype("int64"))
result = a.take([0, 1], allow_fill=True, fill_value=np.nan)
tm.assert_sp_array_equal(a, result)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(arr_data, [-1]))
tm.assert_sp_array_equal(arr.take([-1]), exp)
exp = SparseArray(np.take(arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(arr.take([-4, -3, -2]), exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# TODO: actionable?
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
msg = "out of bounds value in 'indices'"
with pytest.raises(IndexError, match=msg):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError, match=msg):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError, match=msg):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH#12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# TODO: actionable?
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'."
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
msg = "out of bounds value in 'indices'"
with pytest.raises(IndexError, match=msg):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError, match=msg):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError, match=msg):
sparse.take(np.array([1, 5]), fill_value=True)
@pytest.mark.parametrize("kind", ["block", "integer"])
def test_take_filling_all_nan(self, kind):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan], kind=kind)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind=kind)
| tm.assert_sp_array_equal(result, expected) | pandas._testing.assert_sp_array_equal |
import logging
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
import sentry_sdk
from solarforecastarbiter import utils
def _make_aggobs(obsid, ef=pd.Timestamp('20191001T1100Z'),
eu=None, oda=None):
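    # build a minimal aggregate-observation mapping as consumed by utils.compute_aggregate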
return {
'observation_id': obsid,
'effective_from': ef,
'effective_until': eu,
'observation_deleted_at': oda
}
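# 10-hour hourly index shared by the compute_aggregate tests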
nindex = pd.date_range(start='20191004T0000Z',
freq='1h', periods=10)
@pytest.fixture()
def ids():
return ['f2844284-ea0a-11e9-a7da-f4939feddd82',
'f3e310ba-ea0a-11e9-a7da-f4939feddd82',
'09ed7cf6-ea0b-11e9-a7da-f4939feddd82',
'0fe9f2ba-ea0b-11e9-a7da-f4939feddd82',
'67ea9200-ea0e-11e9-832b-f4939feddd82']
@pytest.fixture()
def aggobs(ids):
return tuple([
_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191004T0501Z')),
_make_aggobs(ids[2], eu=pd.Timestamp('20191004T0400Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0700Z'),
eu=pd.Timestamp('20191004T0800Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0801Z')),
_make_aggobs(ids[3], oda=pd.Timestamp('20191005T0000Z')),
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191003T0000Z'))
])
def test_compute_aggregate(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_missing_from_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
aggobs = list(aggobs[:-2]) + [
_make_aggobs('09ed7cf6-ea0b-11e9-a7da-f4939fed889')]
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_empty_data(aggobs, ids):
data = {}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:2], nindex)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_missing_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
data[ids[-1]] = pd.DataFrame({'value': [1] * 8, 'quality_flag': [0] * 8},
index=nindex[:-2])
aggobs = list(aggobs[:-2]) + [_make_aggobs(ids[-1])]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series(
[3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, None, None],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_deleted_not_removed(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids}
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_not_removed_yet(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
    # with the last aggobs, compute_aggregate would try to use data from before
    # effective_until, but that observation was deleted, so an error is raised
aggobs = list(aggobs[:-2]) + [
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191004T0700Z'))]
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_but_removed_before(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# aggobs[-1] properly removed
aggobs = list(aggobs[:-2]) + [aggobs[-1]]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}))
def test_compute_aggregate_mean(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'mean', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([1.0] * 10, index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_no_overlap(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3], 'quality_flag': [2, 10, 338]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0230Z'])),
ids[1]: pd.DataFrame(
{'value': [3, 2, 1], 'quality_flag': [9, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0200Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'median', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, None, 2.5, None],
'quality_flag': [2, 10, 9, 338 | 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_before_effective(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3, 0, 0], 'quality_flag': [2, 10, 338, 0, 0]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z'])),
ids[1]: pd.DataFrame(
{'value': [None, 2.0, 1.0], 'quality_flag': [0, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0201Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'max', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, 3.0, 2.0, 1.0],
'quality_flag': [2, 10, 338, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_bad_cols():
data = {'a': pd.DataFrame([0], index=pd.DatetimeIndex(
['20191001T1200Z']))}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC',
'mean', [_make_aggobs('a')])
def test_compute_aggregate_index_provided(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
the_index = nindex.copy()[::2]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], the_index)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 3.0],
index=the_index),
'quality_flag': pd.Series([0]*5, index=the_index)})
)
@pytest.mark.parametrize('dfindex,missing_idx', [
(pd.date_range(start='20191004T0000Z', freq='1h', periods=11), -1),
(pd.date_range(start='20191003T2300Z', freq='1h', periods=11), 0),
])
def test_compute_aggregate_missing_values_with_index(
aggobs, ids, dfindex, missing_idx):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], dfindex)
assert pd.isnull(agg['value'][missing_idx])
def test_compute_aggregate_partial_missing_values_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
data[ids[2]] = pd.DataFrame({'value': [1] * 5, 'quality_flag': [0] * 5},
index=nindex[5:])
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], nindex)
expected = pd.DataFrame(
{'value': pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}
)
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_obs_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
aggobs[:-2], nindex)
def test_compute_aggregate_out_of_effective(aggobs, ids):
limited_aggobs = [aggob
for aggob in aggobs
if aggob['effective_until'] is not None]
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
max_time = pd.Series([o['effective_until'] for o in limited_aggobs]).max()
ooe_index = pd.date_range(
max_time + pd.Timedelta('1H'),
max_time + | pd.Timedelta('25H') | pandas.Timedelta |
## Machine learning testing script
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import posanal as usrpos
import random
plot_ML = True
data_init = | pd.read_csv('results_exp1-5.csv') | pandas.read_csv |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import unittest
import pandas as pd
import numpy as np
from math import sqrt
import numba
import sdc
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs,
count_parfor_OneD_Vars, count_array_OneD_Vars,
dist_IR_contains)
from datetime import datetime
import random
class TestDate(unittest.TestCase):
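    # each test compares an sdc.jit-compiled implementation against the plain pandas/numpy result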
@unittest.skip("needs support for boxing/unboxing DatetimeIndex")
def test_datetime_index_in(self):
def test_impl(dti):
return dti
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
dti = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(dti).values, test_impl(dti).values)
def test_datetime_index(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_kw(self):
def test_impl(df):
return pd.DatetimeIndex(data=df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_arg(self):
def test_impl(A):
return A
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_datetime_getitem(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
self.assertEqual(hpat_func(A), test_impl(A))
def test_ts_map(self):
def test_impl(A):
return A.map(lambda x: x.hour)
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date(self):
def test_impl(A):
return A.map(lambda x: x.date())[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date2(self):
def test_impl(df):
return df.apply(lambda row: row.dt_ind.date(), axis=1)[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_ts_map_date_set(self):
def test_impl(df):
df['hpat_date'] = df.dt_ind.map(lambda x: x.date())
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
hpat_func(df)
df['pd_date'] = df.dt_ind.map(lambda x: x.date())
np.testing.assert_array_equal(df['hpat_date'], df['pd_date'])
def test_date_series_unbox(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series().map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_date_series_unbox2(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_datetime_index_set(self):
def test_impl(df):
df['sdc'] = pd.DatetimeIndex(df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
hpat_func(df)
df['std'] = pd.DatetimeIndex(df['str_date'])
allequal = (df['std'].equals(df['sdc']))
self.assertTrue(allequal)
def test_timestamp(self):
def test_impl():
dt = datetime(2017, 4, 26)
ts = pd.Timestamp(dt)
return ts.day + ts.hour + ts.microsecond + ts.month + ts.nanosecond + ts.second + ts.year
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_extract(self):
def test_impl(s):
return s.month
hpat_func = sdc.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
month = hpat_func(ts)
self.assertEqual(month, 4)
def test_timestamp_date(self):
def test_impl(s):
return s.date()
hpat_func = sdc.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
self.assertEqual(hpat_func(ts), test_impl(ts))
def test_datetimeindex_str_comp(self):
def test_impl(df):
return (df.A >= '2011-10-23').values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = sdc.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetimeindex_str_comp2(self):
def test_impl(df):
return ('2011-10-23' <= df.A).values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = sdc.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_df(self):
def test_impl(df):
df = pd.DataFrame({'A': pd.DatetimeIndex(df['str_date'])})
return df.A
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_date(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).date
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_max(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).max()
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
self.assertEqual(hpat_func(df), test_impl(df))
def test_datetime_index_min(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).min()
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
self.assertEqual(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_days(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.days
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_seconds(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.seconds
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_microseconds(self):
def test_impl(df):
s = | pd.DatetimeIndex(df['str_date']) | pandas.DatetimeIndex |
import logging
from temporal_granularity.src.metrics.metrics import Metrics
from pandas.util.testing import assert_frame_equal
import pandas as pd
import sys
from pathlib import Path
project_dir = Path("__file__").resolve().parents[1]
sys.path.insert(0, '{}/temporal_granularity/'.format(project_dir))
logging.basicConfig(level=logging.DEBUG)
class Test_Metrics:
def test_all_nrmse(self):
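        # reference capacity-factor data against which the NRMSE metrics are computed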
original_solar = | pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]}) | pandas.DataFrame |
"""Tests for importing and exporting data to SBDF files."""
import datetime
import decimal
import os
import unittest
from time import perf_counter
import pandas
from spotfire import fast_sbdf, sbdf
def fast_slow_parity(filename):
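    # import the same SBDF file with both readers, print timings, and require the fast
    # reader to be quicker while producing identical data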
start = perf_counter()
slow_df = sbdf.import_data(filename)
mid = perf_counter()
fast_df = fast_sbdf.import_data(filename)
end = perf_counter()
t1 = mid - start
t2 = end - mid
print(f"Slow read took {t1:.4f}s, fast read took {t2:.4f}s")
with pandas.option_context("display.max_columns", 30, "display.expand_frame_repr", False):
print(slow_df.head(15))
print(fast_df.head(15))
print(slow_df.dtypes)
print(fast_df.dtypes)
print()
print()
print(slow_df.eq(fast_df).all())
assert t2 < t1
equality_by_column = slow_df.eq(fast_df).all()
assert equality_by_column.all()
return slow_df
class SbdfTest(unittest.TestCase):
"""Unit tests for public functions in 'spotfire.sbdf' module."""
def test_read_0(self):
"""Reading simple SBDF files should work."""
dataframe = fast_slow_parity("%s/files/sbdf/0.sbdf" % os.path.dirname(__file__))
self.assertEqual(dataframe.shape, (0, 12))
def verify(dict_, pre, post):
"""Check all metadata entries for a given table/column."""
self.assertEqual(dict_["%sMetaBoolean%s" % (pre, post)][0], True)
self.assertEqual(dict_["%sMetaInteger%s" % (pre, post)][0], 3)
self.assertEqual(dict_["%sMetaLong%s" % (pre, post)][0], 2)
self.assertAlmostEqual(dict_["%sMetaFloat%s" % (pre, post)][0], 0.333333343267441)
self.assertEqual(dict_["%sMetaDouble%s" % (pre, post)][0], 3.14)
self.assertEqual(dict_["%sMetaDateTime%s" % (pre, post)][0], datetime.datetime(1583, 1, 1))
self.assertEqual(dict_["%sMetaDate%s" % (pre, post)][0], datetime.date(1583, 1, 1))
self.assertEqual(dict_["%sMetaTime%s" % (pre, post)][0], datetime.time(0, 0, 33))
self.assertEqual(dict_["%sMetaTimeSpan%s" % (pre, post)][0], datetime.timedelta(0, 12, 300000))
self.assertEqual(dict_["%sMetaString%s" % (pre, post)][0], "The")
self.assertEqual(dict_["%sMetaDecimal%s" % (pre, post)][0], decimal.Decimal('33.4455'))
self.assertEqual(dict_["%sMetaBinary%s" % (pre, post)][0], b"\x01")
# Check table metadata
verify(dataframe.spotfire_table_metadata, "SbdfTest.Table", "")
# Check column metadata
verify(dataframe["Boolean"].spotfire_column_metadata, "SbdfTest.Column", "0")
verify(dataframe["Integer"].spotfire_column_metadata, "SbdfTest.Column", "1")
verify(dataframe["Long"].spotfire_column_metadata, "SbdfTest.Column", "2")
verify(dataframe["Float"].spotfire_column_metadata, "SbdfTest.Column", "3")
verify(dataframe["Double"].spotfire_column_metadata, "SbdfTest.Column", "4")
verify(dataframe["DateTime"].spotfire_column_metadata, "SbdfTest.Column", "5")
verify(dataframe["Date"].spotfire_column_metadata, "SbdfTest.Column", "6")
verify(dataframe["Time"].spotfire_column_metadata, "SbdfTest.Column", "7")
verify(dataframe["TimeSpan"].spotfire_column_metadata, "SbdfTest.Column", "8")
verify(dataframe["String"].spotfire_column_metadata, "SbdfTest.Column", "9")
verify(dataframe["Decimal"].spotfire_column_metadata, "SbdfTest.Column", "10")
verify(dataframe["Binary"].spotfire_column_metadata, "SbdfTest.Column", "11")
def test_read_1(self):
"""Reading simple SBDF files should work."""
dataframe = fast_slow_parity("%s/files/sbdf/1.sbdf" % os.path.dirname(__file__))
self.assertEqual(dataframe.shape, (1, 12))
self.assertEqual(dataframe.at[0, "Boolean"], False)
self.assertEqual(dataframe.at[0, "Integer"], 69)
self.assertTrue(pandas.isnull(dataframe.at[0, "Long"]))
self.assertEqual(dataframe.at[0, "Float"], 12.)
self.assertEqual(dataframe.at[0, "Double"], 116.18)
self.assertTrue( | pandas.isnull(dataframe.at[0, "DateTime"]) | pandas.isnull |
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import random
from datetime import datetime, timedelta
import time
import re
import requests
from unit import *
# from rule import *
base_path_1 = "../dataset/"
base_path_2 = "../dataset/tmp/"
base_path_3 = "../output/"
def ensemble(file1, file2, file3, a):
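    # weighted blend of two submission files: file3 = a * file1 + (1 - a) * file2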
df1 = pd.read_csv(file1, sep=',')
df1.index = df1['test_id']
df1 = df1.drop(["test_id"], axis=1)
df2 = pd.read_csv(file2, sep=',')
df2.index = df2['test_id']
df2 = df2.drop(["test_id"], axis=1)
# df2 = df2.sort_index()
df3 = df1 * a + df2 * (1 - a)
df3.to_csv(file3, index=True, sep=',')
def ensemble_three_file(file1, file2, file3, file4, a, b, c):
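    # weighted blend of three submission files: file4 = a * file1 + b * file2 + c * file3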
df1 = pd.read_csv(file1, sep=',')
df1.index = df1['test_id']
df1 = df1.drop(["test_id"], axis=1)
df2 = pd.read_csv(file2, sep=',')
df2.index = df2['test_id']
df2 = df2.drop(["test_id"], axis=1)
df3 = pd.read_csv(file3, sep=',')
df3.index = df3['test_id']
df3 = df3.drop(["test_id"], axis=1)
df4 = df1 * a + df2 * b + df3 * c
df4.to_csv(file4, index=True, sep=',')
#from model import test
'''
type_="0301-0531_0801-0410" , feature_first=True
type_="0301-0531_0801-0410" , feature_first=False
type_="2017_0101-2018_0410_less" , feature_first=False
ans_file = base_path_3 + "test/" + end_day + "-xgboost_weather" + type + "_" + str(feature_first) + ".csv"
'''
def cal_ensemble_best_xgboost():
score = np.zeros(6)
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file2 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file4 = base_path_3 + end_day + "xgboost_mean_ensemble.csv"
file5 = base_path_3 + end_day + "xgboost_median_ensemble.csv"
ensemble_median(file1, file2, file3, file6, file4, file5)
# ensemble_mean_3(file1, file2, file3, file4, file5)
score1, _, _ = test(end_day, file1)
score2, _, _ = test(end_day, file2)
score3, _, _ = test(end_day, file3)
score6, _, _ = test(end_day, file6)
score4, _, _ = test(end_day, file4)
score5, _, _ = test(end_day, file5)
score_now = np.array([score1, score2, score3, score6, score4, score5])
score += score_now
print ("score: ", score_now)
num += 1.0
avg_score = score / num
print ("avg_score: ", avg_score)
# a = 0.3
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# True) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file4 = base_path_3 + "xgboost_ensemble.csv"
# ensemble(file2, file3, file4, a)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# best_params = [0.0, 0.0, 0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# for k in range(11):
# b = k * 1.0 / 10.0
# if a + b > 1.0:
# continue
# c = 1 - a - b
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# True) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file4 = base_path_3 + "xgboost_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a, b, c]
# best_score = avg_score
# print best_params
'''
get_test(type="0301-0531_0801-0410", feature_first=False)
get_test(type="0301-0531_0801-0410", feature_first=True)
get_test(type="2017_0101-2018_0410", feature_first=True)
get_test(type="2017_0101-2018_0410_less", feature_first=False)
ans_file = base_path_3 + "test/" + end_day + "-ext_with_weather_" + type + "_" + str(feature_first) + ".csv"
'''
def cal_ensemble_best_ext_with_weather():
# for i in range(11):
a = 1.0
b = 0.0
c = 0.0
total_score = 0.0
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "ext_with_weather_ensemble.csv"
ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
score, _, _ = test(end_day, file4)
print ("score: ", score)
total_score += score
num += 1.0
avg_score = total_score / num
print ("avg_score: ", avg_score)
# best_params = [0.0, 0.0, 0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# for k in range(11):
# b = k*1.0/10.0
# if a + b > 1.0:
# continue
# c = 1.0 - a -b
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(False) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(True) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(False) + ".csv"
# file4 = base_path_3 + "ext_with_weather_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a, b, b]
# best_score = avg_score
# print best_params
'''
get_test(type="0301-0531_0801-0410")
get_test(type="2017_0101-2018_0410_less")
get_test(type="2017_0101-2018_0410_test")
ans_file = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + type + ".csv"
'''
def cal_ensemble_best_ext_with_weather_three_metric():
# for i in range(11):
a = 1.0
b = 0.0
c = 0.0
total_score = 0.0
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
file2 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
file3 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
file4 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
score, _, _ = test(end_day, file4)
print ("score: ", score)
total_score += score
num += 1.0
avg_score = total_score / num
print ("avg_score: ", avg_score)
# best_params = [0.0, 0.0, 0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# for k in range(11):
# b = k*1.0/10.0
# if a + b > 1.0:
# continue
# c = 1.0 - a -b
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
# file2 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
# file3 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
# file4 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a, b, b]
# best_score = avg_score
# print best_params
def cal_ensemble_all():
a = 0.6
total_score = 0.0
total_score1 = 0.0
total_score2 = 0.0
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "ext_with_weather_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=1.0, b=0.0, c=0.0)
file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "xgboost_ensemble.csv"
ensemble(file5, file6, file7, a=0.3)
file8 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
file9 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
file10 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
file11 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
# ensemble_three_file(file8, file9, file10, file4=file11, a=1.0, b=0.0, c=0.0)
file12 = base_path_3 + "ensemble_all_1.csv"
ensemble(file1, file7, file12, a=0.6)
score, _, _ = test(end_day, file12)
print ("score_1: ", score)
total_score1 += score
print ("after rule:", score)
file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file6 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "xgboost_ensemble.csv"
ensemble(file5, file6, file7, a=0.2)
file13 = base_path_3 + "ensemble_all_2.csv"
ensemble(file1, file7, file13, a=0.7)
score, _, _ = test(end_day, file13)
total_score2 += score
print ("score_2: ", score)
file14 = base_path_3 + end_day + "_ensemble_zhoujie.csv"
ensemble(file12, file13, file14, a=0.6)
score, _, _ = test(end_day, file14)
total_score += score
print ("score: ", score)
num += 1.0
avg_score1 = total_score1 / num
avg_score2 = total_score2 / num
avg_score = total_score / num
print ("avg_score: ", avg_score1, avg_score2, avg_score)
# best_params = [0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
# True) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file4 = base_path_3 + "ext_with_weather_ensemble.csv"
# # ensemble_three_file(file1, file2, file3, file4=file4, a=1.0, b=0.0, c=0.0)
#
# # file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# # True) + ".csv"
# file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file6 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file7 = base_path_3 + "xgboost_ensemble.csv"
# ensemble(file5, file6, file7, a=0.3)
#
# file8 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
# file9 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
# file10 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
# file11 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
# # ensemble_three_file(file8, file9, file10, file4=file11, a=1.0, b=0.0, c=0.0)
#
# file12 = base_path_3 + "ensemble_all.csv"
# # ensemble_three_file(file4, file4, file11, file4=file12, a=a, b=b, c=c)
# # ensemble_three_file(file1, file7, file8, file4=file12, a=a, b=b, c=c)
# ensemble(file1, file7, file12, a=a)
# score, _, _ = test(end_day, file12)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a]
# best_score = avg_score
# print best_params
def get_ans_1(end_day, caiyun=False):
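    # submission 1: 0.7 * ext_with_weather output + 0.3 * (0.2/0.8 blend of the two xgboost outputs)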
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "xgboost_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + "_caiyun.csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file7 = base_path_3 + "xgboost_ensemble_caiyun.csv"
ensemble(file5, file6, file7, a=0.2)
if caiyun == False:
file12 = base_path_3 + end_day + "_ensemble_all_1.csv"
else:
file12 = base_path_3 + end_day + "_ensemble_all_1_caiyun.csv"
ensemble(file1, file7, file12, a=0.7)
def get_ans_2(end_day, caiyun=False):
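    # submission 2: 0.6 * ext_with_weather output + 0.4 * (0.3/0.7 blend of the two xgboost outputs)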
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file7 = base_path_3 + end_day + "_xgboost_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file7 = base_path_3 + end_day + "_xgboost_ensemble_caiyun.csv"
ensemble(file5, file6, file7, a=0.3)
if caiyun == False:
file12 = base_path_3 + end_day + "_ensemble_all_2.csv"
else:
file12 = base_path_3 + end_day + "_ensemble_all_2_caiyun.csv"
ensemble(file1, file7, file12, a=0.6)
def get_ans(end_day="2018-05-08", caiyun=False):
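    # build the candidate submission files: mean/median ensembles of lightgbm runs trained
    # on different date ranges and parameter sets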
# get_ans_1(end_day, caiyun=caiyun)
# get_ans_2(end_day, caiyun=caiyun)
# if caiyun == False:
# file13 = base_path_3 + end_day + "_ensemble_all_1.csv"
# file14 = base_path_3 + end_day + "_ensemble_all_2.csv"
# file15 = base_path_3 + end_day + "_ensemble_all_zhoujie.csv"
# else:
# file13 = base_path_3 + end_day + "_ensemble_all_1_caiyun.csv"
# file14 = base_path_3 + end_day + "_ensemble_all_2_caiyun.csv"
# file15 = base_path_3 + end_day + "_ensemble_all_zhoujie_caiyun.csv"
# ensemble(file13, file14, file15, a=0.4)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble_caiyun.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble_caiyun.csv"
ensemble_median(file1, file2, file3, file4, file5, file6)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble_0429.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble_0429.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble_0429_caiyun.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble_0429_caiyun.csv"
ensemble_median(file1, file2, file3, file4, file5, file6)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file5 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file8 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file9 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file10 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file11 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file12 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file13 = base_path_3 + end_day + "lightgbm_mean_ensemble.csv"
file14 = base_path_3 + end_day + "lightgbm_median_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file6 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file7 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file8 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file9 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file10 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file11 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file12 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file13 = base_path_3 + end_day + "lightgbm_mean_ensemble_caiyun.csv"
file14 = base_path_3 + end_day + "lightgbm_median_ensemble_caiyun.csv"
ensemble_medians([file1, file2, file5, file6], file13, file14)
if caiyun == False:
file24 = base_path_3 + "" + end_day + "_weight_mean_0410.csv"
file25 = base_path_3 + "" + end_day + "_weight_mean_0429.csv"
file26 = base_path_3 + "" + end_day + "_weight_mean_0410_0429.csv"
else:
file24 = base_path_3 + "" + end_day + "_weight_mean_0410_caiyun.csv"
file25 = base_path_3 + "" + end_day + "_weight_mean_0429_caiyun.csv"
file26 = base_path_3 + "" + end_day + "_weight_mean_0410_0429_caiyun.csv"
ensemble_medians_with_weight([file5, file7, file8], [0.6, 0.4, 0.0], file24)
ensemble_medians_with_weight([file9, file11, file12], [0.5, 0.3, 0.2], file25)
ensemble_medians_with_weight([file13, file14], [0.4, 0.6], file26)
if caiyun == False:
file27 = base_path_3 + "" + end_day + "_mean_0410.csv"
file28 = base_path_3 + "" + end_day + "_median_0410.csv"
else:
file27 = base_path_3 + "" + end_day + "_mean_0410_caiyun.csv"
file28 = base_path_3 + "" + end_day + "_median_0410_caiyun.csv"
ensemble_medians([file5, file7, file8], file27, file28)
if caiyun == False:
file15 = base_path_3 + end_day + "lightgbm_mean_ensemble_6.csv"
file16 = base_path_3 + end_day + "lightgbm_median_ensemble_6.csv"
else:
file15 = base_path_3 + end_day + "lightgbm_mean_ensemble_6_caiyun.csv"
file16 = base_path_3 + end_day + "lightgbm_median_ensemble_6_caiyun.csv"
ensemble_medians([file1, file2, file3, file4, file5, file6, file7, file8], file15, file16)
if caiyun == False:
file17 = base_path_3 + end_day + "lightgbm_mean_ensemble_29_6.csv"
file18 = base_path_3 + end_day + "lightgbm_median_ensemble_29_6.csv"
else:
file17 = base_path_3 + end_day + "lightgbm_mean_ensemble_29_6_caiyun.csv"
file18 = base_path_3 + end_day + "lightgbm_median_ensemble_29_6_caiyun.csv"
ensemble_medians([file1, file2, file3, file4, file9, file10, file11, file12], file17, file18)
if caiyun == False:
file19 = base_path_3 + end_day + "lightgbm_ensemble_mean_4.csv"
file20 = base_path_3 + end_day + "lightgbm_ensemble_median_4.csv"
else:
file19 = base_path_3 + end_day + "lightgbm_ensemble_mean_4_caiyun.csv"
file20 = base_path_3 + end_day + "lightgbm_ensemble_median_4_caiyun.csv"
ensemble_medians([file5, file7, file9, file11], file19, file20)
if caiyun == False:
file21 = base_path_3 + end_day + "lightgbm_ensemble_mean_2.csv"
file22 = base_path_3 + end_day + "lightgbm_ensemble_median_2.csv"
else:
file21 = base_path_3 + end_day + "lightgbm_ensemble_mean_2_caiyun.csv"
file22 = base_path_3 + end_day + "lightgbm_ensemble_median_2_caiyun.csv"
ensemble_medians([file9, file11], file21, file22)
if caiyun == False:
file23 = base_path_3 + end_day + "lightgbm_ensemble_mean_weight.csv"
else:
file23 = base_path_3 + end_day + "lightgbm_ensemble_mean_weight_caiyun.csv"
ensemble_medians_with_weight([file12, file8], [0.3, 0.7], file23)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file24 = base_path_3 + end_day + "lightgbm_ensemble_mean__clean_4.csv"
file25 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_4.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file24 = base_path_3 + end_day + "lightgbm_ensemble_mean__clean_4_caiyun.csv"
file25 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_4_caiyun.csv"
# ensemble_medians([file1, file2, file3, file4], file24, file25)
if caiyun == False:
file26 = base_path_3 + end_day + "lightgbm_ensemble_mean_clean_2.csv"
file27 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_2.csv"
else:
file26 = base_path_3 + end_day + "lightgbm_ensemble_mean_clean_2_caiyun.csv"
file27 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_2_caiyun.csv"
# ensemble_medians([file1, file3], file26, file27)
# file19 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file20 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file21 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "3" + "_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# # file22 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "4" + "_" + "0301-0531_0801-0410" + "_" + str(
# # False) + ".csv"
# file23 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file24 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file25 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "3" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file26 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "4" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file27 = base_path_3 + "" + end_day + "-lightgbm_log_mean_ensemble.csv"
# file28 = base_path_3 + "" + end_day + "-lightgbm_log_median_ensemble.csv"
# ensemble_medians(
# [file1, file2, file3, file4, file5, file6, file7, file8, file19, file20, file21, file23, file24, file25,
# file26], file27, file28)
def get_ans_latter(end_day="2018-05-11", caiyun=False):
get_ans_1(end_day, caiyun=caiyun)
get_ans_2(end_day, caiyun=caiyun)
if caiyun == False:
file13 = base_path_3 + end_day + "_ensemble_all_1.csv"
file14 = base_path_3 + end_day + "_ensemble_all_2.csv"
file15 = base_path_3 + end_day + "_ensemble_all_zhoujie.csv"
else:
file13 = base_path_3 + end_day + "_ensemble_all_1_caiyun.csv"
file14 = base_path_3 + end_day + "_ensemble_all_2_caiyun.csv"
file15 = base_path_3 + end_day + "_ensemble_all_zhoujie_caiyun.csv"
ensemble(file13, file14, file15, a=0.4)
def ensemble_mean():
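    # mean and median blends of four 2018-05-01 model submissions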
data_base = "../output/"
df1 = pd.read_csv(data_base + 'friend/sub20180502_060127.csv')
df2 = pd.read_csv(data_base + '2018-05-01-ext_with_weather_0301-0531_0801-0410_False.csv')
df3 = pd.read_csv(data_base + '2018-05-01-xgboost_weather0301-0531_0801-0410_False.csv')
df4 = pd.read_csv(data_base + 'friend/res2018-05-01.csv')
df1.columns = ['test_id', 'PM2.5_df1', 'PM10_df1', 'O3_df1']
df2.columns = ['test_id', 'PM2.5_df2', 'PM10_df2', 'O3_df2']
df3.columns = ['test_id', 'PM2.5_df3', 'PM10_df3', 'O3_df3']
df4.columns = ['test_id', 'PM2.5_df4', 'PM10_df4', 'O3_df4']
df = df1
df = pd.merge(df, df2, on='test_id', how='left')
df = pd.merge(df, df3, on='test_id', how='left')
df = pd.merge(df, df4, on='test_id', how='left')
# df.columns
def en_mean(x):
return np.mean([x[0], x[1], x[2], x[3]])
def en_median(x):
return np.median([x[0], x[1], x[2], x[3]])
df['PM2.5'] = df[['PM2.5_df1', 'PM2.5_df2', 'PM2.5_df3', 'PM2.5_df4']].apply(lambda x: en_mean(x), axis=1)
df['PM10'] = df[['PM10_df1', 'PM10_df2', 'PM10_df3', 'PM10_df4']].apply(lambda x: en_mean(x), axis=1)
df['O3'] = df[['O3_df1', 'O3_df2', 'O3_df3', 'O3_df4']].apply(lambda x: en_mean(x), axis=1)
df[['test_id', 'PM2.5', 'PM10', 'O3']].to_csv(data_base + 'four_result_mean0501.csv', index=False)
df['PM2.5'] = df[['PM2.5_df1', 'PM2.5_df2', 'PM2.5_df3', 'PM2.5_df4']].apply(lambda x: en_median(x), axis=1)
df['PM10'] = df[['PM10_df1', 'PM10_df2', 'PM10_df3', 'PM10_df4']].apply(lambda x: en_median(x), axis=1)
df['O3'] = df[['O3_df1', 'O3_df2', 'O3_df3', 'O3_df4']].apply(lambda x: en_median(x), axis=1)
df[['test_id', 'PM2.5', 'PM10', 'O3']].to_csv(data_base + 'four_result_median0501.csv', index=False)
def ensemble_mean_2():
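    # like ensemble_mean but the blend uses only the first three submissions (df4 is merged but unused)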
data_base = "../output/"
df1 = pd.read_csv(data_base + 'friend/sub20180502_060127.csv')
df2 = pd.read_csv(data_base + '2018-05-01-ext_with_weather_0301-0531_0801-0410_False.csv')
df3 = pd.read_csv(data_base + '2018-05-01-xgboost_weather0301-0531_0801-0410_False.csv')
df4 = pd.read_csv(data_base + 'friend/res2018-05-01.csv')
df1.columns = ['test_id', 'PM2.5_df1', 'PM10_df1', 'O3_df1']
df2.columns = ['test_id', 'PM2.5_df2', 'PM10_df2', 'O3_df2']
df3.columns = ['test_id', 'PM2.5_df3', 'PM10_df3', 'O3_df3']
df4.columns = ['test_id', 'PM2.5_df4', 'PM10_df4', 'O3_df4']
df = df1
df = pd.merge(df, df2, on='test_id', how='left')
df = pd.merge(df, df3, on='test_id', how='left')
df = pd.merge(df, df4, on='test_id', how='left')
# df.columns
def en_mean(x):
return np.mean([x[0], x[1], x[2]])
def en_median(x):
return np.median([x[0], x[1], x[2]])
df['PM2.5'] = df[['PM2.5_df1', 'PM2.5_df2', 'PM2.5_df3']].apply(lambda x: en_mean(x), axis=1)
df['PM10'] = df[['PM10_df1', 'PM10_df2', 'PM10_df3']].apply(lambda x: en_mean(x), axis=1)
df['O3'] = df[['O3_df1', 'O3_df2', 'O3_df3']].apply(lambda x: en_mean(x), axis=1)
df[['test_id', 'PM2.5', 'PM10', 'O3']].to_csv(data_base + 'three_result_mean0501.csv', index=False)
df['PM2.5'] = df[['PM2.5_df1', 'PM2.5_df2', 'PM2.5_df3']].apply(lambda x: en_median(x), axis=1)
df['PM10'] = df[['PM10_df1', 'PM10_df2', 'PM10_df3']].apply(lambda x: en_median(x), axis=1)
df['O3'] = df[['O3_df1', 'O3_df2', 'O3_df3']].apply(lambda x: en_median(x), axis=1)
df[['test_id', 'PM2.5', 'PM10', 'O3']].to_csv(data_base + 'three_result_median0501.csv', index=False)
def ensemble_mean_3(file1, file2, file3, fileto1, fileto2):
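    # presumably writes the mean and median blends of three prediction files to fileto1 and fileto2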
data_base = "../output/"
df1 = pd.read_csv(file1)
df2 = | pd.read_csv(file2) | pandas.read_csv |
# Import standard libraries
import sys
sys.path.append('C:/Users/rohan/Documents/Projects/Food_Demand_Forecasting_Challenge/Food_Demand_Forecasting_Challenge')
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
def features_by_center(df):
"""
    Compute the mean checkout_price (CP) and discount (D) over all meal_ids within each
    week/center_id (and week/center_id/category, week/center_id/cuisine) combination,
    overall and separately for emailer-promoted and homepage-featured rows.
"""
df_ep = df.loc[df['emailer_for_promotion'] == 1, :]
df_hf = df.loc[df['homepage_featured'] == 1, :]
for data_set in [df, df_ep, df_hf]:
for data_cut in [['week','center_id'],
['week','center_id','category'],
['week','center_id','cuisine']]:
data_cut_name = "_".join(data_cut)
for col in ['checkout_price','discount']:
gdf = data_set.groupby(data_cut)[col].mean().reset_index()
if data_set.equals(df_ep):
# Set column names
gdf.columns = data_cut + [data_cut_name + '_ep_mean_' + col]
# Merge Mean with original dataset
df = pd.merge(df, gdf, on=data_cut, how='left')
# Compute difference between originl value and Mean
df[col + '_minus_' + data_cut_name + '_ep_mean_' + col] = (df[col] - df[data_cut_name + '_ep_mean_' + col])
# Check if original value is greater than Mean
df[col + '_gt_' + data_cut_name + '_ep_mean_' + col] = (df[col] > df[data_cut_name + '_ep_mean_' + col]).astype(int)
# Drop the Mean
df = df.drop([data_cut_name + '_ep_mean_' + col], axis=1)
elif data_set.equals(df_hf):
# Set column names
gdf.columns = data_cut + [data_cut_name + '_hf_mean_' + col]
# Merge Mean with original dataset
df = | pd.merge(df, gdf, on=data_cut, how='left') | pandas.merge |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
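            # Flag episodes whose start date falls inside the collection year, then count them per child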
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
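        # Keep each child's earliest episode; its reason-for-new-episode code must be 'S'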
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
        affected_fields=['DATE_PLACED_CEASED', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
        affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR',  # AD1
                         'IN_TOUCH', 'ACTIV', 'ACCOM'],  # OC3
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
        , # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetiime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM,
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
            # Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start > DOB + 17years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = | pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
import openpyxl
import pandas as pd
from datetime import datetime, timedelta
import xlsxwriter
now = datetime.now()
date_time = now.strftime("%m_%d_%Y %I_%M_%p")
federal_tax_rate_path = "./federaltaxrates.csv"
state_tax_rate_path = "./statetaxrates.csv"
city_tax_rate_path = "./NYCtaxrates.csv"
# calculate social security tax
class EffectiveFederalTax:
def __init__(self, salary, marital_status):
self.salary = salary
self.marital_status = marital_status
def calculateSocialSecurityTaxDue(self):
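        # 6.2% employee Social Security rate; the 2022 wage base caps taxable wages
        # at 147,000 USD, so the tax due tops out at 9,114 USD.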
if self.salary >= 147000:
return 9114
else:
return round(self.salary * 0.062, 2)
# calculate federal income tax + remainder of fica (medicare) for single filers
class EffectiveFederalTaxSingle(EffectiveFederalTax):
def __init__(self, salary, deductions):
super().__init__(salary, "single")
self.deductions = deductions
def calculateFederalIncomeTaxDue(self):
federal_tax_rate_table = | pd.read_csv(federal_tax_rate_path) | pandas.read_csv |
import numpy
from pandas import Series
from pandas.util.testing import assert_series_equal
from prepkit.processor.unit.isnull import Isnull
from ..._helper import array_uint8
def test_process():
processor = Isnull()
target = | Series([0, '', None, numpy.nan]) | pandas.Series |
# Python infrastructure elements
import subprocess
import sys
from typing import Dict, List, Set, Tuple, IO
from enum import IntEnum
# Command line and database handling
import click
import fire.cli
from fire.cli import firedb
from fire.api.model import (
# Typingelementer fra databaseAPIet:
Koordinat,
Punkt,
PunktInformation,
PunktInformationType,
Sag,
Sagsevent,
Sagsinfo,
Srid,
)
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import NoResultFound
# Computation
import numpy as np
import statsmodels.api as sm
from math import sqrt
from pyproj import Proj
from scipy import stats
# Data handling
import pandas as pd
import xlsxwriter
import xmltodict
from datetime import datetime
# ------------------------------------------------------------------------------
@click.group()
def mtl():
"""Motoriseret trigonometrisk nivellement: Arbejdsflow, beregning og analyse"""
pass
# ------------------------------------------------------------------------------
def get_observation_strings(
filinfo: List[Tuple[str, float]], verbose: bool = False
) -> List[str]:
"""Pil observationsstrengene ud fra en række råfiler"""
kol = IntEnum(
"kol",
"fra til dato tid L dH journal T setups sky sol vind sigt kommentar",
start=0,
)
observationer = list()
for fil in filinfo:
filnavn = fil[0]
spredning = fil[1]
if verbose:
print("Læser " + filnavn + " med spredning ", spredning)
try:
with open(filnavn, "rt", encoding="utf-8") as obsfil:
for line in obsfil:
if "#" != line[0]:
continue
line = line.lstrip("#").strip()
                    # Check that the observation is in one of the known formats
tokens = line.split(" ", 13)
assert len(tokens) in (9, 13, 14), (
"Malform input line: " + line + " i fil: " + filnavn
)
                    # Bring the observation into canonical 14-field form.
for i in range(len(tokens), 13):
tokens.append(0)
if len(tokens) < 14:
tokens.append('""')
tokens[13] = tokens[13].lstrip('"').strip().rstrip('"')
                    # Fix the dreadful date/time formats
tid = " ".join((tokens[kol.dato], tokens[kol.tid]))
try:
isotid = datetime.strptime(tid, "%d.%m.%Y %H.%M")
except ValueError:
sys.exit(
"Argh - ikke-understøttet datoformat: '"
+ tid
+ "' i fil: "
+ filnavn
)
                    # Reorder columns and convert numeric data from string representation to numbers
reordered = [
tokens[kol.journal],
tokens[kol.fra],
tokens[kol.til],
float(tokens[kol.dH]),
float(tokens[kol.L]),
int(tokens[kol.setups]),
spredning,
tokens[kol.kommentar],
isotid,
float(tokens[kol.T]),
int(tokens[kol.sky]),
int(tokens[kol.sol]),
int(tokens[kol.vind]),
int(tokens[kol.sigt]),
filnavn,
]
observationer.append(reordered)
except FileNotFoundError:
print("Kunne ikke læse filen '" + filnavn + "'")
return observationer
# ------------------------------------------------------------------------------
def punkt_information(ident: str) -> PunktInformation:
"""Find alle informationer for et fikspunkt"""
pi = aliased(PunktInformation)
pit = aliased(PunktInformationType)
try:
punktinfo = (
firedb.session.query(pi)
.filter(pit.name.startswith("IDENT:"), pi.tekst == ident)
.first()
)
except NoResultFound:
fire.cli.print(f"Error! {ident} not found!", fg="red", err=True)
sys.exit(1)
if punktinfo is not None:
fire.cli.print(f"Fandt {ident}", fg="green", err=False)
else:
fire.cli.print(f"Fandt ikke {ident}", fg="cyan", err=False)
return punktinfo
# ------------------------------------------------------------------------------
def punkt_kote(punktinfo: PunktInformation, koteid: int) -> Koordinat:
"""Find aktuelle koordinatværdi for koordinattypen koteid"""
if punktinfo is None:
return None
for koord in punktinfo.punkt.koordinater:
if koord.sridid != koteid:
continue
if koord.registreringtil is None:
return koord
return None
# ------------------------------------------------------------------------------
def punkt_geometri(punktinfo: PunktInformation, ident: str) -> Tuple[float, float]:
"""Find placeringskoordinat for punkt"""
if punktinfo is None:
return (11, 56)
try:
geom = firedb.hent_geometri_objekt(punktinfo.punktid)
# Turn the string "POINT (lon lat)" into the tuple "(lon, lat)"
geo = eval(str(geom.geometri).lstrip("POINT").strip().replace(" ", ","))
# TODO: Perhaps just return (56,11) Kattegat pain instead
assert len(geo) == 2, "Bad geometry format: " + str(geom.geometri)
except NoResultFound:
fire.cli.print(f"Error! Geometry for {ident} not found!", fg="red", err=True)
sys.exit(1)
return geo
# ------------------------------------------------------------------------------
# TODO: Should probably be part of the API
# ------------------------------------------------------------------------------
def hent_sridid(db, srid: str) -> int:
srider = db.hent_srider()
for s in srider:
if s.name == srid:
return s.sridid
    # TODO: throw an exception
return 0
# ------------------------------------------------------------------------------
def path_to_origin(graph: Dict[str, Set[str]], start: str, origin: str, path=[]):
    """
    Microscopic backtracking network connectivity test. Based on an essay
    by GvR from https://www.python.org/doc/essays/graphs/, but modernized
    here from Python 1.5 to 3.7 and modified to work on dict-over-set
    (the original used dict-over-list)
    """
path = path + [start]
if start == origin:
return path
if start not in graph:
return None
for node in graph[start]:
if node not in path:
newpath = path_to_origin(graph, node, origin, path)
if newpath:
return newpath
return None
# ------------------------------------------------------------------------------
# Example:
#
# graph = {
# 'A': {'B', 'C'},
# 'B': {'C', 'D'},
# 'C': {'D'},
# 'D': {'C'},
# 'E': {'F'},
# 'F': {'C'},
# 'G': {}
# }
#
# print (path_to_origin (graph, 'A', 'C'))
# print (path_to_origin (graph, 'A', 'G'))
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def find_nyetablerede(projektnavn):
"""Opbyg oversigt over nyetablerede punkter"""
print("Finder nyetablerede punkter")
try:
nyetablerede = pd.read_excel(
projektnavn + ".xlsx",
sheet_name="Nyetablerede punkter",
usecols="A:E",
dtype={
"Foreløbigt navn": np.object,
"Endeligt navn": np.object,
"φ": np.float64,
"λ": np.float64,
"Foreløbig kote": np.float64,
},
)
except:
nyetablerede = pd.DataFrame(
columns=["Foreløbigt navn", "Endeligt navn", "φ", "λ", "Foreløbig kote"],
)
assert nyetablerede.shape[0] == 0, "Forventede tom dataframe"
    # Set the 'Foreløbigt navn' column as index, so we can address
    # cells as nyetablerede.at[punktnavn, elementnavn]
return nyetablerede.set_index("Foreløbigt navn")
# ------------------------------------------------------------------------------
def find_inputfiler(navn) -> List[Tuple[str, float]]:
    """Build an overview of all input file names and their associated standard deviations"""
try:
inputfiler = pd.read_excel(
navn + ".xlsx", sheet_name="Filoversigt", usecols="C:E"
)
except:
sys.exit("Kan ikke finde filoversigt i projektfil")
    inputfiler = inputfiler[inputfiler["Filnavn"].notnull()]  # Remove blank lines
filnavne = inputfiler["Filnavn"]
spredning = inputfiler["σ"]
assert len(filnavne) > 0, "Ingen inputfiler anført"
return list(zip(filnavne, spredning))
# ------------------------------------------------------------------------------
def importer_observationer(navn):
    """Build a dataframe of observations imported from the raw data files"""
    print("Importerer observationer")
    observationer = pd.DataFrame(
        get_observation_strings(find_inputfiler(navn)),
columns=[
"journal",
"fra",
"til",
"dH",
"L",
"opst",
"σ",
"kommentar",
"hvornår",
"T",
"sky",
"sol",
"vind",
"sigt",
"kilde",
],
)
return observationer.sort_values(by="journal").set_index("journal").reset_index()
# ------------------------------------------------------------------------------
def find_observationer(navn):
"""Opbyg dataframe med allerede importerede observationer"""
print("Læser observationer")
try:
observationer = pd.read_excel(
navn + ".xlsx", sheet_name="Observationer", usecols="A:P"
)
except:
        observationer = importer_observationer(navn)
return observationer
# ------------------------------------------------------------------------------
def opbyg_punktoversigt(navn, nyetablerede, alle_punkter, nye_punkter):
    # Read the preliminary point overview to see whether we need to query the database
try:
punktoversigt = pd.read_excel(
navn + ".xlsx", sheet_name="Punktoversigt", usecols="A:L"
)
except:
punktoversigt = pd.DataFrame(
columns=[
"punkt",
"fix",
"upub",
"år",
"kote",
"σ",
"ny",
"ny σ",
"Δ",
"kommentar",
"φ",
"λ",
]
)
assert punktoversigt.shape[0] == 0, "Forventede tom dataframe"
print("Opbygger punktoversigt")
    # Find and add the points that are missing from the point overview.
manglende_punkter = set(alle_punkter) - set(punktoversigt["punkt"])
pkt = list(punktoversigt["punkt"]) + list(manglende_punkter)
    # Extend punktoversigt so there is room for all points
punktoversigt = punktoversigt.reindex(range(len(pkt)))
punktoversigt["punkt"] = pkt
    # Reinstate the 'punkt' column as the index column
punktoversigt = punktoversigt.set_index("punkt")
    # Fetch elevation and location from the database if we do not already have them
print("Checker for manglende kote og placering")
koteid = np.nan
for punkt in alle_punkter:
if not pd.isna(punktoversigt.at[punkt, "kote"]):
continue
if punkt in nye_punkter:
continue
        # We avoid hitting the database if we already have all elevations,
        # by only fetching koteid once we know we actually need it
if np.isnan(koteid):
koteid = hent_sridid(firedb, "EPSG:5799")
            # TODO: Handle this with try:..except instead
assert koteid != 0, "DVR90 (EPSG:5799) ikke fundet i srid-tabel"
info = punkt_information(punkt)
kote = punkt_kote(info, koteid)
if kote is not None:
punktoversigt.at[punkt, "kote"] = kote.z
punktoversigt.at[punkt, "σ"] = kote.sz
punktoversigt.at[punkt, "år"] = kote.registreringfra.year
geom = punkt_geometri(info, punkt)
if pd.isna(punktoversigt.at[punkt, "φ"]):
punktoversigt.at[punkt, "φ"] = geom[1]
punktoversigt.at[punkt, "λ"] = geom[0]
    # Newly established points are not in the database, so fetch any missing
    # elevations and location coordinates from the 'Nyetablerede punkter' sheet
for punkt in nye_punkter:
if pd.isna(punktoversigt.at[punkt, "kote"]):
punktoversigt.at[punkt, "kote"] = nyetablerede.at[punkt, "<NAME>"]
if pd.isna(punktoversigt.at[punkt, "φ"]):
punktoversigt.at[punkt, "φ"] = nyetablerede.at[punkt, "φ"]
if pd.isna(punktoversigt.at[punkt, "λ"]):
punktoversigt.at[punkt, "λ"] = nyetablerede.at[punkt, "λ"]
    # Sanity check the location coordinates. If some look like UTM, convert them
    # to geographic coordinates. NaN and 0 are moved out into the Kattegat, so
    # they are easy to spot
utm32 = Proj("proj=utm zone=32 ellps=GRS80", preserve_units=False)
assert utm32 is not None, "Kan ikke initialisere projektionselelement utm32"
for punkt in alle_punkter:
phi = punktoversigt.at[punkt, "φ"]
lam = punktoversigt.at[punkt, "λ"]
if pd.isna(phi) or | pd.isna(lam) | pandas.isna |
import sys
from pathlib import Path
from datetime import datetime
from time import sleep
import pandas as pd
from meteostat import Daily, Point
sys.path.append(str(Path.cwd()))
from pipeline_config import root_dir # noqa: E402
from pipeline_logger import logger # noqa: E402
from utils import get_module_purpose, read_args, write_ff_csv # noqa: E402
def collect_daily_weather_conditions(
game_date: str, latitude: float, longitude: float
) -> pd.DataFrame:
game_date = datetime.strptime(game_date, "%Y-%m-%d")
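    # Meteostat needs a Point plus a date range; a single-day range returns that day's conditions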
point = Point(lat=latitude, lon=longitude)
game_day_weather_df = (
Daily(point, start=game_date, end=game_date).fetch().reset_index()
)
return game_day_weather_df
def collect_weather(
calendar_df: pd.DataFrame, stadium_df: pd.DataFrame
) -> pd.DataFrame:
game_location_fields = ["date", "team", "opp", "stadium_name", "roof_type"]
game_weather_df = pd.DataFrame(columns=game_location_fields)
for row in calendar_df.itertuples(index=False):
logger.info(f"collecting data for {row}")
game_date, team, opp, is_away = (row.date, row.team, row.opp, row.is_away)
if is_away == 1:
home_team = opp
else:
home_team = team
game_location = stadium_df[stadium_df["team"] == home_team]
if game_location.empty:
raise Exception(f"No stadium found for {home_team}")
stadium_name, roof_type, lon, lat = (
game_location[["stadium_name", "roof_type", "longitude", "latitude"]]
.iloc[0]
.tolist()
)
game_location_data = [row.date, row.team, row.opp, stadium_name, roof_type]
if roof_type in ["Indoor", "Retractable"]:
game_day_weather_df = pd.DataFrame(
[game_location_data], columns=game_location_fields
)
else:
game_day_weather_df = collect_daily_weather_conditions(
game_date=game_date, latitude=lat, longitude=lon
)
game_day_weather_df = game_day_weather_df.drop(columns="time")
game_day_weather_df = pd.DataFrame(
[game_location_data + game_day_weather_df.iloc[0].tolist()],
columns=game_location_fields + game_day_weather_df.columns.tolist(),
)
game_weather_df = pd.concat([game_weather_df, game_day_weather_df])
sleep(2)
return game_weather_df
if __name__ == "__main__":
args = read_args()
dir_type, data_type = get_module_purpose(module_path=__file__)
processed_calendar_path = (
root_dir
/ "staging_datasets"
/ "season"
/ str(args.season_year)
/ "processed"
/ "calendar"
/ "calendar.csv"
)
calendar_df = pd.read_csv(processed_calendar_path)
stadium_df = | pd.read_csv(root_dir / "staging_datasets" / "stadiums.csv") | pandas.read_csv |
import sys
from collections import defaultdict
import pandas
from crawlplot import CrawlPlot, PLOTDIR
from crawlstats import CST, MonthlyCrawl, MultiCount
from top_level_domain import TopLevelDomain
from stats.tld_alexa_top_1m import alexa_top_1m_tlds
from stats.tld_cisco_umbrella_top_1m import cisco_umbrella_top_1m_tlds
from stats.tld_majestic_top_1m import majestic_top_1m_tlds
# min. share of URLs for a TLD to be shown in metrics
min_urls_percentage = .05
class TldStats(CrawlPlot):
def __init__(self):
self.tlds = defaultdict(dict)
self.tld_stats = defaultdict(dict)
self.N = 0
def add(self, key, val):
cst = CST[key[0]]
if cst != CST.tld:
return
tld = key[1]
crawl = key[2]
self.tlds[tld][crawl] = val
def transform_data(self):
crawl_has_host_domain_counts = {}
for tld in self.tlds:
tld_repr = tld
tld_obj = None
if tld in ('', '(ip address)'):
continue
else:
try:
tld_obj = TopLevelDomain(tld)
tld_repr = tld_obj.tld
except:
print('error', tld)
continue
for crawl in self.tlds[tld]:
self.tld_stats['suffix'][self.N] = tld_repr
self.tld_stats['crawl'][self.N] = crawl
date = pandas.Timestamp(MonthlyCrawl.date_of(crawl))
self.tld_stats['date'][self.N] = date
if tld_obj:
self.tld_stats['type'][self.N] \
= TopLevelDomain.short_type(tld_obj.tld_type)
self.tld_stats['subtype'][self.N] = tld_obj.sub_type
self.tld_stats['tld'][self.N] = tld_obj.first_level
else:
self.tld_stats['type'][self.N] = ''
self.tld_stats['subtype'][self.N] = ''
self.tld_stats['tld'][self.N] = ''
value = self.tlds[tld][crawl]
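                # value is a MultiCount: (pages, urls, hosts, domains)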
n_pages = MultiCount.get_count(0, value)
self.tld_stats['pages'][self.N] = n_pages
n_urls = MultiCount.get_count(1, value)
self.tld_stats['urls'][self.N] = n_urls
n_hosts = MultiCount.get_count(2, value)
self.tld_stats['hosts'][self.N] = n_hosts
n_domains = MultiCount.get_count(3, value)
self.tld_stats['domains'][self.N] = n_domains
if n_urls != n_hosts:
# multi counts including host counts are not (yet)
# available for all crawls
crawl_has_host_domain_counts[crawl] = True
elif crawl not in crawl_has_host_domain_counts:
crawl_has_host_domain_counts[crawl] = False
self.N += 1
for crawl in crawl_has_host_domain_counts:
if not crawl_has_host_domain_counts[crawl]:
print('No host and domain counts for', crawl)
for n in self.tld_stats['crawl']:
if self.tld_stats['crawl'][n] == crawl:
del(self.tld_stats['hosts'][n])
del(self.tld_stats['domains'][n])
self.tld_stats = pandas.DataFrame(self.tld_stats)
@staticmethod
def field_percentage_formatter(precision=2, nan='-'):
f = '{0:,.' + str(precision) + 'f}'
return lambda x: nan if pandas.isna(x) else f.format(x)
def save_data(self):
self.tld_stats.to_csv('data/tlds.csv')
def percent_agg(self, data, columns, index, values, aggregate):
data = data[[columns, index, values]]
data = data.groupby([columns, index]).agg(aggregate)
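        # Normalise the aggregated counts to percentages within each value of `columns`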
data = data.groupby(level=0).apply(lambda x: 100.0*x/float(x.sum()))
# print("\n-----\n")
# print(data.to_string(formatters={'urls': TldStats.field_percentage_formatter()}))
return data
def pivot_percentage(self, data, columns, index, values, aggregate):
data = self.percent_agg(data, columns, index, values, aggregate)
return data.reset_index().pivot(index=index,
columns=columns, values=values)
def plot_groups(self):
title = 'Groups of Top-Level Domains'
ylabel = 'URLs %'
clabel = ''
img_file = 'tld/groups.png'
data = self.pivot_percentage(self.tld_stats, 'crawl', 'type',
'urls', {'urls': 'sum'})
data = data.transpose()
print("\n-----\n")
types = set(self.tld_stats['type'].tolist())
formatters = {c: TldStats.field_percentage_formatter() for c in types}
print(data.to_string(formatters=formatters))
data.to_html('{}/tld/groups-percentage.html'.format(PLOTDIR),
formatters=formatters,
classes=['tablesorter', 'tablepercentage'])
data = self.percent_agg(self.tld_stats, 'date', 'type',
'urls', {'urls': 'sum'}).reset_index()
return self.line_plot(data, title, ylabel, img_file,
x='date', y='urls', c='type', clabel=clabel)
def plot(self, crawls, latest_crawl):
field_formatters = {c: '{:,.0f}'.format
for c in ['pages', 'urls', 'hosts', 'domains']}
for c in ['%urls', '%hosts', '%domains']:
field_formatters[c] = TldStats.field_percentage_formatter()
data = self.tld_stats
data = data[data['crawl'].isin(crawls)]
crawl_data = data
top_tlds = []
# stats per crawl
for crawl in crawls:
print("\n-----\n{}\n".format(crawl))
for aggr_type in ('type', 'tld'):
data = crawl_data
data = data[data['crawl'].isin([crawl])]
data = data.set_index([aggr_type], drop=False)
data = data.sum(level=aggr_type).sort_values(
by=['urls'], ascending=False)
for count in ('urls', 'hosts', 'domains'):
data['%'+count] = 100.0 * data[count] / data[count].sum()
if aggr_type == 'tld':
# skip less frequent TLDs
data = data[data['%urls'] >= min_urls_percentage]
for tld in data.index.values:
top_tlds.append(tld)
print(data.to_string(formatters=field_formatters))
print()
if crawl == latest_crawl:
# latest crawl by convention
type_name = aggr_type
if aggr_type == 'type':
type_name = 'group'
path = '{}/tld/latest-crawl-{}s.html'.format(
PLOTDIR, type_name)
data.to_html(path,
formatters=field_formatters,
classes=['tablesorter', 'tablesearcher'])
# stats comparison for selected crawls
for aggr_type in ('type', 'tld'):
data = crawl_data
if aggr_type == 'tld':
data = data[data['tld'].isin(top_tlds)]
data = self.pivot_percentage(data, 'crawl', aggr_type,
'urls', {'urls': 'sum'})
print("\n----- {}\n".format(aggr_type))
print(data.to_string(formatters={c: TldStats.field_percentage_formatter()
for c in crawls}))
if aggr_type == 'tld':
# save as HTML table
path = '{}/tld/selected-crawls-percentage.html'.format(
PLOTDIR, len(crawls))
data.to_html(path,
float_format=TldStats.field_percentage_formatter(4),
classes=['tablesorter', 'tablepercentage',
'tablesearcher'])
def plot_comparison(self, crawl, name, topNlimit=None, method='spearman'):
print()
print('Comparison for', crawl, '-', name, '-', method)
data = self.tld_stats
data = data[data['crawl'].isin([crawl])]
data = data[data['urls'] >= topNlimit]
data = data.set_index(['tld'], drop=False)
data = data.sum(level='tld')
print(data)
data['alexa'] = pandas.Series(alexa_top_1m_tlds)
data['cisco'] = | pandas.Series(cisco_umbrella_top_1m_tlds) | pandas.Series |
#Imports & Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
#Site Navigation
executable_path = {"executable_path": "chromedriver.exe"}
browser = Browser("chrome", **executable_path, headless=False)
# NASA Mars News
news_url = "https://mars.nasa.gov/news/"
browser.visit(news_url)
html_news = browser.html
soup = BeautifulSoup(html_news, "html.parser")
# Retrieve the latest element that contains news title and news_paragraph
news = soup.find("li", class_="slide")
news_title = news.find("div", class_="content_title").get_text()
news_p = news.find("div", class_="article_teaser_body").get_text()
# Display scraped data for news_title and news_p
print(news_title)
print(news_p)
# JPL Mars Space Images - Featured Image
image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(image_url)
browser.click_link_by_id("full_image")
browser.click_link_by_partial_text("more info")
html_image = browser.html
soup = BeautifulSoup(html_image, 'html.parser')
# Retrieve the url for featured_image
image = soup.find("figure", class_="lede")
image_url = image.select_one("a").get("href")
# Display complete url string for the featured_image
print("https://www.jpl.nasa.gov"+ image_url)
# Mars Facts
facts_url = "https://space-facts.com/mars/"
browser.visit(facts_url)
# Use pandas to convert the data to a HTML table string
mars_data = | pd.read_html(facts_url) | pandas.read_html |
import numpy as np
import pandas.util.testing as tm
from pandas import (DataFrame, Series, DatetimeIndex, MultiIndex, Index,
date_range)
from .pandas_vb_common import lib
class Reindex(object):
goal_time = 0.2
def setup(self):
rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min')
self.df = DataFrame(np.random.rand(10000, 10), index=rng,
columns=range(10))
self.df['foo'] = 'bar'
self.rng_subset = Index(rng[::2])
self.df2 = DataFrame(index=range(10000),
data=np.random.rand(10000, 30), columns=range(30))
N = 5000
K = 200
level1 = tm.makeStringIndex(N).values.repeat(K)
level2 = np.tile(tm.makeStringIndex(K).values, N)
index = MultiIndex.from_arrays([level1, level2])
self.s = Series(np.random.randn(N * K), index=index)
self.s_subset = self.s[::2]
def time_reindex_dates(self):
self.df.reindex(self.rng_subset)
def time_reindex_columns(self):
self.df2.reindex(columns=self.df.columns[1:5])
def time_reindex_multiindex(self):
self.s.reindex(self.s_subset.index)
class ReindexMethod(object):
goal_time = 0.2
params = ['pad', 'backfill']
param_names = ['method']
def setup(self, method):
N = 100000
self.idx = date_range('1/1/2000', periods=N, freq='1min')
self.ts = Series(np.random.randn(N), index=self.idx)[::2]
def time_reindex_method(self, method):
self.ts.reindex(self.idx, method=method)
class Fillna(object):
goal_time = 0.2
params = ['pad', 'backfill']
param_names = ['method']
def setup(self, method):
N = 100000
self.idx = date_range('1/1/2000', periods=N, freq='1min')
ts = Series(np.random.randn(N), index=self.idx)[::2]
self.ts_reindexed = ts.reindex(self.idx)
self.ts_float32 = self.ts_reindexed.astype('float32')
def time_reindexed(self, method):
self.ts_reindexed.fillna(method=method)
def time_float_32(self, method):
self.ts_float32.fillna(method=method)
class LevelAlign(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(
levels=[np.arange(10), np.arange(100), np.arange(100)],
labels=[np.arange(10).repeat(10000),
np.tile(np.arange(100).repeat(100), 10),
np.tile(np.tile(np.arange(100), 100), 10)])
self.df = DataFrame(np.random.randn(len(self.index), 4),
index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4),
index=self.index.levels[1])
def time_align_level(self):
self.df.align(self.df_level, level=1, copy=False)
def time_reindex_level(self):
self.df_level.reindex(self.index, level=1)
class DropDuplicates(object):
goal_time = 0.2
params = [True, False]
param_names = ['inplace']
def setup(self, inplace):
N = 10000
K = 10
key1 = tm.makeStringIndex(N).values.repeat(K)
key2 = tm.makeStringIndex(N).values.repeat(K)
self.df = DataFrame({'key1': key1, 'key2': key2,
'value': np.random.randn(N * K)})
self.df_nan = self.df.copy()
self.df_nan.iloc[:10000, :] = np.nan
self.s = Series(np.random.randint(0, 1000, size=10000))
self.s_str = Series(np.tile(tm.makeStringIndex(1000).values, 10))
N = 1000000
K = 10000
key1 = np.random.randint(0, K, size=N)
self.df_int = DataFrame({'key1': key1})
self.df_bool = DataFrame(np.random.randint(0, 2, size=(K, 10),
dtype=bool))
def time_frame_drop_dups(self, inplace):
self.df.drop_duplicates(['key1', 'key2'], inplace=inplace)
def time_frame_drop_dups_na(self, inplace):
self.df_nan.drop_duplicates(['key1', 'key2'], inplace=inplace)
def time_series_drop_dups_int(self, inplace):
self.s.drop_duplicates(inplace=inplace)
def time_series_drop_dups_string(self, inplace):
self.s_str.drop_duplicates(inplace=inplace)
def time_frame_drop_dups_int(self, inplace):
self.df_int.drop_duplicates(inplace=inplace)
def time_frame_drop_dups_bool(self, inplace):
self.df_bool.drop_duplicates(inplace=inplace)
class Align(object):
# blog "pandas escaped the zoo"
goal_time = 0.2
def setup(self):
n = 50000
indices = tm.makeStringIndex(n)
subsample_size = 40000
self.x = Series(np.random.randn(n), indices)
self.y = Series(np.random.randn(subsample_size),
index=np.random.choice(indices, subsample_size,
replace=False))
def time_align_series_irregular_string(self):
self.x + self.y
class LibFastZip(object):
goal_time = 0.2
def setup(self):
N = 10000
K = 10
key1 = tm.makeStringIndex(N).values.repeat(K)
key2 = | tm.makeStringIndex(N) | pandas.util.testing.makeStringIndex |
import networkx as nx
import csv
import pandas as pd
import itertools
import json
import dedupe
from itertools import combinations,product
import sys
import os
import numpy as np
from affinegap import normalizedAffineGapDistance
import simplejson
from tqdm import tqdm
import tempfile
from dedupe.clustering import cluster as dedupe_cluster
import dm_file_checker
def get_deduper_probs_and_threshold(deduper, unlabeled_data, blocked_data = None, recall_weight = 1):
if blocked_data is None:
pairs = deduper.pairs(unlabeled_data)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))
probs = dedupe.core.scoreDuplicates(pairs,
deduper.data_model,
deduper.classifier,
deduper.num_cores)['score']
# the memory mapped file location of the scored records
temp_filename = probs.filename
probs = probs.copy()
probs.sort()
probs = probs[::-1]
# delete the memory mapped file so it won't clog the disk
os.remove(temp_filename)
expected_dupes = np.cumsum(probs)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = np.argmax(score)
print('Maximum expected recall and precision')
print('recall: {:.2f}%'.format(recall[i]*100))
print('precision: {:.2f}%'.format(precision[i]*100))
print('With threshold: {:.2f}%'.format(probs[i]*100))
return probs, probs[i]
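# Illustrative sketch: picking a clustering cutoff. `deduper` (a trained
# dedupe.Dedupe) and `unlabeled_data` ({record_id: record_dict}) are assumed to
# be built elsewhere; recall_weight > 1 tilts the score above towards recall.
def _example_pick_dedupe_threshold(deduper, unlabeled_data):
    probs, cutoff = get_deduper_probs_and_threshold(deduper, unlabeled_data,
                                                    recall_weight=2)
    return cutoff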
def get_linker_probs_and_threshold(linker, unlabeled_data_1, unlabeled_data_2, blocked_data = None, recall_weight = 1):
if blocked_data is None:
pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
probs = dedupe.core.scoreDuplicates(pairs,
linker.data_model,
linker.classifier,
linker.num_cores)['score']
# the memory mapped file location of the scored records
temp_filename = probs.filename
probs = probs.copy()
probs.sort()
probs = probs[::-1]
# delete the memory mapped file so it won't clog the disk
os.remove(temp_filename)
expected_dupes = np.cumsum(probs)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = np.argmax(score)
print('Maximum expected recall and precision')
print('recall: {:.2f}%'.format(recall[i]*100))
print('precision: {:.2f}%'.format(precision[i]*100))
print('With threshold: {:.2f}%'.format(probs[i]*100))
return probs, probs[i]
def get_model_weights(deduper_or_linker):
fields = [field.name for field in deduper_or_linker.data_model._variables]
model_weights = sorted(list(zip(fields, deduper_or_linker.classifier.weights)), key = lambda x: x[1], reverse = False)
model_weights = pd.DataFrame(model_weights, columns = ["variable", "logistic_reg_weight"])
return model_weights
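# Illustrative sketch: inspect which comparison fields drive the classifier.
# `deduper_or_linker` is assumed to be a trained dedupe.Dedupe / RecordLink.
def _example_inspect_weights(deduper_or_linker):
    weights = get_model_weights(deduper_or_linker)  # columns: variable, logistic_reg_weight
    return weights.sort_values("logistic_reg_weight", ascending=False).head(10)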
def map_cluster_ids(deduper, unlabeled_data, threshold, hard_threshold = 0.0,
blocked_data = None, canonicalize = True, numeric_fields = None,
cluster_id_tag = None,
mapped_records_filepath = None,
cluster_canonical_filepath = None):
# BADLY NEED TO REFACTOR THIS
"""
Function that maps record ids to cluster ids
Parameters
----------
deduper : dedupe.Deduper
A trained instance of dedupe.
unlabeled_data : dict
The dedupe formatted data dictionary.
threshold : dedupe.Threshold
The threshold used for clustering.
hard_threshold: float
Threshold for record pair scores that will be included in the clustering
canonicalize : bool or list, default False
Option that provides the canonical records as additional columns.
Specifying a list of column names only canonicalizes those columns.
numeric_fields: list of str, default None
Specify which fields are numeric
cluster_id_tag: str, default None
Additional tag for distinguishing the cluster id of different datasets
Returns
-------
mapped_records
A dataframe storing the mapping from cluster_id to record_id
cluster_canonicals
A dataframe storing the canonical representation per cluster_id
"""
assert (hard_threshold < 1) and (hard_threshold >= 0), "hard_threshold should be less than 1 and at least 0.0"
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "w", newline = "") as f:
mapped_records_header = ["record id", "cluster id", "confidence score", "cluster type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writeheader()
if canonicalize:
if cluster_canonical_filepath is not None:
with open(cluster_canonical_filepath, "w", newline = "") as f:
cluster_canonical_header = [field.field for field in deduper.data_model.primary_fields]
cluster_canonical_header.append("cluster id")
writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
writer.writeheader()
else:
assert cluster_canonical_filepath is None, "can't have canonicalize be False if cluster_canonical_filepath exists"
# ## Clustering
if blocked_data is None:
pairs = deduper.pairs(unlabeled_data)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))
pair_scores = deduper.score(pairs)
pair_scores = pair_scores[pair_scores["score"] > hard_threshold]
clustered_dupes = deduper.cluster(pair_scores, threshold)
if numeric_fields is not None:
assert isinstance(numeric_fields, list)
mapped_records = []
cluster_canonicals = []
record_ids_in_clusters = []
# assign cluster ids to record ids
i = 0
print("Mapping cluster ids...")
for cluster in tqdm(clustered_dupes):
i += 1
cluster_id = "cl-{}".format(i)
if cluster_id_tag is not None:
cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
id_set, scores = cluster
if canonicalize:
cluster_data = [unlabeled_data[i] for i in id_set]
canonical_rep = get_canonical_rep(cluster_data, numeric_fields = numeric_fields)
canonical_rep["cluster id"] = cluster_id
if cluster_canonical_filepath is not None:
with open(cluster_canonical_filepath, "a") as f:
writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
writer.writerow(canonical_rep)
else:
cluster_canonicals.append(canonical_rep)
for record_id, score in zip(id_set, scores):
record_dict = {
"record id": record_id,
"cluster id": cluster_id,
"confidence score": score,
"cluster type":'dup'
}
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "a", newline = "") as f:
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerow(record_dict)
else:
mapped_records.append(record_dict)
record_ids_in_clusters.append(record_id)
record_ids_in_clusters = set(record_ids_in_clusters)
solo_ids = list(set(unlabeled_data.keys()).difference(record_ids_in_clusters))
# assign solo ids to record ids
print("Mapping solo record ids...")
for record_id in tqdm(solo_ids):
i += 1
cluster_id = "cl-{}".format(i)
if cluster_id_tag is not None:
cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
record_dict = {
"record id":record_id,
"cluster id":cluster_id,
"confidence score":None,
"cluster type":'solo'
}
mapped_records.append(record_dict)
if mapped_records_filepath is None:
mapped_records = pd.DataFrame(mapped_records)
else:
with open(mapped_records_filepath, "a", newline = "") as f:
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerows(mapped_records)
mapped_records = None
if cluster_canonical_filepath is None:
cluster_canonicals = pd.DataFrame(cluster_canonicals)
else:
cluster_canonicals = None
# delete temporary file generated for pair_scores
try:
mmap_file = pair_scores.filename
del pair_scores
os.remove(mmap_file)
except AttributeError:
pass
if canonicalize:
return mapped_records, cluster_canonicals
else:
return mapped_records
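# Illustrative sketch: turning a trained deduper into cluster assignments.
# `deduper`, `unlabeled_data` and `cutoff` are assumed to exist (cutoff could
# come from get_deduper_probs_and_threshold above).
def _example_map_clusters(deduper, unlabeled_data, cutoff):
    mapped_records, cluster_canonicals = map_cluster_ids(
        deduper, unlabeled_data, cutoff, hard_threshold=0.5, canonicalize=True)
    return mapped_records, cluster_canonicals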
def abs_distance(x,y):
return np.abs(x-y)
def get_canonical_rep(record_cluster, numeric_fields = None):
"""
Given a list of records within a duplicate cluster, constructs a
canonical representation of the cluster by finding canonical
values for each field
"""
canonical_rep = {}
keys = record_cluster[0].keys()
if numeric_fields is None:
numeric_fields = []
for key in keys:
key_values = []
# difference distance functions for numeric and non-numeric fields
if key in numeric_fields:
comparator = abs_distance
else:
comparator = normalizedAffineGapDistance
for record in record_cluster:
# assume non-empty values always better than empty value
# for canonical record
if record[key]:
key_values.append(record[key])
if key_values:
canonical_rep[key] = dedupe.canonical.getCentroid(key_values, comparator)
else:
canonical_rep[key] = ''
return canonical_rep
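# Runnable toy illustration of canonicalisation (field names and values are
# synthetic): per field, getCentroid keeps the variant closest to all others
# under the affine-gap distance, so one canonical value is returned per field.
def _example_canonical_rep():
    cluster = [
        {"name": "Acme Corp", "city": "Berlin"},
        {"name": "Acme Corporation", "city": "Berlin"},
    ]
    return get_canonical_rep(cluster)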
def get_linked_ids(linker, unlabeled_data_1, unlabeled_data_2, threshold, hard_threshold = 0.0, blocked_data = None,
mapped_records_filepath = None, constraint = "one-to-one"):
# BADLY NEED TO REFACTOR THIS
"""
constraint: What type of constraint to put on a join.
'one-to-one'
Every record in data_1 can match at most
one record from data_2 and every record
from data_2 can match at most one record
from data_1. This is good for when both
data_1 and data_2 are from different
sources and you are interested in
matching across the sources. If,
individually, data_1 or data_2 have many
duplicates you will not get good
matches.
'many-to-one'
Every record in data_1 can match at most
one record from data_2, but more than
one record from data_1 can match to the
same record in data_2. This is good for
when data_2 is a lookup table and data_1
is messy, such as geocoding or matching
against golden records.
'many-to-many'
Every record in data_1 can match
multiple records in data_2 and vice
versa. This is like a SQL inner join.
"""
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "w", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writeheader()
## link matching
if blocked_data is None:
pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
pair_scores = linker.score(pairs)
pair_scores = pair_scores[pair_scores["score"] > hard_threshold]
assert constraint in {'one-to-one', 'many-to-one', 'many-to-many'}, (
'%s is an invalid constraint option. Valid options include '
'one-to-one, many-to-one, or many-to-many' % constraint)
if constraint == 'one-to-one':
links = linker.one_to_one(pair_scores, threshold)
elif constraint == 'many-to-one':
links = linker.many_to_one(pair_scores, threshold)
elif constraint == 'many-to-many':
links = pair_scores[pair_scores['score'] > threshold]
links = list(links)
# delete temporary file generated for pair_scores
try:
mmap_file = pair_scores.filename
del pair_scores
os.remove(mmap_file)
except AttributeError:
pass
mapped_records = []
ids_with_links_1 = []
ids_with_links_2 = []
print("Mapping linked pairs...")
for record_pair in tqdm(links):
record_ids, score = record_pair
pair_dict = {
"record id 1":record_ids[0],
"record id 2":record_ids[1],
"confidence score":score,
"link type":"dup",
}
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "a", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerow(pair_dict)
else:
mapped_records.append(pair_dict)
ids_with_links_1.append(record_ids[0])
ids_with_links_2.append(record_ids[1])
ids_with_links_1 = set(ids_with_links_1)
ids_with_links_2 = set(ids_with_links_2)
# include the records without found links
ids_without_links_1 = list(set(unlabeled_data_1.keys()).difference(ids_with_links_1))
ids_without_links_2 = list(set(unlabeled_data_2.keys()).difference(ids_with_links_2))
print("Mapping unlinked records in dataset 1...")
for record_id in tqdm(ids_without_links_1):
pair_dict = {
"record id 1":record_id,
"record id 2":None,
"confidence score":None,
"link type":"solo",
}
mapped_records.append(pair_dict)
print("Mapping unlinked records in dataset 2...")
for record_id in tqdm(ids_without_links_2):
pair_dict = {
"record id 1":None,
"record id 2":record_id,
"confidence score":None,
"link type":"solo",
}
mapped_records.append(pair_dict)
if mapped_records_filepath is None:
mapped_records = pd.DataFrame(mapped_records)
else:
with open(mapped_records_filepath, "a", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerows(mapped_records)
mapped_records = None
return mapped_records
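# Illustrative sketch: linking two data sources. `linker` (a trained
# dedupe.RecordLink), `data_1`, `data_2` and `cutoff` are assumed to exist.
# 'one-to-one' suits two already-deduplicated sources; 'many-to-one' suits
# matching a messy table against a lookup table (see the docstring above).
def _example_link_two_sources(linker, data_1, data_2, cutoff):
    return get_linked_ids(linker, data_1, data_2, cutoff, constraint="one-to-one")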
def get_uncertain_clusters(mapped_records_df, threshold = 0.9):
cluster_means_df = mapped_records_df\
.groupby("cluster id")\
.mean()\
.sort_values(by = "confidence score", ascending = True)
cluster_means_bool = (cluster_means_df["confidence score"] < threshold)
print("There are {} clusters with mean confidence score lower than {:.1f}% threshold".format(cluster_means_bool.sum(), threshold*100))
uncertain_clusters_dict = cluster_means_df.loc[cluster_means_bool,:].to_dict()["confidence score"]
return uncertain_clusters_dict
def get_pairs_from_uncertain_clusters(mapped_records_df, labeled_id_pairs, threshold = 0.9):
assert isinstance(labeled_id_pairs, list)
uncertain_clusters = get_uncertain_clusters(mapped_records_df, threshold = threshold)
n_uncertain_clusters = len(uncertain_clusters)
nth_cluster = 0
for cluster_id, mean_conf_score in uncertain_clusters.items():
nth_cluster += 1
pairs_in_cluster = []
# get record ids in cluster
ids_in_cluster = mapped_records_df.loc[mapped_records_df["cluster id"] == cluster_id,"record id"].values.tolist()
# generating record pairs from cluster
for id_1, id_2 in combinations(ids_in_cluster, 2):
id_pair = tuple(sorted((id_1,id_2)))
# if pair is not already tagged, grab data of records
if id_pair not in labeled_id_pairs:
pairs_in_cluster.append(id_pair)
yield ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score
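# Illustrative sketch: reviewing low-confidence clusters. Assumes
# `mapped_records_df` came from map_cluster_ids(...) and `labeled_id_pairs`
# is a list of already-reviewed (id_1, id_2) tuples.
def _example_review_uncertain(mapped_records_df, labeled_id_pairs):
    for ids_in_cluster, pairs, nth, total, mean_score in get_pairs_from_uncertain_clusters(
            mapped_records_df, labeled_id_pairs, threshold=0.9):
        print("cluster {}/{} (mean score {:.2f}): {} unreviewed pairs".format(
            nth, total, mean_score, len(pairs)))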
def find_ids_of_labeled_data(labeled_data, unlabeled_data):
labeled_pair_ids = []
for label in labeled_data.keys():
assert label in ["distinct", "match"]
print("Finding ids for {} pairs".format(label))
data_pairs_list = labeled_data[label]
for data_pair in tqdm(data_pairs_list):
try:
# for backwards compatibility
record_1, record_2 = data_pair["__value__"]
except:
record_1, record_2 = data_pair
record_1_id = [key for key,val in unlabeled_data.items() if unlabeled_data[key] == record_1]
record_2_id = [key for key,val in unlabeled_data.items() if unlabeled_data[key] == record_2]
if len(record_1_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_1_id),record_1))
record_1_id = record_1_id[0]
if len(record_2_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_2_id),record_2))
record_2_id = record_2_id[0]
labeled_pair = {"record id 1":record_1_id, "record id 2":record_2_id, "label":label}
labeled_pair_ids.append(labeled_pair)
labeled_pair_ids = pd.DataFrame(labeled_pair_ids, dtype = "str")
return labeled_pair_ids
def find_ids_of_labeled_data_rl(labeled_data, unlabeled_data_1, unlabeled_data_2):
labeled_pair_ids = []
for label in labeled_data.keys():
assert label in ["distinct", "match"]
print("Finding ids for {} pairs".format(label))
data_pairs_list = labeled_data[label]
for data_pair in tqdm(data_pairs_list):
record_1, record_2 = data_pair
record_1_id = [key for key,val in unlabeled_data_1.items() if unlabeled_data_1[key] == record_1]
record_2_id = [key for key,val in unlabeled_data_2.items() if unlabeled_data_2[key] == record_2]
if len(record_1_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_1_id),record_1))
record_1_id = record_1_id[0]
if len(record_2_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_2_id),record_2))
record_2_id = record_2_id[0]
labeled_pair = {"record id 1":record_1_id, "record id 2":record_2_id, "label":label}
labeled_pair_ids.append(labeled_pair)
labeled_pair_ids = pd.DataFrame(labeled_pair_ids, dtype = "str")
return labeled_pair_ids
def consoleLabel_cluster_old(deduper, mapped_records_df, labeled_id_pairs, unlabeled_data, threshold = 0.9):
'''
Command line interface for presenting and labeling uncertain clusters by the user
Arguments :
A trained deduper, the cluster-to-record mapping dataframe, the already-labeled id pairs, the unlabeled data dict, and the uncertainty threshold
'''
finished = False
fields = [field.field for field in deduper.data_model.primary_fields]
assert len(fields) == len(list(set(fields)))
labeled_pairs = {"distinct":[], "match":[]}
uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df,
labeled_id_pairs,
threshold = threshold)
while not finished:
try:
ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score = next(uncertain_pair_generator)
records_in_cluster = {i:unlabeled_data[i] for i in ids_in_cluster}
except StopIteration:
print("Already tagged all {} uncertain clusters.".format(n_uncertain_clusters))
print("Finished labeling")
break
print("Viewing {} out of {} uncertain clusters".format(nth_cluster, n_uncertain_clusters), file = sys.stderr)
print("Cluster contains {} records".format(len(ids_in_cluster)))
print("Mean Cluster Score {:.1f}%\n".format(mean_conf_score*100), file = sys.stderr)
for record_id, record in records_in_cluster.items():
print("Record {}".format(record_id), file=sys.stderr)
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
user_input = _prompt_records_same()
if user_input == "y":
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
labeled_pairs["match"].append(record_pair)
elif user_input == "n":
print("Reviewing pairs in cluster", file=sys.stderr)
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
for record in record_pair:
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
user_input = _prompt_records_same()
if user_input == "y":
labeled_pairs["match"].append(record_pair)
elif user_input == "n":
labeled_pairs["distinct"].append(record_pair)
elif user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
break
elif user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
deduper.markPairs(labeled_pairs)
def consoleLabel_cluster(deduper, mapped_records_df, labeled_id_pairs, unlabeled_data,
recall = 1.0, threshold = 0.9):
'''
Command line interface for presenting and labeling uncertain clusters by the user
Arguments :
A trained deduper, the cluster-to-record mapping dataframe, the already-labeled id pairs, the unlabeled data dict, the training recall, and the uncertainty threshold
'''
finished = False
fields = [field.field for field in deduper.data_model.primary_fields]
assert len(fields) == len(list(set(fields)))
labeled_pairs = {"distinct":[], "match":[]}
uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df,
labeled_id_pairs,
threshold = threshold)
while not finished:
try:
ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score = next(uncertain_pair_generator)
records_in_cluster = {i:unlabeled_data[i] for i in ids_in_cluster}
except StopIteration:
print("Already tagged all {} uncertain clusters.".format(n_uncertain_clusters))
print("Finished labeling")
break
print("Viewing {} out of {} uncertain clusters".format(nth_cluster, n_uncertain_clusters), file = sys.stderr)
print("Cluster contains {} records".format(len(ids_in_cluster)), file = sys.stderr)
print("Mean Cluster Score {:.1f}%\n".format(mean_conf_score*100), file = sys.stderr)
for record_id, record in records_in_cluster.items():
print("Record {}".format(record_id), file=sys.stderr)
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
user_input = _prompt_records_same()
if user_input == "y":
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
labeled_pairs["match"].append(record_pair)
labeled_id_pairs.append((id_1, id_2))
elif user_input == "n":
print("Reviewing pairs in cluster", file=sys.stderr)
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
for record in record_pair:
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
pair_user_input = _prompt_records_same()
if pair_user_input == "y":
labeled_pairs["match"].append(record_pair)
labeled_id_pairs.append((id_1,id_2))
elif pair_user_input == "n":
labeled_pairs["distinct"].append(record_pair)
labeled_id_pairs.append((id_1,id_2))
elif pair_user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
break
elif user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
if (user_input == "y") or (user_input == "n"):
deduper.markPairs(labeled_pairs)
deduper.train(recall = recall)
clustering_threshold = deduper.threshold(unlabeled_data, recall_weight=1)
mapped_records_df = map_cluster_ids(deduper, unlabeled_data, clustering_threshold, canonicalize=False)
print("Resampling uncertain clusters based on retrained model", file=sys.stderr)
labeled_pairs = {"distinct":[], "match":[]}
uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df, labeled_id_pairs, threshold = threshold)
def _prompt_records_same():
print("Do these records refer to the same thing?", file = sys.stderr)
valid_response = False
user_input = ""
valid_responses = {"y", "n", "u", "f"}
while not valid_response:
prompt = "(y)es / (n)o / (u)nsure / (f)inished"
print(prompt, file=sys.stderr)
user_input = input()
if user_input in valid_responses:
valid_response = True
return user_input
def get_clusters_from_links(links, solo_records):
assert isinstance(links, pd.Index)
assert isinstance(solo_records, pd.Index)
clusters = nx.Graph(links.tolist())
clusters = list(nx.connected_components(clusters))
clusters.extend(solo_records.tolist())
return clusters
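# Runnable toy illustration (synthetic ids): the links r1-r2 and r2-r3 collapse
# into a single connected component, while r4 stays as a solo record.
def _example_clusters_from_links():
    links = pd.Index([("r1", "r2"), ("r2", "r3")])
    solos = pd.Index(["r4"])
    return get_clusters_from_links(links, solos)  # [{"r1", "r2", "r3"}, "r4"]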
def get_deduper_candidate_pairs(deduper, unlabeled_data):
# gets candidate pairs after indexing
candidate_records = deduper.pairs(unlabeled_data)
candidate_records = [(candidate[0][0], candidate[1][0]) for candidate in candidate_records]
candidate_records = | pd.MultiIndex.from_tuples(candidate_records) | pandas.MultiIndex.from_tuples |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Implementation of the network creation methods.
Usage:
>>> import pandangas as pg
>>> net = pg.create_empty_network()
>>> busf = pg.create_bus(net, level="MP", name="BUSF")
>>> bus0 = pg.create_bus(net, level="MP", name="BUS0")
>>> bus1 = pg.create_bus(net, level="BP", name="BUS1")
>>> bus2 = pg.create_bus(net, level="BP", name="BUS2")
>>> bus3 = pg.create_bus(net, level="BP", name="BUS3")
>>> pg.create_load(net, bus2, p_kW=10.0, name="LOAD2")
>>> pg.create_load(net, bus3, p_kW=13.0, name="LOAD3")
>>> pg.create_pipe(net, busf, bus0, length_m=100, diameter_m=0.05, name="PIPE0")
>>> pg.create_pipe(net, bus1, bus2, length_m=400, diameter_m=0.05, name="PIPE1")
>>> pg.create_pipe(net, bus1, bus3, length_m=500, diameter_m=0.05, name="PIPE2")
>>> pg.create_pipe(net, bus2, bus3, length_m=500, diameter_m=0.05, name="PIPE3")
>>> pg.create_station(net, bus0, bus1, p_lim_kW=50, p_Pa=0.025E5, name="STATION")
>>> pg.create_feeder(net, busf, p_lim_kW=50, p_Pa=0.9E5, name="FEEDER")
"""
import pandas as pd
import logging
logging.basicConfig(level=logging.WARNING)
class _Network:
# TODO: add H2/CH4 composition
LEVELS = {"HP": 5.0E5, "MP": 1.0E5, "BP+": 0.1E5, "BP": 0.025E5} # Pa
LHV = 38.1E3 # kJ/kg
V_MAX = 2.0 # m/s
def __init__(self):
self.bus = pd.DataFrame(columns=["name", "level", "zone", "type"])
self.pipe = pd.DataFrame(columns=["name", "from_bus", "to_bus", "length_m", "diameter_m", "material", "in_service"])
self.load = pd.DataFrame(columns=["name", "bus", "p_kW", "min_p_Pa", "scaling"])
self.feeder = pd.DataFrame(columns=["name", "bus", "p_lim_kW", "p_Pa"])
self.station = pd.DataFrame(columns=["name", "bus_high", "bus_low", "p_lim_kW", "p_Pa"])
self.res_bus = pd.DataFrame(columns=["name", "p_Pa", "p_bar"])
self.res_pipe = | pd.DataFrame(columns=["name", "m_dot_kg/s", "v_m/s", "p_kW", "loading_%"]) | pandas.DataFrame |
# coding: utf-8
import pandas as pd
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
from pyecharts.charts import Kline, Line, Bar, Grid, Scatter
import mplfinance as mpf
import matplotlib as mpl
import matplotlib.pyplot as plt
def plot_kline(ka, bs=None, file_html="kline.html", width="1400px", height="680px"):
"""
:param ka: KlineAnalyze
:param bs: pd.DataFrame
buy/sell points; must contain the three columns ["操作提示", "交易时间", "交易价格"] (action hint, trade time, trade price)
:param file_html: str
:param width: str
:param height: str
:return: None
"""
df = ka.to_df(use_macd=True, ma_params=(5, 20,))
x = df.dt.to_list()
title = "%s | %s 至 %s" % (ka.symbol, ka.start_dt, ka.end_dt)
kline = (
Kline()
.add_xaxis(xaxis_data=x)
.add_yaxis(
series_name="",
y_axis=df[['open', 'close', 'low', 'high']].values.tolist(),
itemstyle_opts=opts.ItemStyleOpts(
color="#ef232a",
color0="#14b143",
border_color="#ef232a",
border_color0="#14b143",
),
)
.set_series_opts(
markarea_opts=opts.MarkAreaOpts(is_silent=True)
)
.set_global_opts(
title_opts=opts.TitleOpts(title=title, pos_left="0"),
xaxis_opts=opts.AxisOpts(
type_="category",
is_scale=True,
boundary_gap=False,
axisline_opts=opts.AxisLineOpts(is_on_zero=False),
splitline_opts=opts.SplitLineOpts(is_show=False),
split_number=20,
min_="dataMin",
max_="dataMax",
),
yaxis_opts=opts.AxisOpts(
is_scale=True, splitline_opts=opts.SplitLineOpts(is_show=True),
axislabel_opts=opts.LabelOpts(is_show=True, position="inside")
),
tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="line"),
datazoom_opts=[
opts.DataZoomOpts(
is_show=False, type_="inside", xaxis_index=[0, 0], range_end=100
),
opts.DataZoomOpts(
is_show=True, xaxis_index=[0, 1], pos_top="96%", range_end=100
),
opts.DataZoomOpts(is_show=False, xaxis_index=[0, 2], range_end=100),
],
# link the x-axes of the three sub-charts together
axispointer_opts=opts.AxisPointerOpts(
is_show=True,
link=[{"xAxisIndex": "all"}],
label=opts.LabelOpts(background_color="#777"),
),
)
)
kline_line = (
Line()
.add_xaxis(xaxis_data=x)
.add_yaxis(
series_name="笔",
y_axis=df.bi.tolist(),
is_smooth=False,
is_connect_nones=True,
symbol='diamond',
symbol_size=8,
linestyle_opts=opts.LineStyleOpts(opacity=1, type_='dotted', width=2),
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="线段",
y_axis=df.xd.tolist(),
is_smooth=False,
is_connect_nones=True,
symbol='triangle',
symbol_size=12,
linestyle_opts=opts.LineStyleOpts(opacity=1, type_='solid', width=2),
label_opts=opts.LabelOpts(is_show=True, position='right'),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=1,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
grid_index=1,
split_number=3,
axisline_opts=opts.AxisLineOpts(is_on_zero=False),
axistick_opts=opts.AxisTickOpts(is_show=False),
splitline_opts=opts.SplitLineOpts(is_show=False),
axislabel_opts=opts.LabelOpts(is_show=True, position="inside"),
),
)
)
# Overlap Kline + Line
overlap_kline_line = kline.overlap(kline_line)
if isinstance(bs, pd.DataFrame) and len(bs) > 0:
c = (
Scatter()
.add_xaxis(bs['交易时间'].to_list())
.add_yaxis(
"买卖点",
bs['交易价格'].to_list(),
label_opts=opts.LabelOpts(
is_show=True,
position="left",
formatter=JsCode(
"function(params){return bsName[params.dataIndex][0];}"
)
),
))
overlap_kline_line = overlap_kline_line.overlap(c)
# draw volume
bar_1 = (
Bar()
.add_xaxis(xaxis_data=x)
.add_yaxis(
series_name="Volumn",
y_axis=df.vol.tolist(),
xaxis_index=1,
yaxis_index=1,
label_opts=opts.LabelOpts(is_show=False),
itemstyle_opts=opts.ItemStyleOpts(
color=JsCode(
"""
function(params) {
var colorList;
if (barData[params.dataIndex][1] > barData[params.dataIndex][0]) {
colorList = '#ef232a';
} else {
colorList = '#14b143';
}
return colorList;
}
"""
)
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=1,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(is_show=True, position='inside')
),
legend_opts=opts.LegendOpts(is_show=False),
)
)
# Bar-2 (Overlap Bar + Line)
bar_2 = (
Bar()
.add_xaxis(xaxis_data=x)
.add_yaxis(
series_name="MACD",
y_axis=df.macd.tolist(),
xaxis_index=2,
yaxis_index=2,
label_opts=opts.LabelOpts(is_show=False),
itemstyle_opts=opts.ItemStyleOpts(
color=JsCode(
"""
function(params) {
var colorList;
if (params.data >= 0) {
colorList = '#ef232a';
} else {
colorList = '#14b143';
}
return colorList;
}
"""
)
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=2,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
grid_index=2,
split_number=4,
axisline_opts=opts.AxisLineOpts(is_on_zero=False),
axistick_opts=opts.AxisTickOpts(is_show=False),
splitline_opts=opts.SplitLineOpts(is_show=False),
axislabel_opts=opts.LabelOpts(is_show=True, position="inside"),
),
legend_opts=opts.LegendOpts(is_show=False),
)
)
line_2 = (
Line()
.add_xaxis(xaxis_data=x)
.add_yaxis(
series_name="DIF",
y_axis=df['diff'].tolist(),
xaxis_index=2,
yaxis_index=2,
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="DEA",
y_axis=df['dea'].tolist(),
xaxis_index=2,
yaxis_index=2,
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(legend_opts=opts.LegendOpts(is_show=False))
)
# draw MACD
overlap_bar_line = bar_2.overlap(line_2)
# assemble the final Grid layout
grid_chart = Grid(init_opts=opts.InitOpts(width=width, height=height, page_title=title))
grid_chart.add_js_funcs("var barData = {}".format(df[['open', 'close', 'low', 'high']].values.tolist()))
if isinstance(bs, pd.DataFrame) and len(bs) > 0:
grid_chart.add_js_funcs("var bsName = {}".format(bs[["操作提示", "交易价格"]].values.tolist()))
grid_chart.add(
overlap_kline_line,
grid_opts=opts.GridOpts(pos_left="3%", pos_right="1%", height="60%"),
)
grid_chart.add(
bar_1,
grid_opts=opts.GridOpts(pos_left="3%", pos_right="1%", pos_top="71%", height="10%"),
)
grid_chart.add(
overlap_bar_line,
grid_opts=opts.GridOpts(pos_left="3%", pos_right="1%", pos_top="82%", height="14%"),
)
grid_chart.render(path=file_html)
def plot_ka(ka, file_image, mav=(5, 20, 120, 250), max_k_count=1000, dpi=50):
"""绘制 ka,保存到 file_image"""
df = ka.to_df(use_macd=True, ma_params=(5, 20,))
df.rename({"open": "Open", "close": "Close", "high": "High",
"low": "Low", "vol": "Volume"}, axis=1, inplace=True)
df.index = | pd.to_datetime(df['dt']) | pandas.to_datetime |
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import cea.config
import cea.globalvar
import cea.inputlocator
from cea.optimization.constants import SIZING_MARGIN
from cea.technologies.solar.photovoltaic import calc_Cinv_pv, calc_Crem_pv
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def supply_system_configuration(generation, individual, locator, output_type_network, config):
district_supply_sys_columns = ['Lake_kW', 'VCC_LT_kW', 'VCC_HT_kW', 'single_effect_ACH_LT_kW',
'single_effect_ACH_HT_kW', 'DX_kW', 'CHP_CCGT_thermal_kW', 'SC_FP_m2', 'SC_ET_m2',
'PV_m2', 'Storage_thermal_kW', 'CT_kW', 'Capex_Centralized', 'Opex_Centralized',
'Capex_Decentralized', 'Opex_Decentralized']
district_supply_sys = | pd.DataFrame(columns=district_supply_sys_columns) | pandas.DataFrame |
"""Classes for report generation and add-ons."""
import os
from copy import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from jinja2 import FileSystemLoader, Environment
from json2html import json2html
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support, roc_curve, precision_recall_curve, \
average_precision_score, explained_variance_score, mean_absolute_error, \
mean_squared_error, median_absolute_error, r2_score, f1_score, precision_score, recall_score, confusion_matrix
from ..utils.logging import get_logger
logger = get_logger(__name__)
base_dir = os.path.dirname(__file__)
def extract_params(input_struct):
params = dict()
iterator = input_struct if isinstance(input_struct, dict) else input_struct.__dict__
for key in iterator:
if key.startswith(('_', 'autonlp_params')):
continue
value = iterator[key]
if type(value) in [bool, int, float, str]:
params[key] = value
elif value is None:
params[key] = None
elif hasattr(value, '__dict__') or isinstance(value, dict):
params[key] = extract_params(value)
else:
params[key] = str(type(value))
return params
def plot_roc_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
fpr, tpr, _ = roc_curve(data['y_true'], data['y_pred'])
auc_score = roc_auc_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(fpr, tpr, color='blue', lw=lw, label='Trained model');
plt.plot([0, 1], [0, 1], color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([-0.05, 1.05]);
plt.xlabel('False Positive Rate');
plt.ylabel('True Positive Rate');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('ROC curve (GINI = {:.3f})'.format(2 * auc_score - 1));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
return auc_score
def plot_pr_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
precision, recall, _ = precision_recall_curve(data['y_true'], data['y_pred'])
ap_score = average_precision_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(recall, precision, color='blue', lw=lw, label='Trained model');
positive_rate = np.sum(data['y_true'] == 1) / data.shape[0]
plt.plot([0, 1], [positive_rate, positive_rate], \
color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([0.45, 1.05]);
plt.xlabel('Recall');
plt.ylabel('Precision');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('PR curve (AP = {:.3f})'.format(ap_score));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
def plot_preds_distribution_by_bins(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
box_plot_data = []
labels = []
for name, group in data.groupby('bin'):
labels.append(name)
box_plot_data.append(group['y_pred'].values)
box = axs.boxplot(box_plot_data, patch_artist=True, labels=labels)
for patch in box['boxes']:
patch.set_facecolor('green')
axs.set_yscale('log')
axs.set_xlabel('Bin number')
axs.set_ylabel('Prediction')
axs.set_title('Distribution of object predictions by bin')
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_distribution_of_logits(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
data['proba_logit'] = np.log(data['y_pred'].values / (1 - data['y_pred'].values))
sns.kdeplot(data[data['y_true'] == 0]['proba_logit'], shade=True, color="r", label='Class 0 logits', ax=axs)
sns.kdeplot(data[data['y_true'] == 1]['proba_logit'], shade=True, color="g", label='Class 1 logits', ax=axs)
axs.set_xlabel('Logits')
axs.set_ylabel('Density')
axs.set_title('Logits distribution of object predictions (by classes)');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_pie_f1_metric(data, F1_thresh, path):
tn, fp, fn, tp = confusion_matrix(data['y_true'], (data['y_pred'] > F1_thresh).astype(int)).ravel()
(_, prec), (_, rec), (_, F1), (_, _) = precision_recall_fscore_support(data['y_true'],
(data['y_pred'] > F1_thresh).astype(int))
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(20, 10), subplot_kw=dict(aspect="equal"))
recipe = ["{} True Positives".format(tp),
"{} False Positives".format(fp),
"{} False Negatives".format(fn),
"{} True Negatives".format(tn)]
wedges, texts = ax.pie([tp, fp, fn, tn], wedgeprops=dict(width=0.5), startangle=-40)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-", color='k'),
bbox=bbox_props, zorder=0, va="center")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1) / 2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
ax.annotate(recipe[i], xy=(x, y), xytext=(1.35 * np.sign(x), 1.4 * y),
horizontalalignment=horizontalalignment, **kw)
ax.set_title(
"Trained model: Precision = {:.2f}%, Recall = {:.2f}%, F1-Score = {:.2f}%".format(prec * 100, rec * 100, F1 * 100))
plt.savefig(path, bbox_inches='tight');
plt.close()
return prec, rec, F1
def f1_score_w_co(data, min_co=.01, max_co=.99, step=0.01):
data['y_pred'] = np.clip(np.ceil(data['y_pred'].values / step) * step, min_co, max_co)
pos = data['y_true'].sum()
neg = data['y_true'].shape[0] - pos
grp = pd.DataFrame(data).groupby('y_pred')['y_true'].agg(['sum', 'count'])
grp.sort_index(inplace=True)
grp['fp'] = grp['sum'].cumsum()
grp['tp'] = pos - grp['fp']
grp['tn'] = (grp['count'] - grp['sum']).cumsum()
grp['fn'] = neg - grp['tn']
grp['pr'] = grp['tp'] / (grp['tp'] + grp['fp'])
grp['rec'] = grp['tp'] / (grp['tp'] + grp['fn'])
grp['f1_score'] = 2 * (grp['pr'] * grp['rec']) / (grp['pr'] + grp['rec'])
best_score = grp['f1_score'].max()
best_co = grp.index.values[grp['f1_score'] == best_score].mean()
# print((y_pred < best_co).mean())
return best_score, best_co
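# Runnable toy illustration with synthetic scores; note that f1_score_w_co
# rounds data['y_pred'] in place onto a grid of width `step` before searching.
def _example_best_f1_cutoff():
    toy = pd.DataFrame({
        "y_true": [0, 0, 0, 1, 1, 1, 0, 1],
        "y_pred": [0.10, 0.20, 0.35, 0.80, 0.65, 0.90, 0.55, 0.40],
    })
    best_f1, best_cutoff = f1_score_w_co(toy)
    return best_f1, best_cutoff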
def get_bins_table(data):
bins_table = data.groupby('bin').agg({'y_true': [len, np.mean], \
'y_pred': [np.min, np.mean, np.max]}).reset_index()
bins_table.columns = ['Bin number', 'Amount of objects', 'Mean target', \
'Min probability', 'Average probability', 'Max probability']
return bins_table.to_html(index=False)
# Regression plots:
def plot_target_distribution_1(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(2, 1, figsize=(16, 20))
sns.kdeplot(data['y_true'], shade=True, color="g", ax=axs[0])
axs[0].set_xlabel('Target value')
axs[0].set_ylabel('Density')
axs[0].set_title('Target distribution (y_true)');
sns.kdeplot(data['y_pred'], shade=True, color="r", ax=axs[1])
axs[1].set_xlabel('Target value')
axs[1].set_ylabel('Density')
axs[1].set_title('Target distribution (y_pred)');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_target_distribution_2(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
sns.kdeplot(data['y_true'], shade=True, color="g", label="y_true", ax=axs)
sns.kdeplot(data['y_pred'], shade=True, color="r", label="y_pred", ax=axs)
axs.set_xlabel('Target value')
axs.set_ylabel('Density')
axs.set_title('Target distribution');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_target_distribution(data, path):
data_pred = pd.DataFrame({'Target value': data['y_pred']})
data_pred['source'] = 'y_pred'
data_true = pd.DataFrame({'Target value': data['y_true']})
data_true['source'] = 'y_true'
data = pd.concat([data_pred, data_true], ignore_index=True)
sns.set(style="whitegrid", font_scale=1.5)
g = sns.displot(data, x="Target value", row="source", height=9, aspect=1.5, kde=True, color="m",
facet_kws=dict(margin_titles=True))
g.fig.suptitle("Target distribution")
g.fig.tight_layout()
g.fig.subplots_adjust(top=0.95)
g.fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_error_hist(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(16, 10))
g = sns.kdeplot(data['y_pred'] - data['y_true'], shade=True, color="m", ax=ax)
ax.set_xlabel('Error = y_pred - y_true')
ax.set_ylabel('Density')
ax.set_title('Error histogram');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_reg_scatter(data, path):
sns.set(style="whitegrid", font_scale=1.5)
g = sns.jointplot(x="y_pred", y="y_true", data=data, \
kind="reg", truncate=False, color="m", \
height=14)
g.fig.suptitle("Scatter plot")
g.fig.tight_layout()
g.fig.subplots_adjust(top=0.95)
g.fig.savefig(path, bbox_inches='tight');
plt.close()
# Multiclass plots:
def plot_confusion_matrix(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(16, 12))
cmat = confusion_matrix(data['y_true'], data['y_pred'], normalize='true')
g = sns.heatmap(cmat, annot=True, linewidths=.5, cmap='Purples', ax=ax)
ax.set_xlabel('y_pred')
ax.set_ylabel('y_true')
ax.set_title('Confusion matrix');
fig.savefig(path, bbox_inches='tight');
plt.close()
class ReportDeco:
"""
Decorator to wrap :class:`~lightautoml.automl.base.AutoML` class to generate html report on ``fit_predict`` and ``predict``.
Example:
>>> report_automl = ReportDeco(output_path='output_path', report_file_name='report_file_name')(automl).
>>> report_automl.fit_predict(train_data)
>>> report_automl.predict(test_data)
Report will be generated at output_path/report_file_name automatically.
Warning:
Do not use it just for inference (if you don't need the report), because:
- It needs the target variable to compute performance metrics.
- It takes additional time to generate the report.
- A dump of the decorated automl takes more memory to store.
To get the unwrapped fitted instance for pickling
and inference, access the ``report_automl.model`` attribute.
"""
@property
def model(self):
"""Get unwrapped model.
Returns:
model.
"""
return self._model
@property
def mapping(self):
return self._model.reader.class_mapping
def __init__(self, *args, **kwargs):
"""
Note:
Valid kwargs are:
- output_path: Folder with report files.
- report_file_name: Name of main report file.
Args:
*args: Arguments.
**kwargs: Additional parameters.
"""
if not kwargs:
kwargs = {}
# self.task = kwargs.get('task', 'binary')
self.n_bins = kwargs.get('n_bins', 20)
self.template_path = kwargs.get('template_path', os.path.join(base_dir, 'lama_report_templates/'))
self.output_path = kwargs.get('output_path', 'lama_report/')
self.report_file_name = kwargs.get('report_file_name', 'lama_interactive_report.html')
if not os.path.exists(self.output_path):
os.makedirs(self.output_path, exist_ok=True)
self._base_template_path = 'lama_base_template.html'
self._model_section_path = 'model_section.html'
self._train_set_section_path = 'train_set_section.html'
self._results_section_path = 'results_section.html'
self._inference_section_path = {'binary': 'binary_inference_section.html', \
'reg': 'reg_inference_section.html', \
'multiclass': 'multiclass_inference_section.html'}
self.title = 'LAMA report'
self.sections_order = ['intro', 'model', 'train_set', 'results']
self._sections = {}
self._sections['intro'] = '<p>This report was generated automatically.</p>'
self._model_results = []
self.generate_report()
def __call__(self, model):
self._model = model
# AutoML only
self.task = self._model.task._name # valid_task_names = ['binary', 'reg', 'multiclass']
# add information to report
self._model_name = model.__class__.__name__
self._model_parameters = json2html.convert(extract_params(model))
self._model_summary = None
self._sections = {}
self._sections['intro'] = '<p>This report was generated automatically.</p>'
self._model_results = []
self._n_test_sample = 0
self._generate_model_section()
self.generate_report()
return self
def _binary_classification_details(self, data):
self._inference_content['sample_bins_table'] = get_bins_table(data)
prec, rec, F1 = plot_pie_f1_metric(data, self._F1_thresh, \
path=os.path.join(self.output_path, self._inference_content['pie_f1_metric']))
auc_score = plot_roc_curve_image(data, path=os.path.join(self.output_path, self._inference_content['roc_curve']))
plot_pr_curve_image(data, path=os.path.join(self.output_path, self._inference_content['pr_curve']))
plot_preds_distribution_by_bins(data, path=os.path.join(self.output_path, \
self._inference_content['preds_distribution_by_bins']))
plot_distribution_of_logits(data, path=os.path.join(self.output_path, \
self._inference_content['distribution_of_logits']))
return auc_score, prec, rec, F1
def _regression_details(self, data):
# graphics
plot_target_distribution(data, path=os.path.join(self.output_path, self._inference_content['target_distribution']))
plot_error_hist(data, path=os.path.join(self.output_path, self._inference_content['error_hist']))
plot_reg_scatter(data, path=os.path.join(self.output_path, self._inference_content['scatter_plot']))
# metrics
mean_ae = mean_absolute_error(data['y_true'], data['y_pred'])
median_ae = median_absolute_error(data['y_true'], data['y_pred'])
mse = mean_squared_error(data['y_true'], data['y_pred'])
r2 = r2_score(data['y_true'], data['y_pred'])
evs = explained_variance_score(data['y_true'], data['y_pred'])
return mean_ae, median_ae, mse, r2, evs
def _multiclass_details(self, data):
y_true = data['y_true']
y_pred = data['y_pred']
# precision
p_micro = precision_score(y_true, y_pred, average='micro')
p_macro = precision_score(y_true, y_pred, average='macro')
p_weighted = precision_score(y_true, y_pred, average='weighted')
# recall
r_micro = recall_score(y_true, y_pred, average='micro')
r_macro = recall_score(y_true, y_pred, average='macro')
r_weighted = recall_score(y_true, y_pred, average='weighted')
# f1-score
f_micro = f1_score(y_true, y_pred, average='micro')
f_macro = f1_score(y_true, y_pred, average='macro')
f_weighted = f1_score(y_true, y_pred, average='weighted')
# classification report for features
classes = sorted(self.mapping, key=self.mapping.get)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred)
cls_report = pd.DataFrame({'Class name': classes, 'Precision': p, 'Recall': r, 'F1-score': f, 'Support': s})
self._inference_content['classification_report'] = cls_report.to_html(index=False, float_format='{:.4f}'.format,
justify='left')
plot_confusion_matrix(data, path=os.path.join(self.output_path, self._inference_content['confusion_matrix']))
return [p_micro, p_macro, p_weighted, r_micro, r_macro, r_weighted, f_micro, f_macro, f_weighted]
def _collect_data(self, preds, sample):
data = pd.DataFrame({'y_true': sample[self._target].values})
if self.task == 'multiclass':
if self.mapping is not None:
data['y_true'] = np.array([self.mapping[y] for y in data['y_true'].values])
data['y_pred'] = preds._data.argmax(axis=1)
else:
data['y_pred'] = preds._data[:, 0]
data.sort_values('y_pred', ascending=False, inplace=True)
data['bin'] = (np.arange(data.shape[0]) / data.shape[0] * self.n_bins).astype(int)
# remove NaN in predictions:
data = data[~data['y_pred'].isnull()]
return data
def fit_predict(self, *args, **kwargs):
"""Wrapped ``automl.fit_predict`` method.
Valid args, kwargs are the same as wrapped automl.
Args:
*args: Arguments.
**kwargs: Additional parameters.
Returns:
OOF predictions.
"""
# TODO: parameters parsing in general case
preds = self._model.fit_predict(*args, **kwargs)
train_data = kwargs["train_data"] if "train_data" in kwargs else args[0]
input_roles = kwargs["roles"] if "roles" in kwargs else args[1]
self._target = input_roles['target']
valid_data = kwargs.get("valid_data", None)
if valid_data is None:
data = self._collect_data(preds, train_data)
else:
data = self._collect_data(preds, valid_data)
self._inference_content = {}
if self.task == 'binary':
# filling for html
self._inference_content = {}
self._inference_content['roc_curve'] = 'valid_roc_curve.png'
self._inference_content['pr_curve'] = 'valid_pr_curve.png'
self._inference_content['pie_f1_metric'] = 'valid_pie_f1_metric.png'
self._inference_content['preds_distribution_by_bins'] = 'valid_preds_distribution_by_bins.png'
self._inference_content['distribution_of_logits'] = 'valid_distribution_of_logits.png'
# graphics and metrics
_, self._F1_thresh = f1_score_w_co(data)
auc_score, prec, rec, F1 = self._binary_classification_details(data)
# update model section
evaluation_parameters = ['AUC-score', \
'Precision', \
'Recall', \
'F1-score']
self._model_summary = pd.DataFrame({'Evaluation parameter': evaluation_parameters, \
'Validation sample': [auc_score, prec, rec, F1]})
elif self.task == 'reg':
# filling for html
self._inference_content['target_distribution'] = 'valid_target_distribution.png'
self._inference_content['error_hist'] = 'valid_error_hist.png'
self._inference_content['scatter_plot'] = 'valid_scatter_plot.png'
# graphics and metrics
mean_ae, median_ae, mse, r2, evs = self._regression_details(data)
# model section
evaluation_parameters = ['Mean absolute error', \
'Median absolute error', \
'Mean squared error', \
'R^2 (coefficient of determination)', \
'Explained variance']
self._model_summary = pd.DataFrame({'Evaluation parameter': evaluation_parameters, \
'Validation sample': [mean_ae, median_ae, mse, r2, evs]})
elif self.task == 'multiclass':
self._inference_content['confusion_matrix'] = 'valid_confusion_matrix.png'
index_names = np.array([['Precision', 'Recall', 'F1-score'], \
['micro', 'macro', 'weighted']])
index = | pd.MultiIndex.from_product(index_names, names=['Evaluation metric', 'Average']) | pandas.MultiIndex.from_product |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 22 14:50:25 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import LSTM, Bidirectional, GRU
from keras.layers.recurrent import LSTM
from sklearn.utils import shuffle
import seaborn as sns
import matplotlib.pyplot as plt
import math
data1 = pd.read_csv("B05_birlestirilmis.csv")
data2 = pd.read_csv("B07_birlestirilmis.csv")
data3 = pd.read_csv("B18_birlestirilmis.csv")
data4 = pd.read_csv("B33_birlestirilmis.csv")
data5 = pd.read_csv("B34_birlestirilmis.csv")
data6 = pd.read_csv("B46_birlestirilmis.csv")
data7 = pd.read_csv("B47_birlestirilmis.csv")
data8 = pd.read_csv("B48_birlestirilmis.csv")
X1=data1.iloc[:,0:31]
Y1=data1.iloc[:,30:31]
X2=data2.iloc[:,0:31]
Y2=data2.iloc[:,30:31]
X3=data3.iloc[:,0:31]
Y3=data3.iloc[:,30:31]
X4=data4.iloc[:,0:31]
Y4=data4.iloc[:,30:31]
X5=data5.iloc[:,0:31]
Y5=data5.iloc[:,30:31]
X6=data6.iloc[:,0:31]
Y6=data6.iloc[:,30:31]
X7=data7.iloc[:,0:31]
Y7=data7.iloc[:,30:31]
X8=data8.iloc[:,0:31]
Y8=data8.iloc[:,30:31]
# split the data into training and test sets
from sklearn.model_selection import train_test_split
trX1, teX1,trY1,teY1 = train_test_split(X1,Y1,test_size=0.20, random_state=0)
trX2, teX2,trY2,teY2 = train_test_split(X2,Y2,test_size=0.20, random_state=0)
trX3, teX3,trY3,teY3 = train_test_split(X3,Y3,test_size=0.20, random_state=0)
trX4, teX4,trY4,teY4 = train_test_split(X4,Y4,test_size=0.20, random_state=0)
trX5, teX5,trY5,teY5 = train_test_split(X5,Y5,test_size=0.20, random_state=0)
trX6, teX6,trY6,teY6 = train_test_split(X6,Y6,test_size=0.20, random_state=0)
trX7, teX7,trY7,teY7 = train_test_split(X7,Y7,test_size=0.20, random_state=0)
trX8, teX8,trY8,teY8 = train_test_split(X8,Y8,test_size=0.20, random_state=0)
tesX1=pd.DataFrame(teX1).sort_index()
tesY1=pd.DataFrame(teY1).sort_index()
tesX2=pd.DataFrame(teX2).sort_index()
tesY2=pd.DataFrame(teY2).sort_index()
tesX3= | pd.DataFrame(teX3) | pandas.DataFrame |
from .static.Qua_config import *
from .Qua_assisFunc import *
import pandas as pd
import numpy as np
from .main_match import main_match
# function 1
def GetAllIdType(StudentList):
StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0])
id_col_name = ['身分別1','身分別2','身分別3','特殊身份別']
column = [str(x) for i in range(len(id_col_name)) for x in StudentList[id_col_name[i]].tolist() if str(x)!='None']
return sorted(list(set(column)))
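# Runnable toy illustration: the sheet is a list of lists whose first row is the
# header; only the four identity columns are scanned (the extra column and the
# "TypeA"/"TypeB" labels here are synthetic).
def _example_all_id_types():
    header = ["學號", "身分別1", "身分別2", "身分別3", "特殊身份別"]
    rows = [["S1", "TypeA", None, None, None],
            ["S2", "TypeB", "TypeA", None, None]]
    return GetAllIdType([header] + rows)  # -> ["TypeA", "TypeB"]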
# function 2
def DivideDF(ordered_IdList, StudentList, DormList):
StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0])
StudentList['學號'] = [str(i) for i in range(len(StudentList))] # TODO: remove!
DormList = pd.DataFrame(DormList[1:], columns=DormList[0])
StudentList = StudentList.drop(columns = Ori_ColumnToBeDrop)
BedNumDict = countBedNum(DormList)
# build the identity-type -> integer index mapping
id_dict = get_id_dict(ordered_IdList)
# StudentList = get_str2int(id_dict, StudentList) # string contain id & willingness
StudentList = get_id2int(id_dict, StudentList)
StudentList = get_willing2int(StudentList)
# divide in-out campus
StudentList = StudentList.sort_values(by = '校內外意願').reset_index(drop = True)
InCamNum = len(StudentList)-StudentList.groupby('校內外意願').count()['性別'][3]
InCam_df = StudentList.iloc[:InCamNum,:]
InCam_df = InCam_df.sort_values(by = '性別').reset_index(drop = True)
InCam_df['資格'] = [2 if (row['id_index']==1 and row['是否需要安排身障房間']=='是') else 0 for index,row in InCam_df.iterrows()]
# incampus divide boy-girl
GirlInCamNum = InCam_df.groupby(['性別']).size()['女性']
GirlInCam = InCam_df.iloc[:GirlInCamNum,:].sort_values(by='id_index').reset_index(drop=True)
BoyInCam = InCam_df.iloc[GirlInCamNum:,:].sort_values(by='id_index').reset_index(drop=True)
# WaitDF
WaitDF = StudentList.iloc[InCamNum:,:]
# get qualification of boy&girl df
GirlInCam = dealWithPreference(assign_qualificaiton(GirlInCam,BedNumDict))
BoyInCam = dealWithPreference(assign_qualificaiton(BoyInCam,BedNumDict))
GirlInCam = GirlInCam.sort_values(by='資格').reset_index(drop=True)
BoyInCam = BoyInCam.sort_values(by='資格').reset_index(drop=True)
# All-Wait DF
QuaGirlGroup = GirlInCam.groupby('資格').count()
NoQuaGirlNum = QuaGirlGroup['性別'][0]
QuaBoyGroup = BoyInCam.groupby('資格').count()
NoQuaBoyNum = QuaBoyGroup['性別'][0]
WaitAllDf = [GirlInCam.iloc[:NoQuaGirlNum,:],BoyInCam.iloc[:NoQuaBoyNum],WaitDF]
WaitDF = pd.concat(WaitAllDf)
# Output Girl&Boy df
GirlInCam = GirlInCam.iloc[NoQuaGirlNum:,:].drop(columns = AlgorithmNeedDrop).sort_values(by='id_index').reset_index(drop=True)
BoyInCam = BoyInCam.iloc[NoQuaBoyNum:,:].drop(columns = AlgorithmNeedDrop).sort_values(by='id_index').reset_index(drop=True)
GirlInCam['永久地址'] = Address2Nationality(GirlInCam['永久地址'],countryDict)
BoyInCam['永久地址'] = Address2Nationality(BoyInCam['永久地址'],countryDict)
# organize Wait df
WaitDF = WaitDF.drop(columns=Wait_Drop)
return BoyInCam, GirlInCam, WaitDF
def list2df(beds):
columns = beds[0]
data = beds[1:]
df = pd.DataFrame(data, columns = beds[0])
return df
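# Tiny runnable illustration: `beds` is a list of lists whose first row is the
# header (the column names below are invented for the example).
def _example_list2df():
    beds = [["dorm", "room", "bed_count"],
            ["A", "101", 2],
            ["A", "102", 3]]
    return list2df(beds)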
def Match(BoyInQua, GirlInQua, beds):
beds_df = list2df(beds)
BoyInQua, GirlInQua = main_match(BoyInQua, GirlInQua, beds_df)
return BoyInQua, GirlInQua
# function4
def GetOutputDF(id_orderList, BoyQua, GirlQua, StudentList, WaitDF):
# BoyQua = pd.DataFrame(BoyQua[1:], columns=BoyQua[0])
# GirlQua = pd.DataFrame(GirlQua[1:], columns=GirlQua[0])
StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0])
StudentList['學號'] = [str(i) for i in range(len(StudentList))] # TODO: remove!
# WaitDF = pd.DataFrame(WaitDF[1:], columns=WaitDF[0])
# Divide WaitDF => campus,BOT
WaitDF = WaitDF.sort_values('校內外意願')
WillGroupNum = WaitDF.groupby('校內外意願')
CampusNum = WillGroupNum.count()['性別'][1] + WillGroupNum.count()['性別'][2]
NotBotNum = len(WaitDF) - WillGroupNum.count()['性別'][2] - WillGroupNum.count()['性別'][3]
Campus = WaitDF.iloc[:CampusNum,:].drop(columns = CampusWait_Drop_AsQua).sort_values('性別')
Bot = WaitDF.iloc[NotBotNum:,:]
# organize Campus
Campus['資格'] = [0 for i in range(len(Campus))]
CampusGirlNum = Campus.groupby('性別')['性別'].count().tolist()[0]
CampusBoy = OrderAssign(Campus.iloc[CampusGirlNum:])
CampusGirl = OrderAssign(Campus.iloc[:CampusGirlNum])
BoyQua['順位序號'] = [0 for i in range(len(BoyQua))]
GirlQua['順位序號'] = [0 for i in range(len(GirlQua))]
CampusBoy = pd.concat([BoyQua,CampusBoy]).sort_values(by='順位序號')
CampusGirl = | pd.concat([GirlQua,CampusGirl]) | pandas.concat |
#!/usr/bin/env python3
import pandas as pd
import tensorflow as tf
from gpflow import default_float
from mogpe.training import train_from_config_and_dataset
def load_mcycle_dataset(filename='./mcycle.csv'):
df = pd.read_csv(filename, sep=',')
X = pd.to_numeric(df['times']).to_numpy().reshape(-1, 1)
Y = | pd.to_numeric(df['accel']) | pandas.to_numeric |
#%%
import os
from os import path
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import json
import matplotlib
df=pd.DataFrame(columns=['stage','conv','prec1','prec5','loss_log'])
df_aoa=pd.DataFrame(columns=['stage','conv','aoa','loss','node'])
for stage in [0,1,2,3,4,5,10,15,20,25,30]:
for conv in range(0,4):
lcpth='/home/ubuntu/cornet_analysis/linearclass_v3/linearclass_time_%02d_conv_%d_v3'%(stage,conv)
# lcpth='/home/rhodricusack/linearclass_v3/linearclass_time_%02d_conv_%d_v3'%(stage,conv)
d={'stage':[stage],'conv':[conv]}
for item in ['prec1','prec5','loss_log']:
itpth=path.join(lcpth,'log',item)
if path.exists(itpth):
with open(itpth,'rb') as f:
it=pickle.load(f)
d[item]=float(it[0])
df=df.append( | pd.DataFrame.from_dict(d) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next one is from the old docs, but unfortunately it doesn't
        # trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
        # newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # unicode option should not affect Categorical, as it doesn't care about
        # the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = | pd.Categorical(["a", "c", "b"], ordered=True) | pandas.Categorical |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 10:59:51 2020
Modified on ... look at git commit log, you lazy bum
@author: <NAME>, Assistant Research Professor, CEE WSU
@author: <NAME>, Ecoinformaticist, USDA-ARS
contact: <EMAIL>
Library of functions for the Azure Data Lake download codeset; see the readme within this repo for more details about the different scripts used
Comments in this file are specific to the individual functions
"""
# General library imports for functions; some functions have the import statements as part of the function
import pathlib
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
def format_plot(ax,yf,xf,xminor,yminor,yl,yu,xl,xu):
#subplot has to have ax as the axis handle
# Does not accept blank arguments within the function call; needs to be a number of some sort even if just a 0.
# Format the x and yticks
plt.yticks(fontsize = yf)
plt.xticks(fontsize = xf)
minor_locator = AutoMinorLocator(xminor)
ax.xaxis.set_minor_locator(minor_locator)
minor_locator = AutoMinorLocator(yminor)
ax.yaxis.set_minor_locator(minor_locator)
ax.tick_params(axis='both',direction='in',length=12.5,width=2)
ax.tick_params(axis='both',which = 'minor',direction='in',length=5)
plt.ylim([yl,yu])
plt.xlim([xl,xu])
return
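def _example_format_plot():
    # Illustrative usage only (not part of the original codeset): the keyword names
    # mirror the signature above, i.e. y/x tick font sizes, minor-tick counts, and
    # y/x axis limits, with made-up values for a simple 0-10 line plot.
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.plot(range(11), range(11))
    format_plot(ax, yf=14, xf=14, xminor=2, yminor=2, yl=0, yu=10, xl=0, xu=10)
    return fig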
def indx_fill(df_in, frq):
# Fills in missing index values for a continuous time series. Rows are left blank.
df = df_in.copy()
df.index = pd.to_datetime(df.index)
# # Sort index in case it came in out of order, a possibility depending on filenames and naming scheme
# df = df.sort_index()
# # Remove any duplicate times, can occur if files from mixed sources and have overlapping endpoints
# df = df[~df.index.duplicated(keep='first')]
# Remove any duplicated rows; keep row with more data
df['nan_count'] = pd.isna(df).sum(1)
df = df.sort_values(['RECORD', 'nan_count']) # Can sort on RECORD here because values with null/na index were previously removed
df = df[~df.index.duplicated(keep='first')]
df = df.drop('nan_count',1).sort_index()
# Fill in missing times due to tower being down and pad dataframe to midnight of the first and last day
idx = pd.date_range(df.index[0].floor('D'),df.index[len(df.index)-1].ceil('D'),freq = frq)
# Reindex the dataframe with the new index and fill the missing values with NaN/blanks
df = df.reindex(idx, fill_value=np.NaN)
return df
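def _example_indx_fill():
    # Illustrative sketch, not from the original codeset: a tiny 30-minute series with
    # one duplicated timestamp and a gap. indx_fill keeps the duplicate row with the
    # fewest NaNs, pads the index out to midnight on both ends, and leaves new rows blank.
    times = pd.to_datetime(['2020-06-01 10:00', '2020-06-01 10:30',
                            '2020-06-01 10:30', '2020-06-01 12:00'])
    demo = pd.DataFrame({'RECORD': [1, 2, 2, 5], 'value': [0.1, np.nan, 0.2, 0.3]},
                        index=times)
    return indx_fill(demo, '30T')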
def Fast_Read(filenames, hdr, idxfll, specified_dtypes = None):
#Check to make sure there are files within the directory and doesn't error
if len(filenames) == 0:
print('No Files in directory, check the path name.')
return # 'exit' function and return error
elif (len(filenames) > 0) & (hdr ==4): # hdr == 4 is for data direct from the data logger as there are four header lines
#Initialize dataframe used within function
Final = [];Final = pd.DataFrame(Final)
for k in range (0,len(filenames)):
#Read in data and concat to one dataframe; no processing until data all read in
if specified_dtypes:
try:
df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],na_values='NAN',dtype=specified_dtypes)
except:
continue
else:
try:
df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],na_values='NAN',low_memory=False)
except:
continue
Final = pd.concat([Final,df], sort = False)
# Fill missing index with blank values
Out = indx_fill(Final, idxfll)
# Convert to datetime for the index
Out.index = pd.to_datetime(Out.index)
# Sort index in chronological order; readin files not always in order depending on how files are read in or named
Out = Out.sort_index()
elif (len(filenames) > 0) & (hdr ==1): # hdr == 1 means there is only one header line and has been through some amount of processing
#Initialize dataframe used within function
Final = [];Final = pd.DataFrame(Final)
for k in range (0,len(filenames)):
#Read in data and concat to one dataframe; no processing until data all read in
if specified_dtypes:
df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 0,dtype=specified_dtypes)
else:
df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 0,low_memory=False)
Final = pd.concat([Final,df], sort = False)
# Convert time index
Out = indx_fill(Final,idxfll)
Out.index = pd.to_datetime(Out.index)
Out = Out.sort_index()
return Out # Return dataframe to main function.
def download_data_from_datalake(access, s, col, siteName):
# Import libraries needed to connect and credential to the data lake.
from azure.storage.filedatalake import DataLakeServiceClient
from azure.identity import ClientSecretCredential
import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
import pathlib
# Get today's date
today = date.today()
# Pull the access information from the driver Excel workbook for the datalake in question
storage_account_name = access[col]['storageaccountname']
client_id = access[col]['CLIENTID']
tenant_id = access[col]['TENANTID']
client_secret = access[col]['CLIENTSECRET']
access_path = access[col]['path']
localfile = access[col]['LOCAL_DIRECT']
# If localfile is not defined in xlsx file, then default to something like: input/CookEast/Met
    if pd.isnull(localfile):
        localfile = pathlib.Path(access[col]["inputPath"]) / siteName / col
    else:
        # LOCAL_DIRECT comes out of the Excel workbook as a string; convert it to a Path
        # so the pathlib '/' joins used below work in either case
        localfile = pathlib.Path(localfile)
    localfile.mkdir(parents=True, exist_ok=True)
file_system = access[col]['file_system']
back = access[col]['back']
# Credential to the client and build the token
credential = ClientSecretCredential(tenant_id,client_id, client_secret)
# Connect to the Data Lake through this function with the access credentials; do not change this.
try:
global service_client
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format(
"https", storage_account_name), credential=credential)
except Exception as e:
print(e)
file_system_client = service_client.get_file_system_client(file_system)
date_inc = datetime.date(s.year, s.month, 1)
while date_inc <= today:
paths = file_system_client.get_paths(f'{access_path}{date_inc.year:04d}/{date_inc.month:02d}')
for path in paths:
# This gets all files for month; need to only download after specified day
z = path.name
#Y = z[-19:-15]; M = z[-14:-12]; D = z[-11:-9]
#bd = datetime.date(int(Y), int(M), int(D))
date_components = z.split('/')[-1].split('_')[3:6]
bd = datetime.date(
int(date_components[0]),
int(date_components[1]),
int(date_components[2]))
if (bd >= s) & (bd<=today):
# If dates are within the correct range, downloads the file to the local directory
#local_file = open(localfile+z[back:],'wb'); print(local_file)
filePath = localfile / pathlib.Path(z).name
if not filePath.is_file():
local_file = open(filePath, 'wb')
print(str(filePath))
file_client = file_system_client.get_file_client(z)
download = file_client.download_file()
downloaded_bytes = download.readall()
local_file.write(downloaded_bytes)
local_file.close()
else:
print(f'Skipping {filePath}')
date_inc = date_inc + relativedelta(months=1)
def Data_Update_Azure(access, s,col, siteName):
raise Exception('Deprecated: use download_data_from_datalake instead')
# Import libraries needed to connect and credential to the data lake.
from azure.storage.filedatalake import DataLakeServiceClient
from azure.identity import ClientSecretCredential
import datetime
from datetime import date
import pathlib
# Pulls today's data from the computer and uses as the end date.
e = date.today()
# Pull the access information from the driver Excel workbook for the datalake in question
storage_account_name = access[col]['storageaccountname']
client_id = access[col]['CLIENTID']
tenant_id = access[col]['TENANTID']
client_secret = access[col]['CLIENTSECRET']
path = access[col]['path']
localfile = access[col]['LOCAL_DIRECT']
if pd.isnull(localfile):
localfile = pathlib.Path(access[col]["inputPath"]) / siteName
localfile.mkdir(parents=True, exist_ok=True)
file_system = access[col]['file_system']
back = access[col]['back']
# Credential to the client and build the token
credential = ClientSecretCredential(tenant_id,client_id, client_secret)
# Collect the integer value of the month of the start date (s)
month = int(s.month)
year = int(s.year)
td = date.today()
# Connect to the Data Lake through this function with the access credentials; do not change this.
try:
global service_client
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format(
"https", storage_account_name), credential=credential)
except Exception as e:
print(e)
file_system_client = service_client.get_file_system_client(file_system)
# Still need to deal with year in the path.
# Checks that the month of the current date is the same or greater than the last month of the previous data's aggregation
yrt = False
while year<=td.year:
if year<td.year:
paths = file_system_client.get_paths(path+ str(s.year) +'/'+str(s.month))
for path in paths:
z = path.name
Y = z[-19:-15]; M = z[-14:-12]; D = z[-11:-9]
bd = datetime.date(int(Y), int(M), int(D))
if (bd >= s)& (bd<=e):
# If dates are within the correct range, downloads the file to the local directory
#local_file = open(localfile+z[back:],'wb'); print(local_file)
filePath = localfile / pathlib.Path(z).name
if not filePath.is_file():
local_file = open(filePath, 'wb')
print(str(filePath))
file_client = file_system_client.get_file_client(z)
download = file_client.download_file()
downloaded_bytes = download.readall()
local_file.write(downloaded_bytes)
local_file.close()
else:
print(f'Skipping {filePath}')
year = year+1
yrt = True
if year == td.year:
path = access[col]['path']
if yrt: month = int(e.month)
while td.month >= month:
# Check if month int/string is correct or not; the path needs a 2-digit month and an int value will default to 1 digit is less than 10.
if month < 10:
paths = file_system_client.get_paths(path+ str(e)[0:4] +'/0'+str(month))
elif month >=10:
paths = file_system_client.get_paths(path+ str(e)[0:4] +'/'+str(month))
# Loop over all the path names and build path to download to the local file.
for path in paths:
z = path.name
# Builds datetime of the current path and checks against the start and end dates
Y = z[-19:-15]; M = z[-14:-12]; D = z[-11:-9]
bd = datetime.date(int(Y), int(M), int(D))
if (bd >= s)& (bd<=e):
# If dates are within the correct range, downloads the file to the local directory
local_file = open(localfile / z.split('/')[-1],'wb'); print(local_file)
file_client = file_system_client.get_file_client(z)
download = file_client.download_file()
downloaded_bytes = download.readall()
local_file.write(downloaded_bytes)
local_file.close()
month = month+1 # While loop so needs a way to exit the loop counter
path = access[col]['path'] # Print path name of files downloaded for user to look at it and admire.
year = year+1
def wateryear():
    # Calculate the water year; checks whether the current month is October or later; if so, adds one to the year to get the correct water year.
from datetime import date
if int(str(date.today()).replace('-','')[4:6]) < 10:
wateryear = str(date.today()).replace('-','')[0:4]
else:
wateryear = str(int(str(date.today()).replace('-','')[0:4])+1)
return wateryear # Returns water year as a string.
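# Quick illustration of the convention above (informal): a run on 2021-09-15 reports
# water year '2021', while a run on 2021-10-01 or later reports '2022'.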
def get_latest_file(files):
"""Takes a list of files (probably from glob) and returns the one with the latest date stamp (in form of _YYYYMMDD at end of the filename)
"""
latest_file = files[0]
for f in files:
if get_datetime_from_filename(f) > get_datetime_from_filename(latest_file):
latest_file = f
return latest_file
def get_datetime_from_filename(filestring:str):
"""Takes a filename or filepath string and returns a datetime object representing the iso date in the filename
"""
import datetime
stem = pathlib.Path(filestring).stem
isodate = stem.split('_')[-1]
dt = datetime.datetime.strptime(isodate, '%Y%m%d')
return dt
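def _example_get_latest_file():
    # Illustrative only: the aggregate filenames built further down end in an _YYYYMMDD
    # stamp (the site, table and version strings here are made up), so get_latest_file
    # simply keeps whichever file carries the most recent date.
    files = ['CookEast_Flux_AggregateQC_CY2021_V1_20210103.csv',
             'CookEast_Flux_AggregateQC_CY2021_V1_20210415.csv']
    return get_latest_file(files)  # returns the *_20210415.csv entry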
def AccessAzure(Sites, col, Time,access,CEF,save=True, QC = True,startDate=None):
# Main driver function of the datalake access and QC functions, called from the main driver of the codeset.
import glob
import datetime
import pandas as pd
from datetime import date
from dateutil import parser
# Collect which column, met or flux
ver = access[col]['Ver']
cy = wateryear() # Determine wateryear to build file path
if startDate is None:
aggregated_file = get_latest_file(glob.glob(CEF))
CE = Fast_Read([aggregated_file],1, Time, get_dtypes(f'{col}Aggregated')) # Read in the previous aggregated file(s)
s = str(CE.index[-1])[0:10]; s= s.replace('-', '') # Find the last index in the file and convert to a string
s = datetime.date(int(s[0:4]), int(s[4:6]), int(s[6:])) - datetime.timedelta(days=1)
#if int(s[6:])>1: # Check if it is the first day of the month or not to go back a day for the file collection later.
# s = datetime.date(int(s[0:4]), int(s[4:6]), int(s[6:])-1)
#else: s = datetime.date(int(s[0:4]), int(s[4:6]), int(s[6:]))
else: s = parser.parse(startDate).date()
print('Downloading files')
# Call function to update the Azure data
download_data_from_datalake(access, s, col, Sites)
print('Reading '+ Sites)
if not pd.isna(access[col]['LOCAL_DIRECT']):
filenames = glob.glob(access[col]['LOCAL_DIRECT']+'\\*.dat') # Gather all the filenames just downloaded
#globString = Sites[k] + '_' + col + '_AggregateQC_CY*' + '_' + access[col]['Ver'] + '*.csv'
else: filenames = glob.glob(access[col]["inputPath"] + '\\' + Sites + '\\' + col + '\\*.dat')
CEN = Fast_Read(filenames, 4,Time, get_dtypes(f'{col}Raw')) # Read in new files
if 'CE' in locals():
        CE=pd.concat([CE,CEN], sort = False) # Concat new files onto the main aggregated file
else: CE = CEN
CE = CE.sort_index() # Sort index
CE = CE.dropna(subset=['RECORD']) # Drop any row that has a NaN/blank in the "RECORD" number column; removes the overlap-extra rows added from the previous run
CE = indx_fill(CE,Time) # Fill back in the index through to the end of the current day. Also removes duplicated values and inserts missing values.
# CEFClean = CEF[:-4]+'NO_QC'+tag; CEFClean=CEFClean.replace('*','') # Replace something in a string; don't remember why.
# CE.to_csv(CEFClean, index_label = 'TIMESTAMP') # Print new aggregated file to local machine for local copy
if QC: # Boolean for QCing data
if col == 'Met':
print('QCing the Meteorology Data')
CE = METQC(CE, col) # Calls met QC functions
if col == 'Flux':
print('QCing the Flux Data')
CE = Grade_cs(CE, access) # Calls flux QC function
CE = METQC(CE, col) # Calls met QC function; flux data includes met data hence extra call.
if save == True:
print('Saving Data')
#CEF = (CEF[:-4]+tag).replace('*','') # replace wildcards that were used for glob
today = str(date.today()).replace('-','') # Replace dashes within datestring to make one continuous string
fname = Sites+'_'+col+'_AggregateQC_CY'+cy+'_'+ver+'_'+today+'.csv' # Build filename for uploaded file based on tyrannical data manager's specifications
fpath = access[col]["outputPath"] + '\\' + Sites + '\\' + col + '\\' + fname
CE.to_csv(fpath, index_label = 'TIMESTAMP') # Print new aggregated file to local machine for local copy
print('Uploading data')
# TODO: Enable uploading to DL soon (removed during testing 01/27/2021 by brc)
AggregatedUploadAzure(fname, access, col,fpath,cy) # Send info to upload function
for f in filenames:
os.remove(f) # Delete downloaded files on local machines as no longer needed
df=CE
del CEN; del CE; return df # Delete variables for clean rerun as needed
def AggregatedUploadAzure(fname, access, col, CEF, cy):
# Upload the aggregated file to the datalake
from azure.storage.filedatalake import DataLakeServiceClient
from azure.identity import ClientSecretCredential
# Parse credentials from the access Excel workbook
storage_account_name = access[col]['storageaccountname']
client_id = access[col]['CLIENTID']
tenant_id = access[col]['TENANTID']
client_secret = access[col]['CLIENTSECRET']
upload_dir = access[col]['UPLOAD']
# Build client credential token
credential = ClientSecretCredential(tenant_id,client_id, client_secret)
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", storage_account_name), credential=credential)
# Builds file path based on cropyear (water year) and upload directory
file_system_client = service_client.get_file_system_client(upload_dir+cy+'/')
file_client = file_system_client.get_file_client(fname) # Build filename for the upload
file_client.create_file() # Creates the file in the datalake through the file client
local_file = open(CEF,'r') # Opens the local copy of the aggregated file
file_contents = local_file.read() # Read local file copy
file_client.upload_data(file_contents, overwrite=True) # Upload file to the datalake and overwrite if already exists; depending on how often code is run
local_file.close() # Close file
#%% QC for the flux data for the Azure upload
def readinfo(access):
# Values pulled in from a separate *.csv file because easier and flexible; are the QC values for the flux qc function
grade = int(access['Flux']['grade'])
LE_B = [float(access['Flux']['LE_B']),float(access['Met']['LE_B'])]
H_B = [float(access['Flux']['H_B']),float(access['Met']['H_B'])]
F_B = [float(access['Flux']['F_B']),float(access['Met']['F_B'])]
ustar = float(access['Flux']['ustar'])
gg = [(access['Flux']['gg']),(access['Met']['gg']),(access['Val_3']['gg'])]
col = [(access['Flux']['cls']),(access['Met']['cls']),(access['Val_3']['cls'])]
return grade, LE_B, H_B, F_B, ustar, col, gg
def Grade_cs(data,access):
# Basic flux qc function; more serious codeset not included.
grade, LE_B, H_B, F_B, ustar,col,gg = readinfo(access)
pd.options.mode.chained_assignment = None # Don't remember exactly why this is here; probably to avoid a warning statement somewhere
if (grade >9) | (grade<1): # Check that the grade value falls within acceptable bounds
print('Grade number must be between 1-9.')
return # 'exit' function and return error
if (ustar<0): # Check that ustar is okay though default should be zero; no ustar filter should be used here.
print('u-star must be a positive number.')
return # 'exit' function and return error
var = ['H_Flags','LE_Flags','Fc_Flags'] # Set flag column names
if var[0] not in data: # Create flag columns if they do not already exist
Marker = [];Marker = pd.DataFrame(Marker, columns = var)
data = data.join(Marker)
for k in range (0,3): # Loops over the H, LE, and co2 flux columns;
df = data
flux = col[k]
if flux == col[1]: # Bounds checks for each of the flux values; set in driver sheet
HL = (df[col[1]].astype(float) < LE_B[0]) | (df[col[1]].astype(float)>LE_B[1]) | df[col[1]].astype(float).isnull()
elif flux ==col[0]:
HL = (df[col[0]].astype(float) < H_B[0]) | (df[col[0]].astype(float)> H_B[1]) | df[col[0]].astype(float).isnull()
elif flux ==col[2]:
HL = (df[col[2]].astype(float) < F_B[0])|(df[col[2]].astype(float) > F_B[1])| df[col[2]].astype(float).isnull()
data[(col[k]+'_Graded')] = data[col[k]] # Create the flux graded column
data[var[k]] = '0'
data[var[k]][HL] = '1' # Start building the flag values
#QA/QC grade for data
        Grade = df[gg[k]].astype(float) <= grade # Check flux against the developed turbulence grades
data[var[k]][~Grade] = data[var[k]]+'1'
data[var[k]][Grade] = data[var[k]]+'0'# add to the data flag
# values for columns hardcoded assuming they do not change for the EasyFlux code; will need to be updated if column names change
if 'Precipitation_Tot' in df.columns: # Check if recorded precip or not; if so, filter fluxes
Precip = df['Precipitation_Tot'].astype(float) < 0.001
data[var[k]][~Precip] = data[var[k]]+'1'
data[var[k]][Precip] = data[var[k]]+'0'
#10Hz sample Mask
if 'CO2_sig_strgth_Min' in df.columns: # Check is co2 sig strength is high enough
c_sig_strength = df['CO2_sig_strgth_Min'].astype(float) > 0.7
data[var[k]][c_sig_strength] = data[var[k]]+'0'
data[var[k]][~c_sig_strength] = data[var[k]]+'1'
if 'H2O_sig_strgth_Min' in df.columns: # Check if h20 sig strength is high enough
w_sig_strength = df['H2O_sig_strgth_Min'].astype(float) > 0.7
data[var[k]][w_sig_strength] = data[var[k]]+'0'
data[var[k]][~w_sig_strength] = data[var[k]]+'1'
if 'sonic_samples_Tot' in df.columns: # Check if enough samples in the sonic column (80% coverage);
Samp_Good_Sonic = df['sonic_samples_Tot'].astype(float) > 14400
data[var[k]][~Samp_Good_Sonic] =data[var[k]]+'1'
data[var[k]][Samp_Good_Sonic] = data[var[k]]+'0'
if 'Fc_samples_Tot' in df.columns: # Check if enough samples in Fc column (80%) coverage
Samp_Good_IRGA = df['Fc_samples_Tot'].astype(float)>14400
data[var[k]][~Samp_Good_IRGA] = data[var[k]]+'1'
data[var[k]][Samp_Good_IRGA] = data[var[k]]+'0'
#Door Open Mask
if 'door_is_open_Hst' in df.columns: # Check if door open meaning people at the site doing work
Door_Closed = df['door_is_open_Hst'].astype(float) == 0
data[var[k]][~Door_Closed] = data[var[k]]+'1'
data[var[k]][Door_Closed] = data[var[k]]+'0'
Good = Precip & Grade & Door_Closed&~HL&c_sig_strength&w_sig_strength # Create single boolean from all the qc checks; only one fail will trigger fail
Good = Good & (Samp_Good_Sonic | Samp_Good_IRGA)
else: # If door open is not part of the column set; should be with the logger data
Good = Grade &~HL
Good = Good & (Samp_Good_Sonic | Samp_Good_IRGA)
data[(col[k]+'_Graded')][~Good] = np.NaN # Create column with nan/blank in the column if data is bad/filtered
if k == 0: G = Good;
if k >0: G = pd.concat([G,Good],axis=1, sort = False)
del Good # Delete Good variable for the next round of flux data.
return data
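def _example_decode_flag_string(flag_string):
    # Illustrative helper, not part of the original QC output: Grade_cs builds
    # H_Flags/LE_Flags/Fc_Flags by appending one character per check, '0' for pass and
    # '1' for fail, in the order the checks run above (hard limits, turbulence grade,
    # precipitation, CO2 signal strength, H2O signal strength, sonic sample count,
    # IRGA sample count, door open). Positions shift if some columns are missing from
    # the input dataframe, so treat this ordering as an assumption.
    checks = ['hard_limit', 'turbulence_grade', 'precipitation', 'co2_signal',
              'h2o_signal', 'sonic_samples', 'irga_samples', 'door_open']
    return {name: bit == '1' for name, bit in zip(checks, flag_string)}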
#%%
def METQC(Data, col):
# Driver for the met qc function to deal with some column shenanigans
if col == 'Flux': # Different columns between the two for some reason; think has to do with the way the tables were constructed in the logger code
Met_QC = Met_QAQC(RH=Data['RH_Avg'].astype(float),P=Data['amb_press_Avg'].astype(float), Tair = Data['amb_tmpr_Avg'].astype(float),
WS = Data['rslt_wnd_spd'].astype(float), WD = Data['wnd_dir_compass'].astype(float), Precip = Data['Precipitation_Tot'].astype(float),
PAR =Data['PAR_density_Avg'].astype(float), Rn = Data['Rn_meas_Avg'].astype(float),VPD = Data['VPD_air'].astype(float),e = Data['e_Avg'].astype(float), e_s = Data['e_sat_Avg'].astype(float),z = 0.777)
if col == 'Met': #
Met_QC = Met_QAQC(RH=Data['RH_Avg'].astype(float),P=Data['amb_press_Avg'].astype(float), Tair = Data['amb_tmpr_Avg'].astype(float),
WS = Data['rslt_wnd_spd'].astype(float), WD = Data['wnd_dir_compass'].astype(float), Precip = Data['Precipitation_Tot'].astype(float),
PAR =Data['PAR_density_Avg'].astype(float), Rn = Data['Rn_meas_Avg'].astype(float),VPD = Data['VPD_air'].astype(float),e = Data['e'].astype(float), e_s = Data['e_sat'].astype(float),z = 0.777)
if 'Tair_Filtered' in Data.columns: # Checks if the data has already been through the QC code or not;
for k in range(0,len(Met_QC.columns)):
Data = Data.drop(columns=[Met_QC.columns[k]]) # Drops all columns in the metqc variable before readding them back; the QC occurs over the entire dataframe so will re-addd what was deleted; prevents adding multiple columns to the dataframe with the same header
# Not sure why this is the case and this is a quick fix but don't like it
Data = pd.concat([Data,Met_QC], axis = 1, sort=False) # Concat the metqc values to the dataframe.
return Data
def Met_QAQC(**kwargs):
Q = None
if 'Tair' in kwargs.keys(): # Air temperature
Tair = pd.DataFrame(kwargs['Tair'])
Q = Tair; Q = pd.DataFrame(Q);
Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40) # Bounds check
        Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff()) >= 15) & (np.abs(Q[Tair.columns[0]].diff()) != 0) # Require a non-zero change of less than 15 between each time step
        daily_means_change = Tair[Tair.columns[0]].resample('D').mean().diff() != 0 # Checks if the daily average changes from day to day
        Q['Tair_Day_Change'] = daily_means_change.reindex(Q.index, method='ffill').fillna(True) # Broadcast the daily flag back onto the original timestamps
Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']] #Adds filters and booleans together
Q.drop(columns=[Tair.columns[0]],inplace=True) # Drops the columns that are filtered out; probably a better way to do this
else:
print('******Temperature not present******')
if 'RH' in kwargs.keys():
RH = | pd.DataFrame(kwargs['RH']) | pandas.DataFrame |
from keras.utils import np_utils
import pandas as pd
import numpy as np
import codecs
import re
import pickle
from sklearn.model_selection import train_test_split, KFold
from numpy.random import geometric
import random
import nltk
from nltk.corpus import wordnet
from collections import defaultdict
import glob
from tqdm import tqdm
from scipy.special import comb
class Synonym:
def __init__(self):
nltk.download('wordnet')
self.synonims = defaultdict(list)
def get_synonym(self, word, pos):
if not self.synonims[word]:
for syn in wordnet.synsets(word, pos):
for l in syn.lemmas():
self.synonims[word].append(l.name())
if self.synonims[word]:
return random.choice(self.synonims[word])
else:
return word
class PreprocessBase:
def __init__(self):
pass
def lemmatize(self, dataset):
labels, texts = [], []
nltk.download('punkt')
nltk.download('wordnet')
lemma = nltk.stem.WordNetLemmatizer()
for l, t in tqdm(zip(dataset["label"], dataset["text"])):
words = nltk.word_tokenize(t)
stem_text = ' '.join([lemma.lemmatize(word) for word in words])
texts.append(stem_text)
labels.append(l)
texts = pd.Series(texts)
labels = pd.Series(labels)
return pd.DataFrame({'text': texts, 'label': labels})
def character_restriction(self, dataset, restriction_rule=r'[^\w!?,\s]'):
labels, texts = [], []
for l, t in zip(dataset["label"], dataset["text"]):
re_text = re.sub(r'\s', ' ', re.sub(restriction_rule, '', t)).strip()
if not re_text:
continue
texts.append(re_text)
labels.append(l)
texts = pd.Series(texts)
labels = pd.Series(labels)
return pd.DataFrame({'text': texts, 'label': labels})
# character to ID
def conversion_rule_for_alphabet(self, sentence):
id_list = []
for c in sentence:
if c.islower():
number = ord(c) - 96
elif c.isupper():
number = ord(c) - 38
elif c.isdecimal():
number = int(c) + 53
elif c == '?':
number = 63
elif c == '!':
number = 64
elif c == ',':
number = 65
else:
number = 66
if number >= 0 and number <= 65:
id_list.append(number)
else:
id_list.append(66)
return id_list
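    # Worked example of the mapping above (illustrative): lower-case letters map to
    # 1-26, upper-case to 27-52, digits to 53-62, '?' to 63, '!' to 64, ',' to 65 and
    # any other character to 66, so "Hi!" is encoded as [34, 9, 64] before being
    # zero-padded to limit_characters by texts_to_characters_id_lists below.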
def texts_to_characters_id_lists(self, texts, limit_characters, conversion_rule=conversion_rule_for_alphabet):
characters_id_lists = []
for sentence in texts:
id_list = self.conversion_rule_for_alphabet(sentence)
id_list = id_list + [0 for i in range(limit_characters - len(id_list))]
characters_id_lists.append(id_list[:limit_characters])
return np.array(characters_id_lists)
def labels_to_onehot(self, labels):
return np_utils.to_categorical(list(labels))
def transform_pos(self, nltk_pos):
# noun
if nltk_pos[0] == 'N':
return 'n'
# verb
elif nltk_pos[0] == 'V':
return 'v'
# adverb
elif nltk_pos[0] == 'R':
return 'r'
# adjective
elif nltk_pos[0] == 'J':
return 'a'
else:
return ''
def data_augmentation(self, dataset, sample_num=100000, sample_rate=None):
nltk.download('averaged_perceptron_tagger')
nltk.download('punkt')
synonym = Synonym()
texts = list(dataset['text'])
labels = list(dataset['label'])
dataset_size = len(texts)
new_texts, new_labels = [], []
if sample_rate is not None:
sample_num = int(dataset_size * sample_rate)
replace_numbers = geometric(p=0.5, size=sample_num)
for r in tqdm(replace_numbers):
i = random.choice(range(dataset_size))
t, l = texts[i], labels[i]
words = nltk.word_tokenize(t)
words_and_pos = nltk.pos_tag(words)
r = r if r < len(words) else len(words)
replace_idx = random.sample(range(len(words)), r)
# replace word
for i in replace_idx:
word, pos = words_and_pos[i]
pos = self.transform_pos(pos)
if pos != '':
syn = synonym.get_synonym(word, pos)
# replace
words[i] = syn
new_text = ' '.join(words)
if t == new_text:
continue
new_texts.append(new_text)
new_labels.append(l)
texts.extend(new_texts)
labels.extend(new_labels)
texts = pd.Series(texts)
labels = pd.Series(labels)
_max = np.max(labels)
return pd.DataFrame({'text': texts, 'label': labels})
def data_augmentation_emuneration(self, dataset, sample_num=100000, sample_rate=None):
nltk.download('averaged_perceptron_tagger')
nltk.download('punkt')
synonym = Synonym()
texts = list(dataset['text'])
labels = list(dataset['label'])
dataset_size = len(texts)
new_texts, new_labels = [], []
for i in tqdm(range(dataset_size)):
t, l = texts[i], labels[i]
words = nltk.word_tokenize(t)
words_and_pos = nltk.pos_tag(words)
replaceable_idx = []
for i, (w, p) in enumerate(words_and_pos):
p = self.transform_pos(p)
if p != '':
replaceable_idx.append(i)
# combination
for i in range(len(words)):
word, pos = words_and_pos[i]
pos = self.transform_pos(pos)
if pos != '':
syn = synonym.get_synonym(word, pos)
# replace
words[i] = syn
new_text = ' '.join(words)
if t == new_text:
continue
new_texts.append(new_text)
new_labels.append(l)
texts.extend(new_texts)
labels.extend(new_labels)
texts = pd.Series(texts)
labels = pd.Series(labels)
_max = np.max(labels)
return pd.DataFrame({'text': texts, 'label': labels})
class Sentiment140(PreprocessBase):
def __init__(self):
super().__init__()
def common_preprocess_sentiment140(self, input_name):
with codecs.open(input_name, "r", "UTF-8", "ignore") as file:
df = pd.read_csv(file, header=None)
labels, texts = [], []
for l, t in zip(df[0], df[5]):
if l == 2:
continue
re_text = re.sub(r'(https?://+\S*\s*|www.\S*\s*|#\S*\s*|@\S*\s*|&\S*\s*)', '', t).strip()
if not re_text:
continue
labels.append(0 if l == 0 else 1)
texts.append(re_text)
texts = pd.Series(texts)
labels = pd.Series(labels)
dataset = pd.DataFrame({'text': texts, 'label': labels})
return dataset
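    # Illustrative example of the cleaning regex above (not from the original repo): a raw
    # tweet such as "@user check https://t.co/abc #wow &amp; more" is reduced to
    # "check more", i.e. handles, URLs, hashtags and HTML entities are stripped before
    # lemmatization.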
def preprocess(self, file_name, limit_characters, number_of_characters, aug, kfold):
print("extracting...")
extracted_dataset = self.common_preprocess_sentiment140(file_name)
print("lemmatizing......")
lemma_dataset = self.lemmatize(extracted_dataset)
#lemma_dataset = extracted_dataset
x, y = np.array(lemma_dataset["text"]), np.array(lemma_dataset["label"])
# split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=183)
train_set = pd.DataFrame({'text': x_train, 'label': y_train})
test_set = pd.DataFrame({'text': x_test, 'label': y_test})
# train
if aug:
print("Do augmentation")
train_set = self.data_augmentation(train_set, sample_rate=4.0)
restricted_train = self.character_restriction(train_set, restriction_rule=r'[^\w!?,\s]')
re_x_train = self.texts_to_characters_id_lists(restricted_train['text'], limit_characters)
re_y_train = self.labels_to_onehot(restricted_train['label'])
# test
restricted_test = self.character_restriction(test_set, restriction_rule=r'[^\w!?,\s]')
re_x_test = self.texts_to_characters_id_lists(restricted_test['text'], limit_characters)
re_y_test = self.labels_to_onehot(restricted_test['label'])
return re_x_train, re_x_test, re_y_train, re_y_test
def preprocess_kfold(self, file_name, limit_characters, number_of_characters, aug, k=5):
print("extracting...")
extracted_dataset = self.common_preprocess_sentiment140(file_name)
print("lemmatizing......")
lemma_dataset = self.lemmatize(extracted_dataset)
#lemma_dataset = extracted_dataset
x, y = np.array(lemma_dataset["text"]), np.array(lemma_dataset["label"])
# split
x_train_kf, x_test_kf, y_train_kf, y_test_kf = [], [], [], []
kf = KFold(n_splits=k, shuffle=True, random_state=0)
for i, (train_idx, test_idx) in enumerate(kf.split(x)):
print("k fold: {}".format((i)))
x_train, x_test = x[train_idx], x[test_idx]
y_train, y_test = y[train_idx], y[test_idx]
train_set = pd.DataFrame({'text': x_train, 'label': y_train})
test_set = pd.DataFrame({'text': x_test, 'label': y_test})
# train
if aug:
print("Do augmentation")
train_set = self.data_augmentation(train_set, sample_rate=4.0)
restricted_train = self.character_restriction(train_set, restriction_rule=r'[^\w!?,\s]')
x_train_kf.append(self.texts_to_characters_id_lists(restricted_train['text'], limit_characters))
y_train_kf.append(self.labels_to_onehot(restricted_train['label']))
# test
restricted_test = self.character_restriction(test_set, restriction_rule=r'[^\w!?,\s]')
x_test_kf.append(self.texts_to_characters_id_lists(restricted_test['text'], limit_characters))
y_test_kf.append(self.labels_to_onehot(restricted_test['label']))
return x_train_kf, x_test_kf, y_train_kf, y_test_kf
class IMDB(PreprocessBase):
def __init__(self):
super().__init__()
def read_row_dataset(self, root_dir_name):
# training_dataset_negative
texts, labels = [], []
parent_dir = [root_dir_name + 'train/neg/*', root_dir_name + 'train/pos/*']
for dir_name in parent_dir:
filenames = glob.glob(dir_name)
for name in filenames:
with codecs.open(name, "r", "UTF-8", "ignore") as file:
text = file.read()
texts.append(re.sub(r'(https?://+\S*\s*|www.\S*\s*|#\S*\s*|@\S*\s*|&\S*\s*)', '', text).strip())
labels.append(0 if 'neg' in dir_name else 1)
texts = pd.Series(texts)
labels = | pd.Series(labels) | pandas.Series |
import pandas as pd
import regex as re
import numpy as np
import sys
import time
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from collections import OrderedDict
from nltk.corpus import words
from preprocess import CustomAnalyzer, doFreq, doTf_IDF
from dataJoin import joinData
# read bot data
bot_data = | pd.read_csv("../data/preprocessedTweets/bot_english_tweets.csv") | pandas.read_csv |
import os
import logging
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch import nn
from torch.optim import AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingWarmRestarts, CosineAnnealingLR
import torch.nn.functional as F
from segmentation_models_pytorch.unetplusplus.model import UnetPlusPlus
from segmentation_models_pytorch.losses import DiceLoss
from segmentation_models_pytorch.utils.metrics import IoU
from tqdm import tqdm
import gc
from sklearn.metrics import roc_auc_score, accuracy_score
import json
import argparse
parser = argparse.ArgumentParser(description='Insert some arguments')
parser.add_argument('--mri_type', type=str,
help='Train your model on which MRI type. Should be one of: FLAIR, T1w, T1wCE, T2w, All (All means sequentially training the above 4 mri types)', default='FLAIR')
parser.add_argument('--gpu', type=int,
help='GPU ID', default=0)
parser.add_argument('--batch_size', type=int,
help='Batch size', default=4)
parser.add_argument('--n_workers', type=int,
help='Number of parrallel workers', default=8)
args = parser.parse_args()
with open('SETTINGS.json', 'r') as f:
SETTINGS = json.load(f)
DATA_FOLDER = SETTINGS['CLASSIFICATION_DATA_DIR']
META_FILE_PATH = f'{DATA_FOLDER}/meta_classification.csv'
KFOLD_FILE_PATH = SETTINGS['KFOLD_PATH']
RUN_FOLDS = [0]
MRI_TYPES = ['FLAIR','T1w', 'T1wCE', 'T2w'] if args.mri_type == 'All' else [args.mri_type]
STRIDE = 5
SEQ_LEN = 35
LSTM_HIDDEN_SIZE = 128
LSTM_LAYERS = 1
SEED = 67
DIM = (224, 224, 3)
N_WORKERS = args.n_workers
BATCH_SIZE = args.batch_size
BASE_LR = 1e-6
NUM_EPOCHS = 80
PATIENT = 10
SAMPLE = None
DEVICE = torch.device(f'cuda:{args.gpu}')
PARENT_OUT_FOLDER = 'models/'
CANDIDATES = [
{
'backbone_name':'eca_nfnet_l0',
'ver_note':'2d_classification',
'backbone_pretrained':'pretrained_models/eca_nfnet_l0.pth',
'batch_size':BATCH_SIZE,
'warm_up_epochs':5,
},
]
import sys
from utils.general import seed_torch, init_progress_dict, log_to_progress_dict, save_progress, log_and_print, get_logger
# seed every thing
seed_torch(SEED)
def chunk_slices(list_files):
list_files = sorted(list_files)
chunks = []
n_chunks = max(int(np.ceil((len(list_files) - SEQ_LEN) / STRIDE ) + 1),1)
for i in range(n_chunks):
s = i*STRIDE
e = min(s+SEQ_LEN, len(list_files))
chunks.append(list_files[s:e])
return chunks
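# A quick worked example of the chunking above (illustrative only): with
# SEQ_LEN=35 and STRIDE=5, a study with 60 slice files yields
# max(ceil((60 - 35) / 5) + 1, 1) = 6 overlapping chunks, covering slices
# [0:35], [5:40], [10:45], [15:50], [20:55] and [25:60].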
def expand(row):
list_files = row['chunk_file_paths']
return pd.DataFrame({
'BraTS21ID':[row['BraTS21ID']]*len(list_files),
'MGMT_value':[row['MGMT_value']]*len(list_files),
'mri_type':[row['mri_type']]*len(list_files),
'file_path':list_files,
'fold':[row['fold']]*len(list_files)
})
def get_first_value(df, col_name):
df[col_name] = df[col_name].map(lambda x: list(x)[0])
def process_df_mri_type(df_mri):
df_mri_group = df_mri.groupby('BraTS21ID').agg(list)
df_mri_group = df_mri_group.reset_index()
df_mri_group['chunk_file_paths'] = df_mri_group.file_path.map(chunk_slices)
df_mri_group['chunk_count'] = df_mri_group['chunk_file_paths'].map(lambda x: len(x))
df_mri_group['chunk_cum_count'] = df_mri_group['chunk_count'].cumsum()
df_mri_group_expand = df_mri_group.apply(expand, axis=1).tolist()
df_mri_group_expand = pd.concat(df_mri_group_expand)
for col_name in ['MGMT_value', 'mri_type', 'fold']:
get_first_value(df_mri_group_expand, col_name)
return df_mri_group_expand
class BrainClassification2DDataset(torch.utils.data.Dataset):
def __init__(self, csv, transforms=None):
self.csv = csv.reset_index(drop=True)
self.augmentations = transforms
def __len__(self):
return self.csv.shape[0]
def __getitem__(self, index):
row = self.csv.iloc[index]
list_file_path = row['file_path']
list_images = []
label = row['MGMT_value']
for i, path in enumerate(list_file_path):
image = np.load(path)
label = row['MGMT_value']
list_images.append(image)
images = np.stack(list_images, axis=0)
if(images.shape[0] < SEQ_LEN):
n_pad = SEQ_LEN - images.shape[0]
pad_matrix = np.zeros(shape=(n_pad, images.shape[1], images.shape[2], images.shape[3]))
images = np.concatenate([images, pad_matrix], axis=0)
if self.augmentations:
images_dict = dict()
for i in range(len(images)):
if(i==0):
images_dict['image'] = images[i]
else:
images_dict[f'image{i-1}'] = images[i]
augmented = self.augmentations(**images_dict)
transformed_images = []
for i in range(len(images)):
if(i==0):
transformed_images.append(augmented['image'])
else:
transformed_images.append(augmented[f'image{i-1}'])
transformed_images = np.stack(transformed_images, axis=0)
return transformed_images, torch.tensor(label)
return images, torch.tensor(label)
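# Descriptive note (not enforced here): each item is (images, label). On the
# augmented path, `images` has shape (SEQ_LEN, C, H, W) after ToTensorV2, with
# zero-padded frames appended when a chunk holds fewer than SEQ_LEN slices.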
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
def get_train_transforms(candidate):
dim = candidate.get('dim', DIM)
seq_len = candidate.get('seq_len', SEQ_LEN)
additional_targets = {f'image{i}':'image' for i in range(SEQ_LEN-1)}
return A.Compose(
[
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
A.ShiftScaleRotate(p=0.5),
A.Resize(width=dim[1], height=dim[0], always_apply=True),
A.Normalize(),
ToTensorV2(p=1.0)
],
additional_targets=additional_targets
)
def get_valid_transforms(candidate):
dim = candidate.get('dim', DIM)
additional_targets = {f'image{i}':'image' for i in range(SEQ_LEN-1)}
return A.Compose(
[
A.Resize(width=dim[1], height=dim[0], always_apply=True),
A.Normalize(),
ToTensorV2(p=1.0)
],
additional_targets=additional_targets
)
def dfs_freeze(module):
for name, child in module.named_children():
for param in child.parameters():
param.requires_grad = False
dfs_freeze(child)
def dfs_unfreeze(module):
for name, child in module.named_children():
for param in child.parameters():
param.requires_grad = True
dfs_unfreeze(child)
import timm
class BrainSequenceModelNFNet(nn.Module):
def __init__(self, backbone_name, backbone_pretrained,
lstm_dim=64, lstm_layers=1, lstm_dropout=0.,
n_classes=1):
super(BrainSequenceModelNFNet, self).__init__()
self.backbone = timm.create_model(backbone_name, pretrained=False)
self.backbone.load_state_dict(torch.load(backbone_pretrained))
lstm_inp_dim = self.backbone.head.fc.in_features
self.backbone.head.fc = nn.Identity()
self.lstm = nn.LSTM(lstm_inp_dim, lstm_dim, num_layers=lstm_layers,
batch_first=True, bidirectional=True,
dropout=lstm_dropout)
self.clf_head = nn.Linear(lstm_dim*2*SEQ_LEN, n_classes)
def forward(self, x):
n = x.shape[0]
seq_length = x.shape[1]
concat_x = torch.cat([x[i] for i in range(n)], axis=0)
concat_x = self.backbone(concat_x)
stacked_x = torch.stack([concat_x[i*seq_length:i*seq_length+seq_length] for i in range(n)], axis=0)
seq_features, _ = self.lstm(stacked_x)
seq_features = seq_features.reshape(n,-1)
logits = self.clf_head(seq_features)
return logits
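# Shape walk-through for forward() above (descriptive, assuming the defaults
# SEQ_LEN=35, DIM=(224, 224, 3) and LSTM_HIDDEN_SIZE passed as lstm_dim):
#   x: (B, 35, 3, 224, 224) -> backbone over the B*35 flattened frames
#   -> (B*35, feat) -> restacked to (B, 35, feat) -> BiLSTM -> (B, 35, 2*lstm_dim)
#   -> reshaped to (B, 35*2*lstm_dim) -> clf_head -> (B, n_classes)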
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train_valid_fn(dataloader,model, criterion, scaler, optimizer=None,device='cuda:0',scheduler=None,
epoch=0,mode='train', metric='auc'):
'''Run one epoch of training or validation, depending on `mode`.'''
if(mode=='train'):
model.train()
elif(mode=='valid'):
model.eval()
else:
raise ValueError('No such mode')
loss_score = AverageMeter()
tk0 = tqdm(enumerate(dataloader), total=len(dataloader))
all_predictions = []
all_labels = []
for i, batch in tk0:
if(mode=='train'):
optimizer.zero_grad()
# input, gt
voxels, labels = batch
voxels = voxels.to(device)
labels = labels.to(device).float()
# prediction
with torch.cuda.amp.autocast():
logits = model(voxels)
logits = logits.view(-1)
probs = logits.sigmoid()
# compute loss
loss = criterion(logits, labels)
if(mode=='train'):
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
loss_score.update(loss.detach().cpu().item(), dataloader.batch_size)
# append for metric calculation
all_predictions.append(probs.detach().cpu().numpy())
all_labels.append(labels.detach().cpu().numpy())
if(mode=='train'):
tk0.set_postfix(Loss_Train=loss_score.avg, Epoch=epoch, LR=optimizer.param_groups[0]['lr'])
elif(mode=='valid'):
tk0.set_postfix(Loss_Valid=loss_score.avg, Epoch=epoch)
del batch, voxels, labels, logits, probs, loss
torch.cuda.empty_cache()
if(mode=='train'):
if(scheduler.__class__.__name__ == 'CosineAnnealingWarmRestarts'):
scheduler.step(epoch=epoch)
elif(scheduler.__class__.__name__ == 'ReduceLROnPlateau'):
scheduler.step(loss_score.avg)
all_predictions = np.concatenate(all_predictions)
all_labels = np.concatenate(all_labels)
if(metric == 'auc'):
auc = roc_auc_score(y_true=all_labels, y_score=all_predictions)
return loss_score.avg, auc
return loss_score.avg
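# Illustrative call pattern (assumed names, not part of the original script):
#   scaler = torch.cuda.amp.GradScaler()
#   criterion = nn.BCEWithLogitsLoss()
#   tr_loss, tr_auc = train_valid_fn(train_loader, model, criterion, scaler,
#                                    optimizer, DEVICE, scheduler, epoch, 'train')
#   va_loss, va_auc = train_valid_fn(valid_loader, model, criterion, scaler,
#                                    device=DEVICE, epoch=epoch, mode='valid')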
# ============ Read metadata ==============
df = | pd.read_csv(META_FILE_PATH) | pandas.read_csv |
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEquals(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
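# For example, a per-second series starting at 04:00:02 resampled to '5t' is
# still binned at 04:00, 04:05, ... (midnight-anchored) rather than at
# 04:00:02, 04:05:02, ... -- which is what the equality checked below relies on.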
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assert_(resampled.index.equals(exp_rng))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assert_(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
result = ts.resample('A', kind='period')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
result = ts.resample('M')
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M')
expected = df.resample('M', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left')
exp = df.tshift(1, freq='D').resample('M', kind='period')
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q')
expected = df.resample('Q', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left')
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left')
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M')
self.assert_(len(resampled) == 1)
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
result = ts.resample(freq, how='mean')
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left')
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
self.assert_(result.index.equals(ex_index))
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC')
self.assert_(len(result) == 0)
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period')
self.assert_(len(result) == 1)
self.assert_(result.index[0] == Period('2000-04', freq='M'))
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d')
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min', how=['mean', 'sum'])
tm.assert_isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D', how='sum')
exp = ts.sort_index().resample('D', how='sum')
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64','int32','float64','float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype = dtype)
result = df.resample("T", how=lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T", how="median")
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
ts = _simple_ts('1/1/2000', '4/1/2000')
result = ts.resample('M', how=lambda x: x.mean())
exp = ts.resample('M', how='mean')
tm.assert_series_equal(result, exp)
self.assertRaises(Exception, ts.resample, 'M',
how=[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result = ts.resample('M', how={'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
foo_exp = ts.resample('M', how='mean')
bar_exp = ts.resample('M', how='std')
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
def test_resample_unequal_times(self):
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS', 'sum')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return TimeSeries(np.random.randn(len(rng)), index=rng)
from pandas.tseries.frequencies import MONTHS, DAYS
class TestResamplePeriodIndex(unittest.TestCase):
_multiprocess_can_split_ = True
def test_annual_upsample_D_s_f(self):
self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
for month in MONTHS:
ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
result = ts.resample(targ, fill_method=meth,
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995',
freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec'), result)
assert_series_equal(ts.resample('a'), result)
def test_not_subperiod(self):
# These are incompatible period rules for resampling
ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
self.assertRaises(ValueError, ts.resample, 'a-dec')
self.assertRaises(ValueError, ts.resample, 'q-mar')
self.assertRaises(ValueError, ts.resample, 'M')
self.assertRaises(ValueError, ts.resample, 'w-thu')
def test_basic_upsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
resampled = result.resample('D', fill_method='ffill', convention='end')
expected = result.to_timestamp('D', how='end')
expected = expected.asfreq('D', 'ffill').to_period()
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', fill_method='ffill', limit=2,
convention='end')
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D', fill_method='ffill')
exp = df['a'].resample('D', fill_method='ffill')
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M', fill_method='ffill')
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
def test_quarterly_upsample(self):
targets = ['D', 'B', 'M']
for month in MONTHS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_monthly_upsample(self):
targets = ['D', 'B']
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_weekly_upsample(self):
targets = ['D', 'B']
for day in DAYS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp')
expected = ts.to_timestamp(how='end').resample('A-DEC')
assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
for month in MONTHS:
ts = _simple_pts('1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month, fill_method='ffill')
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = _simple_pts('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how, fill_method='ffill')
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR', fill_method='ffill')
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = TimeSeries(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A')
expected = stamps.resample('A').to_period('A')
assert_series_equal(filled, expected)
filled = s.resample('A', fill_method='ffill')
expected = stamps.resample('A', fill_method='ffill').to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = TimeSeries(np.random.randn(5), index=rng)
self.assertRaises(Exception, s.resample, 'A')
def test_resample_5minute(self):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = TimeSeries(np.random.randn(len(rng)), index=rng)
result = ts.resample('5min')
expected = ts.to_timestamp().resample('5min')
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D')
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = _simple_pts('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s')
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_empty(self):
ts = _simple_pts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min', how=len)
expected = s.resample('10min', how=len).ix[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU')
self.assert_(result.isnull().all())
result = ts.resample('W-THU', fill_method='ffill')[:-1]
expected = ts.asfreq('W-THU', method='ffill')
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W')
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample('W').tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D')
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right')
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period')
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', how='mean', closed='left', label='right')
exp = s[1:].resample('10min', how='mean', closed='left', label='right')
assert_series_equal(result, exp)
result = s.resample('10min', how='mean', closed='left', label='left')
exp = s[1:].resample('10min', how='mean', closed='left', label='left')
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
self.assert_(result.index.equals(ex_index))
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A')
exp = ts.to_timestamp().resample('A').to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', how='first', closed='left', label='left')
def test_resample_bms_2752(self):
# GH2753
foo = pd.Series(index=pd.bdate_range('20000101','20000201'))
res1 = foo.resample("BMS")
res2 = foo.resample("BMS").resample("B")
self.assertEqual(res1.index[0], Timestamp('20000103'))
self.assertEqual(res1.index[0], res2.index[0])
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span')
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right'))
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left'))
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A", how='mean')
tm.assert_almost_equal(result[0], s.mean())
def test_resample_doesnt_truncate(self):
"""Test for issue #3020"""
import pandas as pd
dates = pd.date_range('01-Jan-2014','05-Jan-2014', freq='D')
series = Series(1, index=dates)
result = series.resample('D')
self.assertEquals(result.index[0], dates[0])
class TestTimeGrouper(unittest.TestCase):
def setUp(self):
self.ts = Series(np.random.randn(1000),
index=date_range('1/1/2000', periods=1000))
def test_apply(self):
grouper = | TimeGrouper('A', label='right', closed='right') | pandas.tseries.resample.TimeGrouper |
#!/usr/bin/env python
# coding: utf-8
# # Team: FlaRe análise
# # Members: <NAME> and <NAME>
# # email: <EMAIL>
# ##### Explanatory report of the analysis: https://docs.google.com/document/d/1aRzknUnMZdc_e3nqljTfou46NI-mL7rgYOyh1Bw2MAo/edit
# ### Analysis: Is the population's educational growth related to the growth of formal employment?
#
# #### The analysis starts by examining the educational level of the working-age young labour force and the expected years of schooling. It then looks at the rates of formal and informal employment and the degree of formalisation among the employed, as well as the educational level of the employed labour force, average income, and the share earning up to one minimum wage.
# ###### Note: the municipal data available come only from the 1991, 2000 and 2010 censuses, and in some cases only the 2010 census was available.
# ###### The data were collected from: https://atlasbrasil.org.br/consulta/planilha
# In[ ]:
import pandas as pd
import numpy as np
# In[ ]:
# Education data for Feira de Santana. The years used in the analyses come from the 1991, 2000 and 2010 censuses.
# Unfortunately Feira de Santana has no PNAD - IBGE data, since it is not a state capital and is not yet
# considered a metropolitan region.
dados_edu = | pd.read_csv('../data/dados_edu.csv', sep=',') | pandas.read_csv |
"""
ABSOLUTELY NOT TESTED
"""
import time
import os
import datetime
from collections import namedtuple
import numpy as np
import pandas as pd
import sklearn.preprocessing
import torch
import torch.nn as nn
import torch.optim as optim
from dateutil.relativedelta import relativedelta
from simple_ts_forecast.models import Model
SavedFit = namedtuple('SavedFit', 'filename date_test_start datetime_fit mape')
def r2_score(y_test, y_pred, torch_order=False):
if torch_order:
y_test, y_pred = y_pred, y_test
if isinstance(y_test, np.ndarray) and isinstance(y_pred, np.ndarray):
return 1 - np.mean((y_test - y_pred) ** 2) / np.mean((y_test - np.mean(y_test)) ** 2)
elif isinstance(y_test, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return 1 - torch.mean((y_test - y_pred) ** 2).item() / torch.mean((y_test - torch.mean(y_test)) ** 2).item()
else:
raise TypeError(f"input_ array must be np.ndarray or torch.Tensor, got {type(y_test)}, {type(y_pred)}")
def mean_absolute_percent_error(y_test, y_pred, torch_order=False):
if torch_order:
y_test, y_pred = y_pred, y_test
if isinstance(y_test, np.ndarray) and isinstance(y_pred, np.ndarray):
return np.mean(np.abs((y_test - y_pred) / y_test)) * 100
elif isinstance(y_test, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return torch.mean(torch.abs((y_test - y_pred) / y_test)) * 100
else:
raise TypeError(f"input_ array must be np.ndarray or torch.Tensor, got {type(y_test)}, {type(y_pred)}")
class LSTM(Model):
"""Use this class as another classic simple_ts_forecast"""
class _Model(nn.Module):
"""PyTorch RNN model"""
def __init__(self, input_size, hidden_size, output_size, device):
super().__init__()
self.device = device
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.lstm_1 = nn.LSTMCell(self.input_size, self.hidden_size)
self.lstm_2 = nn.LSTMCell(self.hidden_size, self.hidden_size)
self.dropout_1 = nn.Dropout(p=0.5)
self.dropout_2 = nn.Dropout(p=0.1)
self.linear = nn.Linear(self.hidden_size, self.input_size)
self.out_linear = nn.Linear(self.input_size, self.output_size)
def forward(self, x, future=1):
x = x.to(self.device)
outputs = []
# reset the state of LSTM
# the state is kept till the end of the sequence
h_t1, c_t1 = self.init_hidden(x.size(0))
h_t2, c_t2 = self.init_hidden(x.size(0))
for input_t in x.split(1, dim=1):
h_t1, c_t1 = self.lstm_1(input_t.squeeze(1), (h_t1, c_t1))
h_t1 = self.dropout_1(h_t1)
h_t2, c_t2 = self.lstm_2(h_t1, (h_t2, c_t2))
output = self.linear(self.dropout_2(h_t2))
outputs += [self.out_linear(output)]
for i in range(future - 1):
h_t1, c_t1 = self.lstm_1(output, (h_t1, c_t1))
h_t1 = self.dropout_1(h_t1)
h_t2, c_t2 = self.lstm_2(h_t1, (h_t2, c_t2))
output = self.linear(self.dropout_2(h_t2))
outputs += [self.out_linear(output)]
outputs = torch.stack(outputs, 1).squeeze(2)
return outputs
def init_hidden(self, batch_size):
h_t = torch.zeros(batch_size, self.hidden_size, dtype=torch.float32).to(self.device)
c_t = torch.zeros(batch_size, self.hidden_size, dtype=torch.float32).to(self.device)
return h_t, c_t
def __init__(self, n=14, window=35, lr=0.005, sched_step_size=10, sched_gamma=0.5,
model_params=None, model_input_size=1, model_hidden_size=300, model_output_size=1, scaler=None,
device=None, gpu_num=0, train_set_prop=0.9, batch_size=175, n_epochs=30,
models_dir='lstm_saves/ts_mnpz/',
days_between_fits=31, n_fits=3, search_window=14, post_process_coef=0.75):
"""Init model
Args:
n (int, optional): number of future days to predict. Defaults to 14.
window (int, optional): number of past days used as model input. Defaults to 35.
lr (float, optional): learning rate of optimizer. Defaults to 0.005.
sched_step_size (int, optional): lr_scheduler.StepLR step size. Defaults to 10.
sched_gamma (float, optional): lr_scheduler.StepLR gamma. Defaults to 0.5.
model_params (dict, optional): dict of params = args to model init. Defaults to dict of 3 params below.
model_input_size (int, optional): param of Model, number of input features. Defaults to 1.
model_hidden_size (int, optional): param of Model, size of hidden layers. Defaults to 300.
model_output_size (int, optional): param of Model, size of output. Defaults to 1.
scaler (sklearn.preprocessing.*Scaler, optional): class Scaler for features. Defaults to sklearn.preprocessing.StandardScaler.
device (torch.device, optional): device train on. Defaults to gpu, if available.
gpu_num (int, optional): gpu num in sys. Defaults to 0.
train_set_prop (float, optional): if date_test_start is not provided, this fraction of the data is used for training. Defaults to 0.9.
batch_size (int, optional): batch size for training. Defaults to 175.
n_epochs (int, optional): number of epochs for training. Defaults to 30.
models_dir (str, optional): directory where fitted models are saved and loaded from. Defaults to 'lstm_saves/ts_mnpz/'.
days_between_fits (int, optional): days between refits in predict_for_report. Defaults to 31.
n_fits (int, optional): number of restart fits on the same training data; the one with the best validation MAPE is kept. Defaults to 3.
search_window (int, optional): look for a saved fit up to search_window days back. Defaults to 14.
post_process_coef (float, optional): weight of the last observed value when blending it with the first predicted value in post-processing, in [0, 1]. Defaults to 0.75.
"""
super().__init__()
self.model_params = model_params or dict(input_size=model_input_size, hidden_size=model_hidden_size,
output_size=model_output_size)
self.device = device or torch.device(f'cuda:{gpu_num}' if torch.cuda.is_available() else 'cpu')
self.cpu_device = torch.device('cpu')
self.model = self._Model(**self.model_params, device=self.cpu_device)
self.loss_fn = nn.MSELoss()
self.lr = lr
self.sched_step_size = sched_step_size
self.sched_gamma = sched_gamma
self.Scaler = scaler or sklearn.preprocessing.StandardScaler
self.scalers = []
self.n_in = window
self.n_out = n
self.n_epochs = n_epochs
self.batch_size = batch_size
self.seeds = [0, 42, 1, 123, 1337, 2000, -1000, 300]
self.models_dir = models_dir
os.makedirs(self.models_dir, exist_ok=True)
self.days_between_fits = days_between_fits
self._filename_pattern = 'model_{date_test_start}_{datetime_fit}_{mape:.2f}_.pt'
self.train_set_prop = train_set_prop
self.n_fits = n_fits
self.search_window = search_window
self.post_process_coef = post_process_coef
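# A minimal usage sketch (illustrative only; assumes `df` is a DataFrame of the
# series to forecast, indexed by date, with the target series in the first column,
# and that the illustrative date below exists in the index):
#
#   model = LSTM(n=14, window=35)
#   model.fit(df, date_test_start='2020-06-01', verbose=True)
#   forecast = model.predict(df)      # one row of n=14 day-ahead predictions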
def fit(self, X, verbose=False, date_test_start=None, force_fit=False, load_from_filename=None, saving=True):
"""fit or load LSTM model
Args:
X (pd.DataFrame): all series used to train (and test) the model, without NaNs
verbose (bool, optional): if True prints verbose information. Defaults to False.
date_test_start (str or datetime): date of the first n_out-day prediction. Defaults to the end of the first 90% of X.
force_fit (bool, optional): fit even if a saved model exists. Defaults to False.
load_from_filename (str, optional): filename to load from (without the directory name). Defaults to None.
"""
ind = pd.to_datetime(X.index)
X = X.values
n_features = X.shape[1]
if date_test_start is None:
test_start = int(len(X) * self.train_set_prop)
date_test_start = pd.to_datetime(ind[test_start])
else:
test_start = ind.get_loc(date_test_start) + 1 - self.n_in - self.n_out
self._test_start = test_start
self.date_test_start = pd.to_datetime(date_test_start)
train = X[:test_start].reshape(-1, n_features)
test = X[test_start:].reshape(-1, n_features)
trains = []
tests = []
for i in range(n_features):
scaler = self.Scaler()
series = train[:, i].reshape(-1, 1)
scaler = scaler.fit(series)
trains.append(scaler.fit_transform(series))
tests.append(scaler.transform(test[:, i].reshape(-1, 1)))
self.scalers.append(scaler)
shift_size = self.n_in
train_arr = np.concatenate(trains, 1)
test_arr = np.concatenate(tests, 1)
x_train, y_train = self.series_to_supervised(train_arr, self.n_in, self.n_out, shift_size, for_new_arch=True)
self._x_train = x_train
self._y_train = y_train
x_test, y_test = self.series_to_supervised(test_arr, self.n_in, self.n_out, shift_size, for_new_arch=True)
self._x_test = x_test
self._y_test = y_test
if load_from_filename and not force_fit:
self.load_model(self.models_dir + load_from_filename)
elif force_fit:
self._n_fits(self.n_fits, verbose, saving)
else:
filename = self.find_nearest_save(self.date_test_start)
if filename:
self.load_model(self.models_dir + filename)
else:
self._n_fits(self.n_fits, verbose, saving)
def _n_fits(self, n_fits=3, verbose=False, saving=True):
info = []
min_mape = float('inf')
min_mape_i = 0
for i in range(n_fits):
if i < len(self.seeds):
torch.manual_seed(self.seeds[i])
else:
torch.seed()
self.model = self._Model(**self.model_params, device=self.device)
self.model.to(self.device)
self.loss_fn = nn.MSELoss()
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=self.sched_step_size,
gamma=self.sched_gamma)
if verbose:
print(f'START fit {i}')
train_loss, val_loss, tttime, mape = self.train(self._x_train, self._y_train, self._x_test, self._y_test,
verbose=verbose)
if verbose:
print(f'MAPE on {i} fit = {mape:.4f}, last best = {min_mape:.4f}, elapsed {tttime / 60:.2f}min.\n')
if min_mape > mape:
min_mape = mape
min_mape_i = i
info.append((self.model, self.loss_fn, self.optimizer, self.scheduler))
self.model.to(self.cpu_device)
self.model.device = self.cpu_device
if verbose:
print(f'\nTHE BEST Model is {min_mape_i} with MAPE = {min_mape:.4f}\n')
self.model, self.loss_fn, self.optimizer, self.scheduler = info[min_mape_i]
self.mape_on_val = min_mape
if saving:
self.save_fit()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def predict(self, X, dates_from_predict=None, post_process=True):
"""
:param X: all series, same as in fit(), but with additional data at the end
:type X: pd.DataFrame or np.ndarray
:param dates_from_predict: index labels of the days in X to predict from;
    if None, predicts from the last date in X
:return: pd.DataFrame of predictions, one row per day in dates_from_predict
"""
n_features = X.shape[1]
trains = []
for i in range(n_features):
scaler = self.scalers[i]
series = X.iloc[:, i:i + 1].values
trains.append(scaler.transform(series))
X = pd.DataFrame(np.concatenate(trains, 1), index=X.index)
ind = X.index
if dates_from_predict is None:
dates_from_predict = [ind[-1]]
to_predict = []
for date in dates_from_predict:
end_ind = ind.get_loc(date)
x = X.iloc[end_ind - self.n_in:end_ind, :].values
to_predict.append(x)
to_predict = np.array(to_predict)
x = torch.from_numpy(to_predict).float()
with torch.no_grad():
self.model.eval()
y_pred = self.model(x, future=self.n_out).cpu()
y_pred = y_pred[:, -self.n_out:].numpy()
predicted_scaled = self._scale_all_predictions(y_pred)
predicted_scaled = np.array(predicted_scaled).reshape(len(dates_from_predict), self.n_out)
columns = [f'n{i + 1}' for i in range(self.n_out)]
pred = pd.DataFrame(predicted_scaled, index=dates_from_predict, columns=columns)
if post_process:
ma = X.loc[pred.index].values[:, :1]
ppc = self.post_process_coef
pred = pred - predicted_scaled[:, :1] + (ma * ppc + predicted_scaled[:, :1] * (1 - ppc))
return pred
def predict_for_report(self, X, date_start, date_end, current_fit=False, force_fits=False, verbose=False,
saving=True, post_process=True):
date_start = pd.to_datetime(date_start)
date_end = pd.to_datetime(date_end)
columns = [f'n{i + 1}' for i in range(self.n_out)]
if current_fit:
predicted = self._evaluate_all(self._x_test, self._y_test)
start = date_start - relativedelta(days=self.n_out)
ind = pd.date_range(start, periods=len(predicted))
return pd.DataFrame(predicted, index=ind, columns=columns)
flag = False
preds = []
l_range = (date_end - date_start).days
for i in range(0, l_range, self.days_between_fits):
if l_range - (i + self.days_between_fits) < self.n_out:
flag = True
new_date_start = date_start + relativedelta(days=i)
new_end = new_date_start + relativedelta(days=self.days_between_fits - 1)
if flag:
new_end = date_end
if force_fits:
self.fit(X.loc[:new_end], date_test_start=new_date_start, force_fit=True, verbose=verbose,
saving=saving)
else:
saved_fit_fn = self.find_nearest_save(new_date_start)
if saved_fit_fn:
self.fit(X.loc[:new_end], date_test_start=new_date_start, load_from_filename=saved_fit_fn,
verbose=verbose, saving=saving)
else:
self.fit(X.loc[:new_end], date_test_start=new_date_start, force_fit=True, verbose=verbose,
saving=saving)
predicted = self._evaluate_all(self._x_test, self._y_test)
start = new_date_start - relativedelta(days=self.n_out)
ind = pd.date_range(start, periods=len(predicted))
preds.append( | pd.DataFrame(predicted, index=ind, columns=columns) | pandas.DataFrame |
import matplotlib.pyplot as plt
import datetime as datetime
import numpy as np
import pandas as pd
import talib
import seaborn as sns
from time import time
from sklearn import preprocessing
from pandas.plotting import register_matplotlib_converters
from .factorize import FactorManagement
import scipy.stats as stats
import cvxpy as cvx
import zipfile
import os
from sklearn import linear_model, decomposition, ensemble, preprocessing, isotonic, metrics
from sklearn.impute import SimpleImputer
import xgboost
register_matplotlib_converters()
class Learner:
def __init__(self):
pass
@staticmethod
def shift_mask_data(X, Y, upper_percentile, lower_percentile, n_fwd_days):
# Shift X to match factors at t to returns at t+n_fwd_days (we want to predict future returns after all)
shifted_X = np.roll(X, n_fwd_days + 1, axis=0)
# Slice off rolled elements
X = shifted_X[n_fwd_days + 1:]
Y = Y[n_fwd_days + 1:]
n_time, n_stocks, n_factors = X.shape
# Look for biggest up and down movers
upper = np.nanpercentile(Y, upper_percentile, axis=1)[:, np.newaxis]
lower = np.nanpercentile(Y, lower_percentile, axis=1)[:, np.newaxis]
upper_mask = (Y >= upper)
lower_mask = (Y <= lower)
mask = upper_mask | lower_mask # This also drops nans
mask = mask.flatten()
# Only try to predict whether a stock moved up/down relative to other stocks
Y_binary = np.zeros(n_time * n_stocks)
Y_binary[upper_mask.flatten()] = 1
Y_binary[lower_mask.flatten()] = -1
# Flatten X
X = X.reshape((n_time * n_stocks, n_factors))
# Drop stocks that did not move much (i.e. are in the 30th to 70th percentile)
X = X[mask]
Y_binary = Y_binary[mask]
return X, Y_binary
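# Rough shape sketch for shift_mask_data (descriptive): given X of shape
# (n_time, n_stocks, n_factors) and Y of shape (n_time, n_stocks), it returns a
# 2-D feature matrix of shape (n_kept, n_factors) and a vector of +1/-1 labels,
# where n_kept counts only the observations in the top/bottom return percentiles
# after dropping the first n_fwd_days+1 rows.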
def feature_importance_adaboost(self, n_fwd_days, close, all_factors, n_estimators, train_size,
upper_percentile, lower_percentile):
pipe = all_factors
pipe.index = pipe.index.set_levels([pd.to_datetime(pipe.index.levels[0]), pipe.index.levels[1]])
close = close[pipe.index.levels[1]]
close.index = pd.to_datetime(close.index)
chunk_start = pipe.index.levels[0][0]
chunk_end = pipe.index.levels[0][-1]
returns = FactorManagement().log_Returns(close, 1).loc[slice(chunk_start, chunk_end), :]
returns_stacked = returns.stack().to_frame('Returns')
results = pd.concat([pipe, returns_stacked], axis=1)
results.index.set_names(['date', 'asset'], inplace=True)
results_wo_returns = results.copy()
returns = results_wo_returns.pop('Returns')
Y = returns.unstack().values
X = results_wo_returns.to_xarray().to_array()
X = np.array(X)
X = X.swapaxes(2, 0).swapaxes(0, 1) # (factors, time, stocks) -> (time, stocks, factors)
# Train-test split
train_size_perc = train_size
n_time, n_stocks, n_factors = X.shape
train_size = np.int16(np.round(train_size_perc * n_time))
X_train, Y_train = X[:train_size], Y[:train_size]
X_test, Y_test = X[(train_size + n_fwd_days):], Y[(train_size + n_fwd_days):]
X_train_shift, Y_train_shift = self.shift_mask_data(X_train, Y_train, n_fwd_days=n_fwd_days,
lower_percentile=lower_percentile,
upper_percentile=upper_percentile)
X_test_shift, Y_test_shift = self.shift_mask_data(X_test, Y_test, n_fwd_days=n_fwd_days,
lower_percentile=lower_percentile,
upper_percentile=upper_percentile)
start_timer = time()
# Train classifier
imputer = SimpleImputer()
scaler = preprocessing.MinMaxScaler()
clf = ensemble.AdaBoostClassifier(
n_estimators=n_estimators)  # n_estimators controls how many weak classifiers are fit
X_train_trans = imputer.fit_transform(X_train_shift)
X_train_trans = scaler.fit_transform(X_train_trans)
clf.fit(X_train_trans, Y_train_shift)
end_timer = time()
print('Time to train full ML pipline: {} secs'.format(end_timer - start_timer))
Y_pred = clf.predict(X_train_trans)
print('Accuracy on train set = {:.2f}%'.format(metrics.accuracy_score(Y_train_shift, Y_pred) * 100))
# Transform test data
X_test_trans = imputer.transform(X_test_shift)
X_test_trans = scaler.transform(X_test_trans)
# Predict!
Y_pred = clf.predict(X_test_trans)
Y_pred_prob = clf.predict_proba(X_test_trans)
print('Predictions:', Y_pred)
print('Probabilities of class == 1:', Y_pred_prob[:, 1] * 100)
print('Accuracy on test set = {:.2f}%'.format(metrics.accuracy_score(Y_test_shift, Y_pred) * 100))
print('Log-loss = {:.5f}'.format(metrics.log_loss(Y_test_shift, Y_pred_prob)))
feature_importances = | pd.Series(clf.feature_importances_, index=results_wo_returns.columns) | pandas.Series |
import numpy
import pandas
from sklearn.metrics import mean_squared_error, r2_score
import joblib
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
#features of input file
#id,betweenness,closeness,follow,posts,interval,length,movie_posts,reposts,coapper_posts,boxoffice,fans,verifyName
data = | pandas.read_csv('actor-network-characteristics.csv', sep=',', dtype={'verifyName':object}, encoding='gbk') | pandas.read_csv |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
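# Hedged usage sketch (not part of this module): how the (error, check) pair returned by
# validate_165 might be exercised against small in-memory frames. All column values below
# are invented for illustration only.
def _demo_validate_165():
    error, check = validate_165()
    dfs = {
        'Header': pd.DataFrame({'CHILD': ['1'], 'SEX': ['1'], 'MOTHER': ['2']}),
        'Episodes': pd.DataFrame({'CHILD': ['1'], 'DECOM': ['01/06/2020']}),
        'OC3': pd.DataFrame({'CHILD': ['1'], 'IN_TOUCH': [None], 'ACTIV': [None], 'ACCOM': [None]}),
        'metadata': {'collection_start': '01/04/2020', 'collection_end': '31/03/2021'},
    }
    # Row 0 should be flagged because MOTHER == '2' is not in the valid set {'0', '1'}
    return check(dfs)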
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = | pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Mon May 20 17:51:38 2019
@author: sebas
"""
from urllib.request import urlopen
import re
import csv
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
COLUMN_NAMES = ['Game_id', 'Period', 'Time', 'Event_num', 'Event_Msg_Type',
'Option1', 'Distance', 'Top', 'Left', 'Person1', 'Person2', 'Team_id',
'Aw_Score', 'Hm_Score']
MONTHS = ["october",
"november",
"december",
"january",
"february",
"march",
"april",
"may"]
def return_action(row, period, score, away, home):
"""
Parses a play-by-play row and returns the corresponding event data, updating period and score in place
Inputs:
row - beautifulsoup tag to procees
period - array-like period of the game
score - array-like away and home score
away - away team id
home - home team id
Outputs:
time - game clock value for the event
event_msg_type - type of event
team - team performing the action
player1 - player performing the action
player2 - secondary player on the play
option1 - miscellaneous information about the play
distance - shot distance in feet (NaN if not a shot)
description - raw play description text
"""
columns = row.findAll('td')
time = columns[0].text
event_msg_type = ""
team = ""
option1 = ""
player1 = ""
player2 = ""
distance = np.nan
#determine team id
if columns[1].text != '\xa0':
description = columns[1].text
team = away
elif columns[5].text != '\xa0':
description = columns[5].text
team = home
if description == "":
return time, event_msg_type, team, player1, player2, option1, distance, description
#determine Event Type and update scores
if description.split()[0] == 'Jump':
event_msg_type = '10'
elif 'free throw' in description or ' no shot' in description:
event_msg_type = '3'
if ' makes ' in description:
option1 = 1
if team == away:
score[0] += 1
else:
score[1] += 1
else:
option1 = 0
elif ' makes ' in description:
event_msg_type = '1'
option1 = re.search('\\d-pt', description).group()[0]
if team == away:
score[0] += int(option1)
else:
score[1] += int(option1)
if "at rim" in description:
distance = 0
else:
distance = int(re.search(' from (\d+) ft', description).group(1))
elif ' misses ' in description:
event_msg_type = '2'
option1 = re.search('\\d-pt', description).group()[0]
if "at rim" in description:
distance = 0
else:
distance = int(re.search(' from (\d+) ft', description).group(1))
elif ' rebound ' in description:
event_msg_type = '4'
elif ' turnover ' in description:
event_msg_type = '5'
elif ' foul ' in description:
event_msg_type = '6'
elif 'Violation' in description:
event_msg_type = '7'
elif ' enters ' in description:
event_msg_type = '8'
elif 'timeout' in description:
event_msg_type = '9'
elif 'ejected' in description:
event_msg_type = '11'
elif 'Start of ' in description:
event_msg_type = '12'
if description != "Start of 1st quarter":
period[0] += 1
else:
event_msg_type = '13'
#get player ids
player1, player2 = get_player_ids(row.findAll('a'))
#if event_msg_type in ['1','2'] and team == home:
#print(description)
return time, event_msg_type, team, player1, player2, option1, distance, description
def get_player_ids(player_tags):
"""Returns the player ids for an event"""
try:
tag1 = player_tags[0]
player1 = str(tag1).split('.html')[0].split('/')[-1]
except IndexError:
player1 = ''
try:
tag2 = player_tags[1]
player2 = str(tag2).split('.html')[0].split('/')[-1]
except IndexError:
player2 = ''
return player1, player2
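# Hedged illustration (not in the original scraper): get_player_ids extracts the
# basketball-reference player slug from anchor tags; the markup below is invented.
def _demo_get_player_ids():
    html = '<a href="/players/j/jamesle01.html">LeBron James</a>'
    tags = BeautifulSoup(html, 'html.parser').findAll('a')
    return get_player_ids(tags)   # ('jamesle01', '')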
def pbp_to_df(away, home, date):
"""
Returns the play by play data as a dataframe
Inputs:
away - away team id
home - home team id
date - date of the desired game (yyyymmdd)
Output:
df - dataframe containing the play by play data
"""
#get pbp table
pbp_quotepage = "https://www.basketball-reference.com/boxscores/pbp/{}0{}.html".format(date, home)
pbp_page = urlopen(pbp_quotepage)
pbp_soup = BeautifulSoup(pbp_page, 'html.parser')
pbp = pbp_soup.find('table', id='pbp')
pbp_rows = pbp.findAll('tr')
#get shot chart
shot_quotepage = "https://www.basketball-reference.com/boxscores/shot-chart/{}0{}.html".format(date, home)
shot_page = urlopen(shot_quotepage)
shot_soup = BeautifulSoup(shot_page, 'html.parser')
away_chart = shot_soup.find('div', id='shots-{}'.format(away))
home_chart = shot_soup.find('div', id='shots-{}'.format(home))
away_shot_rows = away_chart.findAll('div')
home_shot_rows = home_chart.findAll('div')
game_id = "{}-{}-{}".format(away, home, date)
df = | pd.DataFrame(columns=COLUMN_NAMES) | pandas.DataFrame |
from flask import Flask
import os
app = Flask(__name__)
#Test Route
@app.route('/test',methods=['GET'])
def test():
return 'Pinging Model Application!!'
#Prediction from model
import pickle
import sys
import pandas as pd
import numpy
import matplotlib.pyplot as plt
import seaborn as sns
from flask import request,jsonify
@app.route('/predict',methods=['POST'])
def predict():
#Reading model from binary file
with open('model/model.bin','rb') as f_in:
model = pickle.load(f_in)
student_data = request.get_json()
#Convert input JSON to a single-row DataFrame
data_df = | pd.DataFrame(student_data,index=[0]) | pandas.DataFrame |
"""
====================================
Tests the Filters Module
====================================
"""
import unittest
import numpy as np
import pandas as pd
from pypg.filters import butterfy, chebyfy, movefy
class TestFilters(unittest.TestCase):
"""
Unit tests for the filters module.
"""
def setUp(self):
self.ppg = [2438.0, 2438.0, 2438.0, 2455.0, 2455.0, 2384.0, 2384.0, 2384.0, 2418.0,
2418.0, 2415.0, 2415.0, 2415.0, 2398.0, 2398.0, 2388.0, 2388.0, 2388.0,
2340.0, 2340.0, 2340.0, 2340.0, 2340.0, 2399.0, 2399.0, 2353.0, 2353.0,
2353.0, 2318.0, 2318.0, 2324.0, 2324.0, 2324.0, 2283.0, 2283.0, 2333.0,
2333.0, 2333.0, 2326.0, 2326.0, 2274.0, 2274.0, 2274.0, 2309.0, 2309.0,
2224.0, 2224.0, 2224.0, 2288.0, 2288.0, 2268.0, 2268.0, 2268.0, 2250.0]
self.int_data = int(2438)
self.pandas_data = | pd.Series(self.ppg) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 09:39:05 2020
@author: anniewong
"""
#%% Import libraries
import os
import pandas as pd
import datetime
import logging
logging.getLogger().setLevel(logging.INFO)
#%%
def storePainIntensitiesForParticipants2to9(datadir, data):
"""This function updates a dataframe with the pain intensities
data for participants 2 to 9.
Params:
datadir: path to datafolder for participant X
data: pandas dataframe to store data
Returns:
data: dataframe data updated with pain intensities
"""
# Look for all Pain.csv files in directory
for file in os.listdir(datadir):
if file.endswith("Pain.csv"):
painfile = (datadir + file)
logging.info(f"Parsing file '{file}' for pain intensities")
# Read pain.csv as pandas dataframe
df = pd.read_csv(painfile)
# The date columns are not accurate for participant8, use the 'date' column minus 1 day to retrieve date
if file == 'Participant8Pain.csv':
df['ndate'] = pd.to_datetime(df['date']) - pd.DateOffset(1)
# For the rest of the participants, create date column from 'startyear', 'startmonth' and 'startday'
else:
df['ndate']=df.apply(lambda x: datetime.date(x['startyear'], x['startmonth'], x['startday']), axis=1)
# Pivot
df = df.pivot_table(values='intensity', index=df.ndate, columns='name', aggfunc='first')
df.index= | pd.DatetimeIndex(df.index) | pandas.DatetimeIndex |
import json
import os
import math
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import pickle
from sklearn import metrics
from sklearn.preprocessing import scale, StandardScaler, normalize
import time
import matplotlib.pyplot as plt
import progressbar
from iotpackage.__vars import features, featureGroups, dictFeatures, renameDevices, permanentRename
loadedCategories = None
storedFeatureGroups = None
devicecol = 'Device'
categorycol = 'Category'
CSVcols = ['Frame','Time','SrcIP','DstIP','Proto','tcpSrcPort','tcpDstPort','udpSrcPort','udpDstPort','Length','tcpACK','tcpSYN','tcpFIN','tcpRST','tcpPSH','tcpURG','Protocol', 'srcMAC', 'dstMAC']
NON_IOT = ['iPhone', 'Android Tablet', 'HP Printer', 'Samsung Galaxy Tab', 'Laptop', 'IPhone', 'Android Phone', 'iPad', 'Ubuntu Desktop', 'MacBook', 'MacBook/Iphone', 'Nexus Tablet', 'Android Phone', 'Desktop', 'Motog phone', 'Router', 'Pixel 2 Phone']
def addToListMapping(mapping, key, value):
if key in mapping:
mapping[key].append(value)
else:
mapping[key] = [value]
def remapLabel(device, mapping):
for m in mapping:
if device in mapping[m]:
return m
raise Exception(f"No Mapping For Device: {device}")
def getCategoryMapping(devices, mapping):
category_mapping = {}
devices = set(devices)
for device in devices:
category = findCategory(mapping, device)
if category is None:
raise ValueError(f'No Company Category Mapping For Device: {device}')
else:
addToListMapping(category_mapping, category, device)
return category_mapping
def findCategory(category_mapping, device):
for category in category_mapping:
if device in category_mapping[category]:
return category
return None
def getCommonLabels(data1, data2, label_col='Device', print_common=True):
if isinstance(data1, pd.DataFrame) and isinstance(data2, pd.DataFrame):
data1 = data1[label_col]
data2 = data2[label_col]
if not isinstance(data1, pd.Series):
data1 = pd.Series(data1)
if not isinstance(data2, pd.Series):
data2 = pd.Series(data2)
uniqueDevices_data1 = set(data1.unique())
uniqueDevices_data2 = set(data2.unique())
uniqueDevices_data1.discard('NoN-IoT')
common_labels = list(uniqueDevices_data1.intersection(uniqueDevices_data2))
if print_common:
print('Common Labels:', common_labels)
return common_labels
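# Hedged illustration (not in the original module): getCommonLabels on two small label
# series; the device names are invented.
def _demo_common_labels():
    s1 = pd.Series(['Echo', 'Hue Bridge', 'NoN-IoT'])
    s2 = pd.Series(['Echo', 'Chromecast'])
    return getCommonLabels(s1, s2, print_common=False)   # ['Echo']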
def findOptimalThreshold(fpr, tpr, thresholds):
points = {}
for i in range(0, len(thresholds)):
points[thresholds[i]] = [fpr[i], tpr[i]]
min = float('inf')
threshold = None
for k in points:
try:
[[i]] = metrics.pairwise.euclidean_distances([points[k]], [[0,1]])
except:
continue
if i < min:
min = i
threshold = k
return points[threshold][0], points[threshold][1], threshold
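# Hedged usage sketch (not in the original module): feeding sklearn ROC output into
# findOptimalThreshold; the labels and scores below are invented.
def _demo_optimal_threshold():
    y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
    y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9, 0.5])
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
    # Returns the (fpr, tpr) point closest to (0, 1) and its threshold
    return findOptimalThreshold(fpr, tpr, thresholds)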
# Plots the Confusion Matrix. Also used to store the values to plot later
def plotCM(y_true, y_pred, store_cm=None, plot_cm=True):
labels = list(y_true.unique())
labels.sort()
cm = metrics.confusion_matrix(y_true, y_pred, labels=labels)
cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
if isinstance(store_cm, str):
#Path is provided to store cmn
pd.DataFrame(cmn).to_csv(store_cm + '-CM.csv', index=False)
pd.Series(labels).to_csv(store_cm + '-Labels.csv', index=False)
if plot_cm:
fig, ax = plt.subplots()
sns.heatmap(cmn, annot=True, fmt='.2f', xticklabels=labels, yticklabels=labels, cmap="Blues", cbar=False)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
# Plots all the AUC curves used in the paper
def plotAUCCurve(fpr, tpr, roc_auc):
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (AUC = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Unknown Devices')
plt.legend(loc="lower right")
plt.show()
def getCommonLabelData(X1, X2, y1=None, y2=None, label_col=None, common_both=True, return_y=False, print_common=True):
if label_col:
y1 = X1[label_col]
y2 = X2[label_col]
elif y1 is None or y2 is None:
raise ValueError('Either y1, y2 or label_col must be defined')
X2.reset_index(drop=True, inplace=True)
y2.reset_index(drop=True, inplace=True)
commonDevices = getCommonLabels(y1, y2, print_common=print_common)
common_loc2 = y2.isin(commonDevices)
try:
X2 = X2[common_loc2]
except:
print(X2)
print(common_loc2)
if return_y:
y2 = y2[common_loc2]
else:
y2 = None
if common_both:
common_loc1 = y1.isin(commonDevices)
X1 = X1[common_loc1]
if return_y:
y1 = y1[common_loc1]
else:
y1 = None
return X1, y1, X2, y2
def getFeatureNames():
return list(features.keys())
def getFeatureGroups():
return featureGroups
def normalizeFeatureData(featureData):
listSimpleFeatures = list(set(list(features.keys())) - set(list(dictFeatures.keys())))
featureData[listSimpleFeatures] = normalize(featureData[listSimpleFeatures], axis=0)
return featureData
def renameLabels(featureData, labelCol, destCol, mappings, error_raise=True):
if not isinstance(featureData, pd.DataFrame):
raise ValueError(f'featureData must be a Pandas DataFrame given {type(featureData)}')
if not isinstance(labelCol, str):
raise ValueError(f'labelCol must be a str given {type(labelCol)}')
if not isinstance(destCol, str):
raise ValueError(f'destCol must be a str given {type(destCol)}')
if not isinstance(mappings, dict):
raise ValueError(f'mappings must be of type dict given {type(mappings)}')
for label in mappings:
loc = featureData[labelCol].isin(mappings[label])
featureData.loc[featureData[labelCol].isin(mappings[label]), destCol] = label
featureData.loc[featureData[labelCol].isin(NON_IOT), destCol] = 'NoN-IoT'
if featureData[destCol].isna().sum() and error_raise:
raise Exception(f'No Mappings For {featureData.loc[featureData[destCol].isna(), labelCol].unique()}')
else:
return featureData
def renameSimilarDevices(devicesSeries):
if not isinstance(devicesSeries, pd.Series): raise ValueError(f'Expected devicesSeries to be pandas.Series given {type(devicesSeries)}')
for device in renameDevices:
devicesSeries.loc[devicesSeries.isin(renameDevices[device])] = device
return devicesSeries
def renameNonIoTDevices(devicesSeries):
if not isinstance(devicesSeries, pd.Series): raise ValueError(f'Expected devicesSeries to be pandas.Series given {type(devicesSeries)}')
devicesSeries.loc[devicesSeries.isin(NON_IOT)] = 'NoN-IoT'
return devicesSeries
# This function loads the feature data and does some processing that some experiments might require e.g renaming non_iot to 'NoN-IoT' etc
def loadFeatureData(dataset_base_path, shuffle=True, normalize=True, fillna=True, rename_similar=True, rename_non_iot=True, verbose=0):
# Sanity Checks
if not os.path.exists(dataset_base_path):
dataset_base_path = os.path.join(os.getenv('IOTBASE'), dataset_base_path)
if not os.path.exists(dataset_base_path):
raise FileNotFoundError(f'dataset_base_path: {dataset_base_path} does not exist')
if os.path.isdir(dataset_base_path):
feature_data_path = os.path.join(dataset_base_path, 'featureData.pkl')
else:
feature_data_path = dataset_base_path
if not os.path.exists(feature_data_path):
raise FileNotFoundError(f'devices_file: {feature_data_path} does not exist')
# Loads from the disk
featureData = | pd.read_pickle(feature_data_path) | pandas.read_pickle |
import argparse
import os
import sys
import glob
import pandas
import numpy
import nipy.modalities.fmri.hrf
import matplotlib.pyplot as plt
from nipy import load_image
from subprocess import call
from nipy.core.image.image import Image
from nipy import save_image
import scipy.stats
from scipy.ndimage import convolve1d
from scipy.sparse import spdiags
from scipy.linalg import toeplitz
pandas.options.mode.chained_assignment = None
parser = argparse.ArgumentParser(prog="betaSeriesReg")
parser.add_argument("input_BIDS", help="target BIDS dataset that has derivatives and fmriprep preprocessed files present")
parser.add_argument("task_name", help="task name to analyze.")
parser.add_argument("TR", help = "TR in seconds of targetted scans")
parser.add_argument("-targetsuffix", help="target image file suffix. Defaults to Asym_preproc.nii.gz", default = "Asym_preproc.nii.gz")
parser.add_argument("-session", help="session to analyze. (0 analyzes all sessions)", default = 0)
parser.add_argument("-subject", help= "subject to analyze.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-aC", "--aCompCor",help="regress out aCompCor (NOTE: By default this supersedes all other nuisance regression (except GSR), see --override)", action="store_true")
group.add_argument("-tC", "--tCompCor",help="regress out tCompCor (NOTE: By default this supersedes all other nuisance regression (except GSR), see --override)", action="store_true")
group.add_argument("-ica", "--ICA_AROMA",help="regress out ICA-AROMA components (NOTE: By default this supersedes all other nuisance regression (except GSR), see --override)", action="store_true")
parser.add_argument("--p_reg", help="which regression approach you would like to use, options are base, quad, lagged, and quadlagged",
choices = ["base", "quad", "lagged", "quadlagged"], default="quadlagged")
parser.add_argument("--mot", help="don't include 6 parameters of motion in regression?", action="store_false")
parser.add_argument("--wm", help="don't include white matter in regression?", action="store_false")
parser.add_argument("--csf", help="don't include cerebro-spinal fluid in regression?", action="store_false")
parser.add_argument("--gsr", help="include global signal in regression?", action="store_true")
parser.add_argument("--nodatacheck", help="if option is included, will not inform user how many datasets are about to be processed, and will not require final confirmation", action = "store_true")
parser.add_argument("--noregcheck", help="if option is included, will not inform user what regression/method is used, and will not require final confirmation", action = "store_true")
parser.add_argument("--override", help="EXPERIMENTAL: combine component and nuisance regression", action="store_true")
parser.add_argument("--overwrite", help="allow for overwriting regressor matrices and output nii.gz", action="store_true")
parser.add_argument("--suffix", help="what suffix will be appended to denote new regressor matrix and output nii.gz (default: nuisReg)", default = "nuisReg")
parser.add_argument("--log_dir", help="log file directory name within derivatives, will create a directory if directory doesn't exist (default: nuisRegLog)", default = "nuisRegLog")
parser.add_argument("-q","--quiet", help="do NOT print activity to console, activity will still be recorded in log file", action = "store_true")
args = parser.parse_args()
targetdirs = args.input_BIDS +"/derivatives/fmriprep"
print(targetdirs)
if not os.path.isdir(targetdirs):
print("fmriprep directory is not present in the BIDS/derivatives directory. Have you run fmriprep? Ending function...")
sys.exit()
files = glob.glob(targetdirs+"/**/*.nii.gz", recursive = True)
targets = [i for i in files if "bold" in i]
targets = [i for i in targets if args.targetsuffix in i]
targets = [i for i in targets if "task-"+args.task_name +"_" in i]
if args.session != 0:
targets = [i for i in targets if "ses-" + args.session + "_" in i]
if args.subject is not None:
targets = [i for i in targets if "sub-" + args.subject + "_" in i]
subs = [i.split("/") for i in targets]
subList = []
for file in subs:
subtarget = [i for i in file if "sub-" in i]
subList.extend([subtarget[0]])
uniqueSubs = set(subList)
import collections
counter = collections.Counter(subList).values()
minNum = min(counter)
maxNum = max(counter)
dataDesc = "This data set contains " + str(len(uniqueSubs)) + " subjects with at max " + str(maxNum) +" and at minimum " + str(minNum) +" functional scans. Does this look correct?"
fileNameList = []
for file in subs:
fileName = file[len(file)-1]
fileNameList.extend([fileName])
onsetfiles = glob.glob(args.input_BIDS+"/**/*_events.tsv", recursive = True)
onsetfiles = [i for i in onsetfiles if "task-"+args.task_name +"_" in i]
if args.session is not 0:
onsetfiles = [i for i in onsetfiles if "ses-" + args.session + "_" in i]
if args.subject is not None:
onsetfiles = [i for i in onsetfiles if "sub-" + args.subject + "_" in i]
fileNameList = [i.split("bold")[0] for i in fileNameList]
fileNameList = [i + "events.tsv" for i in fileNameList]
targetOnsetList = []
for file in fileNameList:
indexSet = []
for i, j in enumerate(onsetfiles):
if file in j:
indexSet.extend([i])
if len(indexSet) != 1:
targetOnsetList.extend(["NotFound"])
else:
targetOnsetList.extend([onsetfiles[indexSet[0]]])
onsetcounter = [1 if i is not "NotFound" else 0 for i in targetOnsetList]
proportion = sum(onsetcounter)/len(fileNameList)*100
dataDesc = "This data set contains " + str(len(uniqueSubs)) + " subjects with at max " + str(maxNum) +" and at minimum " + str(minNum) +\
" functional scans with specified task and session. "+ str(proportion) + " have onset files. Does this look correct?"
if not args.nodatacheck:
print(dataDesc)
while True:
userinput = input("y/n: ")
if userinput == "n":
print("Ending this script. Please check your dataset and try again.")
sys.exit()
if userinput != "y" and userinput !="n":
continue
else:
break
regParamLabels = []
paramCounter = 0
types = ""
if args.mot:
paramCounter = 6
types = types + " the 6 parameters of motion"
regParamLabels.extend(["X", "Y", "Z", "RotX", "RotY", "RotZ"])
if args.csf:
paramCounter = paramCounter+1
types = types + " CSF signal"
regParamLabels.extend(["CSF"])
if args.wm:
paramCounter = paramCounter+1
types = types + " WM signal"
regParamLabels.extend(["WhiteMatter"])
if args.gsr:
paramCounter = paramCounter+1
types = types + " GSR"
regParamLabels.extend(["GlobalSignal"])
if args.p_reg == "quadlagged":
factor = 4
desc = "and contains main effects, quadratic and temporal derivatives (lag-1 differences) and squares of the derivatives of " +types +". "
if args.p_reg == "lagged":
factor = 2
desc = "and contains main effects and temporal derivatives (lag-1 differences) of " +types +". "
if args.p_reg == "quad":
factor = 2
desc = "and contains main effects and quadratic effects of " +types +". "
if args.p_reg == "base":
factor = 1
desc = "and contains main effects of " +types +". "
pnum = paramCounter*factor
regNums = "The nuisance regression has " +str(pnum) +" parameters (plus overall intercept) "
regDesc = regNums + desc
scrub=""
compLabels = []
if args.aCompCor:
responses = "You have chosen to regress out aCompCor components. "
if args.tCompCor:
responses = "You have chosen to regress out tCompCor components. "
if args.ICA_AROMA:
responses = "You have chosen to regress out ICA-AROMA components. "
printout = ""
gsrWarning = "No GSR is being performed. "
if args.gsr:
gsrWarning = "WARNING: GSR is being performed. "
if args.aCompCor or args.tCompCor or args.ICA_AROMA:
printout = printout + responses
if args.override:
overrid = "WARNING: You have chosen to perform full nuisance regression in addition to a component approach. "
printout = printout+overrid+regDesc+scrub + gsrWarning +" Is this correct?"
if not args.noregcheck:
print(printout)
while True:
userinput = input("y/n: ")
if userinput == "n":
print("Ending this script. Please check your options and try again.")
sys.exit()
if userinput != "y" and userinput != "n":
continue
else:
break
else:
printout = printout + scrub + gsrWarning + " Is this correct?"
if not args.noregcheck:
print(printout)
while True:
userinput = input("y/n: ")
if userinput == "n":
print("Ending this script. Please check your options and try again.")
sys.exit()
if userinput != "y" and userinput != "n":
continue
else:
break
else:
printout = printout+regDesc+scrub +gsrWarning +" Is this correct?"
if not args.noregcheck:
print(printout)
while True:
userinput = input("y/n: ")
if userinput == "n":
print("Ending this script. Please check your options and try again.")
sys.exit()
if userinput != "y" and userinput != "n":
continue
else:
break
#END OF PREPARATION SECTION
print("\nSacrifice is ready. Beginning confound removal.")
if not os.path.exists(args.input_BIDS+"/derivatives/"+args.log_dir):
os.makedirs(args.input_BIDS+"/derivatives/"+args.log_dir)
from datetime import datetime
time= str(datetime.now())
print(targets)
print(targetOnsetList)
for i in targets:
indexTarget = targets.index(i)
print(i)
print(targetOnsetList[indexTarget] + "\n")
logFile = open(args.input_BIDS+"/derivatives/"+args.log_dir+"/"+time+"_"+args.suffix+".txt","w+")
logFile.write("Log file for "+ args.suffix +" suffixed run at " + time + "\n\n")
logFile.write(dataDesc+"\n\n")
logFile.write(printout+"\n\n")
logFile.write("Begin Processing Log"+"\n")
for i in targets:
indexTarget = targets.index(i)
if not args.quiet: print("Finding nuisance regressor file for " + i)
#logFile.write("Finding nuisance regressor file for " + i + "\n")
bits = i.split("_")
comTarget = bits.index("bold")+1
bits = bits[0:(comTarget)]
targetRegs = '_'.join(bits)+"_confounds.tsv"
compLabels = []
if not os.path.exists(targetRegs):
if not args.quiet: print("Did not find confound file for" + i)
#logFile.write("Did not find confound file for " + i + "\n")
else:
if not args.quiet: print("Found confound file for" + i+", "+ targetRegs)
#logFile.write("Found confound file for" + i+", "+ targetRegs + "\n")
confounds = pandas.read_table(targetRegs,dtype="float",na_values="n/a")
if args.aCompCor or args.tCompCor or args.ICA_AROMA:
if args.aCompCor:
targetCompLabels = [col for col in confounds.columns if 'aCompCor' in col]
compLabels.extend(targetCompLabels)
if args.tCompCor:
targetCompLabels = [col for col in confounds.columns if 'tCompCor' in col]
compLabels.extend(targetCompLabels)
if args.ICA_AROMA:
targetCompLabels = [col for col in confounds.columns if 'AROMAAggrComp' in col]
compLabels.extend(targetCompLabels)
if args.override:
targetConfounds = confounds[regParamLabels]
if args.p_reg == "lagged":
targetConfoundsLagged = targetConfounds.diff()
targetConfounds = pandas.concat([targetConfounds,targetConfoundsLagged], axis=1)
targetConfounds['intercept'] = 1
if args.p_reg == "quad":
targetConfoundsQuad = targetConfounds.pow(2)
targetConfounds = pandas.concat([targetConfounds,targetConfoundsQuad], axis=1)
targetConfounds['intercept'] = 1
if args.p_reg == "quadlagged":
targetConfoundsLagged = targetConfounds.diff()
targetConfounds = pandas.concat([targetConfounds,targetConfoundsLagged], axis=1)
targetConfoundsQuad = targetConfounds.pow(2)
targetConfounds = pandas.concat([targetConfounds,targetConfoundsQuad], axis=1)
targetConfounds['intercept'] = 1
targetCompConfounds = confounds[compLabels]
targetConfounds = pandas.concat([targetConfounds,targetCompConfounds], axis=1)
else:
targetConfounds = confounds[compLabels]
targetConfounds['intercept'] = 1
if args.gsr:
targetConfounds = pandas.concat([targetConfounds, confounds["GlobalSignal"]], axis=1)
else:
targetConfounds = confounds[regParamLabels]
if args.p_reg == "lagged":
targetConfoundsLagged = targetConfounds.diff()
targetConfounds= pandas.concat([targetConfounds,targetConfoundsLagged], axis =1)
targetConfounds['intercept'] = 1
if args.p_reg == "quad":
targetConfoundsQuad = targetConfounds.pow(2)
targetConfounds = pandas.concat([targetConfounds,targetConfoundsQuad], axis=1)
targetConfounds['intercept'] = 1
if args.p_reg == "quadlagged":
targetConfoundsLagged = targetConfounds.diff()
targetConfounds= pandas.concat([targetConfounds,targetConfoundsLagged], axis=1)
targetConfoundsQuad = targetConfounds.pow(2)
targetConfounds = | pandas.concat([targetConfounds,targetConfoundsQuad], axis=1) | pandas.concat |
from unittest import TestCase
import pandas as pd
from moonstone.parsers.transform.base import TransformBase
class TestTransformBase(TestCase):
def test_historize(self):
df = pd.DataFrame(
[
[1, 2, 3],
[4, 5, 6]
],
columns=['a', 'b', 'c']
)
action = 'an_action'
arguments = {'arg1': 1, 'arg2': 2}
col_name = 'a'
expected_history = [
[action, {'col_name': col_name, **arguments}]
]
transform_base = TransformBase(df)
| pd.testing.assert_frame_equal(transform_base.raw_df, transform_base.df) | pandas.testing.assert_frame_equal |
import sys
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import argparse
from src import predict_data, preprocessing
from collections import Counter
from sklearn.model_selection import train_test_split
from imblearn import over_sampling
from imblearn import combine
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve, auc
# set save path
def set_path(basename):
os.makedirs('./output', exist_ok=True)
name = os.path.splitext(basename)
save_path = 'output/{}.csv'.format(name[0])
return save_path, name[0]
# train data + mndo data
def append_mndo(X_train, y_train, df):
X_mndo = df.drop('Label', axis=1)
y_mndo = df.Label
X_mndo = np.concatenate((X_mndo, X_train), axis=0)
y_mndo = np.concatenate((y_mndo, y_train), axis=0)
return X_mndo, y_mndo
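# Hedged illustration (not part of the original script): append_mndo simply stacks
# generated minority-class samples on top of the training split; shapes here are invented.
def _demo_append_mndo():
    X_train = np.zeros((10, 3))
    y_train = np.zeros(10)
    generated = pd.DataFrame({'f1': [1.0], 'f2': [2.0], 'f3': [3.0], 'Label': [1]})
    X_all, y_all = append_mndo(X_train, y_train, generated)
    return X_all.shape, y_all.shape   # ((11, 3), (11,))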
if __name__ == '__main__':
# Load dataset
parser = argparse.ArgumentParser()
parser.add_argument('data', help='dataset')
parser.add_argument('generated', help='generated data')
args = parser.parse_args()
try:
data = pd.read_csv(args.data)
mndo_generated = | pd.read_csv(args.generated) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import random
# Date and Time
# =============
print(datetime.datetime(2000, 1, 1))
print(datetime.datetime.strptime("2000/1/1", "%Y/%m/%d"))
print(datetime.datetime(2000, 1, 1, 0, 0).strftime("%Y%m%d"))
# to_datetime
# ===========
print(pd.to_datetime("4th of July"))
print(pd.to_datetime("13.01.2000"))
print(pd.to_datetime("7/8/2000"))
print(pd.to_datetime("7/8/2000", dayfirst=True))
print(issubclass(pd.Timestamp, datetime.datetime))
ts = pd.to_datetime(946684800000000000)
print(ts.year, ts.month, ts.day, ts.weekday())
index = [pd.Timestamp("2000-01-01"),
pd.Timestamp("2000-01-02"),
pd.Timestamp("2000-01-03")]
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts)
print(ts.index)
ts = pd.Series(np.random.randn(len(index)),
index=["2000-01-01", "2000-01-02", "2000-01-03"])
print(ts.index)
index = pd.to_datetime(["2000-01-01", "2000-01-02", "2000-01-03"])
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts.index)
print(pd.date_range(start="2000-01-01", periods=3, freq='H'))
print(pd.date_range(start="2000-01-01", periods=3, freq='T'))
print(pd.date_range(start="2000-01-01", periods=3, freq='S'))
print(pd.date_range(start="2000-01-01", periods=3, freq='B'))
print(pd.date_range(start="2000-01-01", periods=5, freq='1D1h1min10s'))
print(pd.date_range(start="2000-01-01", periods=5, freq='12BH'))
bh = pd.tseries.offsets.BusinessHour(start='07:00', end='22:00')
print(bh)
print(pd.date_range(start="2000-01-01", periods=5, freq=12 * bh))
print(pd.date_range(start="2000-01-01", periods=5, freq='W-FRI'))
print(pd.date_range(start="2000-01-01", periods=5, freq='WOM-2TUE'))
s = pd.date_range(start="2000-01-01", periods=10, freq='BAS-JAN')
t = pd.date_range(start="2000-01-01", periods=10, freq='A-FEB')
s.union(t)
index = pd.date_range(start='2000-01-01', periods=200, freq='B')
print(index)
ts = pd.Series(np.random.randn(len(index)), index=index)
walk = ts.cumsum()
walk.plot()
plt.savefig('random_walk.png')
print(ts.head())
print(ts[0])
print(ts[1:3])
print(ts['2000-01-03'])
print(ts[datetime.datetime(2000, 1, 3)])
print(ts['2000-01-03':'2000-01-05'])
print(ts['2000-01-03':datetime.datetime(2000, 1, 5)])
print(ts['2000-01-03':datetime.date(2000, 1, 5)])
print(ts['2000-02'])
print(ts['2000-03':'2000-05'])
small_ts = ts['2000-02-01':'2000-02-05']
print(small_ts)
print(small_ts.shift(2))
print(small_ts.shift(-2))
# Downsampling
# ============
rng = pd.date_range('4/29/2015 8:00', periods=600, freq='T')
ts = pd.Series(np.random.randint(0, 100, len(rng)), index=rng)
print(ts.head())
print(ts.resample('10min').head())
print(ts.resample('10min', how='sum').head())
print(ts.resample('1h', how='sum').head())
print(ts.resample('1h', how='max').head())
print(ts.resample('1h', how=lambda m: random.choice(m)).head())
print(ts.resample('1h', how='ohlc').head())
# Upsampling
# ==========
rng = | pd.date_range('4/29/2015 8:00', periods=10, freq='H') | pandas.date_range |
import pandas as pd
import pytest
from maker import Board
from utils import match_count
nan = float("nan")
@pytest.mark.parametrize(('player', 'match'), [
(128, 64),
(127, 63),
(100, 36),
(65, 1),
(64, 32),
(3, 1),
(2, 1),
])
def test_match_count(player, match):
assert match_count(player) == match
def pytest_funcarg__board(request):
return Board(match_count=2, keys=["club", "region"])
@pytest.mark.parametrize(("a", "b", "expected"), [
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "a", "region": "west"}),
False),
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "b", "region": nan}),
True),
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "b", "region": "west"}),
True),
(pd.Series({"club": "a", "region": "west"}),
pd.Series({"club": "b", "region": "west"}),
False),
])
def test_valid_match(a, b, expected):
board = Board(match_count=2, keys=["club", "region"])
assert board._is_valid(a, b) is expected
@pytest.mark.parametrize(("data"), [
[pd.Series({"club": "a"}),
pd.Series({"club": "a"}),
| pd.Series({"club": "a"}) | pandas.Series |
#####
# BA Amadou 16 187 314
# YING Xu 18 205 032
# ABOU Hamza 17 057 836
###
import pandas as pd
import numpy as np
import os, sys
sys.path.append(os.path.dirname(os.path.join(os.getcwd())))
from .data_loader import DataLoader
import random
import cv2
import math
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.decomposition import PCA
from PIL import Image
from tqdm import tqdm
class DataPreprocessing:
train_repo = "../dataset/train.csv"
images_repo = "../dataset/images/"
r = random.randint(17, 27)
X_data_train = pd.DataFrame()
X_data_test = pd.DataFrame()
X_img_train = pd.DataFrame()
X_img_test = pd.DataFrame()
X_all_train = pd.DataFrame()
X_all_test = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import types
import pandas as pd
import numpy as np
import json
from pdsf import sflake as sf
from utils import split_months
def process_allo(param, permit_use):
"""
Function to process the consented allocation from the input tables from Accela and others.
More descriptions in the code below.
Parameters
----------
param : dict
Input parameters
permit_use : DataFrame
DataFrame from the output of the process_use_types function
Returns
-------
DataFrame
"""
run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for t in param['misc']['AllocationProcessing']['tables']:
p = param['source data'][t]
print(p['table'])
if p['schema'] != 'public':
stmt = 'select {cols} from "{schema}"."{table}"'.format(schema=p['schema'], table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
else:
stmt = 'select {cols} from "{table}"'.format(table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
setattr(db, t, sf.read_table(p['username'], p['password'], p['account'], p['database'], p['schema'], stmt))
##################################################
### Sites
print('--Process Waps')
## takes
wap_allo1 = db.wap_allo.copy()
wap1 = wap_allo1['Wap'].unique()
waps = wap1[~pd.isnull(wap1)].copy()
## Check that all Waps exist in the USM sites table
usm_waps1 = db.waps[db.waps.isin(waps)].copy()
# usm_waps1[['NzTmX', 'NzTmY']] = usm_waps1[['NzTmX', 'NzTmY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.Wap))
print('Missing {} Waps in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.Wap.isin(miss_waps)].copy()
##################################################
### Permit table
print('--Process Permits')
'''
WILCO:
Selection FromDate and toDate was a bit of a pain in the ass i remember for the Rakaia as well. I don't think there is any filtering done here below yet, but maybe it is
good to consider that:
1) Some consents may have never been active between the FromDate and ToDate. The 'Given Effect To' field can help with that. If the given effect to is larger than the
toDate, then that consent was never exercised and (at least for modelling purposes) should be dropped from the list of consents.
2) If the Given Effect To date is larger than the fromDate, then set FromDate equal to Given Effect To.
3) For parent and child consents (orginal and renewals) it is good to check the FromDate and ToDate. In the Ecan database the FromDate of the renewal is most of the time
equal to the ToDate of the parent (original record), which would lead to double accounting for that day. For the Rakaia I fixed this by making sure that sure that
the toDate is always 1 day before the frommDate of the child consent.
Below I have inserted some (commented) code that I used in my Rakaia work, so not sure whether you want to use this yes/no.
'''
# #-Select consents that were active between sdate and edate
# print 'Filter consents that were active between %s and %s...' %(sdate.strftime('%d-%m-%Y'), edate.strftime('%d-%m-%Y'))
# df1 = df.loc[(df['toDate']>pd.Timestamp(sdate)) & (df['fmDate']<=pd.Timestamp(edate))]
# #-If 'Given Effect To' date is later than 'toDate', then consent was never active in between the fmDate-toDate period, and is therefore removed from the dataframe
# df1.loc[(df1['Given Effect To'] > df1['toDate']),:]=np.nan
# df2 = df1.dropna(how='all')
# #-If 'Given Effect To' date is later than 'fmDate', then the 'fmDate' field is set to 'Given Effect To'
# df2.loc[(df2['fmDate'] < df2['Given Effect To']),['fmDate']]= df2['Given Effect To']
#
# #-Unique consent numbers of 'OriginalRecord'
# ori_records = pd.unique(df2['OriginalRecord'])
# df2_columns = list(df2.columns)
# fmDate_index = df2_columns.index('fmDate')
# toDate_index = df2_columns.index('toDate')
# #-Make sure toDate is always 1 day before the fmDate of the child consent. Required to make sure that a consent isn't active twice on one day
# for c in ori_records:
# #-select the consents that belong to the same group (have same parent so to speak)
# df_short = df2.loc[df2['OriginalRecord']==c]
# for i in range(0,len(df_short)-1):
# toDate = df_short.iloc[i,toDate_index] #-toDate of current record
# fmDate = df_short.iloc[i+1,fmDate_index] #-fromDate of child record
# if toDate == fmDate: #-cannot be equal. If so, then decrease the todate of the current record with one day
# df_short.iloc[i, toDate_index] = toDate - dt.timedelta(days=1)
# df2.loc[df2['OriginalRecord']==c] = df_short
# #-get rid of old dataframes
# df = df2.copy()
# df1 = None; df2 = None; del df1, df2
#
# #-For consents that are active for one day, the toDate may now (because of extracting one day from toDate) be smaller than fmDate. Those records are removed
# df = df.loc[df['toDate']>=df['fmDate']]
## Clean data
permits2 = db.permit.copy()
permits2['FromDate'] = pd.to_datetime(permits2['FromDate'], infer_datetime_format=True, errors='coerce')
permits2['ToDate'] = pd.to_datetime(permits2['ToDate'], infer_datetime_format=True, errors='coerce')
## Filter data
permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull()].copy()
# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NzTmX.notnull() & permits2.NzTmY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
## Convert datetimes to date
permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = pd.Timestamp('1900-01-01')
permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = pd.Timestamp('1900-01-01')
##################################################
### Parent-Child
print('--Process Parent-child table')
## Clean data
pc1 = db.parent_child.copy()
## Filter data
pc1 = pc1.drop_duplicates()
pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]
## Check foreign keys --> what are foreign keys?
crc1 = permits2.RecordNumber.unique()
pc0 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()
#################################################
### AllocatedRatesVolumes
print('--Process Allocation data')
## Rates
# Clean data
wa1 = wap_allo1.copy()
# Check foreign keys
wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()
# Find the missing Waps per consent
crc_wap_mis1 = wa4.loc[wa4.Wap.isnull(), 'RecordNumber'].unique()
crc_wap4 = wa4[['RecordNumber', 'Wap']].drop_duplicates()
for i in crc_wap_mis1:
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values
wap1 = []
while (len(crc2) > 0) & (len(wap1) == 0):
wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'Wap'].values
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values
if len(wap1) > 0:
wa4.loc[wa4.RecordNumber == i, 'Wap'] = wap1[0]
wa4 = wa4[wa4.Wap.notnull()].copy()
## Distribute the months
# Since the tables in accela have no explicit primary/composite keys, it is possible that the eventual composite key 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap' does not fully capture the Accela data set. It is possible that the rates also change by month. This occurs in less than 100 consents ever, so the simplification seems justified. The below code splits the consents out by each month that the consent is allowed to be active by the appropriate rates and volumes listed in the Accela table. Then the mean is taken over all months to ensure that there is only one value for 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap'.
cols1 = wa4.columns.tolist()
from_mon_pos = cols1.index('FromMonth')
to_mon_pos = cols1.index('ToMonth')
allo_rates_list = []
for val in wa4.itertuples(False, None):
from_month = int(val[from_mon_pos])
to_month = int(val[to_mon_pos])
if from_month > to_month:
mons = list(range(1, to_month + 1))
else:
mons = range(from_month, to_month + 1)
d1 = [val + (i,) for i in mons]
allo_rates_list.extend(d1)
col_names1 = wa4.columns.tolist()
col_names1.extend(['Month'])
wa5 = | pd.DataFrame(allo_rates_list, columns=col_names1) | pandas.DataFrame |
import os
import re
import string
import numpy as np
import pandas as pd
import spacy
from spacy.cli import download
download("en")
nlp = spacy.load("en")
class CleanText:
def remove_names(self, text):
"""
Parameters
--------
text: str
Returns
--------
cleaned_text: str
"""
all_names = pd.read_pickle(
os.path.join(os.path.dirname(__file__), "data/all_names")
)
cleaned_text = text
for _, row in all_names.iterrows():
# Matches name as long as it is not followed by lowercase characters
# Removing names that are a part of another word
cleaned_text = re.sub(row["name"] + "(?![a-z])", " ", cleaned_text)
return cleaned_text
def remove_links(self, text):
"""
Parameters
--------
text: str
Returns
--------
cleaned_text: str
"""
cleaned_text = text
links_found = re.findall(
r"(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})",
cleaned_text,
)
for link in links_found:
cleaned_text = cleaned_text.replace(link, "")
return cleaned_text
def lematize(self, text):
"""
Parameters
--------
text: str
Returns
--------
list of spacy tokens
"""
spacy_text = nlp(text)
return [token.lemma_ for token in spacy_text if not token.is_space]
def remove_email_greetings_signatures(self, text):
"""
In order to obtain the main text of an email only, this method removes greetings, signoffs,
and signatures by identifying sentences with less than 5% verbs to drop. Does not replace links.
Inspiration from: https://github.com/mynameisvinn/EmailParser
Parameters
--------
text: str
Returns
--------
text: str
"""
sentences = text.strip().split("\n")
non_sentences = []
for sentence in sentences:
spacy_text = nlp(sentence.strip())
verb_count = np.sum(
[
(
token.pos_ == "VERB"
or token.pos_ == "AUX"
or token.pos_ == "ROOT"
or token.pos_ == "pcomp"
)
for token in spacy_text
]
)
try:
prob = float(verb_count) / len(spacy_text)
except Exception:
prob = 1.0
# If 5% or less of a sentence is verbs, it's probably not a real sentence
if prob <= 0.05:
non_sentences.append(sentence)
for non_sentence in non_sentences:
# Don't replace links
if "http" not in non_sentence and non_sentence not in string.punctuation:
text = text.replace(non_sentence, "")
return text
def clean_column_names(self, df):
"""
Rename all columns to use underscores to reference columns without bracket formatting
Parameters
--------
df: DataFrame
Returns
--------
df: DataFrame
"""
df.rename(
columns=lambda x: str(x).strip().replace(" ", "_").lower(), inplace=True
)
return df
def remove_duplicate_columns(self, df):
"""
Remove columns with the same name
Parameters
--------
df: DataFrame
Returns
--------
df: DataFrame
"""
df = df.loc[:, ~df.columns.duplicated()]
return df
def fix_col_data_type(self, df, col, desired_dt):
"""
Change column datatype using the best method for each type.
Parameters
--------
df: DataFrame
col: str
Column to change the dtype for
desired_dt: str
{'float', 'int', 'datetime', 'str'}
Returns
--------
df: DataFrame
"""
if desired_dt in ("float", "int"):
df[col] = | pd.to_numeric(df[col], errors="coerce") | pandas.to_numeric |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in Javascript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from typing import Any, Callable, Dict, Optional, Union
import pandas as pd
from superset.utils.core import DTTM_ALIAS, extract_dataframe_dtypes, get_metric_name
def sql_like_sum(series: pd.Series) -> pd.Series:
"""
A SUM aggregation function that mimics the behavior from SQL.
"""
return series.sum(min_count=1)
def pivot_table(
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
"""
Pivot table.
"""
for query in result["queries"]:
data = query["data"]
df = pd.DataFrame(data)
form_data = form_data or {}
if form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
metrics = [get_metric_name(m) for m in form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Callable[[Any], Any]]] = {}
for metric in metrics:
aggfunc = form_data.get("pandas_aggfunc") or "sum"
if pd.api.types.is_numeric_dtype(df[metric]):
if aggfunc == "sum":
aggfunc = sql_like_sum
elif aggfunc not in {"min", "max"}:
aggfunc = "max"
aggfuncs[metric] = aggfunc
groupby = form_data.get("groupby") or []
columns = form_data.get("columns") or []
if form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=form_data.get("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if form_data.get("combine_metric"):
df = df.stack(0).unstack().reindex(level=-1, columns=metrics)
# flatten column names
df.columns = [" ".join(column) for column in df.columns]
# re-arrange data into a list of dicts
data = []
for i in df.index:
row = {col: df[col][i] for col in df.columns}
row[df.index.name] = i
data.append(row)
query["data"] = data
query["colnames"] = list(df.columns)
query["coltypes"] = extract_dataframe_dtypes(df)
query["rowcount"] = len(df.index)
return result
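# Hedged usage sketch (not part of Superset): the minimal shape of the `result` and
# `form_data` payloads that pivot_table() expects; the metric and column names are
# invented, and this assumes get_metric_name/extract_dataframe_dtypes behave as the
# module already relies on above.
def _demo_pivot_table():
    result = {
        "queries": [{
            "data": [
                {"gender": "girl", "state": "CA", "sum__num": 10},
                {"gender": "boy", "state": "CA", "sum__num": 20},
            ],
        }],
    }
    form_data = {
        "metrics": ["sum__num"],
        "groupby": ["gender"],
        "columns": ["state"],
        "granularity": "all",
        "pivot_margins": False,
    }
    return pivot_table(result, form_data)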
def list_unique_values(series: pd.Series) -> str:
"""
List unique values in a series.
"""
return ", ".join(set(str(v) for v in pd.Series.unique(series)))
pivot_v2_aggfunc_map = {
"Count": pd.Series.count,
"Count Unique Values": pd.Series.nunique,
"List Unique Values": list_unique_values,
"Sum": pd.Series.sum,
"Average": pd.Series.mean,
"Median": pd.Series.median,
"Sample Variance": lambda series: pd.series.var(series) if len(series) > 1 else 0,
"Sample Standard Deviation": (
lambda series: | pd.series.std(series) | pandas.series.std |
import pandas as pd
import talib
def macd_side(close):
macd, signal, hist = talib.MACD(close.values)
hist = | pd.Series(hist) | pandas.Series |
"""
Created by: <NAME>
Sep 21
IEEE Fraud Detection Model
- FE009
- Adding raddar user level features
- Add first, second, third digit of addr1 and addr2 features
- Drop only DOY features with low importance
- Different Parameters
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import sys
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
from datetime import datetime
import time
import logging
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier, Pool
from timeit import default_timer as timer
import lightgbm as lgb
import gc
start = timer()
##################
# PARAMETERS
###################
run_id = "{:%m%d_%H%M}".format(datetime.now())
KERNEL_RUN = False
MODEL_NUMBER = os.path.basename(__file__).split('.')[0]
if KERNEL_RUN:
INPUT_DIR = '../input/champs-scalar-coupling/'
FE_DIR = '../input/molecule-fe024/'
FOLDS_DIR = '../input/champs-3fold-ids/'
TARGET = "isFraud"
N_ESTIMATORS = 100000
N_META_ESTIMATORS = 500000
LEARNING_RATE = 0.005
VERBOSE = 100
EARLY_STOPPING_ROUNDS = 100
RANDOM_STATE = 529
N_THREADS = 58
DEPTH = -1 #14
N_FOLDS = 5
SHUFFLE = False
FE_SET = 'FE010' # Feature Engineering Version
MODEL_TYPE = "lightgbm"
#####################
## SETUP LOGGER
#####################
def get_logger():
"""
credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480
"""
os.environ["TZ"] = "US/Eastern"
time.tzset()
FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')
formatter = logging.Formatter(FORMAT)
handler.setFormatter(formatter)
# logger.addHandler(handler)
logger.addHandler(fhandler)
return logger
logger = get_logger()
logger.info(f'Running for Model Number {MODEL_NUMBER}')
##################
# PARAMETERS
###################
if MODEL_TYPE == 'xgboost':
EVAL_METRIC = "AUC"
elif MODEL_TYPE == 'lightgbm':
EVAL_METRIC = 'auc'
elif MODEL_TYPE == 'catboost':
EVAL_METRIC = "AUC"
##################
# TRACKING FUNCTION
###################
def update_tracking(run_id,
field,
value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False):
"""
Function to update the tracking CSV with information about the model
"""
try:
df = | pd.read_csv(csv_file, index_col=[0]) | pandas.read_csv |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5), index=pd.date_range("2011-01-01", freq="D", periods=10)
),
pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")).set_index(["a", "b"]),
]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = "databricks"
expected_error_message = "'Index' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = "databricks"
expected_error_message = "'MultiIndex' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
# With name
pidx.name = "Koalas"
kidx.name = "Koalas"
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name=("x", "a"))), repr(pidx.to_series(name=("x", "a"))))
# With tupled name
pidx.name = ("x", "a")
kidx.name = ("x", "a")
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name="a"), pidx.to_series(name="a"))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = "a"
kidx.name = "a"
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name="x")), repr(pidx.to_frame(name="x")))
self.assert_eq(
repr(kidx.to_frame(index=False, name="x")),
repr(pidx.to_frame(index=False, name="x")),
)
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(
repr(kidx.to_frame(name=["x", "y"])), repr(pidx.to_frame(name=["x", "y"]))
)
self.assert_eq(
repr(kidx.to_frame(index=False, name=["x", "y"])),
repr(pidx.to_frame(index=False, name=["x", "y"])),
)
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list("abcde"))
kdf = ks.from_pandas(pdf)
pser = pdf.a
kser = kdf.a
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = "hi"
expected_error_message = "Length of new names must be {}, got {}".format(
len(kdf._internal.index_map), len(["0", "1"])
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ["0", "1"]
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ["renamed_number", "renamed_color"]
kidx.names = ["renamed_number", "renamed_color"]
self.assertEqual(kidx.names, pidx.names)
pidx.names = ["renamed_number", None]
kidx.names = ["renamed_number", None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = "renamed"
def test_index_rename(self):
pdf = pd.DataFrame(
np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
)
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename("y"), pidx.rename("y"))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename("z", inplace=True)
pidx.rename("z", inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
pmidx = pdf.index
kmidx = kdf.index
self.assert_eq(kmidx.rename(["n", "c"]), pmidx.rename(["n", "c"]))
self.assert_eq(kdf.index.names, pdf.index.names)
kmidx.rename(["num", "col"], inplace=True)
pmidx.rename(["num", "col"], inplace=True)
self.assert_eq(kmidx, pmidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))
self.assert_eq(kdf.index.names, pdf.index.names)
self.assertRaises(TypeError, lambda: kmidx.rename("number"))
self.assertRaises(ValueError, lambda: kmidx.rename(["number"]))
def test_multi_index_levshape(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
kidx = ks.from_pandas(pidx)
self.assertEqual(pidx.levshape, kidx.levshape)
def test_index_unique(self):
kidx = self.kdf.index
# here the output is different than pandas in terms of order
expected = [0, 1, 3, 5, 6, 8, 9]
self.assert_eq(expected, sorted(kidx.unique().to_pandas()))
self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))
expected = [1, 2, 4, 6, 7, 9, 10]
self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))
with self.assertRaisesRegex(IndexError, "Too many levels*"):
kidx.unique(level=1)
with self.assertRaisesRegex(KeyError, "Requested level (hi)*"):
kidx.unique(level="hi")
def test_multi_index_copy(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = | pd.MultiIndex.from_arrays(arrays, names=("number", "color")) | pandas.MultiIndex.from_arrays |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
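# Illustrative sketch (an assumption, not part of this module): the two mixins
# above are meant to be combined with a concrete container test class, roughly
# the way pandas wires them up for FrozenList. The underscore prefix keeps
# pytest from collecting this example class.
class _ExampleFrozenListChecks(CheckImmutable, CheckStringMixin):
    mutable_methods = ("extend", "pop", "remove", "insert")

    def setup_method(self, _):
        from pandas.core.indexes.frozen import FrozenList

        self.lst = [1, 2, 3, 4, 5]
        self.container = FrozenList(self.lst)
        self.klass = FrozenList
        self.unicode_container = FrozenList(["\u05d0", "\u05d1", "c"])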
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), | DatetimeIndex(expected) | pandas.DatetimeIndex |
import os
import re
from pathlib import Path
import pandas as pd
from sparc.curation.tools.errors import IncorrectAnnotationError, NotAnnotatedError, IncorrectDerivedFromError, \
IncorrectSourceOfError, BadManifestError
from sparc.curation.tools.base import Singleton
from sparc.curation.tools.definitions import FILE_LOCATION_COLUMN, FILENAME_COLUMN, SUPPLEMENTAL_JSON_COLUMN, \
ADDITIONAL_TYPES_COLUMN, ANATOMICAL_ENTITY_COLUMN, SCAFFOLD_META_MIME, SCAFFOLD_VIEW_MIME, \
SCAFFOLD_THUMBNAIL_MIME, SCAFFOLD_DIR_MIME, DERIVED_FROM_COLUMN, SOURCE_OF_COLUMN, MANIFEST_DIR_COLUMN, MANIFEST_FILENAME, SHEET_NAME_COLUMN
from sparc.curation.tools.utilities import is_same_file
class ManifestDataFrame(metaclass=Singleton):
# dataFrame_dir = ""
_manifestDataFrame = None
_scaffold_data = None
_dataset_dir = None
def setup_dataframe(self, dataset_dir):
self._dataset_dir = dataset_dir
self._read_manifests()
self._scaffold_data = ManifestDataFrame.Scaffold(self)
return self
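    # Example (illustrative, the path is hypothetical): the singleton is set up
    # once per dataset directory, e.g.
    #   manifest = ManifestDataFrame().setup_dataframe("/path/to/sparc/dataset")
    # after which the combined manifest sheets are available on the instance.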
def _read_manifests(self, depth=0):
self._manifestDataFrame = pd.DataFrame()
result = list(Path(self._dataset_dir).rglob(MANIFEST_FILENAME))
for r in result:
xl_file = pd.ExcelFile(r)
for sheet_name in xl_file.sheet_names:
currentDataFrame = xl_file.parse(sheet_name)
currentDataFrame[SHEET_NAME_COLUMN] = sheet_name
currentDataFrame[MANIFEST_DIR_COLUMN] = os.path.dirname(r)
self._manifestDataFrame = pd.concat([currentDataFrame, self._manifestDataFrame])
if not self._manifestDataFrame.empty:
self._manifestDataFrame[FILE_LOCATION_COLUMN] = self._manifestDataFrame.apply(
lambda row: os.path.join(row[MANIFEST_DIR_COLUMN], row[FILENAME_COLUMN]) if | pd.notnull(row[FILENAME_COLUMN]) | pandas.notnull |
from itertools import product
import numpy as np
import pandas as pd
import utils
import parameter_graphs
from scipy import optimize
from sklearn.model_selection import KFold, train_test_split
from datetime import datetime
# Small constant to ensure log is never zero
LOG_CONSTANT = 1
# Whether to generate parameter relationship graphs
MAKE_GRAPHS = False
# Which TDD option to run [0 = "off", 1 = "TDD", 2 = "log_TDD"]
TDD_OPTION = 0
WORKERS = 1  # set to 1 for debug mode and -1 to use all workers on your machine
VERBOSE = False
LOCAL_SEARCH_ON_TOP_N_RESULTS = 100
LAST_STEP_INTERVAL = 10
def make_condition_dicts(file_name):
file_path = utils.find_full_path(file_name, ".csv")
all_conditions = pd.read_csv(file_path)
output = []
for index in all_conditions.index:
condition_dict = {
"BMI": all_conditions["BMI"][index],
"log_BMI": np.log(all_conditions["BMI"][index] + LOG_CONSTANT),
"CHO": all_conditions["CHO"][index],
"log_CHO": np.log(all_conditions["CHO"][index] + LOG_CONSTANT),
"TDD": all_conditions["TDD"][index],
"log_TDD": np.log(all_conditions["TDD"][index] + LOG_CONSTANT),
"MIN_OUTPUT": all_conditions["MIN_OUTPUT"][index],
"MAX_OUTPUT": all_conditions["MAX_OUTPUT"][index],
"X_intercept": 1,
}
output.append(condition_dict)
return output
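# Illustrative sketch (file content is hypothetical): the conditions CSV read by
# make_condition_dicts is expected to provide at least the columns used above, e.g.
#   BMI,CHO,TDD,MIN_OUTPUT,MAX_OUTPUT
#   22.5,180,45,9.6,33.8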
def get_output_file_name(chunk_index, analysis_type):
now = datetime.now().strftime("%m-%d-%y")
return get_output_file_search_name(chunk_index, analysis_type) + f"-{now}.csv"
def get_output_file_search_name(chunk_index, analysis_type):
return f"{analysis_type}-{TDD_OPTION}-{LOCAL_SEARCH_ON_TOP_N_RESULTS}-equation-results-MAPE-lastindex-{chunk_index}"
basal_check_dicts = make_condition_dicts("basal_fitting_checks")
def brute_optimize(
X_df,
y_df,
objective_function,
parameter_search_range_tuple,
equation_function,
loss_function,
find_local_min_function=None,
verbose=False,
workers=-1,
):
"""
Brute search optimization with custom equation and loss function
This is a wrapper or helper function to
`scipy brute <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html>`_
function given that the brute function is a little esoteric.
Parameters
----------
X_df: dataframe
These are the known X values.
Here a df is used so that the column names are specified.
Also, be sure to add the intercept term if needed.
y_df: dataframe
These are the known or actual y values.
Here a df is used so that the column names are specified.
objective_function : function
The function you want to minimize.
parameter_search_range_tuple : tuple
A tuple that contains slices for each of the parameters_to_estimate
The search range for each of the parameters using the slice(min, max, step).
        Each parameter should have its own slice -> (slice(min, max, step), slice(min, max, step), etc.)
        CAUTION: decreasing the step size or increasing the number of parameters can be computationally expensive
equation_function : function
The equation you are trying to fit.
loss_function : function
        A function with the first two arguments as (y_actual, y_predict) for compatibility with sklearn
find_local_min_function : function
Default to None, optimize.fmin is another option
Returns
-------
optimal_parameter_df : dataframe
meta_dict: dictionary
        (see the illustrative usage sketch directly after this function)
"""
y_col_name = y_df.columns[0]
X_col_names = list(X_df.columns)
X_ndarray = X_df.values
# print("fitting {} = {}".format(y_col_name, X_col_names))
optimize_args_tuple = (
equation_function,
loss_function,
X_ndarray,
y_df.values,
verbose,
X_col_names,
y_col_name,
)
brute_results = optimize.brute(
objective_function,
parameter_search_range_tuple,
args=optimize_args_tuple,
full_output=True,
finish=find_local_min_function,
disp=verbose,
workers=workers,
)
optimal_parameter_values = brute_results[0]
optimal_parameter_df = pd.DataFrame(
optimal_parameter_values.reshape([1, -1]), columns=X_col_names
)
loss_of_optimal_params = brute_results[1]
optimal_parameter_df["loss"] = loss_of_optimal_params
optimal_parameter_df["y_col_name"] = y_col_name
output_col_order = ["y_col_name"] + ["loss"] + X_col_names
optimal_parameter_df = optimal_parameter_df[output_col_order]
search_mesh = brute_results[2]
search_mesh_loss_scores = brute_results[3]
search_results_df = pd.DataFrame(
search_mesh_loss_scores.reshape([-1, 1]), columns=["loss"]
)
# TODO: make sure this won't break if number of parameters is 1
fit_equation_string = "{} = ".format(y_col_name)
for col_idx, X_col_name in enumerate(X_col_names):
if len(X_col_names) == 1:
search_results_df[X_col_name] = search_mesh
if col_idx == len(X_col_names) - 1:
fit_equation_string += "{} {}".format(
round(optimal_parameter_values, 5), X_col_name
)
else:
fit_equation_string += "{} {} + ".format(
round(optimal_parameter_values, 5), X_col_name
)
else:
search_results_df[X_col_name] = search_mesh[col_idx].reshape([-1, 1])
if col_idx == len(X_col_names) - 1:
fit_equation_string += "{} {}".format(
round(optimal_parameter_values[col_idx], 5), X_col_name
)
else:
fit_equation_string += "{} {} + ".format(
round(optimal_parameter_values[col_idx], 5), X_col_name
)
if verbose:
print(fit_equation_string)
meta_dict = dict()
meta_dict["fit_equation"] = fit_equation_string
meta_dict["optimal_parameter_values"] = optimal_parameter_values
meta_dict["loss_of_optimal_params"] = loss_of_optimal_params
search_results_df = search_results_df.sort_values(by="loss")
meta_dict["search_results_df"] = search_results_df.round(5)
meta_dict["brute_results"] = brute_results
meta_dict["optimal_parameter_df"] = optimal_parameter_df
meta_dict["equation_function_name"] = equation_function.__name__
meta_dict["y_col_name"] = y_col_name
meta_dict["X_col_names"] = X_col_names
meta_dict["parameter_search_range_tuple"] = parameter_search_range_tuple
meta_dict["loss_function"] = loss_function.__name__
meta_dict["find_local_min_function"] = find_local_min_function
return optimal_parameter_df.round(5), meta_dict
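# ---------------------------------------------------------------------------
# Usage sketch for brute_optimize (fills in the example referenced in the
# docstring above). Everything here is illustrative: the data points, column
# names and search ranges are invented, and the helper is never called by the
# pipeline. It relies on custom_objective_function, linear_regression_equation
# and custom_basal_loss_with_inf defined later in this module.
# ---------------------------------------------------------------------------
def example_brute_optimize():
    X_df = pd.DataFrame({"X_intercept": [1, 1, 1, 1], "log_TDD": [3.2, 3.6, 3.9, 4.1]})
    y_df = pd.DataFrame({"log_BASAL": [2.5, 2.9, 3.1, 3.4]})
    # one slice(min, max, step) per column of X_df
    search_ranges = (slice(-2, 2.1, 0.5), slice(-2, 2.1, 0.5))
    optimal_df, meta = brute_optimize(
        X_df=X_df,
        y_df=y_df,
        objective_function=custom_objective_function,
        parameter_search_range_tuple=search_ranges,
        equation_function=linear_regression_equation,
        loss_function=custom_basal_loss_with_inf,
        find_local_min_function=None,
        verbose=False,
        workers=1,
    )
    return optimal_df, meta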
def custom_objective_function(parameters_to_estimate_1darray, *args_tuple):
# TODO: add in function header and some checks to make sure the inputs are correct
(
equation_function,
loss_function,
fixed_parameters_ndarray,
y_actual,
verbose,
X_col_names,
y_col_name,
) = args_tuple
y_estimate = equation_function(
parameters_to_estimate_1darray, fixed_parameters_ndarray
)
loss_score = loss_function(
y_actual,
y_estimate,
equation_function,
parameters_to_estimate_1darray,
X_col_names,
y_col_name,
)
if verbose:
print(parameters_to_estimate_1darray, loss_score)
return loss_score
def linear_regression_equation(
parameters_to_estimate_1darray, fixed_parameters_ndarray
):
parameters_to_estimate_1darray = np.reshape(parameters_to_estimate_1darray, (-1, 1))
return np.matmul(fixed_parameters_ndarray, parameters_to_estimate_1darray)
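# Illustrative shape check (an assumption, not used by the pipeline): with an
# (n, k) design matrix and k estimated coefficients, the equation returns an
# (n, 1) column vector of predictions.
def example_linear_equation_shapes():
    X = np.array([[1.0, 3.5], [1.0, 4.0]])  # n=2 observations, k=2 regressors
    betas = np.array([0.5, 0.8])            # one coefficient per regressor
    y_hat = linear_regression_equation(betas, X)
    assert y_hat.shape == (2, 1)
    return y_hat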
def sum_of_squared_errors_loss_function(y_actual, y_estimate):
return np.sum((y_estimate - y_actual) ** 2)
def custom_basal_loss_with_inf(
y_actual,
y_estimate,
equation,
fixed_parameters,
X_col_names,
y_col_name,
delta=0.65,
):
epsilon = np.finfo(np.float64).eps
residuals = y_estimate - y_actual
# median absolute percentage error
absolute_percent_error = np.abs(residuals) / np.maximum(np.abs(y_actual), epsilon)
loss_score = np.median(absolute_percent_error)
# %% old code with huber loss
# outlier_mask = absolute_percent_error > delta
# loss = np.ones(np.shape(absolute_percent_error)) * np.nan
# loss[~outlier_mask] = 0.5 * absolute_percent_error[~outlier_mask] ** 2
# loss[outlier_mask] = delta * (abs(absolute_percent_error[outlier_mask]) - (0.5 * delta))
# loss_score = np.sum(loss)
# %% here is a list of custom penalities
# penalize the loss if any of the estimates over prediction if y_estimate > y_actual,
# which implies that basal > TDD given that TDD > y_actual for all cases in our dataset
# n_overestimates = np.sum(residuals > 0)
# if n_overestimates > 0:
# loss_score = np.inf
# # add a penalty if any of the estimates are less than 0
# n_y_too_low = np.sum(y_estimate < 0)
# if n_y_too_low > 0:
# loss_score = np.inf
# # add a penalty if any of the estimates are greater than 35 U/hr
# n_y_too_high = np.sum(y_estimate > 35 * 24)
# if n_y_too_high > 0:
# loss_score = np.inf
# %% this is where we can add in the 19 checks
    # this looks something like y_temp = equation(constants from our reference table);
    # y_temp needs to be between the min and max plausible basal
for check_dict in basal_check_dicts:
min_val = check_dict["MIN_OUTPUT"]
max_val = check_dict["MAX_OUTPUT"]
X_val = [check_dict[param] for param in X_col_names]
y_pred = equation(fixed_parameters, X_val)
if "log" in y_col_name:
y_pred = np.exp(y_pred)
if not (min_val <= y_pred <= max_val):
loss_score = np.inf
break
return loss_score
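# Illustrative helper (an assumption, never called by the pipeline): shows the
# plausibility screen from the loss above in isolation. A candidate coefficient
# vector is rejected as soon as one clinical check falls outside its allowed
# band; the log back-transform applied in the real loss is omitted for brevity.
def example_passes_basal_checks(candidate_betas, X_col_names=("X_intercept", "log_TDD")):
    for check in basal_check_dicts:
        X_val = [check[col] for col in X_col_names]
        y_pred = linear_regression_equation(candidate_betas, X_val).item()
        if not (check["MIN_OUTPUT"] <= y_pred <= check["MAX_OUTPUT"]):
            return False  # the real loss function would return np.inf here
    return True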
def fit_equ_with_custom_loss(
X_df,
y_df,
custom_objective_function,
linear_regression_equation,
custom_basal_loss_with_inf,
verbose=False,
workers=-1,
):
all_brute_results = pd.DataFrame(columns=["loss"] + list(X_df.columns))
    # first do a broad grid search, then progressively refine around the best results
    for i, m in enumerate([100, 10, 1]):
step = m / 10
if i == 0:
parameter_search_range_tuple = tuple(
[slice(np.round(-m, 5), np.round(m + step, 5), np.round(step, 5))]
* len(X_df.columns)
)
results_df, results_meta_info_dict = brute_optimize(
X_df=X_df,
y_df=y_df,
objective_function=custom_objective_function,
parameter_search_range_tuple=parameter_search_range_tuple,
equation_function=linear_regression_equation,
loss_function=custom_basal_loss_with_inf,
find_local_min_function=None, # None, #optimize.fmin,
verbose=verbose,
workers=workers,
)
all_brute_results = pd.concat(
[all_brute_results, results_meta_info_dict["search_results_df"]]
)
else:
local_search_df = all_brute_results.loc[
all_brute_results["loss"] != np.inf, :
].copy()
local_search_df.drop_duplicates(inplace=True, ignore_index=True)
local_search_df.sort_values(by="loss", inplace=True)
local_search_df.reset_index(drop=True, inplace=True)
            # search locally around each of the top N results from the coarser grids
            for n_local_searches in range(min(LOCAL_SEARCH_ON_TOP_N_RESULTS, len(local_search_df))):
# print("searching with {} resolution, around {}".format(m, local_search_df.loc[n_local_searches:n_local_searches, :]))
parameter_search_range_list = []
for col_name in list(X_df.columns):
local_val = local_search_df.loc[n_local_searches, col_name]
parameter_search_range_list.append(
slice(
np.round(local_val - m, 5),
np.round(local_val + m + step, 5),
np.round(step, 5),
)
)
parameter_search_range_tuple = tuple(parameter_search_range_list)
results_df, results_meta_info_dict = brute_optimize(
X_df=X_df,
y_df=y_df,
objective_function=custom_objective_function,
parameter_search_range_tuple=parameter_search_range_tuple,
equation_function=linear_regression_equation,
loss_function=custom_basal_loss_with_inf,
find_local_min_function=None, # None, #optimize.fmin,
verbose=verbose,
workers=workers,
)
# print("lowest around this point is {}".format(results_df))
all_brute_results = pd.concat(
[all_brute_results, results_meta_info_dict["search_results_df"]]
)
# now do a moderate search around the parameter space
top_wide_search_df = all_brute_results.loc[
all_brute_results["loss"] != np.inf, :
].copy()
top_wide_search_df.drop_duplicates(inplace=True)
top_wide_search_df.sort_values(by="loss", inplace=True)
top_wide_search_df.reset_index(drop=True, inplace=True)
# print("that took {} seconds".format(time.time() - start_time))
# If we couldn't find a non-inf loss, we failed to find a fit
if len(top_wide_search_df) < 1:
return None, None, False
# do one last brute force search
parameter_search_range_list = []
steps = LAST_STEP_INTERVAL
for col_name in list(X_df.columns):
min_val = np.round(top_wide_search_df.loc[:, col_name].min(), 5)
max_val = np.round(top_wide_search_df.loc[:, col_name].max(), 5)
step_val = np.round((max_val - min_val) / steps, 5)
if step_val == 0:
min_val = min_val - 0.001
max_val = max_val + 0.001
step_val = np.round((max_val - min_val) / steps, 5)
parameter_search_range_list.append(
slice(min_val, np.round(max_val + step_val, 5), step_val)
)
parameter_search_range_tuple = tuple(parameter_search_range_list)
results_df, results_meta_info_dict = brute_optimize(
X_df=X_df,
y_df=y_df,
objective_function=custom_objective_function,
parameter_search_range_tuple=parameter_search_range_tuple,
equation_function=linear_regression_equation,
loss_function=custom_basal_loss_with_inf,
find_local_min_function=None, # None, #optimize.fmin,
verbose=verbose,
workers=workers,
)
all_brute_results = pd.concat(
[all_brute_results, results_meta_info_dict["search_results_df"]]
)
all_brute_results.sort_values(by="loss", inplace=True)
all_brute_results.reset_index(drop=True, inplace=True)
# valid_results_df = all_brute_results.loc[all_brute_results["loss"] != np.inf, :].copy()
top_result_df = all_brute_results.loc[0:0, :]
return top_result_df, all_brute_results, True
# %% start of code
# load in data
file_path = utils.find_full_path(
"2021-05-02_equation_paper_aspirational_data_reduced", ".csv"
)
all_data = pd.read_csv(
file_path,
usecols=[
"geomean_basal_rate",
"geomean_isf",
"geomean_weighted_cir",
"bmi",
"geomean_total_daily_carbs_in_chunk_outcomes",
"geomean_total_daily_insulin_dose_in_chunk_outcomes",
],
)
all_data.rename(
columns={
"geomean_basal_rate": "BR",
"geomean_isf": "ISF",
"geomean_weighted_cir": "CIR",
"bmi": "BMI",
"geomean_total_daily_carbs_in_chunk_outcomes": "CHO",
"geomean_total_daily_insulin_dose_in_chunk_outcomes": "TDD",
},
inplace=True,
)
# remove any rows where this is a value <= 0
clean_data = all_data[np.sum(all_data <= 0, axis=1) == 0].copy()
clean_data.reset_index(drop=True, inplace=True)
clean_data["X_intercept"] = 1
clean_data["BASAL"] = clean_data["BR"] * 24
clean_data["log_BASAL"] = np.log(clean_data["BASAL"] + LOG_CONSTANT)
clean_data["log_BR"] = np.log(clean_data["BR"] + LOG_CONSTANT)
clean_data["log_ISF"] = np.log(clean_data["ISF"] + LOG_CONSTANT)
clean_data["log_CIR"] = np.log(clean_data["CIR"] + LOG_CONSTANT)
clean_data["log_BMI"] = np.log(clean_data["BMI"] + LOG_CONSTANT)
clean_data["log_CHO"] = np.log(clean_data["CHO"] + LOG_CONSTANT)
clean_data["log_TDD"] = np.log(clean_data["TDD"] + LOG_CONSTANT)
y_cols = ["BASAL", "log_BASAL", "ISF", "log_ISF", "CIR", "log_CIR"]
x_cols = [
"X_intercept",
"BMI",
"log_BMI",
"CHO",
"log_CHO",
"TDD",
"log_TDD",
]
X = clean_data[x_cols]
y = clean_data[y_cols]
# break the data into 70% train and 30% test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=1,
)
# loop through each of the 3 independent variables
for y in [
["BASAL"]
]:  # alternative targets: ["BASAL", "log_BASAL"], ["ISF", "log_ISF"], ["CIR", "log_CIR"]
print("solving for the {} equations".format(y[0]))
# consider all combinations
intercept = ["X_intercept"]
bmi = ["off", "BMI", "log_BMI"]
cho = ["off", "CHO", "log_CHO"]
tdd = [["off", "TDD", "log_TDD"][TDD_OPTION]]
all_combos = list(product(y, intercept, bmi, cho, tdd))
ac_df = pd.DataFrame(all_combos, columns=["y", "X_intercept", "BMI", "CHO", "TDD"])
ac_df["val_loss"] = np.nan
for x_beta in x_cols:
ac_df["beta_{}".format(x_beta)] = np.nan
for combo, ac in enumerate(all_combos):
if utils.file_exists(
get_output_file_search_name(combo, y[0]), ".csv", use_startswith=True
):
print(f"Skipping combo {combo} since we have data for it")
continue
print(combo, list(ac))
[y_lin_log, x_intercept, bmi_lin_log, cho_lin_log, tdd_lin_log] = ac
X_cols = [x_intercept, bmi_lin_log, cho_lin_log, tdd_lin_log]
X_cols = list(set(X_cols))
if "off" in X_cols:
X_cols.remove("off")
# fit with custom loss function
X_df = pd.DataFrame(X_train[X_cols])
y_df = pd.DataFrame(y_train[y_lin_log])
top_result, all_results, success = fit_equ_with_custom_loss(
X_df,
y_df,
custom_objective_function,
linear_regression_equation,
custom_basal_loss_with_inf,
verbose=VERBOSE,
workers=WORKERS, # -1
)
if not success:
print(f"ERROR: unable to find fit for {list(ac)} parameters")
continue
if MAKE_GRAPHS:
fixed_parameters = top_result.loc[0, X_cols].values
parameter_graphs.make_graphs(
linear_regression_equation, fixed_parameters, X_cols, y_lin_log
)
for result_col in top_result.columns:
            if result_col == "y_col_name":
                continue
            elif result_col == "loss":
                # brute_optimize names its loss column "loss"; store it as the
                # training loss for this covariate combination
                ac_df.loc[combo, "train_loss"] = top_result[result_col].values
            else:
                ac_df.loc[combo, "beta_{}".format(result_col)] = top_result[
                    result_col
                ].values
# need to take an equation and run it through the custom loss function
# need to correct the loss values for the log_basal results
# double check that the seeds do not change
# see if there are issues with the searching the log space
# break the training set into 5 folds for model selection with cross validation
kf = KFold(n_splits=5)
fold = 0
for i, j in kf.split(X_train):
fold = fold + 1
print("starting fold {}".format(fold))
X_train_fold = X_train.iloc[i, :]
X_val_fold = X_train.iloc[j, :]
y_train_fold = y_train.iloc[i, :]
y_val_fold = y_train.iloc[j, :]
# fit with custom loss function
X_df_train = | pd.DataFrame(X_train_fold[X_cols]) | pandas.DataFrame |
import seaborn as sns
import pandas as pd
#name features
columns=['CI','NI','BTD1',
'BTD2','BTD3','BTD4',
'BTD5']
#from numpy array to Pandas dataframe CSDB
df_features_csdb = | pd.DataFrame(features_scaled,columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime
from .mappings import map_registryCode_inv, map_account_type_inv, map_unitType_inv, export_mappings
import os
import glob
# account information added by hand
NEW_ACC = [{"accountIDEutl": 111264,
"name": "EU Credit Exchange Account - Aviation",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"},
{"accountIDEutl": 111265,
"name": "EU Credit Exchange Account - Aviation",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"},
{"accountIDEutl": 111267,
"name": "EU Credit Exchange Account",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"},
{"accountIDEutl": 111266,
"name": "EU Credit Exchange Account",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"}]
def create_csv_tables(dir_in, dir_out, fn_coordinates=None,
fn_nace=None, fn_nace_codes=None):
"""Create all tables
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
:param fn_coordinates: <string> path to file with installation coordinates
:param fn_nace: <string> name of file with nace codes for installations
if None, NACE codes are not processed
:param fn_nace_codes: <string> name of file with nace classification scheme
If None, calssification lookup not exported
"""
print("####### Create lookup tables")
create_tables_lookup(dir_in, dir_out, fn_nace_codes=fn_nace_codes)
print("####### Create installation tables")
create_table_installation(dir_in, dir_out,
fn_coordinates=fn_coordinates,
fn_nace=fn_nace)
create_table_compliance(dir_in, dir_out)
create_table_surrender(dir_in, dir_out)
print("####### Create account tables")
create_table_accountHolder(dir_in, dir_out)
create_table_account(dir_in, dir_out)
print("####### Create transcation tables")
create_table_transaction(dir_in, dir_out)
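# Illustrative call (all paths are hypothetical): build every output table from a
# directory of parsed EUTL data, optionally attaching coordinates and NACE codes.
def example_create_all_tables():
    create_csv_tables(
        dir_in="./data/parsed/",
        dir_out="./data/tables/",
        fn_coordinates="./data/installation_coordinates.csv",
        fn_nace="./data/nace_leakage_lists.csv",
        fn_nace_codes=None,  # skip exporting the NACE classification scheme
    )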
def create_table_installation(dir_in, dir_out, fn_coordinates=None,
fn_nace=None):
"""Create installation table
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
:param fn_coordinates: <string> name of file with coordinates
:param fn_nace: <string> name of file with nace codes
if None, NACE codes are not processed
"""
# get data: installation data together with addresses with updated coordinates
# and entitlements
df_inst = pd.read_csv(dir_in + "installations.csv",)
df_enti = pd.read_csv(dir_in + "entitlements.csv", na_values=["Not Applicable", "Not Set"])
df_enti["installationID_new"] = df_enti.registry.map(lambda x: map_registryCode_inv.get(x))
df_enti["installationID"] = df_enti["installationID_new"] + "_" + df_enti["installationID"].map(str)
df_enti = df_enti[["installationID", "euEntitlement", "chEntitlement"]].copy()
df_inst = df_inst.merge(df_enti, on="installationID", how="left")
# transform dataframe to be consistent with Installation object
cols_inst = {'installationID': 'id',
"name": "name",
'registryCode': 'registry_id',
'activity': 'activity_id',
'eprtrID': 'eprtrID',
'parent': 'parentCompany',
'subsidiary': 'subsidiaryCompany',
'permitID': 'permitID',
'icaoID': 'designatorICAO',
'monitoringPlanId': 'monitoringID',
'monitoringPlanExpiry': 'monitoringExpiry',
'monitoringPlanFirstYear': 'monitoringFirstYear',
'permitExpiry': 'permitDateExpiry',
'isAircraftOperator': 'isAircraftOperator',
'ec7482009ID': 'ec748_2009Code',
'permitEntryDate': 'permitDateEntry',
'mainAddress': 'mainAddress',
'secondaryAddress': 'secondaryAddress',
'postalCode': 'postalCode',
'city': 'city',
'country': 'country_id',
'latitude': 'latitudeEutl',
'longitude': 'longitudeEutl',
"euEntitlement": "euEntitlement",
"chEntitlement": "chEntitlement",
}
df_inst_to_tbl = df_inst[[c for c in cols_inst.keys() if c in df_inst.columns]].copy()
df_inst_to_tbl = df_inst_to_tbl.rename(columns=cols_inst)
# convert activity id to id only (without description)
df_inst_to_tbl.activity_id = df_inst_to_tbl.activity_id.map(lambda x: int(x.split("-")[0]))
if fn_coordinates is not None:
df_ = pd.read_csv(fn_coordinates,
names=["id", "latitudeGoogle", "longitudeGoogle"],
usecols=["id", "latitudeGoogle", "longitudeGoogle"],
header=0)
df_inst_to_tbl = df_inst_to_tbl.merge(df_, on="id", how="left")
# add nace codes
if fn_nace:
        # primarily use NACE codes from the 2020 leakage list and fill gaps with the 2015 list
df_ = pd.read_csv(fn_nace, usecols=["id", "nace15", "nace20"],
dtype={"nace15": "str", "nace20": "str"}).drop_duplicates()
df_["nace_id"] = df_.nace20.fillna(df_.nace15)
df_ = df_.rename(columns={"nace15": "nace15_id", "nace20": "nace20_id"})
df_inst_to_tbl = df_inst_to_tbl.merge(df_, on="id", how="left")
# for aircraft add the nace code 51 (Air transport)
df_inst_to_tbl.loc[df_inst_to_tbl.isAircraftOperator, "nace_id"] = df_inst_to_tbl.loc[df_inst_to_tbl.isAircraftOperator, "nace_id"].fillna(51)
# add created timestamp
df_inst_to_tbl["created_on"] = datetime.now()
df_inst_to_tbl["updated_on"] = datetime.now()
# export to csv
df_inst_to_tbl.to_csv(dir_out + "installations.csv", index=False, encoding="utf-8")
return
def create_table_compliance(dir_in, dir_out):
"""Create table with compliance data
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
"""
# get data
df_comp = pd.read_csv(dir_in + "compliance.csv")
# transform dataframe to be consistent with Installation object
cols_comp = {'installationID': 'installation_id',
'year': 'year',
'phase': 'euetsPhase',
'complianceCode': 'compliance_id',
'allocationFree': 'allocatedFree',
'allocationNewEntrance': 'allocatedNewEntrance',
'allocationTotal': 'allocatedTotal',
'allocation10c': 'allocated10c',
'verified': 'verified',
'verifiedCumulative': 'verifiedCummulative',
'complianceCodeUpdated': 'verifiedUpdated',
'surrendered': 'surrendered',
'surrenderedCumulative': 'surrenderedCummulative',
"reportedInSystem": "reportedInSystem"}
# calculate total allocation
df_comp["allocationTotal"] = (df_comp["allocationNewEntrance"].fillna(0) +
df_comp["allocationFree"].fillna(0) + df_comp["allocation10c"].fillna(0))
df_comp_to_tbl = df_comp[cols_comp.keys()].copy()
df_comp_to_tbl = df_comp_to_tbl.rename(columns=cols_comp)
# verified emission might have status "Excluded" which we set to missing (to have an int column)
df_comp_to_tbl.verified = df_comp_to_tbl.verified.replace(["Excluded", "Not Reported"], np.nan)
df_comp_to_tbl.verifiedCummulative = df_comp_to_tbl.verifiedCummulative.replace("Not Calculated", np.nan)
# add created timestamp
df_comp_to_tbl["created_on"] = datetime.now()
df_comp_to_tbl["updated_on"] = datetime.now()
# save table
df_comp_to_tbl.to_csv(dir_out + "compliance.csv", index=False, encoding="utf-8")
return
def create_table_surrender(dir_in, dir_out):
"""Create table with surrendering details as well as offset projects
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
"""
# get data
df_surr = pd.read_csv(dir_in + "surrendering.csv")
# create offset project table
df_proj = df_surr[["projectID", "track", "originatingRegistry"]].dropna(subset=["projectID"]).drop_duplicates()
df_proj.columns = ["id", "track", "country_id"]
# convert country names to country ids
df_proj.country_id = df_proj.country_id.map(map_registryCode_inv)
df_proj["created_on"] = datetime.now()
df_proj["updated_on"] = datetime.now()
# choose and rename columns in the surrendering table an insert them into the database
cols_surr = {'installationID': 'installation_id',
'year': 'year',
'unitType': 'unitType_id',
'amount': 'amount',
'originatingRegistry': 'originatingRegistry_id',
# 'accountID': 'account_id',
'projectID': 'project_id',
# 'expiryDate': 'expiryDate',
"reportedInSystem": "reportedInSystem"
}
df_surr_to_tbl = df_surr[cols_surr.keys()].copy()
df_surr_to_tbl = df_surr_to_tbl.rename(columns=cols_surr)
# impose lookup codes
df_surr_to_tbl.unitType_id = df_surr_to_tbl.unitType_id.map(map_unitType_inv)
df_surr_to_tbl.originatingRegistry_id = df_surr_to_tbl.originatingRegistry_id.map(map_registryCode_inv)
# need to add an primary key for surrendendering rows
# here we simply use the index
df_surr_to_tbl["id"] = df_surr_to_tbl.index
# add created timestamp
df_surr_to_tbl["created_on"] = datetime.now()
df_surr_to_tbl["updated_on"] = datetime.now()
# save data
df_surr_to_tbl.to_csv(dir_out + "surrender.csv", index=False, encoding="utf-8")
df_proj.to_csv(dir_out + "offset_projects.csv", index=False, encoding="utf-8")
def create_table_accountHolder(dir_in, dir_out):
""" Create account holder table dropping duplicated account holders
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
"""
df = pd.read_csv(dir_in + "/contacts.csv", na_values=["-", "na", ".", "0", "XXX"])
# Create a unique account holder ID that identifies duplicates
def get_duplicate_matching(df, cols_duplication, col_id):
"""Mapping of duplicated rows to ID of first occurance of the duplicated row
:param df: <pd.DataFrame> with data
:param cols_duplication: <list: string> name of columns checked for duplicates
:param col_id: <string> name of column with identifier
        :return: <pd.Series> mapping col_id values to the id of the first occurrence row
"""
df_d = df[df.duplicated(subset=cols_duplication, keep=False)
].drop_duplicates(cols_duplication)
df_d["__newID__"] = df_d[col_id]
df_f = df.merge(df_d, on=cols_duplication, suffixes=('', '_y'))
df_f = df_f[df_f["__newID__"].notnull()].copy()
m = pd.Series(df_f["__newID__"].values, index=df_f[col_id])#.to_dict()
return m
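    # Worked example (illustrative): if accounts 10 and 42 share the same
    # name/address fields, the returned mapping contains {10: 10, 42: 10},
    # i.e. every duplicate points to the accountID of its first occurrence.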
# require a minimum of information to identify duplicates
cols_nonNull = ["name", "mainAddress", "city", "country"]
df_ = df[df[cols_nonNull].notnull().all(axis=1)]
# get duplicates by all columns (except associated accountID)
cols_duplication = [c for c in df.columns if c not in ["accountID", "accountURL"]]
match_all = get_duplicate_matching(df_, cols_duplication, col_id="accountID")
# insert map on accountHolderID into original frame
# if not duplicate simply assign the original account ID
df["accountHolderID"] = df.accountID.map(lambda x: match_all.get(x, x))
# get a mapping from account holder to accountID
df_map_accountHolders = df[["accountHolderID", "accountID"]].copy()
# drop duplicates and map country column to codes
df = df.drop_duplicates("accountHolderID")
# create country lookups instead of full country names
df.country = df.country.map(map_registryCode_inv)
# rename columns
cols_accountHolder = {'accountHolderID': 'id',
'name': 'name',
'mainAddress': 'addressMain',
'secondaryAddress': 'addressSecondary',
'postalCode': 'postalCode',
'city': 'city',
'country': 'country_id',
"telephone1": "telephone1",
"telephone2": "telephone2",
"eMail": "eMail",
"legalEntityIdentifier": "legalEntityIdentifier",
# "accountID": "account_id"
}
df = df.rename(columns=cols_accountHolder)[cols_accountHolder.values()].copy()
# add created timestamp
df["created_on"] = datetime.now()
df["updated_on"] = datetime.now()
# save table
df.to_csv(dir_out + "accountHolders.csv", index=False, encoding="utf-8")
# also save the mapping from account holder to accounts
df_map_accountHolders.to_csv(dir_in + "accountHolder_mapping.csv", index=False)
return
def create_table_account(dir_in, dir_out):
"""Create account table.
AccountHolder table needs to be created first
    :param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory"""
# get account data and mapping for account types
df_acc = pd.read_csv(dir_in + "accounts.csv",
parse_dates=["closingDate", "openingDate"])
# renamce columns
cols_account = {'accountID': 'accountIDEutl',
'accountName': 'name',
'registryCode': 'registry_id',
'accountType': 'accountType_id',
'openingDate': 'openingDate',
'closingDate': 'closingDate',
'status': 'isOpen',
"commitmentPeriod": "commitmentPeriod",
'companyRegistrationNumber': 'companyRegistrationNumber',
'installationID': 'installation_id'}
# mark accounts with status "closing pending" as closed
# note that accounts with missin status are accounts of MT and CY in first periods. Thus, closed
df_acc["status"] = df_acc.status.replace({"closed": False,
"open": True,
"Closure Pending": False}).fillna(False).astype("boolean")
df_acc = df_acc.rename(columns=cols_account)[cols_account.values()].copy()
# impose accountTypes_ids
df_acc.accountType_id = df_acc.accountType_id.map(map_account_type_inv)
# make installation id unique
def form_id(row):
if pd.notnull(row["installation_id"]):
return f'{row["registry_id"]}_{int(row["installation_id"])}'
return
df_acc.installation_id = df_acc.apply(form_id, axis=1)
# Clean account names:
df_acc["name"] = df_acc["name"].map(lambda x: "-".join(x.split("-")[1:])[4:])
# add EU offset accounts by hand
    # NOTE: We could also identify the missing accounts by non-matches and download the information
res = []
for i in NEW_ACC:
print("Added missing account:", i)
if i["accountIDEutl"] in df_acc.accountIDEutl:
continue
res.append(i)
df_new = pd.DataFrame(res)
if len(df_new) > 0:
df_acc = pd.concat([df_acc, df_new])
# add account identifiers used in transactions
map_accId = pd.read_csv(dir_in + "account_mapping.csv")
map_accId = map_accId.set_index("accountID")["accountIdentifierDB"].to_dict()
df_acc["accountIDtransactions"] = df_acc.accountIDEutl.map(lambda x: map_accId.get(x))
df_acc["isRegisteredEutl"] = df_acc["accountIDtransactions"].notnull()
# add the corresponding account holder ID
mapper = (pd.read_csv(dir_in + "accountHolder_mapping.csv")
.set_index("accountID")
.accountHolderID.to_dict()
)
df_acc["accountHolder_id"] = df_acc["accountIDEutl"].map(lambda x: mapper.get(x))
# add created timestamp
df_acc["created_on"] = datetime.now()
df_acc["updated_on"] = datetime.now()
# save to csv
df_acc.to_csv(dir_out + "accounts.csv", index=False, encoding="utf-8")
return
def create_table_transaction(dir_in, dir_out):
"""Create transaction table. This has to be run after all
all other tables have been created.
    :param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory"""
# load data: we need original transaction data as well as
# as the account table with new account ID. Also load already
# created project table to (eventually) add further projects.
# Finally, unit type mappings to map to unitType_id
# merge information from main transaction table to blocks
df = pd.read_csv(dir_in + "transactionBlocks.csv", low_memory=False,
parse_dates=["transactionDate"])
# extract cdm projects included in transaction data
# in version 05/2021 that does not seem to be necessary anymore
# NOTE: Here we drop one entry for project 5342 which as origin entry GH and NG.
df_proj_trans = df[["projectID", "projectTrack", "originatingRegistry"]
].dropna(subset=["projectID"]).drop_duplicates(subset=["projectID"])
df_proj_trans.columns = ["id", "track", "country_id"]
df_proj_trans["created_on"] = datetime.now()
df_proj_trans["updated_on"] = datetime.now()
df_proj_trans["source"] = "transactions"
df_proj_trans
df_proj = pd.read_csv(dir_out + "offset_projects.csv")
df_proj["source"] = "surrendering_details"
# only include those additional projects
df_proj_trans = df_proj_trans[~df_proj_trans["id"].isin(df_proj["id"])]
df_proj_new = pd.concat([df_proj, df_proj_trans])
df_proj_new.to_csv(dir_out + "offset_projects.csv", index=False, encoding="utf-8")
# create accounts which do not exist in the account table
# get accounts with accountID in transaction data but
# account missing in account table (all MT0 and CY0)
# we create accounts out of the data provided in the
# transaction data
res = []
for pf in ["acquiring", "transferring"]:
df_miss = df[df[pf + "AccountID"].isnull()].drop_duplicates()
df_miss = df_miss[[pf + "AccountIdentifierDB", pf + "AccountIdentifier", pf + "AccountName", pf + "RegistryCode"]]
df_miss.columns = ["accountIdentifierDB", "accountIDtransactions", "accountName", "registryCode"]
res.append(df_miss)
df_miss = pd.concat(res).drop_duplicates()
    # for those accounts without an accountIdentifierDB we
    # create an account "unknown" which is unique by country
if df_miss[df_miss.accountIdentifierDB.isnull()].registryCode.is_unique:
df_miss.accountIdentifierDB = df_miss.accountIdentifierDB.fillna(df_miss.registryCode + "_unknown")
df_miss.accountIDtransactions = df_miss.accountIDtransactions.fillna("unknown")
# these are accounts that are missing in the account database
# to easily identify and to get in conflict with newly emerging
# account IDs provided by the EUTL, we assign negative integers
# as account ids
df_miss = df_miss.reset_index(drop=True)
df_miss["accountIDEutl"] = -df_miss.index - 1
df_miss["created_on"] = datetime.now()
df_miss["updated_on"] = datetime.now()
# also insert the corresponding account id into the
# transaction block data
map_acc_new = df_miss[["accountIdentifierDB", "accountIDEutl"]
].set_index("accountIdentifierDB")["accountIDEutl"].to_dict()
for pf in ["acquiring", "transferring"]:
df[pf + "AccountIdentifierDB"] = (df[pf + "AccountIdentifierDB"]
.fillna(df[pf + "RegistryCode"] + "_unkown"))
df[pf + "AccountID"] = (df[pf + "AccountID"]
.fillna(df[pf + "AccountIdentifierDB"]
.map(lambda x: map_acc_new.get(x))
)
)
# Update account list as well as account mapping list
df_map_acc = pd.read_csv(dir_in + "account_mapping.csv")
df_map_acc = pd.concat([df_map_acc,
pd.DataFrame([[k, v] for k,v in map_acc_new.items()],
columns=["accountIdentifierDB", "accountID"])
])
df_acc = | pd.read_csv(dir_out + "accounts.csv") | pandas.read_csv |
import unittest
from random import random
from craft_ai.pandas import CRAFTAI_PANDAS_ENABLED
if CRAFTAI_PANDAS_ENABLED:
import copy
import pandas as pd
from numpy.random import randn
import craft_ai.pandas
from .data import pandas_valid_data, valid_data
from .utils import generate_entity_id
from . import settings
AGENT_ID_1_BASE = "test_pandas_1"
AGENT_ID_2_BASE = "test_pandas_2"
GENERATOR_ID_BASE = "test_pandas_generator"
SIMPLE_AGENT_CONFIGURATION = pandas_valid_data.SIMPLE_AGENT_CONFIGURATION
SIMPLE_AGENT_BOOSTING_CONFIGURATION = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION
)
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE
)
AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE = (
pandas_valid_data.AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE
)
SIMPLE_AGENT_DATA = pandas_valid_data.SIMPLE_AGENT_DATA
SIMPLE_AGENT_BOOSTING_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_DATA
SIMPLE_AGENT_BOOSTING_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_MANY_DATA
AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA = (
pandas_valid_data.AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA
)
SIMPLE_AGENT_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_MANY_DATA
COMPLEX_AGENT_CONFIGURATION = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION
COMPLEX_AGENT_CONFIGURATION_2 = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION_2
COMPLEX_AGENT_DATA = pandas_valid_data.COMPLEX_AGENT_DATA
COMPLEX_AGENT_DATA_2 = pandas_valid_data.COMPLEX_AGENT_DATA_2
DATETIME_AGENT_CONFIGURATION = pandas_valid_data.DATETIME_AGENT_CONFIGURATION
DATETIME_AGENT_DATA = pandas_valid_data.DATETIME_AGENT_DATA
MISSING_AGENT_CONFIGURATION = pandas_valid_data.MISSING_AGENT_CONFIGURATION
MISSING_AGENT_DATA = pandas_valid_data.MISSING_AGENT_DATA
MISSING_AGENT_DATA_DECISION = pandas_valid_data.MISSING_AGENT_DATA_DECISION
INVALID_PYTHON_IDENTIFIER_CONFIGURATION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_CONFIGURATION
)
INVALID_PYTHON_IDENTIFIER_DATA = pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DATA
INVALID_PYTHON_IDENTIFIER_DECISION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DECISION
)
EMPTY_TREE = pandas_valid_data.EMPTY_TREE
CLIENT = craft_ai.pandas.Client(settings.CRAFT_CFG)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_bad_index(self):
df = pd.DataFrame(randn(10, 5), columns=["a", "b", "c", "d", "e"])
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
def test_add_agent_operations_df(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_websocket(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_unexpected_property(self):
df = pd.DataFrame(
randn(300, 6),
columns=["a", "b", "c", "d", "e", "f"],
index=pd.date_range("20200101", periods=300, freq="T").tz_localize(
"Europe/Paris"
),
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_complex_agent(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_complex_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz_websocket(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasMissingAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_missing_agent(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_missing_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 300)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:59:00", tz="Europe/Paris"),
)
def test_get_agent_states_df(self):
df = CLIENT.get_agent_states(self.agent_id)
self.assertEqual(len(df), 180)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:58:20", tz="Europe/Paris"),
)
def test_tree_visualization(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
craft_ai.pandas.utils.create_tree_html(tree1, "", "constant", None, 500)
def test_display_tree_raised_error(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiError,
craft_ai.pandas.utils.display_tree,
tree1,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithOperations(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWOp")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, valid_data.VALID_OPERATIONS_SET)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_decision_tree_with_pdtimestamp(self):
# test if we get the same decision tree
decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, pd.Timestamp(valid_data.VALID_TIMESTAMP, unit="s", tz="UTC")
)
ground_truth_decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, valid_data.VALID_TIMESTAMP
)
self.assertIsInstance(decision_tree, dict)
self.assertNotEqual(decision_tree.get("_version"), None)
self.assertNotEqual(decision_tree.get("configuration"), None)
self.assertNotEqual(decision_tree.get("trees"), None)
self.assertEqual(decision_tree, ground_truth_decision_tree)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df_complex_agent(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 3)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris"),
)
def test_decide_from_contexts_df(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
test_df = COMPLEX_AGENT_DATA
test_df_copy = test_df.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 6)
self.assertTrue(test_df.equals(test_df_copy))
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris"),
)
# Also works as before, with a plain context
output = CLIENT.decide(tree, {"a": 1, "tz": "+02:00"})
self.assertEqual(output["output"]["b"]["predicted_value"], "Pierre")
def test_decide_from_contexts_df_zero_rows(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
test_df = COMPLEX_AGENT_DATA.iloc[:0, :]
self.assertRaises(
craft_ai.errors.CraftAiBadRequestError,
CLIENT.decide_from_contexts_df,
tree,
test_df,
)
def test_decide_from_contexts_df_empty_df(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
self.assertRaises(
craft_ai.errors.CraftAiBadRequestError,
CLIENT.decide_from_contexts_df,
tree,
pd.DataFrame(),
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent2WithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent2WData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_from_contexts_df_null_decisions(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
test_df = pd.DataFrame(
[["Jean-Pierre", "+02:00"], ["Paul"]],
columns=["b", "tz"],
index=pd.date_range("20200201", periods=2, freq="D").tz_localize(
"Europe/Paris"
),
)
test_df_copy = test_df.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 2)
self.assertTrue(test_df.equals(test_df_copy))
self.assertTrue(pd.notnull(df["a_predicted_value"][0]))
self.assertTrue(pd.notnull(df["a_predicted_value"][1]))
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent3WithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent3WData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA_2)
def test_decide_from_contexts_df_empty_tree(self):
test_df = pd.DataFrame(
[[0, "Jean-Pierre", "+02:00"], [1, "Paul", "+02:00"]],
columns=["a", "b", "tz"],
index=pd.date_range("20200201", periods=2, freq="D").tz_localize(
"Europe/Paris"
),
)
df = CLIENT.decide_from_contexts_df(EMPTY_TREE, test_df)
expected_error_message = (
"Unable to take decision: the decision tree is not "
"based on any context operations."
)
self.assertEqual(len(df), 2)
self.assertEqual(df.columns, ["error"])
self.assertEqual(df["error"][0], expected_error_message)
self.assertEqual(df["error"][1], expected_error_message)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_from_contexts_df_with_array(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA_2.last_valid_index().value // 10 ** 9
)
test_df = pd.DataFrame(
[["Jean-Pierre", "+02:00"], ["Paul"]],
columns=["b", "tz"],
index=pd.date_range("20200201", periods=2, freq="D").tz_localize(
"Europe/Paris"
),
)
test_df_copy = test_df.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 2)
self.assertTrue(test_df.equals(test_df_copy))
self.assertTrue(pd.notnull(df["a_predicted_value"][0]))
self.assertTrue(pd.notnull(df["a_predicted_value"][1]))
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasMissingAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_from_missing_contexts_df(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9, "2"
)
df = CLIENT.decide_from_contexts_df(tree, MISSING_AGENT_DATA_DECISION)
self.assertEqual(len(df), 2)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-02 00:00:00", tz="Europe/Paris"),
)
# Also works as before, with a context containing an optional value
output = CLIENT.decide(tree, {"b": {}, "tz": "+02:00"})
self.assertTrue(pd.notnull(output["output"]["a"]["predicted_value"]))
# Also works as before, with a context containing a missing value
output = CLIENT.decide(tree, {"b": None, "tz": "+02:00"})
self.assertTrue( | pd.notnull(output["output"]["a"]["predicted_value"]) | pandas.notnull |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import numpy as np
from flask import Flask, render_template,request
import requests, bs4
def detect_news(data):
df = pd.read_csv('data.csv')
text_data = df['title']
sim = []
final_list = []
final_list1 = []
    all_news = 0
my_data = {}
tfidf = TfidfVectorizer(stop_words='english')
tfidf = TfidfVectorizer()
sparse_matrix = tfidf.fit_transform(text_data)
data = tfidf.transform([data])
doc_term_matrix = sparse_matrix.todense()
for a in doc_term_matrix:
result = cosine_similarity(a,data)
result = float(result)
sim.append(result)
final_list.append(result)
final_list1 = []
for i in range(0, 4):
max1 = 0
for j in range(len(final_list)):
if final_list[j] > max1:
max1 = final_list[j]
final_list.remove(max1)
max1 = float(max1)
final_list1.append(max1)
    # print(final_list)
for a in final_list1:
if max(final_list1) > 0.40:
index = sim.index(a)
title = df.loc[index,"title"]
text = df.loc[index,"text"]
link = df.loc[index,"link"]
source = df.loc[index,"source"]
score = round(a,2)
my_data[all_news] = [title,text,link,source,score]
all_news+=1
else:
my_data = ["Not",a]
return my_data
def crawler(claim):
my_data = {}
all_news=0
res = requests.get('https://news.google.com/search?q='+claim)
soup = bs4.BeautifulSoup(res.text, "html.parser")
search_result = soup.find_all('div',{'class':'NiLAwe'})
for i in search_result:
title = i.article.h3.text
text = i.article.div.span.text
source = i.find('a',{'class':'wEwyrc AVN2gc uQIVzc Sksgp'}).text
link = i.a.get('href')
link = "https://www.news.google.com/"+link
my_data[all_news] = [title,text,source,link]
all_news+=1
df = | pd.DataFrame.from_dict(my_data,orient='index', columns=['title','text','link','source']) | pandas.DataFrame.from_dict |
from collections import defaultdict
from itertools import product
import os
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
# creates the output directory
simulation_data_dir = 'datasets'
if not os.path.exists(simulation_data_dir):
os.mkdir(simulation_data_dir)
random_seeds = np.arange(2020, 2025) # repetition with different random seeds
N = 10000 # big enough so that we can subsample to get different sizes
Nfeats = [10, 100, 500] # number of features
feat_perc_informative = 0.6 # percent of informative features
#binary_ratio = 0.5
# classification specific
feat_perc_redundant = 0.1 # percent of redundant features
feat_perc_repeated = 0.1 # percent of repeated features
n_classes = 2 # number of classes
n_clusters_per_class = 1     # number of clusters per class
class_ratios = [1, 10, 100] # class 1:2 ratio
flip_ys = [0.01, 0.1, 0.2] # probability of flipping class
# regression specific
effective_rank_percs = [0.1, 0.5] # degree of colinearity
reg_noises = [0.1, 1, 10] # variance of gaussian noise added
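# Note (derived from the grid above): the classification loop below sweeps
# 5 seeds x 3 feature counts x 3 class ratios x 3 flip probabilities = 135 datasets.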
# generate simulated datasets
df_cls = defaultdict(list)
for random_seed, Nfeat, class_ratio, flip_y in product(random_seeds, Nfeats, class_ratios, flip_ys): # iterate over all combinations
# print current parameters
print(random_seed, Nfeat, class_ratio, flip_y)
# define class weight
cw = np.array([1, class_ratio])
# call sklearn's make_classification function
X, y = make_classification(
n_samples=N, n_features=Nfeat,
n_informative=round(Nfeat*feat_perc_informative),
n_redundant=round(Nfeat*feat_perc_redundant),
n_repeated=round(Nfeat*feat_perc_repeated),
n_classes=n_classes, n_clusters_per_class=n_clusters_per_class,
weights=cw/cw.sum(),
flip_y=flip_y,
random_state=random_seed)
# add some monotonic nonlinearity to the features to make it harder
X[:,:Nfeat//3] = X[:,:Nfeat//3]**3/10
X[:,Nfeat//3:Nfeat//3*2] = 10*np.sign(X[:,Nfeat//3:Nfeat//3*2])*np.log1p(np.abs(X[:,Nfeat//3:Nfeat//3*2]))
X[:,Nfeat//3*2:] = np.exp(X[:,Nfeat//3*2:])
#X[:, :round(X.shape[1]*binary_ratio)] = (X[:, :round(X.shape[1]*binary_ratio)]>0).astype(int) # make binary
# standardize
std = X.std(axis=0)
std[std<0.001] = 1
X = X/std*5
# convert into pandas dataframe and then save as csv
df = pd.DataFrame(
data=np.c_[X, y],
columns=[f'x{i+1}' for i in range(X.shape[1])]+['event']
)
save_path = os.path.join(simulation_data_dir, f'simulated_dataset_classfication_Nfeat{Nfeat}_classratio{class_ratio}_flipy{flip_y}_randomseed{random_seed}.csv')
df.to_csv(save_path, index=False, float_format='%.1f')
df_cls['RandomSeed'].append(random_seed)
df_cls['Nfeat'].append(Nfeat)
df_cls['ClassRatio'].append(class_ratio)
df_cls['FlipYProb'].append(flip_y)
df_cls['Path'].append(save_path)
# save the overall dataset list
df_cls = | pd.DataFrame(data=df_cls) | pandas.DataFrame |
import xml.etree.ElementTree as ET
from pathlib import Path
import pandas as pd
from .utils import remove_duplicate_indices, resample_data
NAMESPACES = {
"default": "http://www.topografix.com/GPX/1/1",
"gpxtpx": "http://www.garmin.com/xmlschemas/TrackPointExtension/v1",
"gpxx": "http://www.garmin.com/xmlschemas/GpxExtensions/v3",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
def xml_find_value_or_none(element, match, namespaces=None):
e = element.find(match, namespaces=namespaces)
if e is None:
return e
else:
return e.text
def read_gpx(fpath, resample: bool = False, interpolate: bool = False) -> pd.DataFrame:
"""This method loads a GPX file into a Pandas DataFrame.
    Column names are translated to sweat terminology (e.g. "heart_rate" -> "heartrate").
Args:
fpath: str, file-like or Path object
resample: whether or not the data frame needs to be resampled to 1Hz
interpolate: whether or not missing data in the data frame needs to be interpolated
Returns:
A pandas data frame with all the data.
"""
tree = ET.parse(Path(fpath))
root = tree.getroot()
trk = root.find("default:trk", NAMESPACES)
trkseg = trk.find("default:trkseg", NAMESPACES)
records = []
for trackpoint in trkseg.findall("default:trkpt", NAMESPACES):
latitude = trackpoint.attrib.get("lat", None)
longitude = trackpoint.attrib.get("lon", None)
elevation = xml_find_value_or_none(trackpoint, "default:ele", NAMESPACES)
datetime = xml_find_value_or_none(trackpoint, "default:time", NAMESPACES)
extensions = trackpoint.find("default:extensions", NAMESPACES)
power = xml_find_value_or_none(extensions, "default:power", NAMESPACES)
trackpoint_extension = extensions.find("gpxtpx:TrackPointExtension", NAMESPACES)
temperature = xml_find_value_or_none(
trackpoint_extension, "gpxtpx:atemp", NAMESPACES
)
heartrate = xml_find_value_or_none(
trackpoint_extension, "gpxtpx:hr", NAMESPACES
)
cadence = xml_find_value_or_none(trackpoint_extension, "gpxtpx:cad", NAMESPACES)
records.append(
dict(
latitude=pd.to_numeric(latitude),
longitude= | pd.to_numeric(longitude) | pandas.to_numeric |
"""
Summary
-------
USBR Neural Network Training Module. The goal of this FF-ANN BGD Training Model
is to determine the Weight values for the final FF-ANN.
To migrate to tensorflow v2.0, follow these instructions, accessed 2019-09-20:
https://www.tensorflow.org/beta/guide/migration_guide
To run code on Google Colaboratory with tensorflow v2.0, follow these
instructions, accessed 2019-09-20: https://www.tensorflow.org/beta/tutorials/quickstart/beginner
Notes
-----
1. Regarding os.environ(), see comment by Carmezim here, accessed 4/12/2017:
https://github.com/tensorflow/tensorflow/issues/7778
    Basically, the os.environ() call suppresses TensorFlow WARNINGS, which
    state that TensorFlow can run faster on CPU if built from source. Building
    from source will be a consideration if faster runtime is necessary. More
information also available via the following link:
https://stackoverflow.com/questions/47068709/your-cpu-supports-instructions-that-this-tensorflow-binary-was-not-compiled-to-u
2. This model is set up so that the final trained model will accept an input
feature column vector because that is how traditional Linear Algebra is
    taught. Data sets typically store observations as row vectors (i.e., features in columns) for ease
of viewing because the number of data points (rows) greatly outnumbers the
number of features. Be aware that input data will need to be reshaped when
it has gone through pre- and post-processing.
3. Successfully able to run this training model with the ReLU activation function.
    In order to train properly on y = sin(x), the AdamOptimizer needs a small
    learning rate (0.001 according to the TensorFlow website).
"""
# %% Import libraries.
# Import standard libraries.
import os
import datetime as dt
# Import third party libraries.
import numpy as np
import pandas as pd
import tensorflow as tf
# %% Set attributes.
# See Note #1 for explanation.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
if tf.__version__ != '1.14.0':
msg = ('This module is tested to work only with tensorflow version 1.14.0'
' and will not run on any other version. Please, install tensorflow'
' 1.14.0 to use this module.')
raise ImportError(msg)
# %% Define functions.
def ActFunc(Activation='linear'):
"""
Summary
-------
This function is currently not being used but will be used in future
development.
"""
ActivationFunctions = {'TanH': tf.tanh,
'ReLu': tf.nn.relu,
'linear': lambda x: x}
return ActivationFunctions[Activation]
def write_summaries():
"""
For future use.
"""
pass
def construct_nn():
"""
For future use.
"""
pass
def train_nn():
"""
For future use.
"""
pass
# def FFANN_SGD(x_Trn, t_Trn, x_Val, t_Val, x_Tst, t_Tst, nHidWeights, vSeed=100,
# Activation='ReLu', Prompt_Report=True, L_Rate=0.5,
# Max_Iter=100000, SummaryFreq=10000, ReportFreq=10000):
def FFANN_SGD(x_Trn, t_Trn, x_Val, t_Val, x_Tst, t_Tst, nHidWeights, vSeed=100,
Prompt_Report=True, L_Rate=0.5, Max_Iter=100000,
SummaryFreq=10000, ReportFreq=10000):
time0 = dt.datetime.now()
# Construct input and target placeholders.
x = tf.compat.v1.placeholder(tf.float32, name="Inputs")
t = tf.compat.v1.placeholder(tf.float32, name="Targets")
# Map placeholders to provided datasets.
trn_dict = {x: x_Trn, t: t_Trn}
val_dict = {x: x_Val, t: t_Val}
tst_dict = {x: x_Tst, t: t_Tst}
# Initialize variables.
Max_Iter = Max_Iter + 1
rows_x_Trn = x_Trn.shape[0] #number of rows for data input
rows_t_Trn = t_Trn.shape[0] #number of rows for data targets
if type(nHidWeights) is not list:
nHidWeights = [nHidWeights]
nLayers = len(nHidWeights) #number of hidden layers
tf.compat.v1.set_random_seed(vSeed)
C_tanh = tf.constant(1.73205080756888, dtype=tf.float32)
C_x = tf.constant(0.658478948462, dtype=tf.float32)
I = {1: x} #input into hidden layer
rI = {1: rows_x_Trn} #number of rows of input into hidden layer
W = {} #hidden layer weights & bias
H = {} #hidden layer output; input into the next layer
# Construct hidden layers.
time1 = dt.datetime.now()
for i, nHW in enumerate(nHidWeights, start=1):
w_id = 'W{}'.format(i)
StDev_HW = ((nHW+1)**(-0.5)) / 3
W_init = tf.random.normal((nHW, rI[i]), 0, StDev_HW, dtype=tf.float32)
B_init = tf.random.normal((nHW, 1 ), 0, 0.00033 , dtype=tf.float32)
W[i] = {'W': tf.Variable(W_init, name='W{}'.format(i)),
'B': tf.Variable(B_init, name='B{}'.format(i))}
Step1 = tf.matmul(W[i]['W'], I[i])
Step2 = tf.add(Step1, W[i]['B'])
#Step3 = tf.multiply(Step2, C_x)
#Step4 = tf.tanh(Step3)
H[i] = tf.nn.relu(Step2, name='H{}'.format(i)) #tf.multiply(Step4,C_tanh)
I[i + 1] = H[i]
rI[i + 1] = nHW
# print(I[i].name[:-2])
# print(W[i]['W'].name[:-2])
# Construct Output Layer.
StDev_OW = ((rows_t_Trn+1)**(-0.5)) / 3
OW_init = tf.random.normal((rows_t_Trn, rI[nLayers + 1]), 0, StDev_OW, dtype=tf.float32)
OB_init = tf.random.normal((rows_t_Trn, 1 ), 0, 0.00033 , dtype=tf.float32)
O = {'W': tf.Variable(OW_init, name='WO'),
'B': tf.Variable(OB_init, name='BO')}
Step1 = tf.matmul(O['W'], I[nLayers + 1])
Step2 = tf.add(Step1, O['B'], name='Output')
#Step3 = tf.multiply(Step2, C_x)
#Step4 = tf.tanh(Step3)
y = Step2 #y = tf.multiply(Step4, C_tanh)
O['y'] = y
# Construct Error and Accuracy Equations.
E = tf.reduce_mean(tf.multiply(tf.square(tf.subtract(y, t)), 0.5))
t_ave = tf.reduce_mean(t)
Et = tf.reduce_sum(tf.square(tf.subtract(t, t_ave)), name="Model_Variance")
Es = tf.reduce_sum(tf.square(tf.subtract(y, t)), name="Prediction_Variance")
Acc = 1 - (Es / Et)
# Construct optimizer.
# optimizeE = tf.compat.v1.train.GradientDescentOptimizer(L_Rate).minimize(E)
optimizeE = tf.compat.v1.train.AdamOptimizer(L_Rate).minimize(E)
# Initialize training sessions.
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
# TODO: Move this to board writing.
# <JAS 2019-09-20>
Model_Tag = 'FFANN_SGD'
Run_Stamp = time0.strftime('%Y-%m-%d-%H%M%S_'+Model_Tag+'_i'+str(int(Max_Iter/1000))+'k')
tb_dir = os.path.abspath(r'../data/tensorboards')
run_dir = os.path.join(tb_dir, Run_Stamp)
trn_dir = os.path.join(run_dir, '0_Training')
val_dir = os.path.join(run_dir, '1_Validation')
tst_dir = os.path.join(run_dir, '2_Testing')
# Write histograms of Hidden Weights, Biases, and Returns.
for i, w in W.items():
HL_Name = 'Hidden_Layer_{}'.format(i)
with tf.name_scope(HL_Name):
tf.compat.v1.summary.histogram(w['W'].name[:-2], w['W'])
tf.compat.v1.summary.histogram(w['B'].name[:-2], w['B'])
tf.compat.v1.summary.histogram(H[i].name[:-2], H[i])
# Write histogram of Output Weights, Biases, and Returns.
with tf.name_scope("Output_Layer"):
tf.compat.v1.summary.histogram(O['W'].name[:-2], O['W'])
tf.compat.v1.summary.histogram(O['W'].name[:-2], O['B'])
tf.compat.v1.summary.histogram(O['y'].name[:-2], O['y'])
# Construct Error & Accuracy Equations.
with tf.name_scope("Error_Equation"):
tf.compat.v1.summary.scalar('Error', E)
with tf.name_scope("Accuracy_Equation"):
tf.compat.v1.summary.scalar('Accuracy', Acc)
# TODO: Move this to board writing.
# <JAS 2019-09-20>
# Merge summaries for reporting.
merged_summary = tf.compat.v1.summary.merge_all()
# Write summaries to respective directories.
TrnWriter = tf.compat.v1.summary.FileWriter(trn_dir)
ValWriter = tf.compat.v1.summary.FileWriter(val_dir)
TstWriter = tf.compat.v1.summary.FileWriter(tst_dir)
# Add graphs to TensorBoard session.
TrnWriter.add_graph(sess.graph)
ValWriter.add_graph(sess.graph)
TstWriter.add_graph(sess.graph)
# ????: Return session object and placeholder dictionaries at this point?
# <JAS 2019-09-20>
time2 = dt.datetime.now()
# Perform training iterations.
product_header = [['Error', 'Accuracy'],
['Training', 'Validation', 'Testing']]
lvl_names = ['Indicator', 'Dataset']
report_header = | pd.MultiIndex.from_product(product_header, names=lvl_names) | pandas.MultiIndex.from_product |
"""
Wesbrook has a DH system fed first by heat pumps using waste heat and last by NG peaking boilers. This script takes the
demand calculated by the CEA and reinterprets it for this system, outputting the results directly into the CEA
demand files.
"""
import pandas as pd
import time
import logging
logging.getLogger('numba').setLevel(logging.WARNING)
from itertools import repeat
import cea.utilities.parallel
import cea.config
import cea.utilities
import cea.inputlocator
import cea.demand.demand_main
import cea.resources.radiation_daysim.radiation_main
import cea.bigmacc.bigmacc_rules
import cea.datamanagement.archetypes_mapper
import cea.datamanagement.data_initializer
import cea.analysis.costs.system_costs
import cea.analysis.lca.main
import cea.utilities.dbf
__author__ = "<NAME>"
__copyright__ = ""
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__status__ = ""
def demand_source(locator, bldg, resource):
# Qhs_sys_kWh, Qww_sys_kWh
hourly_results = locator.get_demand_results_file(bldg, 'csv')
df_demand = pd.read_csv(hourly_results)
return df_demand[resource].rename(bldg)
def breakup_use(df, config):
df = df.loc["total"]
df = df.transpose()
df.index = pd.date_range(start=f'1/1/{config.emissions.year_to_calculate}', periods=8760, freq='H')
return df
def district_buildings(locator):
supply_hs_df = pd.read_excel(locator.get_database_supply_assemblies(), sheet_name='HEATING')
supply_dhw_df = pd.read_excel(locator.get_database_supply_assemblies(), sheet_name='HOT_WATER')
hs_codes = supply_hs_df[supply_hs_df['feedstock'] == 'DISTRICT']['code'].to_list()
dhw_codes = supply_dhw_df[supply_dhw_df['feedstock'] == 'DISTRICT']['code'].to_list()
supply_df = cea.utilities.dbf.dbf_to_dataframe(locator.get_building_supply(), index='Name')
def get_build_list(codes, supply_type):
if supply_type in codes:
return 'Yes'
else:
return 'No'
supply_df['hs_keep'] = supply_df.apply(lambda x: get_build_list(hs_codes, x['type_hs']), axis=1)
on_DH_hs = supply_df[supply_df['hs_keep'] == 'Yes']['Name'].to_list()
supply_df['dhw_keep'] = supply_df.apply(lambda x: get_build_list(dhw_codes, x['type_dhw']), axis=1)
on_DH_dhw = supply_df[supply_df['dhw_keep'] == 'Yes']['Name'].to_list()
return on_DH_hs, on_DH_dhw
def ng(total, hplim):
if total > hplim:
return total - hplim
else:
return 0
def hp(total, ng_demand):
if ng_demand > 0:
return total - ng_demand
else:
return total
def hp1(hp_demand, trlim):
if hp_demand >= trlim:
return trlim
else:
return hp_demand
def hp2(hp_demand, hp1_demand, trlim):
if hp1_demand < trlim:
return 0
else:
return hp_demand - trlim
def calc_district_demand(df):
months = list(range(1, 13, 1))
triumf_max = [5, 3.5, 5, 9, 9, 9.5, 11, 10.5, 9.5, 9, 8, 6.5]
hp_max = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
district = ((557900 + 75900) / 1078800)
triumf_district = [round(i * district * 1000, 2) for i in triumf_max]
hp_district = [round(i * district * 1000, 2) for i in hp_max]
triumf_limit = dict(zip(months, triumf_district))
hp_limit = dict(zip(months, hp_district))
df['tr_limit'] = df.index.month.map(triumf_limit)
df['hp_limit'] = df.index.month.map(hp_limit)
df['ng_demand'] = df.apply(lambda x: ng(x['total'], x['hp_limit']), axis=1)
df['hp_demand'] = df.apply(lambda x: hp(x['total'], x['ng_demand']), axis=1)
df['hp1_demand'] = df.apply(lambda x: hp1(x['hp_demand'], x['tr_limit']), axis=1)
df['hp2_demand'] = df.apply(lambda x: hp2(x['hp_demand'], x['hp1_demand'], x['tr_limit']), axis=1)
df['ng_source_demand'] = df['ng_demand'] / 0.95
df['hp1_source_demand'] = df['hp1_demand'] / 3.4
df['hp2_source_demand'] = df['hp2_demand'] / 2.7
df['hp_source_demand'] = df['hp1_source_demand'] + df['hp2_source_demand']
return df[['ng_source_demand', 'hp_source_demand']]
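# Worked example for calc_district_demand (approximate, for illustration only): the district
# share is (557900 + 75900) / 1078800 ~= 0.5875, so in January tr_limit ~= 5 * 0.5875 * 1000
# ~= 2938 kW and hp_limit ~= 10 * 0.5875 * 1000 ~= 5875 kW. For an hour with total = 7000:
# ng_demand = 7000 - 5875 = 1125, hp_demand = 5875, hp1_demand = 2938 (capped at tr_limit),
# hp2_demand = 5875 - 2938 = 2937, and the source demands divide these terms by the
# efficiencies 0.95, 3.4 and 2.7 respectively.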
def recalc_DH(config):
# TODO By splitting up DHW and HS the overall demand on the system is minimized
    # TODO rewrite so the district heat and district hot water can run independently
locator = cea.inputlocator.InputLocator(config.scenario)
on_DH_hs, on_DH_dhw = district_buildings(locator)
    if len(on_DH_hs) == 0 or len(on_DH_dhw) == 0:
        print(' - - - No buildings are connected to district heating or district hot water.')
heat_df = pd.DataFrame()
print(' - - - Gathering space heating...')
for bldg in on_DH_hs:
heat_df = heat_df.append(demand_source(locator, bldg, 'Qhs_sys_kWh'))
heat_df['Name'] = heat_df.index
dhw_df = pd.DataFrame()
print(' - - - Gathering DHW...')
for bldg in on_DH_dhw:
dhw_df = dhw_df.append(demand_source(locator, bldg, 'Qww_sys_kWh'))
dhw_df['Name'] = dhw_df.index
demand_df = pd.concat([heat_df, dhw_df], ignore_index=True).groupby(['Name'], as_index=False).sum()
demand_df = demand_df.set_index(demand_df['Name'], drop=True)
del demand_df['Name']
demand_df.loc["total"] = demand_df.sum()
heat_df.loc["total"] = heat_df.sum()
del heat_df['Name']
dhw_df.loc["total"] = dhw_df.sum()
del dhw_df['Name']
def calc_share(demand, total):
return demand / total
print(' - - - Calculating share of district heat load...')
heat_df_share = heat_df.apply(lambda x: calc_share(x, heat_df.loc['total']), axis=1)
dhw_df_share = dhw_df.apply(lambda x: calc_share(x, dhw_df.loc['total']), axis=1)
demand_DH_heat = pd.DataFrame(breakup_use(heat_df, config))
demand_DH_dhw = pd.DataFrame(breakup_use(dhw_df, config))
demand_DH_heat = calc_district_demand(demand_DH_heat)
demand_DH_dhw = calc_district_demand(demand_DH_dhw)
hp_dhw = demand_DH_dhw['hp_source_demand'].reset_index(drop=True).transpose() * dhw_df_share
hp_heat = demand_DH_heat['hp_source_demand'].reset_index(drop=True).transpose() * heat_df_share
ng_dhw = demand_DH_dhw['ng_source_demand'].reset_index(drop=True).transpose() * dhw_df_share
ng_heat = demand_DH_heat['ng_source_demand'].reset_index(drop=True).transpose() * heat_df_share
all_bldgs = on_DH_hs + list(set(on_DH_dhw) - set(on_DH_hs))
if on_DH_hs == on_DH_dhw:
print(' - - - Changing results for all bldgs...')
for bldg in all_bldgs:
# open bldg demand file and replace _ww_kWh with following
hourly_results = locator.get_demand_results_file(bldg, 'csv')
df_demand = pd.read_csv(hourly_results)
df_demand['GRID_hs_kWh'] = hp_heat.loc[bldg]
df_demand['E_hs_kWh'] = hp_heat.loc[bldg]
df_demand['NG_hs_kWh'] = ng_heat.loc[bldg]
df_demand['DH_hs_kWh'] = 0
df_demand['GRID_ww_kWh'] = hp_dhw.loc[bldg]
df_demand['E_ww_kWh'] = hp_heat.loc[bldg]
df_demand['NG_ww_kWh'] = ng_dhw.loc[bldg]
df_demand['DH_ww_kWh'] = 0
df_demand['GRID_kWh'] = df_demand[['GRID_a_kWh', 'GRID_l_kWh', 'GRID_v_kWh', 'GRID_ve_kWh','GRID_data_kWh',
'GRID_pro_kWh', 'GRID_aux_kWh', 'GRID_ww_kWh','GRID_hs_kWh',
'GRID_cs_kWh', 'GRID_cdata_kWh', 'GRID_cre_kWh']].sum(axis=1)
df_demand['E_sys_kWh'] = df_demand[['Eal_kWh', 'Ea_kWh', 'El_kWh', 'Ev_kWh', 'Eve_kWh', 'Edata_kWh',
'Epro_kWh', 'Eaux_kWh', 'E_ww_kWh', 'E_hs_kWh', 'E_cs_kWh',
'E_cre_kWh', 'E_cdata_kWh']].sum(axis=1)
df_demand.to_csv(hourly_results)
else:
for bldg in on_DH_hs:
# open bldg demand file and replace _ww_kWh with following
print(' - - - Resetting results for all district heat buildings...')
hourly_results = locator.get_demand_results_file(bldg, 'csv')
df_demand = pd.read_csv(hourly_results)
df_demand['GRID_hs_kWh'] = hp_heat.loc[bldg]
df_demand['E_hs_kWh'] = hp_heat.loc[bldg]
df_demand['NG_hs_kWh'] = ng_heat.loc[bldg]
df_demand['DH_hs_kWh'] = 0
df_demand.to_csv(hourly_results)
for bldg in on_DH_dhw:
# open bldg demand file and replace _ww_kWh with following
print(' - - - Resetting results for district hot water bldgs...')
hourly_results = locator.get_demand_results_file(bldg, 'csv')
df_demand = pd.read_csv(hourly_results)
df_demand['GRID_ww_kWh'] = hp_dhw.loc[bldg]
df_demand['E_ww_kWh'] = hp_dhw.loc[bldg]
df_demand['NG_ww_kWh'] = ng_dhw.loc[bldg]
df_demand['DH_ww_kWh'] = 0
df_demand.to_csv(hourly_results)
for bldg in all_bldgs:
hourly_results = locator.get_demand_results_file(bldg, 'csv')
df_demand = | pd.read_csv(hourly_results) | pandas.read_csv |
from fastapi import APIRouter, HTTPException
import pandas as pd
import plotly.express as px
import json
from dotenv import load_dotenv
import os
import psycopg2
from sqlalchemy import create_engine
from sqlalchemy.types import Integer, Float, Text, String, DateTime
router = APIRouter()
@router.post('/precip_viz/')
async def precip_viz(user_queried_citystates: list):
"""
### Path Parameter (POST from front-end)
list: A list of city-states the user queried in this format: ["Albany, NY", "San Francisco, CA", "Chicago, IL"]
### Response
JSON string of all figures to render with [react-plotly.js](https://plotly.com/javascript/react/)
"""
def create_db_uri():
# give full path to .env
env_path = r'.env'
# LOAD environment variables
load_dotenv(dotenv_path=env_path, verbose=True)
# GET .env vars
DB_FLAVOR = os.getenv("DB_FLAVOR")
DB_PYTHON_LIBRARY = os.getenv("DB_PYTHON_LIBRARY")
DB_HOST = os.getenv("DB_HOST")
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASS = os.getenv("DB_PASS")
DB_PORT = os.getenv("DB_PORT")
DB_URI = DB_FLAVOR + "+" + DB_PYTHON_LIBRARY + "://" + DB_USER + ":" + DB_PASS + "@" + DB_HOST + ":" + DB_PORT + "/" + DB_NAME
return DB_URI
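    # For reference (placeholder values, not real credentials): given the concatenation order
    # above, create_db_uri() yields a standard SQLAlchemy URI of the form
    #   postgresql+psycopg2://db_user:db_pass@db_host:5432/db_name
    # assuming the .env specifies a PostgreSQL flavor with the psycopg2 driver.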
DB_URI = create_db_uri()
# CONNECTION Engine with SQLAlchemy
engine = create_engine(DB_URI, echo=True)
# _list = ["Albany, NY", "Sacramento, CA", "Austin, TX"]
def sql_query(user_queried_citystates):
'''
Create a SQL query to grab only the user queried cities' data from the weather table in the DB.
Output: subset grouped DF by month and city with only queried cities
'''
# get length of list of queried cities
list_length = len(user_queried_citystates)
        # Branch on the number of queried cities to build the matching parameterized query
if list_length == 1:
city1 = user_queried_citystates[0]
query1 = 'SELECT * FROM weather WHERE "City_State" IN (%(city1)s)'
weather = pd.read_sql(sql = query1, columns = "City_State", params={"city1":city1}, con=engine, parse_dates=['created_at', 'updated_at'])
elif list_length == 2:
city1 = user_queried_citystates[0]
city2 = user_queried_citystates[1]
query2 = 'SELECT * FROM weather WHERE "City_State" IN (%(city1)s, %(city2)s)'
weather = | pd.read_sql(sql = query2, columns = "City_State", params={"city1":city1, "city2":city2}, con=engine, parse_dates=['created_at', 'updated_at']) | pandas.read_sql |
import os
import logging
import copy
import numpy as np
import pandas as pd
from oemof.solph import EnergySystem, Bus, Sink, Source
import oemof.tabular.tools.postprocessing as pp
from oemof.tools.economics import annuity
from oemof_flexmex.helpers import delete_empty_subdirs, load_elements, load_scalar_input_data,\
load_yaml
from oemof_flexmex.parametrization_scalars import get_parameter_values
from oemof_flexmex.facades import TYPEMAP
basic_columns = ['region', 'name', 'type', 'carrier', 'tech']
# Path definitions
module_path = os.path.abspath(os.path.dirname(__file__))
MODEL_CONFIG = 'model_config'
PATH_MAPPINGS_REL = '../flexmex_config'
path_mappings = os.path.abspath(os.path.join(module_path, PATH_MAPPINGS_REL))
path_map_output_timeseries = os.path.join(path_mappings, 'mapping-output-timeseries.yml')
path_map_input_scalars = os.path.join(path_mappings, 'mapping-input-scalars.yml')
# Load mappings
map_output_timeseries = load_yaml(path_map_output_timeseries)
FlexMex_Parameter_Map = load_yaml(path_map_input_scalars)
def create_postprocessed_results_subdirs(postprocessed_results_dir):
for parameters in map_output_timeseries.values():
for subdir in parameters.values():
path = os.path.join(postprocessed_results_dir, subdir)
if not os.path.exists(path):
os.makedirs(path)
def get_capacities(es):
r"""
Calculates the capacities of all components.
Adapted from oemof.tabular.tools.postprocessing.write_results()
Parameters
----------
es : oemof.solph.EnergySystem
EnergySystem containing the results.
Returns
-------
capacities : pd.DataFrame
DataFrame containing the capacities.
"""
def get_facade_attr(attr):
# Function constructor for getting a specific property from
# the Facade object in bus_results() DataFrame columns "from" or "to"
def fnc(flow):
# Get property from the Storage object in "from" for the discharge device
if isinstance(flow['from'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return getattr(flow['from'], attr, np.nan)
# Get property from the Storage object in "to" for the charge device
if isinstance(flow['to'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return getattr(flow['to'], attr, np.nan)
# Get property from other object in "from"
return getattr(flow['from'], attr, np.nan)
return fnc
def get_parameter_name(flow):
if isinstance(flow['from'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return "capacity_discharge_invest"
if isinstance(flow['to'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return "capacity_charge_invest"
return np.nan
try:
flows = pp.bus_results(es, es.results, select="scalars", concat=True)
flows.name = "var_value"
endogenous = flows.reset_index()
# Results already contain a column named "type". Call this "var_name" to
# preserve its content ("invest" for now)
endogenous.rename(columns={"type": "var_name"}, inplace=True)
# Update "var_name" with Storage specific parameter names for charge and discharge devices
df = pd.DataFrame({'var_name': endogenous.apply(get_parameter_name, axis=1)})
endogenous.update(df)
endogenous["region"] = endogenous.apply(get_facade_attr('region'), axis=1)
endogenous["name"] = endogenous.apply(get_facade_attr('label'), axis=1)
endogenous["type"] = endogenous.apply(get_facade_attr('type'), axis=1)
endogenous["carrier"] = endogenous.apply(get_facade_attr('carrier'), axis=1)
endogenous["tech"] = endogenous.apply(get_facade_attr('tech'), axis=1)
endogenous.drop(['from', 'to'], axis=1, inplace=True)
endogenous.set_index(
["region", "name", "type", "carrier", "tech", "var_name"], inplace=True
)
except ValueError:
endogenous = pd.DataFrame()
d = dict()
for node in es.nodes:
if not isinstance(node, (Bus, Sink, TYPEMAP["shortage"], TYPEMAP["link"])):
# Specify which parameters to read depending on the technology
parameters_to_read = []
if isinstance(node, TYPEMAP["storage"]):
# TODO for brownfield optimization
# parameters_to_read = ['capacity', 'storage_capacity']
                # WORKAROUND Skip 'capacity' to save some effort in aggregation and elsewhere
# possible because storages are greenfield optimized only: 'capacity' = 0
parameters_to_read = ['storage_capacity']
elif isinstance(node, TYPEMAP["asymmetric storage"]):
parameters_to_read = ['capacity_charge', 'capacity_discharge', 'storage_capacity']
elif getattr(node, "capacity", None) is not None:
parameters_to_read = ['capacity']
# Update dict with values in oemof's parameter->value structure
for p in parameters_to_read:
key = (
node.region,
node.label,
# [n for n in node.outputs.keys()][0],
node.type,
node.carrier,
node.tech, # tech & carrier are oemof-tabular specific
p
) # for oemof logic
d[key] = {'var_value': getattr(node, p)}
exogenous = pd.DataFrame.from_dict(d).T # .dropna()
if not exogenous.empty:
exogenous.index = exogenous.index.set_names(
['region', 'name', 'type', 'carrier', 'tech', 'var_name']
)
# Read storage capacities (from oemof.heat)
# only component_results() knows about 'storage_capacity'
try:
components = pd.concat(pp.component_results(es, es.results, select='scalars'))
components.name = 'var_value'
storage = components.reset_index()
storage.drop('level_0', 1, inplace=True)
storage.columns = ['name', 'to', 'var_name', 'var_value']
storage['region'] = [
getattr(t, "region", np.nan) for t in components.index.get_level_values('from')
]
storage['type'] = [
getattr(t, "type", np.nan) for t in components.index.get_level_values('from')
]
storage['carrier'] = [
getattr(t, "carrier", np.nan) for t in components.index.get_level_values('from')
]
storage['tech'] = [
getattr(t, "tech", np.nan) for t in components.index.get_level_values('from')
]
storage = storage.loc[storage['to'].isna()]
storage.drop('to', 1, inplace=True)
storage = storage[['region', 'name', 'type', 'carrier', 'tech', 'var_name', 'var_value']]
# Delete unused 'init_cap' rows - parameter name misleading! (oemof issue)
storage.drop(storage.loc[storage['var_name'] == 'init_cap'].index, axis=0, inplace=True)
storage.replace(
['invest'],
['storage_capacity_invest'],
inplace=True
)
storage.set_index(
['region', "name", "type", "carrier", "tech", "var_name"], inplace=True
)
except ValueError:
storage = pd.DataFrame()
capacities = pd.concat([endogenous, exogenous, storage])
return capacities
def format_capacities(oemoflex_scalars, capacities):
df = pd.DataFrame(columns=oemoflex_scalars.columns)
df.loc[:, 'name'] = capacities.reset_index().loc[:, 'name']
df.loc[:, 'tech'] = capacities.reset_index().loc[:, 'tech']
df.loc[:, 'carrier'] = capacities.reset_index().loc[:, 'carrier']
df.loc[:, 'var_name'] = capacities.reset_index().loc[:, 'var_name']
df.loc[:, 'var_value'] = capacities.reset_index().loc[:, 'var_value']
df.loc[:, 'type'] = capacities.reset_index().loc[:, 'type']
df.loc[:, 'region'] = capacities.reset_index().loc[:, 'region']
df['var_unit'] = 'MW'
return df
def get_sequences_by_tech(results):
r"""
    Collects the sequences of the components from the optimization results and
    concatenates them into one DataFrame with (region, carrier_tech, var_name) columns.
Parameters
----------
results : dict
Dictionary containing oemof.solph.Model results.
Returns
-------
    sequences_by_tech : pd.DataFrame
        DataFrame of sequences with MultiIndex columns (region, carrier_tech, var_name).
"""
# copy to avoid manipulating the data in es.results
sequences = copy.deepcopy({key: value['sequences'] for key, value in results.items()})
sequences_by_tech = []
# Get internal busses for all 'ReservoirWithPump' and 'Bev' nodes to be ignored later
internal_busses = get_subnodes_by_type(sequences, Bus)
# Get inflows for all 'ReservoirWithPump' nodes
reservoir_inflows = get_subnodes_by_type(sequences, Source)
for key, df in sequences.items():
if isinstance(key[0], Bus):
component = key[1]
bus = key[0]
if isinstance(component, TYPEMAP["link"]):
if bus == component.from_bus:
var_name = 'flow_gross_forward'
elif bus == component.to_bus:
var_name = 'flow_gross_backward'
elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
var_name = 'flow_fuel'
else:
var_name = 'flow_in'
if isinstance(key[1], Bus):
bus = key[1]
component = key[0]
if isinstance(component, TYPEMAP["link"]):
if bus == component.to_bus:
var_name = 'flow_net_forward'
elif bus == component.from_bus:
var_name = 'flow_net_backward'
elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
if bus == component.electricity_bus:
var_name = 'flow_electricity'
elif bus == component.heat_bus:
var_name = 'flow_heat'
elif component in reservoir_inflows:
var_name = 'flow_inflow'
else:
var_name = 'flow_out'
if key[1] is None:
component = key[0]
var_name = 'storage_content'
# Ignore sequences FROM internal busses (concerns ReservoirWithPump, Bev)
if bus in internal_busses and component not in reservoir_inflows:
continue
carrier_tech = component.carrier + '-' + component.tech
if isinstance(component, TYPEMAP["link"]):
# Replace AT-DE by AT_DE to be ready to be merged with DataFrames from preprocessing
region = component.label.replace('-', '_')
else:
            # Take AT from AT-ch4-gt; string op since sub-nodes lack a 'region' attribute
region = component.label.split('-')[0]
df.columns = pd.MultiIndex.from_tuples([(region, carrier_tech, var_name)])
df.columns.names = ['region', 'carrier_tech', 'var_name']
sequences_by_tech.append(df)
sequences_by_tech = pd.concat(sequences_by_tech, axis=1)
return sequences_by_tech
def get_subnodes_by_type(sequences, cls):
r"""
Get all the subnodes of type 'cls' in the <to> nodes of 'sequences'
Parameters
----------
sequences : dict (special format, see get_sequences_by_tech() and before)
key: tuple of 'to' node and 'from' node: (from, to)
value: timeseries DataFrame
cls : Class
Class to check against
Returns
-------
A list of all subnodes of type 'cls'
"""
# Get a list of all the components
to_nodes = []
for k in sequences.keys():
# It's sufficient to look into one side of the flows ('to' node, k[1])
to_nodes.append(k[1])
subnodes_list = []
for component in to_nodes:
if hasattr(component, 'subnodes'):
# Only get subnodes of type 'cls'
subnodes_per_component = [n for n in component.subnodes if isinstance(n, cls)]
subnodes_list.extend(subnodes_per_component)
return subnodes_list
def get_summed_sequences(sequences_by_tech, prep_elements):
# Put component definitions into one DataFrame - drops 'carrier_tech' information in the keys
base = pd.concat(prep_elements.values())
df = base.loc[:, basic_columns]
sum = sequences_by_tech.sum()
sum.name = 'var_value'
sum_df = sum.reset_index()
# Form helper column for proper merging with component definition
df['carrier_tech'] = df['carrier'] + '-' + df['tech']
summed_sequences = pd.merge(df, sum_df, on=['region', 'carrier_tech'])
# Drop helper column
summed_sequences.drop('carrier_tech', axis=1, inplace=True)
summed_sequences = summed_sequences.loc[summed_sequences['var_name'] != 'storage_content']
summed_sequences['var_unit'] = 'MWh'
return summed_sequences
def get_re_generation(oemoflex_scalars):
renewable_carriers = ['solar', 'wind']
re_generation = pd.DataFrame(columns=oemoflex_scalars.columns)
re_flow = oemoflex_scalars.loc[(oemoflex_scalars['carrier'].isin(renewable_carriers)) &
(oemoflex_scalars['var_name'] == 'flow_out')]
curtailment = oemoflex_scalars.loc[(oemoflex_scalars['carrier'] == 'electricity') &
(oemoflex_scalars['tech'] == 'curtailment') &
(oemoflex_scalars['var_name'] == 'flow_in')]
sum = re_flow.groupby('region').sum() - curtailment.groupby('region').sum()
re_generation['region'] = sum.index
re_generation['carrier'] = 're'
re_generation['type'] = 'none'
re_generation['tech'] = 'none'
re_generation['var_name'] = 're_generation'
re_generation = re_generation.drop('var_value', 1)
re_generation = pd.merge(re_generation, sum['var_value'], on='region')
re_generation['var_unit'] = 'MWh'
return re_generation
def get_transmission_losses(oemoflex_scalars):
r"""Calculates losses_forward losses_backward for each link."""
def gross_minus_net_flow(direction):
flow_gross = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == f'flow_gross_{direction}'].set_index('name')
flow_net = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == f'flow_net_{direction}'].set_index('name')
loss = flow_gross.copy()
loss['var_name'] = f'loss_{direction}'
loss['var_value'] = flow_gross['var_value'] - flow_net['var_value']
return loss
losses = []
for direction in ['forward', 'backward']:
loss = gross_minus_net_flow(direction)
losses.append(loss)
losses = pd.concat(losses)
losses = losses.reset_index()
return losses
def get_storage_losses(oemoflex_scalars):
storage_data = oemoflex_scalars.loc[
oemoflex_scalars['type'].isin(['storage', 'asymmetric storage'])
]
flow_in = storage_data.loc[storage_data['var_name'] == 'flow_in'].set_index('name')
flow_out = storage_data.loc[storage_data['var_name'] == 'flow_out'].set_index('name')
losses = flow_in.copy()
losses['var_name'] = 'loss'
losses['var_value'] = flow_in['var_value'] - flow_out['var_value']
losses = losses.reset_index()
return losses
def get_reservoir_losses(oemoflex_scalars):
reservoir_data = oemoflex_scalars.loc[
oemoflex_scalars['type'].isin(['reservoir'])
]
flow_in = reservoir_data.loc[reservoir_data['var_name'] == 'flow_in'].set_index('name')
flow_out = reservoir_data.loc[reservoir_data['var_name'] == 'flow_out'].set_index('name')
flow_inflow = reservoir_data.loc[reservoir_data['var_name'] == 'flow_inflow'].set_index('name')
losses = flow_in.copy()
losses['var_name'] = 'losses'
losses['var_value'] = flow_inflow['var_value'] - (flow_out['var_value'] - flow_in['var_value'])
losses = losses.reset_index()
return losses
def aggregate_storage_capacities(oemoflex_scalars):
storage = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['storage_capacity', 'storage_capacity_invest'])].copy()
# Make sure that values in columns used to group on are strings and thus equatable
storage[basic_columns] = storage[basic_columns].astype(str)
storage = storage.groupby(by=basic_columns, as_index=False).sum()
storage['var_name'] = 'storage_capacity_sum'
storage['var_value'] = storage['var_value'] * 1e-3 # MWh -> GWh
storage['var_unit'] = 'GWh'
charge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_charge', 'capacity_charge_invest'])]
charge = charge.groupby(by=basic_columns, as_index=False).sum()
charge['var_name'] = 'capacity_charge_sum'
charge['var_unit'] = 'MW'
discharge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_discharge', 'capacity_discharge_invest'])]
discharge = discharge.groupby(by=basic_columns, as_index=False).sum()
discharge['var_name'] = 'capacity_discharge_sum'
discharge['var_unit'] = 'MW'
return pd.concat([storage, charge, discharge])
def aggregate_other_capacities(oemoflex_scalars):
capacities = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity', 'invest'])
].copy()
# Make sure that values in columns used to group on are strings and thus equatable
capacities[basic_columns] = capacities[basic_columns].astype(str)
capacities = capacities.groupby(by=basic_columns, as_index=False).sum()
capacities['var_name'] = 'capacity_sum'
capacities['var_unit'] = 'MW'
return capacities
def get_emissions(oemoflex_scalars, scalars_raw):
    emissions = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'cost_emission'].copy()
    if emissions.empty:
        logging.info("No key 'cost_emission' found to calculate 'emissions'.")
        return None
price_emission = get_parameter_values(scalars_raw, 'Energy_Price_CO2')
emissions['var_value'] *= 1/price_emission
emissions['var_name'] = 'emissions'
emissions['var_unit'] = 'tCO2'
return emissions
def map_link_direction(oemoflex_scalars):
r"""Swaps name and region for backward flows of links."""
backward = (
(oemoflex_scalars['type'] == 'link') &
(oemoflex_scalars['var_name'].str.contains('backward'))
)
def swap(series, delimiter):
return series.str.split(delimiter).apply(lambda x: delimiter.join(x[::-1]))
def drop_regex(series, regex):
return series.str.replace(regex, '', regex=True)
oemoflex_scalars.loc[backward, 'name'] = swap(oemoflex_scalars.loc[backward, 'name'], '-')
oemoflex_scalars.loc[backward, 'region'] = swap(oemoflex_scalars.loc[backward, 'region'], '_')
oemoflex_scalars.loc[:, 'var_name'] = drop_regex(
oemoflex_scalars.loc[:, 'var_name'], '.backward|.forward'
)
return oemoflex_scalars
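# Illustrative sketch (added for clarity, not part of the original pipeline): the nested
# `swap`/`drop_regex` helpers above reverse the delimited parts of a backward link entry
# and strip the direction suffix from 'var_name'. The sample values below are hypothetical.
def _example_link_direction_swap():
    names = pd.Series(['AT-DE'])
    var_names = pd.Series(['flow_net_backward'])
    swapped = names.str.split('-').apply(lambda x: '-'.join(x[::-1]))      # -> 'DE-AT'
    cleaned = var_names.str.replace('.backward|.forward', '', regex=True)  # -> 'flow_net'
    return swapped, cleaned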
def map_to_flexmex_results(oemoflex_scalars, flexmex_scalars_template, mapping, scenario):
mapping = mapping.set_index('Parameter')
flexmex_scalars = flexmex_scalars_template.copy()
oemoflex_scalars = oemoflex_scalars.set_index(['region', 'carrier', 'tech', 'var_name'])
oemoflex_scalars.loc[oemoflex_scalars['var_unit'] == 'MWh', 'var_value'] *= 1e-3 # MWh to GWh
for i, row in flexmex_scalars.loc[flexmex_scalars['UseCase'] == scenario].iterrows():
try:
select = mapping.loc[row['Parameter'], :]
except KeyError:
continue
try:
value = oemoflex_scalars.loc[
(row['Region'],
select['carrier'],
select['tech'],
select['var_name']), 'var_value']
except KeyError:
logging.info(
f"No key "
f"{(row['Region'], select['carrier'], select['tech'], select['var_name'])}"
f"found to be mapped to FlexMex."
)
continue
if isinstance(value, float):
flexmex_scalars.loc[i, 'Value'] = np.around(value)
flexmex_scalars.loc[:, 'Modell'] = 'oemof'
return flexmex_scalars
def get_varom_cost(oemoflex_scalars, prep_elements):
r"""
Calculates the VarOM cost by multiplying consumption by marginal cost.
Which value is taken as consumption depends on the actual technology type.
Parameters
----------
oemoflex_scalars
prep_elements
Returns
-------
"""
varom_cost = []
for prep_el in prep_elements.values():
if 'marginal_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] == 'excess':
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
elif prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_electricity']
elif prep_el['type'][0] in ['link', 'electrical line']:
net_flows = ['flow_net_forward', 'flow_net_backward']
flow = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(net_flows)]
flow = flow.groupby(basic_columns, as_index=False).sum()
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_out']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['marginal_cost']
df['var_name'] = 'cost_varom'
varom_cost.append(df)
varom_cost = pd.concat(varom_cost)
varom_cost['var_unit'] = 'Eur'
return varom_cost
def get_carrier_cost(oemoflex_scalars, prep_elements):
carrier_cost = []
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_fuel']
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['carrier_cost']
df['var_name'] = 'cost_carrier'
carrier_cost.append(df)
if carrier_cost:
carrier_cost = pd.concat(carrier_cost)
else:
carrier_cost = pd.DataFrame(carrier_cost)
carrier_cost['var_unit'] = 'Eur'
return carrier_cost
def get_fuel_cost(oemoflex_scalars, prep_elements, scalars_raw):
r"""
Re-calculates the fuel costs from the carrier costs if there are CO2 emissions.
Bypass for non-emission carriers (cost_carrier = cost_fuel).
Having emissions or not is determined by the parameter mapping dict (emission_factor).
TODO Let's think about using the 'flow' values as input because this way we could
generalize the structure with get_varom_cost() and get_emission_cost() into one function
for all 'flow'-derived values.
Parameters
----------
oemoflex_scalars
prep_elements
scalars_raw
Returns
-------
"""
fuel_cost = pd.DataFrame()
# Iterate over oemof.tabular components (technologies)
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
# Set up a list of the current technology's elements
df = prep_el.loc[:, basic_columns]
# Select carriers from the parameter map
carrier_name = prep_el['carrier'][0]
parameters = FlexMex_Parameter_Map['carrier'][carrier_name]
# Only re-calculate if there is a CO2 emission
if 'emission_factor' in parameters.keys():
price_carrier = get_parameter_values(scalars_raw, parameters['carrier_price'])
price_emission = get_parameter_values(scalars_raw, parameters['co2_price'])\
* get_parameter_values(scalars_raw, parameters['emission_factor'])
factor = price_carrier / (price_carrier + price_emission)
# Otherwise take the carrier cost value for the fuel cost
else:
factor = 1.0
df = get_calculated_parameters(df, oemoflex_scalars, 'cost_carrier', factor)
# Update other columns
df['var_name'] = 'cost_fuel'
df['var_unit'] = 'Eur'
# Append current technology elements to the return DataFrame
fuel_cost = pd.concat([fuel_cost, df])
return fuel_cost
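# Minimal numeric sketch (hypothetical values) of the split applied in get_fuel_cost():
# with a raw carrier price of 20 Eur/MWh and an emission surcharge of 5 Eur/MWh
# (CO2 price times emission factor), cost_fuel = cost_carrier * 20 / (20 + 5),
# i.e. a factor of 0.8.
def _example_fuel_cost_factor(price_carrier=20.0, price_emission=5.0):
    return price_carrier / (price_carrier + price_emission)  # -> 0.8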
def get_emission_cost(oemoflex_scalars, prep_elements, scalars_raw):
r"""
Re-calculates the emission costs from the carrier costs if there are CO2 emissions.
    The structure differs only slightly from get_fuel_cost() (an additional else branch):
    if a carrier has no emissions, the cost is set to zero instead of the fuel cost.
Parameters
----------
oemoflex_scalars
prep_elements
scalars_raw
Returns
-------
"""
emission_cost = pd.DataFrame()
# Iterate over oemof.tabular components (technologies)
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
# Set up a list of the current technology's elements
df = prep_el.loc[:, basic_columns]
# Select carriers from the parameter map
carrier_name = prep_el['carrier'][0]
parameters = FlexMex_Parameter_Map['carrier'][carrier_name]
# Only re-calculate if there is a CO2 emission
if 'emission_factor' in parameters.keys():
price_carrier = get_parameter_values(scalars_raw, parameters['carrier_price'])
price_emission = get_parameter_values(scalars_raw, parameters['co2_price']) \
* get_parameter_values(scalars_raw, parameters['emission_factor'])
factor = price_emission / (price_carrier + price_emission)
df = get_calculated_parameters(df, oemoflex_scalars, 'cost_carrier', factor)
else:
df['var_value'] = 0.0
# Update other columns
df['var_name'] = 'cost_emission'
df['var_unit'] = 'Eur'
# Append current technology elements to the return DataFrame
emission_cost = pd.concat([emission_cost, df])
return emission_cost
def get_calculated_parameters(df, oemoflex_scalars, parameter_name, factor):
r"""
Takes the pre-calculated parameter 'parameter_name' from
'oemoflex_scalars' DataFrame and returns it multiplied by 'factor' (element-wise)
with 'df' as a template
Parameters
----------
df
output template DataFrame
oemoflex_scalars
DataFrame with pre-calculated parameters
parameter_name
parameter to manipulate
factor
factor to multiply parameter with
Returns
-------
"""
calculated_parameters = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == parameter_name].copy()
if calculated_parameters.empty:
logging.info("No key '{}' found as input"
"for postprocessing calculation.".format(parameter_name))
# Make sure that values in columns to merge on are strings
# See here:
# https://stackoverflow.com/questions/39582984/pandas-merging-on-string-columns-not-working-bug
calculated_parameters[basic_columns] = calculated_parameters[basic_columns].astype(str)
df = pd.merge(
df, calculated_parameters,
on=basic_columns
)
df['var_value'] = df['var_value'] * factor
return df
def get_invest_cost(oemoflex_scalars, prep_elements, scalars_raw):
invest_cost = pd.DataFrame()
for prep_el in prep_elements.values():
# In the following line: Not 'is'! pandas overloads operators!
if 'expandable' in prep_el.columns and prep_el['expandable'][0] == True: # noqa: E712, E501 # pylint: disable=C0121
# element is expandable --> 'invest' values exist
df = prep_el[basic_columns]
tech_name = prep_el['tech'][0]
parameters = FlexMex_Parameter_Map['tech'][tech_name]
interest = get_parameter_values(
scalars_raw,
'EnergyConversion_InterestRate_ALL') * 1e-2 # percent -> 0...1
# Special treatment for storages
if tech_name in ['h2_cavern', 'liion_battery']:
# Charge device
capex = get_parameter_values(scalars_raw, parameters['charge_capex'])
lifetime = get_parameter_values(scalars_raw, parameters['charge_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_charge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_charge_invest',
annualized_cost)
# Discharge device
capex = get_parameter_values(scalars_raw, parameters['discharge_capex'])
lifetime = get_parameter_values(scalars_raw, parameters['discharge_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_discharge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_discharge_invest',
annualized_cost)
# Storage cavern
capex = get_parameter_values(scalars_raw,
parameters['storage_capex']) * 1e-3 # €/MWh -> €/GWh
lifetime = get_parameter_values(scalars_raw, parameters['storage_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_storage = get_calculated_parameters(df, oemoflex_scalars,
'storage_capacity_invest',
annualized_cost)
df = pd.concat([df_charge, df_discharge, df_storage])
# Sum the 3 amounts per storage, keep indexes as columns
df = df.groupby(by=basic_columns, as_index=False).sum()
else:
capex = get_parameter_values(scalars_raw, parameters['capex'])
lifetime = get_parameter_values(scalars_raw, parameters['lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df = get_calculated_parameters(df, oemoflex_scalars, 'invest', annualized_cost)
df['var_name'] = 'cost_invest'
df['var_unit'] = 'Eur'
invest_cost = pd.concat([invest_cost, df])
return invest_cost
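# Note (assumption): `annuity` is taken to behave like oemof.tools.economics.annuity, i.e.
# annuity(capex, n, wacc) = capex * (wacc * (1 + wacc)**n) / ((1 + wacc)**n - 1).
# Minimal sketch with hypothetical numbers: 1000 Eur/MW capex, 20 a lifetime, 5 % interest.
def _example_annualized_cost(capex=1000.0, n=20, wacc=0.05):
    return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1)  # ~80.2 Eur/MW per year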
def get_fixom_cost(oemoflex_scalars, prep_elements, scalars_raw):
fixom_cost = pd.DataFrame()
for prep_el in prep_elements.values():
# not 'is'! pandas overloads operators!
if 'expandable' in prep_el.columns and prep_el['expandable'][0] == True: # noqa: E712, E501 # pylint: disable=C0121
# element is expandable --> 'invest' values exist
df = prep_el[basic_columns]
tech_name = prep_el['tech'][0]
parameters = FlexMex_Parameter_Map['tech'][tech_name]
# Special treatment for storages
if tech_name in ['h2_cavern', 'liion_battery']:
# One fix cost factor for all sub-components
fix_cost_factor = get_parameter_values(
scalars_raw, parameters['fixom']) * 1e-2 # percent -> 0...1
# Charge device
capex = get_parameter_values(scalars_raw, parameters['charge_capex'])
df_charge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_charge_invest',
fix_cost_factor * capex)
# Discharge device
capex = get_parameter_values(scalars_raw, parameters['discharge_capex'])
df_discharge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_discharge_invest',
fix_cost_factor * capex)
# Storage cavern
capex = get_parameter_values(scalars_raw,
parameters['storage_capex']) * 1e-3 # €/MWh -> €/GWh
df_storage = get_calculated_parameters(df, oemoflex_scalars,
'storage_capacity_invest',
fix_cost_factor * capex)
df = pd.concat([df_charge, df_discharge, df_storage])
# Sum the 3 amounts per storage, keep indexes as columns
df = df.groupby(by=basic_columns, as_index=False).sum()
else:
capex = get_parameter_values(scalars_raw, parameters['capex'])
fix_cost_factor = get_parameter_values(
scalars_raw, parameters['fixom']) * 1e-2 # percent -> 0...1
df = get_calculated_parameters(df, oemoflex_scalars,
'invest',
fix_cost_factor * capex)
df['var_name'] = 'cost_fixom'
df['var_unit'] = 'Eur'
fixom_cost = pd.concat([fixom_cost, df])
return fixom_cost
def aggregate_by_country(df):
if not df.empty:
aggregated = df.groupby(['region', 'var_name', 'var_unit']).sum()
aggregated['name'] = 'energysystem'
aggregated['carrier'] = 'ALL'
aggregated['tech'] = 'ALL'
aggregated['type'] = 'ALL'
aggregated = aggregated.reset_index()
return aggregated
return None
def get_total_system_cost(oemoflex_scalars):
cost_list = ['cost_varom', 'cost_fuel', 'cost_invest', 'cost_emission']
df = oemoflex_scalars.loc[oemoflex_scalars['var_name'].isin(cost_list)]
total_system_cost = pd.DataFrame(columns=oemoflex_scalars.columns)
total_system_cost.loc[0, 'var_name'] = 'total_system_cost'
total_system_cost.loc[0, 'var_value'] = df['var_value'].sum()
total_system_cost['carrier'] = 'ALL'
total_system_cost['tech'] = 'ALL'
total_system_cost['region'] = 'ALL'
total_system_cost['var_unit'] = 'Eur'
return total_system_cost
def save_flexmex_timeseries(sequences_by_tech, scenario, model, year, dir):
for carrier_tech in sequences_by_tech.columns.unique(level='carrier_tech'):
try:
components_paths = map_output_timeseries[carrier_tech]
except KeyError:
logging.info(f"No entry found in {path_map_output_timeseries} for '{carrier_tech}'.")
continue
idx = pd.IndexSlice
for var_name, subdir in components_paths.items():
df_var_value = sequences_by_tech.loc[:, idx[:, carrier_tech, var_name]]
for region in df_var_value.columns.get_level_values('region'):
filename = os.path.join(
dir,
subdir,
'_'.join([scenario, model, region, year]) + '.csv'
)
single_column = df_var_value.loc[:, region]
single_column = single_column.reset_index(drop=True)
single_column.columns = single_column.columns.droplevel('carrier_tech')
remaining_column_name = list(single_column)[0]
single_column.rename(columns={remaining_column_name: 'value'}, inplace=True)
single_column.index.name = 'timeindex'
single_column.to_csv(filename, header=True)
delete_empty_subdirs(dir)
def sum_transmission_flows(sequences_by_tech):
idx = pd.IndexSlice
try:
flow_net_fw = sequences_by_tech. \
loc[:, idx[:, 'electricity-transmission', 'flow_net_forward']]
flow_net_bw = sequences_by_tech. \
loc[:, idx[:, 'electricity-transmission', 'flow_net_backward']]
except KeyError:
return None
flow_net_fw = flow_net_fw.rename(columns={'flow_net_forward': 'flow_net_sum'})
flow_net_bw = flow_net_bw.rename(columns={'flow_net_backward': 'flow_net_sum'})
flow_net_sum = flow_net_fw - flow_net_bw
return flow_net_sum
def aggregate_re_generation_timeseries(sequences_by_tech):
idx = pd.IndexSlice
# Sum flow_out sequences from renewable energies
renewable_techs = ['wind-offshore', 'wind-onshore', 'solar-pv']
df_renewable = sequences_by_tech.loc[:, idx[:, renewable_techs, 'flow_out']]
df_renewable_sum = df_renewable.groupby(['region'], axis=1).sum()
df_renewable_sum.columns = pd.MultiIndex.from_product(
[list(df_renewable_sum.columns), ['energysystem'], ['re_generation']],
names=['region', 'carrier_tech', 'var_name']
)
    # Subtract curtailment
df_curtailment = sequences_by_tech.loc[:, (slice(None), 'electricity-curtailment')]
df_curtailment.columns = df_renewable_sum.columns
df_re_generation = df_renewable_sum.sub(df_curtailment, axis=0)
return df_re_generation
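# Illustrative sketch (hypothetical data): the IndexSlice selections above assume that the
# columns of sequences_by_tech form a 3-level MultiIndex (region, carrier_tech, var_name).
def _example_multiindex_selection():
    cols = pd.MultiIndex.from_tuples(
        [('DE', 'solar-pv', 'flow_out'), ('DE', 'electricity-curtailment', 'flow_in')],
        names=['region', 'carrier_tech', 'var_name'])
    df = pd.DataFrame([[1.0, 0.1], [2.0, 0.2]], columns=cols).sort_index(axis=1)
    # Select all regions for the 'solar-pv' tech and the 'flow_out' variable
    return df.loc[:, pd.IndexSlice[:, ['solar-pv'], 'flow_out']]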
def export_bus_sequences(es, destination):
if not os.path.exists(destination):
os.mkdir(destination)
bus_results = pp.bus_results(es, es.results)
for key, value in bus_results.items():
if value.empty:
continue
file_path = os.path.join(destination, key + '.csv')
value.to_csv(file_path)
def log_solver_time_to_file(meta_results, path):
r"""Log solver time from oemof.outputlib.processing.meta_results() to a log file in 'path'"""
sys_time = meta_results['solver']['System time'] # equals 'Total time (CPU seconds)' in stdout
wc_time = meta_results['solver']['Wallclock time']
user_time = meta_results['solver']['User time'] # Always -1 so far
time = meta_results['solver']['Time'] # Not clear what this means
output_path = os.path.join(path, 'solver_time.csv')
df = pd.DataFrame(
{'system_time': [sys_time],
'wallclock_time': [wc_time],
'user_time': [user_time],
'time': [time],
})
df.to_csv(output_path, index=False)
def log_problem_metrics_to_file(meta_results, path):
r"""Log a number of solver metrics from oemof.outputlib.processing.meta_results()
to a log file in 'path'"""
no_of_constraints = meta_results['problem']['Number of constraints']
no_of_vars = meta_results['problem']['Number of variables']
no_of_nonzeros = meta_results['problem']['Number of nonzeros']
output_path = os.path.join(path, 'problem_metrics.csv')
df = pd.DataFrame(
{'constraints': [no_of_constraints],
'vars': [no_of_vars],
'nonzeros': [no_of_nonzeros],
})
df.to_csv(output_path, index=False)
def run_postprocessing(scenario_specs, exp_paths):
create_postprocessed_results_subdirs(exp_paths.results_postprocessed)
# load raw data
scalars_raw = load_scalar_input_data(scenario_specs, exp_paths.data_raw)
# load scalars templates
flexmex_scalars_template = pd.read_csv(os.path.join(exp_paths.results_template, 'Scalars.csv'))
flexmex_scalars_template = flexmex_scalars_template.loc[
flexmex_scalars_template['UseCase'] == scenario_specs['scenario']
]
# load mapping
mapping = pd.read_csv(os.path.join(path_mappings, 'mapping-output-scalars.csv'))
# Load preprocessed elements
prep_elements = load_elements(os.path.join(exp_paths.data_preprocessed, 'data', 'elements'))
# restore EnergySystem with results
es = EnergySystem()
es.restore(exp_paths.results_optimization)
log_solver_time_to_file(es.meta_results, exp_paths.logging_path)
log_problem_metrics_to_file(es.meta_results, exp_paths.logging_path)
# format results sequences
sequences_by_tech = get_sequences_by_tech(es.results)
flow_net_sum = sum_transmission_flows(sequences_by_tech)
sequences_by_tech = pd.concat([sequences_by_tech, flow_net_sum], axis=1)
df_re_generation = aggregate_re_generation_timeseries(sequences_by_tech)
sequences_by_tech = pd.concat([sequences_by_tech, df_re_generation], axis=1)
oemoflex_scalars = pd.DataFrame(
columns=[
'region',
'name',
'type',
'carrier',
'tech',
'var_name',
'var_value',
'var_unit'
]
)
# then sum the flows
summed_sequences = get_summed_sequences(sequences_by_tech, prep_elements)
    oemoflex_scalars = pd.concat([oemoflex_scalars, summed_sequences])
import numpy as np
import pandas as pd
from src.Utils.Graphs import *
from src.Utils.Fitness import *
from os import path,mkdir
class Performances:
def __init__(self,algorithmList,criterionList,nbItem,objectiveNames=[]):
self.algorithmList = algorithmList
self.criterionList = criterionList
self.objectiveNames = objectiveNames
self.leaderBoard = np.zeros((len(algorithmList),len(objectiveNames)),dtype=float)
self.nbItem = nbItem
self.Init()
def InitScores(self):
self.columnsScores = ['algorithm'] + self.objectiveNames
        self.scores = pd.DataFrame(columns=self.columnsScores)
from __future__ import print_function
import unittest
from unittest import mock
import datetime
import six
import warnings
from collections import defaultdict
import pandas as pd
import numpy as np
from . import utils
from .. import test_utils
from dataprofiler.profilers import DateTimeColumn
from dataprofiler.profilers.profiler_options import DateTimeOptions
# This is taken from: https://github.com/rlworkgroup/dowel/pull/36/files
# undo when cpython#4800 is merged.
unittest.case._AssertWarnsContext.__enter__ = test_utils.patched_assert_warns
class TestDateTimeColumnProfiler(unittest.TestCase):
def setUp(self):
utils.set_seed(seed=0)
@staticmethod
def _generate_datetime_data(date_format):
gen_data = []
for i in range(50):
start_date = pd.Timestamp(1950, 7, 14)
end_date = pd.Timestamp(2020, 7, 14)
date_sample = utils.generate_random_date_sample(
start_date, end_date, [date_format]
)
gen_data.append(date_sample)
return pd.Series(gen_data)
def _test_datetime_detection_helper(self, date_formats):
for date_format in date_formats:
# generate a few samples for each date format
gen_data = self._generate_datetime_data(date_format)
# Test to see if the format and col type is detected correctly.
datetime_profile = DateTimeColumn(gen_data.name)
datetime_profile.update(gen_data)
self.assertEqual(date_format, datetime_profile.date_formats[0])
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = DateTimeColumn(data.name)
profiler.update(data)
profiler.update(data) # intentional to validate no changes if empty
self.assertEqual(profiler.match_count, 0)
self.assertIsNone(profiler.min)
self.assertIsNone(profiler.max)
self.assertListEqual([], profiler.date_formats)
self.assertIsNone(profiler.data_type_ratio)
def test_profiled_date_time_formats(self):
"""
Checks whether the profiler properly determines all datetime formats.
:return:
"""
date_formats_1 = [
"%Y-%m-%d %H:%M:%S", # 2013-03-5 15:43:30
"%Y-%m-%dT%H:%M:%S", # 2013-03-6T15:43:30
"%Y-%m-%dT%H:%M:%S.%fZ", # 2013-03-6T15:43:30.123456Z
"%m/%d/%y %H:%M", # 03/10/13 15:43
"%m/%d/%Y %H:%M", # 3/8/2013 15:43
"%Y%m%dT%H%M%S", # 2013036T154330
"%H:%M:%S.%f", # 05:46:30.258509
]
df_1 = pd.Series([], dtype=object)
for date_format in date_formats_1:
# generate a few samples for each date format
df_1 = pd.concat(
[df_1, self._generate_datetime_data(date_format)]
)
date_formats_2 = [
"%Y-%m-%d", # 2013-03-7
"%m/%d/%Y", # 3/8/2013
"%m/%d/%y", # 03/10/13
"%B %d, %Y", # March 9, 2013
"%b %d, %Y", # Mar 11, 2013
"%d%b%y", # 12Mar13
"%b-%d-%y", # Mar-13-13
"%m%d%Y", # 03142013
]
df_2 = pd.Series([], dtype=object)
for date_format in date_formats_2:
# generate a few samples for each date format
df_2 = pd.concat(
[df_2, self._generate_datetime_data(date_format)]
)
date_formats_all = date_formats_1 + date_formats_2
df_all = pd.concat([df_1, df_2])
datetime_profile = DateTimeColumn(df_all.name)
datetime_profile.update(df_all)
six.assertCountEqual(self,
date_formats_all,
set(datetime_profile.date_formats))
# Test chunks
datetime_profile = DateTimeColumn(df_1.name)
datetime_profile.update(df_1)
six.assertCountEqual(self,
date_formats_1,
set(datetime_profile.date_formats))
datetime_profile.update(df_2)
six.assertCountEqual(self,
date_formats_all,
datetime_profile.date_formats)
def test_profiled_min(self):
def date_linspace(start, end, steps):
delta = (end - start) / steps
increments = list(range(0, steps)) * np.array([delta] * steps)
return start + increments
df = pd.core.series.Series(
date_linspace(datetime.datetime.min, datetime.datetime.max, 11)
)
df = df.apply(
lambda x: x - datetime.timedelta(microseconds=x.microsecond)
).apply(str)
datetime_profile = DateTimeColumn(df[1:].name)
datetime_profile.update(df[1:])
self.assertEqual(datetime_profile.min, df.iloc[1])
datetime_profile.update(df)
self.assertEqual(datetime_profile.min, df.iloc[0])
datetime_profile.update(pd.Series([np.nan, df.iloc[3]]))
self.assertEqual(datetime_profile.min, df.iloc[0])
datetime_profile.update(df[1:2]) # only way to keep as df
self.assertEqual(datetime_profile.min, df.iloc[0])
def test_profiled_max(self):
def date_linspace(start, end, steps):
delta = (end - start) / steps
increments = list(range(0, steps)) * np.array([delta] * steps)
return start + increments
df = pd.core.series.Series(
date_linspace(datetime.datetime.min, datetime.datetime.max, 11)
)
df = df.apply(
lambda x: x - datetime.timedelta(microseconds=x.microsecond)
).apply(str)
datetime_profile = DateTimeColumn(df[:-1].name)
datetime_profile.update(df[:-1])
self.assertEqual(datetime_profile.max, df.iloc[-2])
datetime_profile.update(df)
self.assertEqual(datetime_profile.max, df.iloc[-1])
datetime_profile.update(pd.Series([np.nan, df.iloc[3]]))
self.assertEqual(datetime_profile.max, df.iloc[-1])
datetime_profile.update(df[1:2]) # only way to keep as df
self.assertEqual(datetime_profile.max, df.iloc[-1])
def test_date_time_detection(self):
"""
Tests if get_datetime_params is able to detect the date time cols
correctly
:return:
"""
date_formats = [
"%Y-%m-%d %H:%M:%S", # 2013-03-5 15:43:30
"%Y-%m-%dT%H:%M:%S", # 2013-03-6T15:43:30
"%Y-%m-%dT%H:%M:%S.%fZ", # 2013-03-6T15:43:30.123456Z
"%m/%d/%y %H:%M", # 03/10/13 15:43
"%m/%d/%Y %H:%M", # 3/8/2013 15:43
"%Y%m%dT%H%M%S", # 2013036T154330
"%H:%M:%S.%f" # 05:46:30.258509
]
self._test_datetime_detection_helper(date_formats)
def test_date_time_detection_without_time(self):
"""
Tests if get_datetime_params is able to detect the date cols correctly
:return:
"""
date_formats = [
"%Y-%m-%d", # 2013-03-7
"%m/%d/%Y", # 3/8/2013
"%m/%d/%y", # 03/10/13
"%B %d, %Y", # March 9, 2013
"%b %d, %Y", # Mar 11, 2013
"%d%b%y", # 12Mar13
"%b-%d-%y", # Mar-13-13
"%m%d%Y", # 03142013
]
self._test_datetime_detection_helper(date_formats)
def test_data_ratio(self):
data = [
2.5, 12.5, '2013-03-5 15:43:30', 5, '03/10/13 15:43', 'Mar 11, 2013'
]
df = pd.Series(data).apply(str)
profiler = DateTimeColumn(df.name)
self.assertEqual(profiler.data_type_ratio, None)
profiler.update(df)
self.assertEqual(profiler.data_type_ratio, 0.5)
profiler.update(pd.Series([None, '10/20/13', 'nan']))
self.assertEqual(profiler.data_type_ratio, 4/9.0)
def test_profile(self):
data = [
2.5, 12.5, '2013-03-10 15:43:30', 5, '03/10/13 15:43',
'Mar 11, 2013'
]
df = pd.Series(data).apply(str)
profiler = DateTimeColumn(df.name)
expected_profile = dict(
min='03/10/13 15:43',
max='Mar 11, 2013',
histogram=None,
format=[
'%Y-%m-%d %H:%M:%S',
"%m/%d/%y %H:%M",
"%b %d, %Y",
],
times=defaultdict(float, {'datetime': 1.0})
)
time_array = [float(i) for i in range(4, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), profiler.profile['times'])
# Validate the time in the datetime class has the expected time.
profiler.update(df)
expected = defaultdict(float, {'datetime': 1.0})
self.assertEqual(expected, profiler.profile['times'])
profile = profiler.profile
self.assertCountEqual(expected_profile, profile)
# Validate time in datetime class has expected time after second
# update
profiler.update(df)
expected = defaultdict(float, {'datetime': 2.0})
self.assertEqual(expected, profiler.profile['times'])
def test_warning_for_bad_dates(self):
df = pd.Series(['03/10/2013 15:43'])
profiler = DateTimeColumn(df.name)
with warnings.catch_warnings(record=True) as w:
profiler.update(df)
self.assertEqual(len(w), 0)
df = pd.Series(['03/10/13 15:43'])
with self.assertWarns(RuntimeWarning) as r_warning:
profiler.update(df)
self.assertEqual(
str(r_warning.warning),
"Years provided were in two digit format. As a result, "
"datetime assumes dates < 69 are for 2000s and above "
"are for the 1990s. "
"https://stackoverflow.com/questions/37766353/"
"pandas-to-datetime-parsing-wrong-year"
)
def test_add(self):
# unique format for the first profile
data1 = [
"2013-03-5 15:43:30",
"2013-03-6T15:43:30",
"2013-03-6T15:43:30.123456Z",
"03/10/2013 15:43",
"3/8/2013 15:43",
"%2013036T154330",
"05:46:30.258509",
]
df = pd.Series(data1).apply(str)
profile1 = DateTimeColumn(df.name)
profile1.update(df)
# unique format for second profile
data2 = [
2.5, 12.5, '2013-03-10 15:23:20', 5, '03/10/2013 15:23',
'Mar 12, 2013'
]
df = pd.Series(data2).apply(str)
profile2 = DateTimeColumn(df.name)
profile2.update(df)
merged_profile = profile1 + profile2
# checks for _dt_objs
min_dt_obj = datetime.datetime.strptime('05:46:30.258509',
'%H:%M:%S.%f')
max_dt_obj = datetime.datetime.strptime('2013-03-12', '%Y-%m-%d')
self.assertEqual(min_dt_obj, merged_profile._dt_obj_min)
self.assertEqual(max_dt_obj, merged_profile._dt_obj_max)
# checks for the proper max and min to be merged
self.assertEqual('05:46:30.258509', merged_profile.min)
self.assertEqual('Mar 12, 2013', merged_profile.max)
# checks for date format merge
self.assertCountEqual(
['%Y-%m-%d %H:%M:%S', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%fZ',
'%m/%d/%Y %H:%M', '%H:%M:%S.%f', '%b %d, %Y'],
merged_profile.date_formats)
# Checks for DateTimeColumn type for argument
with self.assertRaises(TypeError) as exc:
profile2 = "example_string"
profile1 + profile2
self.assertEqual(str(exc.exception),
"Unsupported operand type(s) for +: "
"'DateTimeColumn' and '{}'"
.format(profile2.__class__.__name__))
def test_null_add(self):
# initialize the profiles
dates = [None, "2014-12-18", "2015-07-21"]
df = pd.Series(dates)
df_nulls = df[:1]
df_dates = df[1:]
profile1 = DateTimeColumn(name="date")
profile2 = DateTimeColumn(name="date")
profile1.update(df_nulls)
profile2.update(df_dates)
# test when first profile has the nulls
merged_profile = profile1 + profile2
self.assertEqual("2014-12-18", merged_profile.min)
self.assertEqual("2015-07-21", merged_profile.max)
# test when second profile has the nulls
merged_profile = profile2 + profile1
self.assertEqual("2014-12-18", merged_profile.min)
self.assertEqual("2015-07-21", merged_profile.max)
def test_datetime_column_with_wrong_options(self):
with self.assertRaisesRegex(ValueError,
"DateTimeColumn parameter 'options' must be"
" of type DateTimeOptions."):
profiler = DateTimeColumn("Datetime", options="wrong_data_type")
def test_diff(self):
data1 = [None, 'Mar 12, 2013', "2013-05-18", "2014-03-01"]
        df1 = pd.Series(data1)
import pandas as pd
from tqdm import tqdm
import requests
import numpy as np
import sys
from typing import *
from time import sleep
class _UniProtClient:
def __init__(self, base_url):
self._base_url = base_url
@staticmethod
def _query(query_string) -> str:
for i in range(10):
try:
response = requests.get(query_string)
return response.text
except ConnectionResetError:
sleep(i*10)
@staticmethod
def _response2dictlist(response_string) -> List[dict]:
header_row = response_string.split("\n")[0]
header_items = header_row.split("\t")
r_dict_list = []
for line in response_string.split("\n")[1:]:
if not line:
continue
line_items = line.split("\t")
assert len(header_items) == len(line_items), (header_items, line_items)
r_dict_list.append(dict(zip(header_items, line_items)))
return r_dict_list
@staticmethod
def _chunkwise(iterables, chunk_size):
for i in range(0, len(iterables), chunk_size):
chunk = iterables[i:i + chunk_size]
yield chunk
class UniProtMapper(_UniProtClient):
def __init__(self, from_id: str, to_id: str):
"""For mapping of protein IDs to another ID. Uses UniProt API.
        For valid parameters see: https://www.uniprot.org/help/api_idmapping
Parameters
----------
from_id: origin ID string
to_id: target ID string
Examples
________
        gi2uniprotmapping = UniProtMapper("P_GI", "ACC")  # This class maps from GI numbers to UniProt accessions
"""
super().__init__("https://www.uniprot.org/uploadlists/")
self._from_id = from_id
self._to_id = to_id
self._data_format = "tab"
def map_protein_ids(self, protein_list: List[str], chunk_size: int = 500) -> pd.DataFrame:
final_dict_list = []
pbar = tqdm(total=len(protein_list))
try:
for chunk in self._chunkwise(protein_list, chunk_size):
chunklist = "+".join(chunk)
server_query = f"?from={self._from_id}&to={self._to_id}&format={self._data_format}&query={chunklist}"
req = "".join([self._base_url, server_query])
server_response = self._query(req)
server_response_formatted = self._response2dictlist(server_response)
final_dict_list.extend(server_response_formatted)
pbar.update(len(chunk))
finally:
pbar.close()
valid_mappings = pd.DataFrame(final_dict_list)
invalid_ids = set(protein_list) - set(valid_mappings["From"].unique())
invalid_mapping = pd.DataFrame()
invalid_mapping["From"] = sorted(invalid_ids)
invalid_mapping["To"] = np.nan
return pd.concat([valid_mappings, invalid_mapping])
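# Usage sketch (requires network access; the GI numbers below are hypothetical examples):
def _example_map_protein_ids():
    mapper = UniProtMapper("P_GI", "ACC")
    # Unmappable IDs come back with NaN in the 'To' column
    return mapper.map_protein_ids(["71159358", "224586929"])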
def simple_name_from(long_name):
""" Extracts primary name from uniprot string containing all names.
    Additional names are given in brackets and parentheses.
"""
out = []
buffer = []
in_bracket = 0
in_square_bracket = 0
for letter in long_name:
if letter == "(":
in_bracket += 1
buffer.append(letter)
elif letter == ")":
in_bracket -= 1
buffer.append(letter)
elif letter == "[":
in_square_bracket += 1
buffer.append(letter)
elif letter == "]":
in_square_bracket -= 1
buffer.append(letter)
else:
# If not in bracket
if in_bracket == 0 and in_square_bracket == 0:
if letter == " ":
buffer.append(letter)
elif buffer:
out.extend(buffer)
buffer = []
out.append(letter)
else:
out.append(letter)
else:
buffer.append(letter)
assert in_bracket == 0
assert in_square_bracket == 0
return "".join(out)
class UniProtProteinInfo(_UniProtClient):
""" A class for information retrieval about proteins form UniProt.
"""
def __init__(self, column_list: Optional[List[str]] = None):
"""
Parameters
----------
column_list: strings of column identifiers. [1]
References
__________
[1] https://www.uniprot.org/help/uniprotkb%5Fcolumn%5Fnames
"""
super().__init__("https://www.uniprot.org/uniprot/")
if column_list is None:
column_list = ["id", "entry_name", "protein_names", "families", "organism", "ec", "genes(PREFERRED)",
"go(molecular_function)"]
column_list = [self._reformat_column_string(col_id, lower=False) for col_id in column_list]
if "id" not in column_list:
column_list.append("id")
self.columns = ",".join(column_list)
@staticmethod
def _reformat_column_string(column_name: str, lower=True) -> str:
""" A cheap and hacky string formatting procedure replacing spaces with underscores"""
column_name_reformat = column_name
while " " in column_name_reformat:
column_name_reformat = column_name_reformat.replace(" ", " ")
column_name_reformat = column_name_reformat.replace(" (", "(")
column_name_reformat = column_name_reformat.replace(" )", ")")
column_name_reformat = column_name_reformat.replace(" ", "_")
if lower:
column_name_reformat = column_name_reformat.lower()
return column_name_reformat
def load_protein_info(self, protein_list: List[str], chunk_size: int = 200, sleeptime=0) -> pd.DataFrame:
final_dict_list = []
with tqdm(total=len(protein_list)) as p_bar:
for protein_chunk in self._chunkwise(protein_list, chunk_size):
joined_proteins = "+OR+accession:".join(protein_chunk)
server_query = f"?query=accession:{joined_proteins}&format=tab&columns={self.columns}"
req = "".join([self._base_url, server_query])
server_response = self._query(req)
server_response_formatted = self._response2dictlist(server_response)
final_dict_list.extend(server_response_formatted)
p_bar.update(len(protein_chunk))
sleep(sleeptime)
        valid_entry_df = pd.DataFrame(final_dict_list)
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
notna,
)
import pandas._testing as tm
def test_expanding_corr(series):
A = series.dropna()
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(series):
result = series.expanding(min_periods=0).count()
tm.assert_almost_equal(
result, series.rolling(window=len(series), min_periods=0).count()
)
def test_expanding_quantile(series):
result = series.expanding().quantile(0.5)
rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
    tm.assert_almost_equal(result, rolling_result)
# -*- coding: utf-8 -*-
import boto3
from datetime import datetime
from urllib.request import urlopen
from io import StringIO, BytesIO
import io
import numpy as np
import pandas as pd
import plotly.express as px
import requests
import xml.etree.ElementTree as ET
import time
from zipfile import ZipFile
s3 = boto3.resource('s3')
# historical data and other variables
orig_col_list = ['DEM', 'OTHER', 'REP', 'NET_DEM', 'WINNER']
# get florida data
fl_hist = 'https://en2020.s3.amazonaws.com/florida_hist.csv'
fl_hist_df = pd.read_csv(fl_hist, index_col=0)
# get PA data
penn_hist = 'https://en2020.s3.amazonaws.com/penn_hist.csv'
penn_hist_df = pd.read_csv(penn_hist, index_col=0)
# get MI data
mich_hist = 'https://en2020.s3.amazonaws.com/mich_hist.csv'
mich_hist_df = pd.read_csv(mich_hist, index_col=0)
# get NC data
ncar_hist = 'https://en2020.s3.amazonaws.com/ncar_hist.csv'
ncar_hist_df = pd.read_csv(ncar_hist, index_col=0)
# update FL url on election night
url_FL_live = "https://flelectionfiles.floridados.gov/enightfilespublic/20201103_ElecResultsFL.txt"
url_FL = 'https://en2020.s3.amazonaws.com/FL_data_live.txt'
# https://www.electionreturns.pa.gov/ElectionFeed/ElectionFeed
url_PA_live = 'https://electionreturns.pa.gov/electionFeed.aspx?ID=29&FeedName=2020+General+Election+by+County'
url_PA = 'https://en2020.s3.amazonaws.com/PA_data_live.xml'
# https://www.michigan.gov/sos/0,4670,7-127-1633_8722---,00.html
url_MI_live = 'https://mielections.us/election/results/DATA/2016GEN_MI_CENR_BY_COUNTY.xls'
url_MI = 'https://en2020.s3.amazonaws.com/MI_data_live.txt'
# no need to update NC - this is it!!
url_NC_live = 'http://dl.ncsbe.gov/ENRS/2020_11_03/results_pct_20201103.zip'
url_NC = 'https://en2020.s3.amazonaws.com/NC_data_live.txt'
request_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"}
# function strips white space from downloaded text file
def read_csv_regex(data, date_columns=[]):
df = pd.read_csv(data, sep="\t", quotechar='"', parse_dates=date_columns)
# remove front and ending blank spaces
df = df.replace({"^\s*|\s*$":""}, regex=True)
# if there remained only empty string "", change to Nan
df = df.replace({"":np.nan})
return df
def county_winner_EN(row):
if row['Proj_Dem_Vote_Total'] > row['Proj_Rep_Vote_Total']:
val = 'Biden'
else:
val = 'Trump'
return val
# clean and condense historical vote data
def condense_third_parties(row):
if row['PartyCode'] == 'REP':
val = 'REP'
elif row['PartyCode'] == 'DEM':
val = 'DEM'
else:
val = 'OTHER'
return val
def county_winner(row):
if row['DEM'] > row['REP']:
val = 1
else:
val = 2
return val
def rename_columns(cols, suffix):
new = []
for i in range(len(cols)):
new.append(cols[i] + suffix)
# print(cols[i])
return new
def update_FL(input_url):
resp = requests.get(input_url, headers=request_header)
with open('/tmp/FL_data_live.txt', 'wb') as f:
f.write(resp.content)
s3.Bucket('en2020').put_object(Key='FL_data_live.txt', Body=resp.content, ACL='public-read')
election_night_data_FL = read_csv_regex(url_FL, ['ElectionDate'])
# write FL data from election feed to s3
# csv_buffer_FL_live = StringIO()
# election_night_data_FL.to_csv(csv_buffer_FL_live)
# s3.Object('en2020', 'FL_data_live.txt').put(Body=csv_buffer_FL_live.getvalue(), ACL='public-read')
return election_night_data_FL
def update_MI(input_url):
resp = requests.get(input_url, headers=request_header)
# first write the MI data to s3, then read the file that was just written
# different than FL method since the data feed posts a XLS file that is actually a text file, which is weird
# so this needs to write the text somewhere (similar to how we handle XML in PA) then read it
with open('MI_data_live.txt', 'wb') as f:
f.write(resp.content)
s3.Bucket('en2020').put_object(Key='MI_data_live.txt', Body=resp.content, ACL='public-read')
election_night_data_MI = read_csv_regex(url_MI, ['ElectionDate'])
# write FL data from election feed to s3
# csv_buffer_FL_live = StringIO()
# election_night_data_FL.to_csv(csv_buffer_FL_live)
# s3.Object('en2020', 'FL_data_live.txt').put(Body=csv_buffer_FL_live.getvalue(), ACL='public-read')
return election_night_data_MI
def update_NC(input_url):
resp = requests.get(input_url, headers=request_header)
NC_filename = 'results_pct_20201103.txt'
buffer = ZipFile(BytesIO(resp.content))
s3.meta.client.upload_fileobj(
buffer.open(NC_filename),
Bucket='en2020',
Key='NC_data_live.txt',
ExtraArgs={'ACL':'public-read'}
)
election_night_data_NC = read_csv_regex(url_NC, ['Election Date'])
return election_night_data_NC
def update_PA(input_url):
resp = requests.get(input_url)
# create archive version of xml file
with open('PA_data_live.xml', 'wb') as f:
f.write(resp.content)
s3.Bucket('en2020').put_object(Key='PA_data_live.xml', Body=resp.content, ACL='public-read')
root = ET.fromstring(resp.content)
# root = tree.getroot()
# create data frames for extracts from XML feed for PA
EN_extract_columns = ['RaceCode','CandidateName','PartyCode','ElectionDayVotes','MailVotes',
'ProvisionalVotes','CanVotes','PctVote','CountyName']
    EN_extract_df = pd.DataFrame(columns=EN_extract_columns)
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
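# Note: positional assignment through .iloc (including augmented
# assignment as in GH5771 above) modifies the selected positions in
# place without realigning on the index labels.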
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
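# Note: assigning values of the same dtype through .loc preserves the
# column dtype (uint32 above), while assigning values of another dtype
# lets the column dtype be re-inferred; the frame used on the rhs (df2)
# is left unchanged by the assignment.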
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive list of cases
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# these now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
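# Note: label slices through .loc are inclusive of both endpoints and,
# on a sorted integer index, bounds that are absent from the index are
# handled like the .ix cases above rather than raising KeyError.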
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
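# Note: .loc never falls back to positional indexing; asking for labels
# that are entirely missing raises KeyError, and (per GH 7999) this is
# consistent between df.loc[[3]] and df.loc[[3], :].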
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
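# Note: .at/.iat are the scalar fast paths of .loc/.iloc; .at is strictly
# label-based, so positional lookups such as s.at[0] on a string-labelled
# Series raise instead of silently falling back to position.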
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# TODO: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non-unique index with a non-unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
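# Note: .iloc refuses a boolean Series as a mask because its index could
# silently align (or fail to align) with the frame, whereas a plain
# boolean ndarray of matching length is accepted, as checked just above.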
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because the index is non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique index)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
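# Note: assigning an astype()'d selection back through .iloc/.loc changes
# the stored dtype of only the assigned columns, leaving the remaining
# columns untouched, as the expected frames above spell out.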
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
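# Note: setting a row label that does not yet exist via .loc enlarges the
# frame (much like an append) and aligns the rhs on the columns; a
# list-like rhs must match the number of columns and, in an empty frame,
# ends up coerced to float as shown above.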
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
# tm.assert_frame_equal(y,expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
def test_set_ix_out_of_bounds_axis_0(self):
df = pd.DataFrame(
randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
df = pd.DataFrame(
randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_index_type_coercion(self):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
self.assertTrue(s.index.is_integer())
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(indexer(s2)[0.1] == 0)
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
for s in [Series(range(5), index=np.arange(5.))]:
self.assertTrue(s.index.is_floating())
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(idxr(s2)[0.1] == 0)
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
self.assertEqual(s.at[el], item)
for i in range(len(s)):
self.assertEqual(s.iat[i], i + 1)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
# right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer:
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
df2 = df.ix[[], :]
self.assertEqual(df2.loc[:, 'a'].dtype, np.int64)
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
df = pd.DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
self.assertEqual(result, expected)
result = _maybe_numeric_slice(df, None, include_bool=True)
expected = pd.IndexSlice[:, ['A', 'C']]
result = _maybe_numeric_slice(df, [1])
expected = [1]
self.assertEqual(result, expected)
class TestSeriesNoneCoercion(tm.TestCase):
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_setitem_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = | Series(start_data) | pandas.core.api.Series |
import os
import torch
import onnxruntime as ort
import pandas as pd
import numpy as np
import time
import torch.nn.functional as F
import onnx
import getpass
from transformers import AutoTokenizer
import pyarrow.parquet as pq
from glob import glob
import argparse
import logging
import socket
import multiprocessing as mp
from functools import partial
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
print('libs loaded')
parser = argparse.ArgumentParser()
parser.add_argument("--input_path", type=str, help="path to input data")
parser.add_argument("--output_path", type=str, help="path where inference csv is saved")
parser.add_argument("--country_code", type=str, help="path where inference csv is saved")
parser.add_argument("--iteration_number", type=int)
args = parser.parse_args()
print(args)
####################################################################################################################################
# HELPER FUNCTIONS
####################################################################################################################################
# inference
def get_tokens(tokens_dict, i):
i_tokens_dict = dict()
for key in ['input_ids', 'token_type_ids', 'attention_mask']:
i_tokens_dict[key] = tokens_dict[key][i]
tokens = {name: np.atleast_2d(value) for name, value in i_tokens_dict.items()}
return tokens
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
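# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# chunkIt splits a sequence into `num` roughly equal slices, e.g. to spread work
# across processes; the values below are an assumed toy example:
#
#   >>> chunkIt(list(range(10)), 3)
#   [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]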
def run_inference(ort_session, tokens):
ort_outs = ort_session.run(None, tokens)
torch_onnx_output = torch.tensor(ort_outs[0], dtype=torch.float32)
onnx_logits = F.softmax(torch_onnx_output, dim=1)
return onnx_logits.detach().cpu().numpy()[0].tolist()
def inference(onnx_model, model_dir, examples):
quantized_str = ''
if 'quantized' in onnx_model:
quantized_str = 'quantized'
# onnx session
options = ort.SessionOptions()
options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
options.intra_op_num_threads = 1
#options.inter_op_num_threads = multiprocessing.cpu_count()
print(onnx_model)
ort_session = ort.InferenceSession(onnx_model, options)
# pytorch pretrained model and tokenizer
if 'bertweet' in onnx_model:
tokenizer = AutoTokenizer.from_pretrained(model_dir, normalization=True)
else:
tokenizer = AutoTokenizer.from_pretrained(model_dir)
tokenizer_str = "TokenizerFast"
print("**************** {} ONNX inference with batch tokenization and with {} tokenizer****************".format(
quantized_str, tokenizer_str))
tokens_dict = tokenizer.batch_encode_plus(examples, max_length=128)
tokens_dict_list = [dict(zip(tokens_dict, t)) for t in zip(*tokens_dict.values())]
with mp.Pool(mp.cpu_count()) as pool:
run_inference_on_tokens = partial(run_inference, ort_session)  # bind the session as the first positional arg so pool.map only supplies the tokens
onnx_inference = pool.map(run_inference_on_tokens, tokens_dict_list)
return onnx_inference
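# --- Illustrative call sketch (hypothetical paths and inputs; not in the original script) ---
# The ONNX model path, tokenizer directory and example texts below are assumptions,
# shown only to make the expected inputs and output of `inference` concrete:
#
#   scores = inference(onnx_model='models/bertweet_quantized.onnx',
#                      model_dir='vinai/bertweet-base',
#                      examples=['some example tweet', 'another tweet'])
#   # -> one softmax probability list per input text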
def get_env_var(varname, default):
if os.environ.get(varname) != None:
var = int(os.environ.get(varname))
print(varname, ':', var)
else:
var = default
print(varname, ':', var, '(Default)')
return var
# Choose Number of Nodes To Distribute Credentials: e.g. jobarray=0-4, cpu_per_task=20, credentials = 90 (<100)
SLURM_ARRAY_TASK_ID = get_env_var('SLURM_ARRAY_TASK_ID', 0)
SLURM_ARRAY_TASK_COUNT = get_env_var('SLURM_ARRAY_TASK_COUNT', 1)
SLURM_JOB_ID = get_env_var('SLURM_JOB_ID', 1)
print('Hostname:', socket.gethostname())
print('SLURM_ARRAY_TASK_ID', SLURM_ARRAY_TASK_ID)
print('SLURM_ARRAY_TASK_COUNT', SLURM_ARRAY_TASK_COUNT)
print('Number of CPUs per task:', mp.cpu_count())
# ####################################################################################################################################
# # loading data
# ####################################################################################################################################
path_to_data = args.input_path
print('Load random Tweets:')
start_time = time.time()
paths_to_random = list(np.array_split(
glob(os.path.join(path_to_data, '*.parquet')),
SLURM_ARRAY_TASK_COUNT)[SLURM_ARRAY_TASK_ID])
print('#files:', len(paths_to_random))
tweets_random = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import csv
#Set the variables in Settings.csv before running the script
dataList = []
with open('Settings.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line = 0
for row in csv_reader:
if line == 1:
timeInterval = row[2]
print("Time Interval = " + str(timeInterval))
csvFileName = row[3]
print("csvFileName = " + str(csvFileName))
if line >= 1 and row[0] != '' and row[1] != '':
dataList.append([row[0], row[1]])
line += 1
print(dataList)
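# --- Assumed Settings.csv layout (inferred from the parsing above; illustrative only) ---
# row 0: header
# rows 1+: column 0 = datetime column name in the data csv, column 1 = data type label
# (flow, depth, vel, rain, ...); row 1 additionally carries column 2 = time interval
# and column 3 = name of the data csv (without extension).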
dfList = []
dataTypeList = []
df = pd.read_csv(csvFileName + '.csv') #Import CSV
for n in dataList: #Run this for every data type (i.e. flow, depth, vel, rain, etc)
dataTypeList.append(n[1]) #Make a list of the data types (flow, velocity, etc)
df[n[0]] = | pd.to_datetime(df[n[0]]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 12:07:51 2020
@author: <NAME>
"""
# Import required packages
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
# %% Filtering the data based on feature index array
def feature_filter(data, feat_index):
# This function takes your X_train & X_test data and keeps only the
# selected features, based on feat_index, which is a boolean indexer.
# If only one data set was passed, put it in a list
if not type(data) == list:
data = [data]
for i in range(len(data)):
# Filter appropriately depending on data type
if type(data[i]) == np.ndarray:
data[i] = data[i][:, feat_index]
else:
data[i] = data[i].iloc[:, feat_index]
# If only only data set was passed, only return one data set
if len(data) == 1:
return data[0]
return data
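# --- Illustrative usage sketch (hypothetical data; not part of the original module) ---
# feat_index is a boolean mask over columns; arrays and DataFrames are filtered alike:
#
#   X_demo = np.arange(12).reshape(3, 4)
#   mask = np.array([True, False, True, False])
#   feature_filter(X_demo, mask).shape            # -> (3, 2)
#   X_tr_f, X_te_f = feature_filter([X_demo, X_demo.copy()], mask)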
# %% Loading & pre-processing of the data
def init(label = 'DILI1', file = 'p7-mcf7-camda2020.csv', return_validation = False,
upsample = False, downsample = False):
if downsample and upsample:
raise Exception("Downsample and upsample cannot both be True!")
# Load the requested input data
if file == 'all':
file = ['p3-phh-camda2020.csv',
'p4-hepg2-camda2020.csv',
'p5-haie-camda2020.csv',
'p6-a375-camda2020.csv',
'p7-mcf7-camda2020.csv',
'p8-pc3-camda2020.csv']
if type(file) == str:
# Load one input file
X = pd.read_csv(f'Data/{file}')
elif type(file) == list:
# Load all requested inputs and merge into one dataframe
X = pd.read_csv(f'Data/{file[0]}')
X = X.add_suffix(f'_{file[0][0:2]}')
X = X.rename(columns = {f'CAM_ID_{file[0][0:2]}' : 'CAM_ID'})
for i in range(1, len(file)):
X = pd.merge(X,
pd.read_csv(f'Data/{file[i]}').add_suffix(f'_{file[i][0:2]}'),
left_on = 'CAM_ID',
right_on = f'CAM_ID_{file[i][0:2]}',
validate = 'one_to_one',
sort = False)
# Remove double sample column
X.drop(f'CAM_ID_{file[i][0:2]}', axis = 1, inplace = True)
# Load the labels of the data
Y = pd.read_csv('Data/targets-camda2020.csv')
# Only keep samples for which training data is available.
Y = Y.loc[Y.CAM_ID.isin(X.CAM_ID)]
# Create identical row order for X & Y
X = X.sort_values('CAM_ID').reset_index(drop = True)
Y = Y.sort_values('CAM_ID').reset_index(drop = True)
# Ensure that X and Y have identical CAM_IDs in each row
if not Y.CAM_ID.equals(X.CAM_ID):
raise Exception('X != Y... make sure the rows of X and Y describe ' +
'the same samples!!')
# Seperate the validation sets
Y_val = Y.loc[Y.Training_Validation != 'Training Set']
X_val = X.loc[X.CAM_ID.isin(Y_val.CAM_ID)]
# Set CAM_ID as rowname
X_val.set_index('CAM_ID', inplace = True, verify_integrity = True)
# Remove validation sets from the test and training sets
Y = Y.loc[Y.Training_Validation == 'Training Set']
X = X.loc[X.CAM_ID.isin(Y.CAM_ID)]
# when upsampling remove the CAM_ID column as there will be duplicates
# which can thus not be used as index. Otherwise set CAM_ID as index
if upsample:
X.drop('CAM_ID',axis = 1, inplace = True)
Y.drop('CAM_ID',axis = 1, inplace = True)
else:
# Turn ID column into rownames
X.set_index('CAM_ID', inplace = True, verify_integrity = True)
Y.set_index('CAM_ID', inplace = True, verify_integrity = True)
# Create the test and training set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size = 0.2)
if upsample == True:
# Up-sample the minority class in the data to balance the classes
# Combine X_train and Y_train in one dataframe
df = pd.concat([X_train, Y_train], axis = 1)
# Separate majority and minority classes
df_majority = df[df.DILI1==0]
df_minority = df[df.DILI1==1]
# Upsample the minority class
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=df_majority.shape[0], # to match majority class
random_state=1) # reproducible results
# Combine majority class with upsampled minority class
df_upsampled = | pd.concat([df_majority, df_minority_upsampled]) | pandas.concat |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import csv
from io import StringIO
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.parsers import TextParser
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = {"index_col": 0}
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
with TextParser(data_list, chunksize=2, **kwargs) as parser:
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = | DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Name: csv_text_solr_search.py
# Description:
#
# Author: m.akei
# Copyright: (c) 2021 by m.na.akei
# Time-stamp: <2021-07-30 18:42:37>
# Licence:
# ----------------------------------------------------------------------
import argparse
import fileinput
import textwrap
import sys
from pathlib import Path
import re
import pandas as pd
import json
import pprint
import datetime
from typing import Union, Tuple, List, Dict, Callable, Any, Optional, Type, NoReturn
from textmining_lib import removeSentences, replaceSentences
from csv_text_tfidf import get_words_0, read_extend_word
from csv_text_solr import communicateSlor, trim_sentence
VERSION = 1.0
# EXTENDED_PREFIX = "_ext"
F_indent_text = lambda x: textwrap.fill(x, width=130, initial_indent="\t", subsequent_indent="\t")
def init():
arg_parser = argparse.ArgumentParser(description="",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''
remark:
using '--extend_word_file', the bag of words of a sentence is extended with additional words.
this option may be useful when the configuration of devices is important.
format of 'extend word file': each line is 'word word1,word2'.
for searching, see the query parameters:
The Standard Query Parser | Apache Solr Reference Guide 8.9 https://solr.apache.org/guide/8_9/the-standard-query-parser.html
Query format (only for '--search_detail*'):
boosting: word^float
ranging : [ V1 TO V2 ], { V1 TO V2 ]
only for '--search_ex', the following options have an effect:
'--user_dictionary', '--minmum_length_of_word', '--remove_words', '--remove_pattern', '--replace_pattern_file',and '--extend_word_file'
NOTE: in search keywords, single quotation and double quotation have different meanings.
the string enclosed in double quotes is used for the search as a full match.
example:
cat <<EOF > rep_pat.txt
s/《[^》]*》//
s/(?i)GHI/KLM/
EOF
cat <<EOF > tfidf_extend_word.txt
# regexp word1,word2,...
書生 学生,学校
原$ 野原
EOF
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_field=content --search "主人" "学生"
csv_text_solr_search.py --core wagahaiwa_nekodearu --extend_word_file=tfidf_extend_word.txt --search_field=content --search "主人" "学生" \\
--search_ex "よく主人の所へ遊びに来る馬鹿野郎"
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_detail="date_local:\"01-12\""
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_detail="content:主人"
cat <<EOF > search_detail.query
content:"主人が熱心"
content:学生
EOF
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_detail_file=search_detail.query
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_detail='content:"猫"^0.1 content:"吾輩"^2' --search_limit=10
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_detail='date:[2021-12-31T00:00:00Z TO *]' --search_limit=10
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_detail='date:[NOW-1MONTH/DAY TO * ]' --search_limit=10
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_detail="content:書生 content:\+騒々しい"
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_limit=10 --search_detail='content:書生 content:見る'
csv_text_solr_search.py --core wagahaiwa_nekodearu --search_limit=10 --search_detail='content:書生 !content:見る'
'''))
arg_parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(VERSION))
arg_parser.add_argument("--host",
dest="HOST",
help="ip address of host, default=127.0.0.1",
type=str,
metavar='IP_ADDRESS',
default="127.0.0.1")
arg_parser.add_argument("--port", dest="PORT", help="port number, default=8983", type=int, metavar='PORT', default=8983)
arg_parser.add_argument("--core", dest="CORE", help="name of core of solr", type=str, metavar='NAME', required=True)
arg_parser.add_argument("--extend_columns",
dest="EX_COLUMN",
help="decompose sentence and extend words",
type=str,
metavar="FIELD[,FIELD..]",
default=None)
arg_parser.add_argument("--user_dictionary", dest="UDICT", help="path of user dictionary", type=str, metavar='FILE', default="")
arg_parser.add_argument("--minmum_length_of_word",
dest="MLENGTH",
help="minimum length of word, default=2",
type=int,
metavar='INT',
default=2)
arg_parser.add_argument("--remove_words",
dest="RWORDS",
help="list of words to remove, default='。,、,?,.,\,,?'",
type=list,
metavar='WORDS[,WORD,...]',
default="。,、,?,.,\,,?")
arg_parser.add_argument("--remove_pattern",
dest="RPATTERN",
help="regex pattern to remove before analyzing, or path of a file containing such patterns",
type=str,
metavar='REGEX_OR_FILE',
default="")
arg_parser.add_argument("--replace_pattern_file",
dest="REP_FILE",
help="path of file that has regex pattern to replace before analyzing",
type=str,
metavar='FILE',
default="")
arg_parser.add_argument("--extend_word_file",
dest="EXT_FILE",
help="path of file with regex patterns and words to add when deriving words",
type=str,
metavar='FILE',
default=None)
arg_parser.add_argument("--search", dest="SEARCH", help="sentence(s) to search", type=str, metavar='STR', nargs="+", default=None)
arg_parser.add_argument("--search_field", dest="SEARCH_FIELD", help="field to search", type=str, metavar='FIELD', default=None)
arg_parser.add_argument(
"--search_ex",
dest="SEARCH_EX",
help=
"sentence(s) to search with exntending by '--user_ditctionary','--remove_words','--remove_pattern','--replace_pattern_File','--extended_word_file'",
type=str,
metavar='STR',
nargs="+",
default=None)
# arg_parser.add_argument("--search_morelikethis", dest="SEARCH_MLT", help="enable morelkethis", action="store_true", default=False)
arg_parser.add_argument("--search_limit",
dest="SEARCH_LIMIT",
help="limit of search results, default=50",
type=int,
metavar='INT',
default=50)
arg_parser.add_argument("--search_operator",
dest="SEARCH_OP",
help="operator to search, default='OR'",
choices=["OR", "AND"],
default="OR")
arg_parser.add_argument("--search_detail",
dest="SEARCH_DETAIL",
help="detail search for each field",
type=str,
metavar='FIELD:KEYWORD[,FIELD:KEYWORD..]',
default=None)
arg_parser.add_argument("--search_detail_file",
dest="SEARCH_DETAIL_FILE",
help="path of file that has detail search queries for each field",
type=str,
metavar="FILE",
default=None)
arg_parser.add_argument("--search_output",
dest="SEARCH_OUT",
help="path of csv file to store result, default=stdout",
type=str,
metavar="CSV_FILE",
default=sys.stdout)
arg_parser.add_argument('csv_files', metavar='CSV_FILE', nargs='*', help='csv files to read. if empty, stdin is used')
args = arg_parser.parse_args()
return args
def search_sentences_on_solr(com_solr: communicateSlor,
field: str,
search_sentences: List[str],
search_ex_sentences: List[str],
search_detail: List[str],
remove_pattern_c=None,
replace_pattern_c=None,
min_length=2,
user_dict=None,
remove_words=None,
extend_words=None,
operator=None,
morelikethis=False,
limit_of_hits=50) -> Tuple[pd.DataFrame, int, float]:
"""Search the Solr core with the given sentences/queries and return the hits.
:param com_solr: instance of the communicateSlor class
:param field: Solr field to search
:param search_sentences: list of sentences to search for
:param search_ex_sentences: list of sentences that are tokenized (wakati) before searching
:param search_detail: search queries in Solr query syntax
:param remove_pattern_c: instance of the removeSentences class
:param replace_pattern_c: instance of the replaceSentences class
:param min_length: minimum length of words to keep
:param user_dict: path of the MeCab user dictionary file
:param remove_words: list of words to remove after tokenization
:param extend_words: dictionary of additional words for each tokenized word
:param operator: logical operator joining the search sentences (OR or AND)
:returns: DataFrame, number of hit records, maximum score
"""
q_str = []  # list of search terms for the target field
if search_sentences is not None:
q_str.extend(search_sentences)
if search_ex_sentences is not None:
for sts in search_ex_sentences:
trimed_sentence = trim_sentence(sts,
remove_pattern_c=remove_pattern_c,
replace_pattern_c=replace_pattern_c,
min_length=min_length,
user_dict=user_dict,
remove_words=remove_words,
extend_words=extend_words)
if isinstance(trimed_sentence, list):
q_str.extend(trimed_sentence)
else:
q_str.append(trimed_sentence)
if len(q_str) == 0 and len(search_detail) == 0:
print(f"#warn:csv_text_solr_search:search:no query", file=sys.stderr)
df = pd.DataFrame()
nhits = 0
max_score = 0
else:
q_str = list(set(q_str))
if search_detail is not None:
q_mes = F_indent_text(f"{q_str},{search_detail}")
else:
q_mes = F_indent_text(f"{q_str}")
print(f"%inf:csv_text_solr_search:search:query strings:\n{q_mes}", file=sys.stderr)
res = com_solr.search(field, q_str, detail=search_detail, operator=operator, morelikethis=morelikethis, limit=limit_of_hits)
result = json.loads(res.data)
if morelikethis:
print(f"#warn:csv_text_solr_search:'--search_morelikethis' is not implemented.", file=sys.stderr)
pass
# docs = []
# max_score = []
# nhits = []
# for _, result in result["moreLikeThis"].items():
# docs.extend(result["docs"])
# max_score.append(result["maxScore"])
# nhits.append(result["numFound"])
else:
docs = result["response"]["docs"]
max_score = result["response"]["maxScore"]
nhits = result["response"]["numFound"]
# start_no = result["response"]["start"]
df = | pd.DataFrame(docs) | pandas.DataFrame |
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras.layers import Conv3D, Conv2D
from tensorflow.keras.layers import ConvLSTM2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras import losses
import numpy as np
import pandas as pd
import random
import pandasql as ps
import pickle
from scipy.stats import entropy
from numpy import percentile
import tensorflow.keras as keras
import gc
########## Create ConvLSTM network ##############
from tensorflow.keras.layers import LayerNormalization
def create_model(pixel,filters,channel,hiddenlayers = 4):
seq = Sequential()
#seq.add(BatchNormalization(trainable=False))
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
input_shape=(None, pixel, pixel, channel),
padding='same', return_sequences=True))#activation = 'tanh', recurrent_activation = 'tanh')),activation = 'elu'
#seq.add(BatchNormalization(trainable=False))
for layer in range(hiddenlayers-1):
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
padding='same', return_sequences=True))# activation = 'tanh', recurrent_activation = 'tanh'))
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
padding='same', return_sequences=False)) #activation = 'tanh', recurrent_activation = 'tanh'))
seq.add(Conv2D(filters=1, kernel_size=(3, 3),
activation='elu',
padding='same', data_format='channels_last'))
#seq.add(BatchNormalization(trainable=False))
seq.compile(loss='mean_squared_error', optimizer='adam',metrics=['mae'])
return seq
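# --- Illustrative instantiation sketch (hypothetical sizes; not in the original script) ---
# The grid size, filter count and channel count below are assumptions, chosen only to
# show the expected 5D input (samples, time steps, rows, cols, channels):
#
#   demo_model = create_model(pixel=16, filters=32, channel=4, hiddenlayers=4)
#   # demo_model.predict(x) expects x of shape (n_samples, n_days, 16, 16, 4)
#   # and returns one (16, 16, 1) frame per sample.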
import pandas as pd
import statsmodels.formula.api as sm
def get_localdist(trainX,spatialboundary,ST,boundmargin,span,channel):
trainx_dist = []
for day in range(span):
if day <= boundmargin:
_trainx_dist = trainX[0:ST,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
elif day >= span - boundmargin-1:
_trainx_dist = trainX[span-ST:span,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
else:
_trainx_dist = trainX[day-boundmargin:day+boundmargin+1,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
_trainx_dist = _trainx_dist.reshape(ST**3,channel)
_trainx_dist = np.std(_trainx_dist, axis = 0)
trainx_dist.append(_trainx_dist)
trainx_dist = np.array(trainx_dist)
return (trainx_dist)
def get_localranddist(trainx_dist,span,channel,spatial):
randomlist = np.array(random.sample(range(-5, 5), span))[::,np.newaxis]
for j in range(1,channel):
if j in spatial:
a = random.randint(-5,5)
_randomlist = np.array([a for i in range(10)])[::,np.newaxis]
else:
_randomlist = np.array(random.sample(range(-5, 5), span))[::,np.newaxis]
randomlist = np.concatenate((randomlist,_randomlist),axis = 1)
randomlist[randomlist == 0 ] =1
return (trainx_dist/randomlist)
import statsmodels.api as sm
def run_ST_lime_pixel(model,trainX,trainx_dist,samp,span,channel,spatial,ST,r,c,channellist,incubation):
trainx = []
trainy = []
#print(r,c)
incubation_span = span - incubation
for i in range(samp):
rand_trainx_dist = get_localranddist(trainx_dist,span,channel,spatial)
_trainx = pickle.loads(pickle.dumps(trainX , -1))
#if (r,c) == (5,6):
# print(_trainx[::,r,c,4])
temp = _trainx[::,r,c,::]+rand_trainx_dist
rand_trainx_dist[np.where((temp <0) | (temp >1) )] = rand_trainx_dist[np.where((temp <0) | (temp >1) )] * -1
_trainx[(incubation_span - ST):incubation_span,r,c,channellist] = _trainx[(incubation_span - ST):incubation_span,r,c,channellist]+rand_trainx_dist[(incubation_span - ST):incubation_span,channellist]
#print(_trainx[::,r,c,4])
for C in spatial:
_trainx[::,::,::,C] = _trainx[incubation_span-1,::,::,C]
_trainy = model.predict(_trainx[np.newaxis,::,::,::,::])
_trainy = _trainy[0,::,::,0]
trainx.append(_trainx)
trainy.append(_trainy)
trainx = np.array(trainx)[::,::,r,c,::]
#print(trainx[::,::,4].shape)
trainy = np.array(trainy)[::,r,c]
traindata = pd.DataFrame()
for C in channellist:
if C in spatial:
traindata['C'+str(C)] = trainx[::,span-1,C].flatten()
else:
for T in range(incubation+1,incubation+ST+1):
traindata['C'+str(C)+'_T'+str(T)] = trainx[::,span-T,C].flatten()
traindata['Y'] = trainy.flatten()
traindata = traindata[traindata.sum(axis=1)>0]
X=list(traindata.columns)
X.remove('Y')
#X.remove('index')
_traindata = pickle.loads(pickle.dumps(traindata,-1))
for x in X:
_traindata[x] = (_traindata[x] - _traindata[x].mean())/_traindata[x].std()
_traindata['Y'] = (_traindata['Y'] - _traindata['Y'].mean())/_traindata['Y'].std()
try:
res = sm.OLS(_traindata['Y'],_traindata[X]).fit()
except:
print(channellist)
print(traindata.iloc[0]) #trainx[::,span-4,4].flatten()) #trainx[::,span-1,2].flatten())
raise
return(res,traindata)
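# --- Illustrative call sketch (hypothetical arguments; not part of the original script) ---
# run_ST_lime_pixel perturbs one pixel's channels over the incubation window, re-runs the
# ConvLSTM, and fits a standardized OLS surrogate, so res.params act as local
# (LIME-style) importances for that pixel; the argument values below are assumptions:
#
#   res, traindata = run_ST_lime_pixel(model, trainX, trainx_dist, samp=500, span=10,
#                                      channel=5, spatial=[1], ST=3, r=5, c=6,
#                                      channellist=[1, 2, 3, 4], incubation=3)
#   # res.params -> per-feature local coefficients; traindata -> the perturbed samples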
import itertools
def run_regression(model,grid,train,train_gridday,frames_grid,exclude_channel = [0],spatial = [1],start=0,ST=3,margin = 4,samp= 500, incubation = 3,offset=10):
trainsamp = []
maxday = max(frames_grid['day'])
span = train.shape[1]
channel = train.shape[-1]
channellist = list(set(range(channel)) - set(exclude_channel))
pix = np.int(np.sqrt(max(frames_grid['pixno'])))
_gridpix = np.flip(np.array(range(1,max(frames_grid['pixno'])+1)).reshape(pix,pix),0)
gridpix = _gridpix[margin:pix-margin,margin:pix-margin].flatten()
allowedgridpix = frames_grid[(frames_grid['no_pat']>10) & (frames_grid['grid'] == grid)].groupby(['grid','pixno'])['day'].count().reset_index()
allowedgridpix = allowedgridpix[allowedgridpix.day > 30 ][['grid','pixno']]
gridpix = np.intersect1d(gridpix,np.array(allowedgridpix['pixno']))
train_xplain = pd.DataFrame()
gridtraindata_xplain= | pd.DataFrame() | pandas.DataFrame |