| prompt | completion | api |
| --- | --- | --- |
| string, lengths 19 to 1.03M | string, lengths 4 to 2.12k | string, lengths 8 to 90 |
#%%
# =============================================================================
# 1. Preprocessing
# =============================================================================
import pandas as pd
from PIL import Image
import numpy as np
class Image_preprocessing(object):
def __init__(self, data, xsize, ysize): # xsize, ysize: desired output size in pixels
self.xsize = xsize
self.ysize = ysize
self.data = data
def get_face(self):
im3 = []
im = Image.open(path +'/'+ self.filename)
im = im.convert('RGB')
im1 = np.asarray(im, dtype = np.uint8)
d1 = self.data[self.data['filename']== self.filename].reset_index()
for i in range(d1.shape[0]):
xmin = d1['xmin'][i]
xmax = d1['xmax'][i]
ymin = d1['ymin'][i]
ymax = d1['ymax'][i]
im2 = im1[ymin:ymax,xmin:xmax,]
im4 = Image.fromarray(im2,'RGB').resize((self.xsize,self.ysize))
im3.append(np.asarray(im4, dtype = np.uint8))
return im3
def get_data(self):
files = self.data['filename'].unique()
faces = []
for j in range(len(files)):
self.filename = files[j]
faces += self.get_face()
return np.array(faces).reshape([len(faces),self.xsize*self.ysize,3])
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
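# Usage sketch for the class above (assumptions: train.csv/test.csv contain
# filename/xmin/xmax/ymin/ymax columns and a global `path` points at the image
# directory, as get_face() expects). Wrapped in a function so nothing runs on import;
# the 64x64 target size is only an illustrative choice.
def build_face_arrays(xsize=64, ysize=64):
    X_train = Image_preprocessing(train, xsize, ysize).get_data()  # shape (n_faces, xsize*ysize, 3)
    X_test = Image_preprocessing(test, xsize, ysize).get_data()
    return X_train, X_test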
# -*- coding: utf-8 -*-
"""AEOPDay5 - Pandas.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1k__HFxCwgEHTHy5bgF6HCM14fqURfJcr
# Part 1: Starting with Data Analysis
"""
import pandas as pd
from google.colab import drive
df = pd.read_csv('survey_results_public.csv')
df.head(10)
#or df.tail(x) to show the other end of the data
#df.shape came up with an error; not sure what is wrong (shape is an attribute, so calling df.shape() would raise a TypeError)
#pd.set_option['display.max_columns', 85]
#the "CallableDynamicDoc object is not subscriptable" error comes from the square brackets; use parentheses as below
pd.set_option('display.max_columns', 85)
pd.set_option('display.max_rows', 85)
### To display all data without ellipses
schema_df= pd.read_csv('survey_results_schema.csv')
schema_df
"""# Part 2: DataFrame and Series Basics"""
import pandas as pd
df = pd.read_csv('survey_results_public.csv')
schema_df = pd.read_csv('survey_results_schema.csv')
pd.set_option('display.max_columns', 85)
pd.set_option('display.max_rows', 85)
df.head()
people = {
"first": ["Corey", "Jane", "John"],
"last": ["Shafer", "Doe", "Doe"],
"email": ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
}
people['email'] #compare to 32, this returns a plain Python list
import pandas as pd
df1 = pd.DataFrame(people)
df1 #prints out the rows and columns with the index on the side
df1['email'] #compare to 30, this returns a pandas Series
type(df1['email'])
df1.email #dot notation is also valid instead of key (bracket) notation,
#but brackets are safer because column names can collide with DataFrame attributes/methods
#to access multiple columns, list the columns you want, returning a dataframe
df1[['last', 'email']]
df1.columns
#how to find rows?
df1.iloc[[0, 1], 2] # integer location; pass an inner list to get multiple rows
# the outer brackets select rows, and columns are referred to by integer position, not by name
# loc also finds rows but gets back dataframe, can use names for columns
df1.loc[[0, 1], ['email', 'last']]
#columns display in order of list
#stack overflow dataset
df.columns
df['Hobbyist'].value_counts()
df.loc[0:2, 'Hobbyist'] #: is slicing and doesn't need brackets
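# Note: .loc slicing is label-based and includes the stop label (rows 0, 1 and 2 above),
# while .iloc slicing is position-based and excludes the stop position.
df.iloc[0:2]['Hobbyist'] # only rows 0 and 1 by position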
"""# Video 3: Indexes - How to Set, Reset, and Use Indexes"""
people = {
"first": ["Corey", "Jane", "John"],
"last": ["Shafer", "Doe", "Doe"],
"email": ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
}
import pandas as pd
df = pd.DataFrame(people)
df
df['email']
df.set_index('email', inplace = True)
df
# sets that column as index, but doesn't modify df original until inplace is used
df.index
df.loc['<EMAIL>', 'last'] #now you can search by email but not by index number
df.iloc[0] #iloc still uses integer location
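# The set/reset round trip (sketch): reset_index moves 'email' back into a regular
# column and restores the default RangeIndex.
df.reset_index(inplace=True)
df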
import pandas as pd
df = pd.read_csv('survey_results_public.csv', index_col = 'Respondent')
schema_df = pd.read_csv('survey_results_schema.csv')
pd.set_option('display.max_columns', 85)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: 东方财富网-数据中心-新股数据-打新收益率
东方财富网-数据中心-新股数据-打新收益率
http://data.eastmoney.com/xg/xg/dxsyl.html
东方财富网-数据中心-新股数据-新股申购与中签查询
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils import demjson
def _get_page_num_dxsyl() -> int:
"""
东方财富网-数据中心-新股数据-打新收益率-总页数
http://data.eastmoney.com/xg/xg/dxsyl.html
:return: 总页数
:rtype: int
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": '1',
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
total_page = data_json["pages"]
return total_page
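# Illustration (assumed payload shape): the endpoint answers with a JS snippet such as
#   ({data:["...","..."],pages:12})
# data_text[1:-1] strips the surrounding parentheses, and demjson is used because the
# object literal has unquoted keys, which json.loads cannot parse.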
def stock_dxsyl_em() -> pd.DataFrame:
"""
东方财富网-数据中心-新股数据-打新收益率
http://data.eastmoney.com/xg/xg/dxsyl.html
:return: 指定市场的打新收益率数据
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
page_num = _get_page_num_dxsyl()
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": str(page),
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json["data"]])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
"-",
]
big_df = big_df[[
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
]]
big_df["发行价"] = pd.to_numeric(big_df["发行价"], errors='coerce')
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["网上-发行中签率"] = pd.to_numeric(big_df["网上-发行中签率"])
big_df["网上-有效申购股数"] = pd.to_numeric(big_df["网上-有效申购股数"])
big_df["网上-有效申购户数"] = pd.to_numeric(big_df["网上-有效申购户数"])
big_df["网上-超额认购倍数"] = pd.to_numeric(big_df["网上-超额认购倍数"])
big_df["网下-配售中签率"] = pd.to_numeric(big_df["网下-配售中签率"])
big_df["网下-有效申购股数"] = pd.to_numeric(big_df["网下-有效申购股数"])
big_df["网下-有效申购户数"] = pd.to_numeric(big_df["网下-有效申购户数"])
big_df["网下-配售认购倍数"] = pd.to_numeric(big_df["网下-配售认购倍数"])
big_df["总发行数量"] = pd.to_numeric(big_df["总发行数量"])
big_df["开盘溢价"] = pd.to_numeric(big_df["开盘溢价"])
big_df["首日涨幅"] = pd.to_nu | meric(big_df["首日涨幅"]) | pandas.to_numeric |
import pytest
import numpy as np
import pandas as pd
from sklearn import clone
from sklearn.preprocessing import LabelEncoder, FunctionTransformer, OneHotEncoder, QuantileTransformer, StandardScaler
from sklearn.preprocessing import PowerTransformer, OrdinalEncoder
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from data_dashboard.transformer import Transformer
from data_dashboard.transformer import WrapperFunctionTransformer
@pytest.mark.parametrize(
("test_func",),
(
(lambda x: x,),
(lambda x: 1,),
(lambda x: x**2,),
)
)
def test_wrapper_func_transformer(test_func):
"""Testing if WrapperFunctionTransformer still has functionality of an underlying FunctionTransformer."""
test_arr = np.array([1, 1, 1, 2, 3, 4, 5]).reshape(-1, 1)
tr = FunctionTransformer(func=test_func)
wrap_tr = WrapperFunctionTransformer("test", clone(tr))
expected_arr = tr.fit_transform(test_arr)
actual_arr = wrap_tr.fit_transform(test_arr)
assert np.array_equal(actual_arr, expected_arr)
assert str(wrap_tr) != str(tr)
@pytest.mark.parametrize(
("test_text",),
(
("Test1",),
("Test2",),
("",),
)
)
def test_wrapper_func_transformer_str(test_text):
"""Testing if str() function of WrapperFunctionTransformer returns text provided as an argument."""
wrap_tr = WrapperFunctionTransformer(test_text, FunctionTransformer())
assert str(wrap_tr) == test_text
def test_transformer_create_preprocessor_X(categorical_features, numerical_features):
"""Testing if X preprocessor correctly assigns steps to columns depending on their type."""
categorical_features.remove("Target")
tr = Transformer(categorical_features, numerical_features, "Categorical")
preprocessor = tr._create_preprocessor_X()
expected_steps = [("numerical", numerical_features), ("categorical", categorical_features)]
actual_steps = [(item[0], item[2]) for item in preprocessor.transformers]
for step in expected_steps:
assert step in actual_steps
assert len(actual_steps) == len(expected_steps)
@pytest.mark.parametrize(
("target_type", "expected_function"),
(
("Categorical", LabelEncoder()),
("Numerical", WrapperFunctionTransformer("test", FunctionTransformer(lambda x: x)))
)
)
def test_transformer_create_preprocessor_y(categorical_features, numerical_features, target_type, expected_function):
"""Testing if y preprocessor is created correctly."""
tr = Transformer(categorical_features, numerical_features, target_type)
preprocessor = tr._create_default_transformer_y()
assert type(preprocessor).__name__ == type(expected_function).__name__
@pytest.mark.parametrize(
("transformed_feature",),
(
("Price",),
("bool",),
("AgeGroup",)
)
)
def test_transformer_preprocessor_X_remainder(
categorical_features, numerical_features, data_classification_balanced, expected_raw_mapping,
transformed_feature
):
"""Testing if feature not declared in either categorical or numerical features passes through unchanged."""
categorical_features.remove("Target")
categorical_features = [f for f in categorical_features if f != transformed_feature]
numerical_features = [f for f in numerical_features if f != transformed_feature]
X = data_classification_balanced[0].drop(["Date"], axis=1)
if transformed_feature in expected_raw_mapping.keys():
X[transformed_feature] = X[transformed_feature].replace(expected_raw_mapping[transformed_feature])
tr = Transformer(categorical_features, numerical_features, "Numerical")
tr.fit(X)
transformed = tr.transform(X)
try:
transformed = transformed.toarray()
except AttributeError:
transformed = transformed
cols = tr.transformed_columns() + [transformed_feature]
actual_result = pd.DataFrame(transformed, columns=cols)
assert np.allclose(actual_result[transformed_feature].to_numpy(), X[transformed_feature].to_numpy(), equal_nan=True)
# checking if there is only one column with transformed_feature (no derivations)
assert sum([1 for col in cols if transformed_feature in col]) == 1
@pytest.mark.parametrize(
("transformed_features",),
(
(["Height", "Price"],),
(["Price", "AgeGroup"],),
)
)
def test_transformer_preprocessor_X_remainder_order(
categorical_features, numerical_features, data_classification_balanced, expected_raw_mapping,
transformed_features
):
"""Testing if remainder portion of ColumnTransformer returns the columns in the expected (alphabetical) order."""
categorical_features.remove("Target")
categorical_features = [f for f in categorical_features if f not in transformed_features]
numerical_features = [f for f in numerical_features if f not in transformed_features]
X = data_classification_balanced[0].drop(["Date"], axis=1)
tr = Transformer(categorical_features, numerical_features, "Numerical")
tr.fit(X)
transformed = tr.transform(X)
try:
transformed = transformed.toarray()
except AttributeError:
transformed = transformed
cols = tr.transformed_columns() + sorted(transformed_features)
actual_result = pd.DataFrame(transformed, columns=cols)
for col in transformed_features:
assert np.allclose(actual_result[col].to_numpy(), X[col].to_numpy(), equal_nan=True)
@pytest.mark.parametrize(
("target_type",),
(
(None,),
("Target",),
("categorical",),
(10,),
(True,),
(np.nan,),
)
)
def test_transformer_create_preprocessor_y_invalid_target_type(categorical_features, numerical_features, target_type):
"""Testing if ._create_preprocessor_y raises an Exception when invalid target_type is provided"""
tr = Transformer(categorical_features, numerical_features, "Categorical") # initiating with proper type
tr.target_type = target_type
with pytest.raises(ValueError) as excinfo:
preprocessor = tr._create_default_transformer_y()
assert "should be Categorical or Numerical" in str(excinfo.value)
@pytest.mark.parametrize(
("feature_name",),
(
("AgeGroup",),
("bool",),
("Product",),
("Sex",),
("Target",),
)
)
def test_transformer_transform_y_categorical(
data_classification_balanced, categorical_features, numerical_features, expected_raw_mapping, feature_name
):
"""Testing if fit_y() and transform_y() are changing provided y correctly (when y is categorical)"""
df = pd.concat([data_classification_balanced[0], data_classification_balanced[1]], axis=1)
target = df[feature_name]
mapping = {key: int(item - 1) for key, item in expected_raw_mapping[feature_name].items()}
mapping[np.nan] = max(mapping.values()) + 1
expected_result = target.replace(mapping).array
tr = Transformer(categorical_features, numerical_features, "Categorical")
actual_result = tr.fit_transform_y(target)
assert np.array_equal(actual_result, expected_result)
@pytest.mark.parametrize(
("feature_name",),
(
("Height",),
("Price",),
)
)
def test_transformer_transform_y_numerical(
data_classification_balanced, categorical_features, numerical_features, feature_name
):
"""Testing if fit_y() and transform_y() are changing provided y correctly (when y is numerical)"""
df = pd.concat([data_classification_balanced[0], data_classification_balanced[1]], axis=1)
target = df[feature_name]
expected_result = target.array
tr = Transformer(categorical_features, numerical_features, "Numerical")
actual_result = tr.fit_transform_y(target)
assert np.allclose(actual_result, expected_result, equal_nan=True)
@pytest.mark.parametrize(
("feature", "classification_pos_label",),
(
("Sex", "Female"),
("Sex", "Male"),
("Target", 0),
("Product", "Apples"),
("Product", "Potato")
)
)
def test_transformer_transform_y_classification_pos_label(
data_classification_balanced, categorical_features, numerical_features, feature, classification_pos_label,
):
"""Testing if transformer correctly changes mappings of y when explicit classification_pos_label is provided."""
df = pd.concat([data_classification_balanced[0], data_classification_balanced[1]], axis=1)
expected_result = df[feature].apply(lambda x: 1 if x == classification_pos_label else 0)
tr = Transformer(
categorical_features, numerical_features, "Categorical", classification_pos_label=classification_pos_label
)
actual_result = tr.fit_transform_y(df[feature])
assert np.array_equal(actual_result, expected_result)
@pytest.mark.parametrize(
("classification_pos_label",),
(
("Fruits",),
("Sweets",),
("Dairy",),
)
)
def test_transformer_transform_y_classification_pos_label_multiclass(
data_multiclass, categorical_features, numerical_features, classification_pos_label,
):
"""Testing if transformer correctly changes mappings of y when explicit classification_pos_label is provided
for multiclass problem (so the mapping changes it to classification problem)."""
y = data_multiclass[1]
mapping = {
"Fruits": 0,
"Sweets": 0,
"Dairy": 0,
classification_pos_label: 1 # overwriting with test choice
}
expected_result = y.replace(mapping)
tr = Transformer(
categorical_features, numerical_features, "Categorical", classification_pos_label=classification_pos_label
)
actual_result = tr.fit_transform_y(y)
assert np.array_equal(actual_result, expected_result)
@pytest.mark.parametrize(
("feature_name", "csr_matrix_flag"),
(
("AgeGroup", True),
("bool", False),
("Product", True),
("Sex", False),
("Target", False),
)
)
def test_transformer_transform_X_categorical(data_classification_balanced, feature_name, csr_matrix_flag):
"""Testing if every categorical column from a test data is transformed correctly."""
df = pd.concat([data_classification_balanced[0], data_classification_balanced[1]], axis=1)
# replacing values for SimpleImputer, which can't handle bool dtype
df["bool"] = df["bool"].replace({False: 0, True: 1})
feature = df[feature_name]
most_frequent = feature.value_counts(dropna=False).index[0]
feature = feature.fillna(most_frequent)
expected_result = OneHotEncoder(handle_unknown="ignore").fit_transform(feature.to_numpy().reshape(-1, 1)).toarray()
tr = Transformer([feature_name], [], "Categorical")
actual_result = tr.fit_transform(pd.DataFrame(df[feature_name]))
# for n > 2 unique values, output is a csr_matrix
if csr_matrix_flag:
actual_result = actual_result.toarray()
assert pd.DataFrame(actual_result).equals(pd.DataFrame(expected_result))
@pytest.mark.parametrize(
("feature_name",),
(
("Height",),
("Price",),
)
)
def test_transformer_transform_X_numerical(data_classification_balanced, feature_name):
"""Testing if every numerical column from a test data is transformed correctly."""
random_state = 1
df = pd.concat([data_classification_balanced[0], data_classification_balanced[1]], axis=1)
feature = df[feature_name]
median = feature.describe()["50%"]
feature = feature.fillna(median).to_numpy().reshape(-1, 1)
feature = QuantileTransformer(output_distribution="normal", random_state=random_state).fit_transform(
feature)
feature = StandardScaler().fit_transform(feature)
expected_result = feature
tr = Transformer([], [feature_name], "Categorical", random_state=random_state)
actual_result = tr.fit_transform(pd.DataFrame(df[feature_name]))
# -*- coding: utf-8 -*-
import pandas_xyz
import unittest
import pandas as pd
import pandas.testing as tm
import numpy as np
def my_func(series_a, time=None, scalar_kwarg=10):
if time is None:
time = pd.Series(range(len(series_a)))
return series_a * time * scalar_kwarg
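# Worked example: for series_a = [1.0, 1.0, 1.0] and the default time = range(3) = [0, 1, 2],
# my_func returns [1*0*10, 1*1*10, 1*2*10] = [0.0, 10.0, 20.0], which is exactly the
# expected_result_vals used by the tests below.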
class TestRegister(unittest.TestCase):
@classmethod
def setUpClass(cls):
pandas_xyz.PositionAccessor._add_series_method(my_func)
# pandas_xyz.PositionAccessor._add_series_method(my_func, kwds='scalar_kwarg')
cls.vals = [1.0, 1.0, 1.0]
cls.expected_result_vals = [0.0, 10.0, 20.0]
@classmethod
def tearDownClass(cls):
delattr(pandas_xyz.PositionAccessor, 'my_func')
def test_api(self):
pass
def test_args(self):
# default column name
for kwargs in [
{},
{'series_a': 'series_a'},
# {'series_a':'ser_a'}
]:
result = pd.DataFrame.from_dict(
{'series_a': self.vals}
).xyz.my_func(**kwargs)
expected = pd.Series(
self.expected_result_vals,
# name=kwargs.get('displacement') or 'displacement'
)
tm.assert_series_equal(result, expected)
# specify alternate column name
result = pd.DataFrame.from_dict(
{'ser_a': self.vals}
).xyz.my_func(series_a='ser_a')
expected = pd.Series(
self.expected_result_vals,
# name=kwargs.get('displacement') or 'displacement'
)
tm.assert_series_equal(result, expected)
def test_raises(self):
# specify wrong column label
with self.assertRaises(KeyError):
result = pd.DataFrame.from_dict({'ser_a': [1,2,3]}).xyz.my_func()
with self.assertRaises(KeyError):
result = pd.DataFrame.from_dict(
{'series_a': [1,2,3]}
).xyz.ds_from_s(series_a='ser_a')
# wrong dtype
with self.assertRaisesRegex(AttributeError, 'numeric'):
pd.DataFrame.from_dict({'series_a': ['a', 'b', 'c']}).xyz.my_func()
def test_opt_args(self):
# ACCIDENTALLY discovered something here (no time kwarg should exist)
# result = pd.DataFrame.from_dict({
# 'series_a': self.vals,
# 'series_b': [3, 3, 3]
# }).xyz.my_func(time='time')
# expected = pd.Series(self.expected_result_vals)
# tm.assert_series_equal(result, expected)
result = pd.DataFrame.from_dict({
'series_a': self.vals,
'series_b': [3, 3, 3]
}).xyz.my_func(time='series_b')
expected = pd.Series([val * 10 * 3 for val in self.vals])
tm.assert_series_equal(result, expected)
def test_scalar_kwarg(self):
result = pd.DataFrame.from_dict(
{'series_a': self.vals}
).xyz.my_func(scalar_kwarg=20)
expected = pd.Series(self.expected_result_vals) * 2
tm.assert_series_equal(result, expected)
class TestMethods(unittest.TestCase):
def test_s_from_ds(self):
# The displacement value at a given index represents the distance
# traveled between the previous index and the current index.
result = pd.DataFrame.from_dict({
'displacement': [3.0, 4.0, 3.0, 5.0],
}).xyz.s_from_ds()
expected = pd.Series([3.0, 7.0, 10.0, 15.0])
# expected = pd.Series([0.0, 3.0, 7.0, 10.0])
tm.assert_series_equal(result, expected)
def test_ds_from_s(self):
result = pd.DataFrame.from_dict({
'distance': [3.0, 7.0, 10.0, 15.0],
}).xyz.ds_from_s()
expected = pd.Series([3.0, 4.0, 3.0, 5.0])
tm.assert_series_equal(result, expected)
def test_s_from_v(self):
# TODO: make a uniform scheme for indexing between-index quantities
# like displacement, speed, and grade.
# The speed at a given index is expected to affect the calculated
# displacement between that index and the next one, and thus affect
# only the distance at the *next* index. The speed at the last index
# does not affect the calculated distances (in this scheme).
#
# There are a bajillion ways to go from speed to displacement and
# vice-versa, this one is just my choice for now.
#
# Main issue for me: speed and displacement schemes are not consistent.
df = pd.DataFrame.from_dict({
'speed': [3.0, 4.0, 3.0, 5.0],
'time': [0, 2, 4, 6]
})
result_notime = df.xyz.s_from_v()
expected = pd.Series([0.0, 3.0, 7.0, 10.0])
tm.assert_series_equal(result_notime, expected)
#!/usr/bin/env python3
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import glob
import json
import os
import sys
from dataclasses import dataclass
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from hwfutils.string_color import color_str_green as green
from hwfutils.string_color import color_str_red as red
from hwfutils.string_color import color_str_yellow as yellow
from scipy import stats
# ------------------------------------------------------------------------------
# Experiment Parameters
# ------------------------------------------------------------------------------
EXPERIMENT_SUFFIX = "%s-%sm"
DURATION_MINS = 1440
# TOPLEVELS = ["FFTSmall", "Sodor3Stage", "TLI2C", "TLPWM", "TLSPI", "TLUART"]
TOPLEVELS = [
"FFTSmall", "Sodor1Stage", "Sodor3Stage", "Sodor5Stage", "TLI2C", "TLPWM",
"TLSPI", "TLUART"
]
TRIALS = range(0, 10)
# ------------------------------------------------------------------------------
# Plot parameters
# ------------------------------------------------------------------------------
LABEL_FONT_SIZE = 8
TICK_FONT_SIZE = 8
LEGEND_FONT_SIZE = 8
LEGEND_TITLE_FONT_SIZE = 8
TIME_SCALE = "h"
SCALED_MAX_PLOT_TIME = 24
PLOT_FORMAT = "PDF"
PLOT_FILE_NAME = "hwf_vs_rfuzz_woseeds_%dmin_broken.%s" % (DURATION_MINS,
PLOT_FORMAT.lower())
# ------------------------------------------------------------------------------
# Plot labels
# ------------------------------------------------------------------------------
TIME_LABEL = "Time"
TOPLEVEL_LABEL = "Core"
FUZZER_LABEL = "Fuzzer"
COVERAGE_TYPE_LABEL = "Coverage"
COVERAGE_LABEL = "Cov. (%)"
HW_LINE_COVERAGE_LABEL = "HW Line (VLT)"
# ------------------------------------------------------------------------------
# Other Labels
# ------------------------------------------------------------------------------
TEST_ID_LABEL = "Test-ID"
# ------------------------------------------------------------------------------
# Other defines
# ------------------------------------------------------------------------------
TERMINAL_ROWS, TERMINAL_COLS = os.popen('stty size', 'r').read().split()
LINE_SEP = "=" * int(TERMINAL_COLS)
COUNT = 0
@dataclass
class SubplotAxisLimits:
x_lower: int = None
x_upper: int = None
y_lower: int = None
y_upper: int = None
@dataclass
class FigureAxisLimits:
kcov_limits: SubplotAxisLimits
llvm_cov_limits: SubplotAxisLimits
vlt_cov_limits: SubplotAxisLimits
@dataclass
class FuzzingData:
toplevel: str = ""
duration_mins: int = -1
trial_num: int = -1
hwf_data_path: str = ""
rfuzz_data_path: str = ""
def __post_init__(self):
self.hwf_afl_data = self._load_afl_data(self.hwf_data_path)
self.rfuzz_data = self._load_rfuzz_data(self.rfuzz_data_path)
self.hwf_cov_data = self._load_hwf_vlt_cov_data("%s/logs/vlt_cov_cum.csv" %
self.hwf_data_path)
self.rfuzz_cov_data = self._load_rfuzz_vlt_cov_data("%s/vlt_cum_cov.csv" %
self.rfuzz_data_path)
def _load_afl_data(self, data_path):
afl_glob_path = os.path.join(data_path, "out", "afl_*_interactive",
"plot_data")
afl_plot_data_files = glob.glob(afl_glob_path)
if len(afl_plot_data_files) != 1:
print(red("ERROR: AFL plot_data file no found."))
sys.exit(1)
# Load data into Pandas DataFrame
afl_df = self._load_csv_data(afl_plot_data_files[0])
# Remove leading/trailing white space from column names
afl_df = afl_df.rename(columns=lambda x: x.strip())
# Adjust time stamps to be relative to start time
afl_df.loc[:, "# unix_time"] -= afl_df.loc[0, "# unix_time"]
# Set time as index
afl_df = afl_df.set_index("# unix_time")
return afl_df
def _load_rfuzz_data(self, data_path):
rfuzz_entries = sorted(glob.glob("%s/entry_*.json" % data_path))
rfuzz_data_dict = collections.defaultdict(list)
for entry in rfuzz_entries:
with open(entry, "r") as ef:
entry_dict = json.load(ef)
entry_id = entry_dict["entry"]["id"]
discovery_time = entry_dict["entry"]["discovered_after"]
rfuzz_data_dict[TEST_ID_LABEL].append(entry_id)
rfuzz_data_dict["Time (s)"].append(
FuzzingData._disco_time_dict_to_secs(discovery_time))
rfuzz_df = pd.DataFrame.from_dict(rfuzz_data_dict)
rfuzz_df = rfuzz_df.set_index(TEST_ID_LABEL)
return rfuzz_df
@staticmethod
def _disco_time_dict_to_secs(disco_time_dict):
seconds = disco_time_dict["secs"]
nanoseconds = disco_time_dict["nanos"]
seconds += float(nanoseconds) / float(1e9)
return seconds
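# e.g. _disco_time_dict_to_secs({"secs": 3, "nanos": 500000000}) returns 3.5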
@staticmethod
def _id_str_to_int(id_str):
return int(id_str.lstrip("id:"))
def _load_hwf_vlt_cov_data(self, cov_data_path):
if not os.path.exists(cov_data_path):
print(red("ERROR: coverage data (%s) does not exist." % cov_data_path))
sys.exit(1)
# Load data into Pandas DataFrame
cov_df = self._load_csv_data(cov_data_path)
if cov_df.shape[0] < int(self.hwf_afl_data.iloc[-1, 2]):
# print(cov_df.shape[0], int(self.hwf_afl_data.iloc[-1, 2]))
print(
red("WARNING: some coverage data is missing for %s" % cov_data_path))
# Convert Test-ID labels to ints
cov_df.loc[:, TEST_ID_LABEL] = cov_df.loc[:, TEST_ID_LABEL].apply(
FuzzingData._id_str_to_int)
# Set ID column as the row indices
cov_df = cov_df.set_index(TEST_ID_LABEL)
return cov_df
def _load_rfuzz_vlt_cov_data(self, cov_data_path):
if not os.path.exists(cov_data_path):
print(red("ERROR: coverage data (%s) does not exist." % cov_data_path))
sys.exit(1)
# Load data into Pandas DataFrame
cov_df = self._load_csv_data(cov_data_path)
# Check dimensions match, i.e., no data is missing
if cov_df.shape[0] != self.rfuzz_data.shape[0]:
print(red("ERROR: coverage data is missing. Aborting!"))
sys.exit(1)
# Set ID column as the row indices
cov_df = cov_df.set_index(TEST_ID_LABEL)
return cov_df
def _load_csv_data(self, csv_file):
return pd.read_csv(csv_file,
delimiter=',',
index_col=None,
engine='python')
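# Helper: AFL's plot_data is sampled at irregular timestamps, so the lookup below walks
# backwards to the nearest recorded time at or before `time` and reads its paths_total.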
def get_paths_total_at_time(time, afl_data):
while time not in afl_data.index:
time -= 1
return afl_data.loc[time, "paths_total"]
def get_cov_at_time(paths_total, cov_data, cov_data_key):
return cov_data.loc[paths_total, cov_data_key] * 100.0
def get_vlt_cov_at_time(test_id, vlt_cov_data):
if test_id >= vlt_cov_data.shape[0]:
last_test_id = vlt_cov_data.shape[0] - 1
vlt_cov = (float(vlt_cov_data.loc[last_test_id, "Lines-Covered"]) /
float(vlt_cov_data.loc[last_test_id, "Total-Lines"])) * 100.0
else:
vlt_cov = (float(vlt_cov_data.loc[test_id, "Lines-Covered"]) /
float(vlt_cov_data.loc[test_id, "Total-Lines"])) * 100.0
return vlt_cov
def get_max_vlt_cov(vlt_cov_data):
last_test_id = vlt_cov_data.shape[0] - 1
vlt_cov = (float(vlt_cov_data.loc[last_test_id, "Lines-Covered"]) /
float(vlt_cov_data.loc[last_test_id, "Total-Lines"])) * 100.0
return vlt_cov
def scale_time(time_seconds, time_units):
if time_units == "h":
return float(time_seconds) / float(3600)
elif time_units == "m":
return float(time_seconds) / float(60)
else:
return time_seconds
def load_fuzzing_data(hwf_exp_prefix, rfuzz_exp_prefix):
print(yellow("Loading data ..."))
exp2data = collections.defaultdict(list)
for toplevel in TOPLEVELS:
for trial in TRIALS:
# Build complete path to data files
exp_suffix = EXPERIMENT_SUFFIX % (toplevel.lower(), DURATION_MINS)
hwf_data_path = "{}-{}-{}".format(hwf_exp_prefix, exp_suffix, trial)
rfuzz_data_path = "{}/rfuzz-{}-{}".format(rfuzz_exp_prefix, exp_suffix,
trial)
# Load fuzzing data into an object
exp2data[exp_suffix].append(
FuzzingData(toplevel, DURATION_MINS, trial, hwf_data_path,
rfuzz_data_path))
return exp2data
def build_max_rfuzz_coverage_df(exp2data,
time_units="m",
normalize_to_start=False,
consolidation="max"):
print(yellow("Building RFUZZ coverage dataframe ..."))
# Create empty dictionary that will be used to create a Pandas DataFrame that
# looks like the following:
# +--------------------------------------------------------------------+
# | toplevel | fuzzer | coverage type | time | coverage (%) |
# +--------------------------------------------------------------------+
# | ... | ... | ... | ... | ... |
coverage_dict = {
TOPLEVEL_LABEL: [],
FUZZER_LABEL: [],
COVERAGE_TYPE_LABEL: [],
TIME_LABEL: [],
COVERAGE_LABEL: [],
}
cov_dict = collections.defaultdict(list) # maps toplevel --> [coverage list]
for exp_name, fd_list in exp2data.items():
# get max coverage experiment
max_cov = get_max_vlt_cov(fd_list[0].rfuzz_cov_data)
max_cov_fd = fd_list[0]
for fd in fd_list:
cov = get_max_vlt_cov(fd.rfuzz_cov_data)
cov_dict[fd.toplevel].append(cov)
if cov > max_cov:
max_cov = cov
max_cov_fd = fd
for test_id, row in max_cov_fd.rfuzz_data.iterrows():
# scale time
scaled_time = scale_time(row["Time (s)"], time_units)
# add circuit, fuzzer, and time values to dataframe row
coverage_dict[TOPLEVEL_LABEL].append(max_cov_fd.toplevel)
coverage_dict[TIME_LABEL].append(scaled_time)
# compute average coverage at all points in time
rfuzz_vlt_cov = get_vlt_cov_at_time(test_id, max_cov_fd.rfuzz_cov_data)
# save time 0 coverage to normalize if requested
if test_id == 0:
rfuzz_vlt_cov_t0 = rfuzz_vlt_cov
if normalize_to_start:
rfuzz_vlt_cov /= rfuzz_vlt_cov_t0
# add coverage to dataframe row
coverage_dict[FUZZER_LABEL].append("RFUZZ")
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_LABEL].append(rfuzz_vlt_cov)
# extend lines to max time value
if coverage_dict[TIME_LABEL][-1] != SCALED_MAX_PLOT_TIME:
coverage_dict[TOPLEVEL_LABEL].append(max_cov_fd.toplevel)
coverage_dict[TIME_LABEL].append(SCALED_MAX_PLOT_TIME)
coverage_dict[FUZZER_LABEL].append("RFUZZ")
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_LABEL].append(coverage_dict[COVERAGE_LABEL][-1])
print("Max HW Line coverage (%15s): %.3f%%" %
(max_cov_fd.toplevel, coverage_dict[COVERAGE_LABEL][-1]))
print(green("Done."))
print(LINE_SEP)
return pd.DataFrame.from_dict(coverage_dict), cov_dict
def build_min_hwf_coverage_df(exp2data,
time_units="m",
normalize_to_start=False,
consolidation="max"):
print(yellow("Building HWF coverage dataframe ..."))
# Create empty dictionary that will be used to create a Pandas DataFrame that
# looks like the following:
# +--------------------------------------------------------------------+
# | toplevel | fuzzer | coverage type | time | coverage (%) |
# +--------------------------------------------------------------------+
# | ... | ... | ... | ... | ... |
coverage_dict = {
TOPLEVEL_LABEL: [],
FUZZER_LABEL: [],
COVERAGE_TYPE_LABEL: [],
TIME_LABEL: [],
COVERAGE_LABEL: [],
}
cov_dict = collections.defaultdict(list) # maps toplevel --> [coverage list]
for exp_name, fd_list in exp2data.items():
# get min coverage experiment
min_cov = get_max_vlt_cov(fd_list[0].hwf_cov_data)
min_cov_fd = fd_list[0]
for fd in fd_list:
cov = get_max_vlt_cov(fd.hwf_cov_data)
cov_dict[fd.toplevel].append(cov)
if cov < min_cov:
min_cov = cov
min_cov_fd = fd
# build data frame for plotting
for time, row in min_cov_fd.hwf_afl_data.iterrows():
# scale time
scaled_time = scale_time(time, time_units)
# add circuit, fuzzer, and time values to dataframe row
coverage_dict[TOPLEVEL_LABEL].append(min_cov_fd.toplevel)
coverage_dict[TIME_LABEL].append(scaled_time)
# get the AFL paths_total at the current time
paths_total = get_paths_total_at_time(time, min_cov_fd.hwf_afl_data) - 1
# get HWF coverage data
hwf_vlt_cov = get_vlt_cov_at_time(paths_total, min_cov_fd.hwf_cov_data)
# normalize to start time if requested
if time == 0:
hwf_vlt_cov_t0 = hwf_vlt_cov
if normalize_to_start:
hwf_vlt_cov /= hwf_vlt_cov_t0
# add to data frame
coverage_dict[FUZZER_LABEL].append("HWFP")
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_LABEL].append(hwf_vlt_cov)
# extend lines to max time value
if coverage_dict[TIME_LABEL][-1] != SCALED_MAX_PLOT_TIME:
coverage_dict[TOPLEVEL_LABEL].append(min_cov_fd.toplevel)
coverage_dict[TIME_LABEL].append(SCALED_MAX_PLOT_TIME)
coverage_dict[FUZZER_LABEL].append("HWFP")
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_LABEL].append(coverage_dict[COVERAGE_LABEL][-1])
print("Min. HW Line coverage (%15s): %.3f%%" %
(min_cov_fd.toplevel, coverage_dict[COVERAGE_LABEL][-1]))
print(green("Done."))
print(LINE_SEP)
return pd.DataFrame.from_dict(coverage_dict), cov_dict
def plot_avg_coverage_vs_time(hwf_cov_df, rfuzz_cov_df, time_units="m"):
print(yellow("Generating plot ..."))
# Set plot style and extract only HDL line coverage
sns.set_theme(context="notebook", style="darkgrid")
hdl_cov_df = pd.concat([hwf_cov_df, rfuzz_cov_df])
# create figure and plot the data
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
sns.lineplot(data=hdl_cov_df,
x=TIME_LABEL,
y=COVERAGE_LABEL,
hue=TOPLEVEL_LABEL,
style=FUZZER_LABEL,
ax=ax)
# format the plot
if time_units == "m":
time_units_label = "min."
elif time_units == "h":
time_units_label = "hours"
else:
time_units_label = "s"
ax.set_xlabel(TIME_LABEL + " (%s)" % time_units_label,
fontsize=LABEL_FONT_SIZE)
ax.set_ylabel("HDL Line " + COVERAGE_LABEL, fontsize=LABEL_FONT_SIZE)
ax.tick_params("x", labelsize=TICK_FONT_SIZE)
ax.tick_params("y", labelsize=TICK_FONT_SIZE)
plt.legend(fontsize=LEGEND_FONT_SIZE,
title_fontsize=LEGEND_TITLE_FONT_SIZE,
bbox_to_anchor=(1.01, 0.75),
loc='upper left')
plt.tight_layout()
# save the plot
plt.savefig(PLOT_FILE_NAME, format=PLOT_FORMAT)
print(green("Done."))
print(LINE_SEP)
def plot_avg_coverage_vs_time_broken(hwf_cov_df, rfuzz_cov_df, time_units="m"):
print(yellow("Generating plot ..."))
# Set plot style and extract only HDL line coverage
# sns.set_theme(context="notebook", style="darkgrid")
hdl_cov_df = pd.concat([hwf_cov_df, rfuzz_cov_df])
# authors: <NAME>, Manish
# date: 2020-01-23
"""Fits a linear regression model on the pre-processed training data from the Vegas strip data (from https://archive.ics.uci.edu/ml/machine-learning-databases/00397/LasVegasTripAdvisorReviews-Dataset.csv).
Saves the model results.
Usage: src/fit_vegas_predict_model.py --train=<train> --out_dir=<out_dir>
Options:
--train=<train> Path (including filename) to training data
--out_dir=<out_dir> Path to directory where the serialized model results should be written
"""
# importing required libraries
from docopt import docopt
import os
import matplotlib.pyplot as plt
from pandas.plotting import table
import numpy as np
import selenium
import pickle
import pandas as pd
# regressors / models
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso, Ridge
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
# Feature selection
from sklearn.feature_selection import RFE
# other
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.feature_extraction.text import CountVectorizer
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import altair as alt
opt = docopt(__doc__)
def main(train, out_dir):
out_dir = "results"
os.makedirs(out_dir)
# Loading data and dividing it into training and test set features
vegas_data = pd.read_csv(train )
vegas_data.head()
X = vegas_data.drop('score', axis =1)
y = vegas_data['score']
# splitting data further into training and test set to see if our results can generalize
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =0.2, random_state =5)
#functions used
def fit_and_report(model, X, y, Xv, yv, mode = 'regression'):
"""
The function fits a model (regression or classification) based on mode
and then calculates the training set and validation set error
Parameters
---------
model - name of model object
X - training set features(predictors)
y - training set label(response variable)
Xv - validation set features(predictors)
yv - validation set label(response variable)
mode - can take two values - regression or classification
Returns
--------
errors - A list of two elements with first element as error on training set
and second element as error on test set
"""
model.fit(X, y)
if mode.lower().startswith('regress'):
errors = [mean_squared_error(y, model.predict(X)), mean_squared_error(yv, model.predict(Xv))]
if mode.lower().startswith('classif'):
errors = [1 - model.score(X,y), 1 - model.score(Xv,yv)]
return errors
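# Example call (illustrative only): fit_and_report(LinearRegression(), X_train, y_train,
# X_test, y_test, mode='regression') returns [training MSE, validation MSE].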
## functions to fit model
def grid_fit_model(model, X_train, y_train, parameters= None, cv = 5):
"""
Function to fit a model based parameters specified.
"""
if parameters is None:
model.fit(X_train,y_train)
return model
else :
grid_model = GridSearchCV(model, param_grid = parameters, cv= cv,
scoring = 'neg_mean_squared_error', return_train_score=True, iid = True)
grid_model.fit(X_train, y_train)
return grid_model
def model_results(model):
"""
Function to show results of model
"""
results = pd.DataFrame(model.cv_results_)[['params', 'mean_train_score', 'mean_test_score']]
results['mean_train_error'] = -results['mean_train_score']
results['mean_valid_error'] = -results['mean_test_score']
results = results.drop(['mean_train_score','mean_test_score'], axis = 1)
return results
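# Note: grid_fit_model scores with 'neg_mean_squared_error', so the signs are flipped
# above to report plain train/validation mean squared errors.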
#FEATURE ENGINEERING
# feature Engineering
results = { 'features_selected' :[],
'train_error': [],
'validation_error':[]}
for i in range(1,X_train.shape[1]):
lin_model = LinearRegression()
rec_model = RFE(lin_model, n_features_to_select=i, step=1)
results['features_selected'].append(i)
results['train_error'].append(fit_and_report(rec_model, X_train, y_train,X_test, y_test, "regression" )[0])
results['validation_error'].append(fit_and_report(rec_model, X_train, y_train,X_test, y_test, "regression" )[1])
errors_df = pd.DataFrame(results)
# Run from HOME
import re
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import pandas as pd
import os
import httplib2
from geopy.geocoders import GoogleV3
from Dicc_Tipo_Danhos import camb_tipos
import tqdm
import datetime
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = 'creds/secreto_cliente.json'
APPLICATION_NAME = 'Temblor'
geolocator = GoogleV3(api_key=os.environ.get('GM_KEY'))
# The address must have the form "Number Street City"
def dir_correct(calle, numero, ciudad, estado):
k = []
k.append('Calle ' + calle + ' ' + numero)
k.append(ciudad)
k.append(estado + ', ' + 'MX')
dirr = ', '.join(k)
return dirr
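# Worked example (made-up address): dir_correct('Madero', '10', 'Cuauhtemoc', 'CDMX')
# returns 'Calle Madero 10, Cuauhtemoc, CDMX, MX'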
def obtain_latlong(dirr):
try:
location = geolocator.geocode(dirr, region='MX')
lat = location.latitude
lon = location.longitude
except:
lat = ''
lon = ''
return lat, lon
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(
credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def get_Data_temblor():
"""Shows basic usage of the Sheets API.
Creates a Sheets API service object and prints the names and majors of
students in a sample spreadsheet:
https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets',
'v4',
http=http,
discoveryServiceUrl=discoveryUrl)
# VERIFIED DAMAGE AND COLLAPSES ("DAÑOS Y DERRUMBES VERIFICADOS")
# To download other sheets, change the name in the range field
result = service.spreadsheets().values().get(
spreadsheetId='1i__c44wIg760LmxZcM8oTjDR0cGFVdL9YrjbCcb9Op0',
range='Form Responses 1!A1:AH10000').execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
return values
def insert_Data_temblor(datos):
"""Shows basic usage of the Sheets API.
Creates a Sheets API service object and prints the names and majors of
students in a sample spreadsheet:
https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets',
'v4',
http=http,
discoveryServiceUrl=discoveryUrl)
result = service.spreadsheets().values().get(
spreadsheetId='1wLHf5ITtTsfErWoPHwhu7Vfy-96eQKKxZO2AmZbP9XY',
range='Datos!A1:H1000').execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print(values)
def estructura_sheet(listas):
columnas = listas[0]
info = pd.DataFrame()
for lista in listas:
dicc_aux = {}
for col in range(len(lista)):
dicc_aux[columnas[col]] = lista[col]
info = info.append(dicc_aux, ignore_index=True)
return info
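# Note: the loop above also visits the header row itself, so row 0 of the returned
# DataFrame just repeats the column names; the caller drops it below with .loc[1:, ].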
if __name__ == '__main__':
data = get_Data_temblor()
info = estructura_sheet(data)
info_pub = info.drop([
'Nombre del Informante (ESTA INFORMACIÓN SE HARÁ PUBLICA)',
'Teléfono de Contacto (ESTA INFORMACIÓN SE HARÁ PUBLICA)'],
axis=1)
calles = info_pub['Calle'].tolist()
numeros = info_pub['Número Exterior o Aproximado (escribe sólo el número)'].tolist()
munis = info_pub['Municipio'].tolist()
estados = info_pub['Estado'].tolist()
info_pub=info_pub.loc[1:,]
info_pub.Timestamp = pd.to_datetime(info_pub.Timestamp, format='%m/%d/%Y %H:%M:%S')
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.records import order_dt, trade_dt, position_dt, drawdown_dt
from vectorbt.portfolio.enums import FilledOrder, SizeType, AccumulateExitMode, ConflictMode
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1, 2, 3, 2, 1], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), name='Price')
entries = pd.DataFrame({
'a': [True, True, True, False, False],
'b': [True, False, True, False, True],
'c': [False, True, False, True, False]
}, index=price.index)
exits = pd.DataFrame({
'a': [False, False, True, True, True],
'b': [False, True, False, True, False],
'c': [True, False, True, False, True]
}, index=price.index)
order_size = pd.DataFrame({
'a': [1, 0.1, -1, -0.1, -1],
'b': [1, 1, 1, 1, -np.inf],
'c': [np.inf, -np.inf, np.inf, -np.inf, np.inf]
}, index=price.index)
@njit
def order_func_nb(order_context, price, fees, fixed_fees, slippage):
col = order_context.col
i = order_context.i
size = col + 1
if i % 2 == 1:
size *= -1
return vbt.portfolio.nb.Order(
size, SizeType.Shares, price[i, col], fees[i, col], fixed_fees[i, col], slippage[i, col])
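# order_func_nb places an order of `col + 1` shares on every bar and flips the sign on
# odd bars, so each column alternates between buying and selling a fixed size.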
# test_portfolio
init_capital = [100, 200, 300]
levy_alpha = [1., 2., 3.]
risk_free = [0.01, 0.02, 0.03]
required_return = [0.1, 0.2, 0.3]
cutoff = [0.01, 0.02, 0.03]
factor_returns = price.vbt.combine_with_multiple(
[0.9, 1., 1.1],
combine_func=np.multiply,
concat=True,
keys=entries.columns
)
test_portfolio = vbt.Portfolio.from_signals(
price, entries, exits,
fees=0.01,
init_capital=init_capital,
freq='1 days',
year_freq='252 days',
levy_alpha=levy_alpha,
risk_free=risk_free,
required_return=required_return,
cutoff=cutoff,
factor_returns=factor_returns
)
# ############# nb.py ############# #
class TestNumba:
def test_buy_in_cash_nb(self):
from vectorbt.portfolio.nb import buy_in_cash_nb
assert buy_in_cash_nb(5, 0, 10, 4, 0, 0, 0) == \
(1.0, 0.4, FilledOrder(size=0.4, price=10, fees=0, side=0))
assert buy_in_cash_nb(5, 0, 10, 5, 0, 0, 0) == \
(0.0, 0.5, FilledOrder(size=0.5, price=10, fees=0, side=0))
assert buy_in_cash_nb(5, 0, 10, 6, 0, 0, 0) == \
(0.0, 0.5, FilledOrder(size=0.5, price=10, fees=0, side=0))
assert buy_in_cash_nb(100, 0, 10, 5, 0, 4, 0) == \
(95.0, 0.1, FilledOrder(size=0.1, price=10, fees=4, side=0))
assert buy_in_cash_nb(100, 0, 10, 5, 0, 5, 0) == \
(100.0, 0.0, None)
assert buy_in_cash_nb(100, 0, 10, 5, 0, 6, 0) == \
(100.0, 0.0, None)
assert buy_in_cash_nb(100, 0, 10, 5, 0, 0, 0.1) == \
(95.0, 0.45454545454545453, FilledOrder(size=0.45454545454545453, price=11.0, fees=0, side=0))
assert buy_in_cash_nb(100, 0, 10, 5, 0.1, 0, 0) == \
(95.0, 0.45, FilledOrder(size=0.45, price=10, fees=0.5, side=0))
assert buy_in_cash_nb(100, 0, 10, 5, 1, 0, 0) == \
(95.0, 0.0, FilledOrder(size=0.0, price=10, fees=5, side=0))
def test_sell_in_cash_nb(self):
from vectorbt.portfolio.nb import sell_in_cash_nb
assert sell_in_cash_nb(0, 100, 10, 50, 0, 0, 0) == \
(50, 95.0, FilledOrder(size=5.0, price=10, fees=0.0, side=1))
assert sell_in_cash_nb(0, 100, 10, 50, 0, 0, 0.1) == \
(50.0, 94.44444444444444, FilledOrder(size=5.555555555555555, price=9.0, fees=0.0, side=1))
assert sell_in_cash_nb(0, 100, 10, 50, 0, 40, 0) == \
(50, 91.0, FilledOrder(size=9.0, price=10, fees=40.0, side=1))
assert sell_in_cash_nb(0, 100, 10, 50, 0.1, 0, 0) == \
(50.0, 94.44444444444444, FilledOrder(size=5.555555555555555, price=10, fees=5.555555555555557, side=1))
assert sell_in_cash_nb(0, 5, 10, 100, 0, 0, 0) == \
(50, 0.0, FilledOrder(size=5.0, price=10, fees=0.0, side=1))
assert sell_in_cash_nb(0, 5, 10, 100, 0, 0, 0.1) == \
(45.0, 0.0, FilledOrder(size=5.0, price=9.0, fees=0.0, side=1))
assert sell_in_cash_nb(0, 5, 10, 100, 0, 40, 0) == \
(10, 0.0, FilledOrder(size=5.0, price=10, fees=40.0, side=1))
assert sell_in_cash_nb(0, 5, 10, 100, 0.1, 0, 0) == \
(45.0, 0.0, FilledOrder(size=5.0, price=10, fees=5.0, side=1))
assert sell_in_cash_nb(100, 5, 10, 100, 0, 100, 0) == \
(50, 0.0, FilledOrder(size=5.0, price=10, fees=100.0, side=1))
assert sell_in_cash_nb(0, 5, 10, 100, 0, 100, 0) == \
(0, 5.0, None)
def test_buy_in_shares_nb(self):
from vectorbt.portfolio.nb import buy_in_shares_nb
assert buy_in_shares_nb(100, 0, 10, 5, 0, 0, 0) == \
(50.0, 5.0, FilledOrder(size=5.0, price=10, fees=0.0, side=0))
assert buy_in_shares_nb(100, 0, 10, 5, 0, 0, 0.1) == \
(45.0, 5.0, FilledOrder(size=5.0, price=11.0, fees=0.0, side=0))
assert buy_in_shares_nb(100, 0, 10, 5, 0, 40, 0) == \
(10.0, 5.0, FilledOrder(size=5.0, price=10, fees=40.0, side=0))
assert buy_in_shares_nb(100, 0, 10, 5, 0.1, 0, 0) == \
(44.99999999999999, 5.0, FilledOrder(size=5.0, price=10, fees=5.000000000000007, side=0))
assert buy_in_shares_nb(40, 0, 10, 5, 0, 0, 0) == \
(0.0, 4.0, FilledOrder(size=4.0, price=10, fees=0.0, side=0))
assert buy_in_shares_nb(40, 0, 10, 5, 0, 0, 0.1) == \
(0.0, 3.6363636363636362, FilledOrder(size=3.6363636363636362, price=11.0, fees=0.0, side=0))
assert buy_in_shares_nb(40, 0, 10, 5, 0, 40, 0) == \
(40.0, 0.0, None)
assert buy_in_shares_nb(40, 0, 10, 5, 0.1, 0, 0) == \
(0.0, 3.636363636363636, FilledOrder(size=3.636363636363636, price=10, fees=3.6363636363636402, side=0))
def test_sell_in_shares_nb(self):
from vectorbt.portfolio.nb import sell_in_shares_nb
assert sell_in_shares_nb(0, 5, 10, 4, 0, 0, 0) == \
(40, 1.0, FilledOrder(size=4, price=10, fees=0, side=1))
assert sell_in_shares_nb(0, 5, 10, 5, 0, 0, 0) == \
(50, 0.0, FilledOrder(size=5, price=10, fees=0, side=1))
assert sell_in_shares_nb(0, 5, 10, 6, 0, 0, 0) == \
(50, 0.0, FilledOrder(size=5, price=10, fees=0, side=1))
assert sell_in_shares_nb(0, 5, 10, 5, 0, 40, 0) == \
(10, 0.0, FilledOrder(size=5, price=10, fees=40, side=1))
assert sell_in_shares_nb(0, 5, 10, 5, 0, 50, 0) == \
(0, 5.0, None)
assert sell_in_shares_nb(0, 5, 10, 5, 0, 60, 0) == \
(0, 5.0, None)
assert sell_in_shares_nb(100, 5, 10, 5, 0, 60, 0) == \
(90, 0.0, FilledOrder(size=5, price=10, fees=60, side=1))
assert sell_in_shares_nb(0, 5, 10, 5, 0, 0, 0.1) == \
(45.0, 0.0, FilledOrder(size=5, price=9.0, fees=0.0, side=1))
assert sell_in_shares_nb(0, 5, 10, 5, 0.1, 0, 0) == \
(45.0, 0.0, FilledOrder(size=5, price=10, fees=5.0, side=1))
# ############# base.py ############# #
class TestPortfolio:
def test_from_signals(self):
portfolio = vbt.Portfolio.from_signals(price, entries['a'], exits['a'], size=1)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio.shares,
pd.Series(np.array([1., 1., 1., 0., 0.]), index=price.index, name=('a', 'Price'))
)
pd.testing.assert_series_equal(
portfolio.cash,
pd.Series(np.array([99., 99., 99., 101., 101.]), index=price.index, name=('a', 'Price'))
)
portfolio2 = vbt.Portfolio.from_signals(price, entries, exits, size=1)
record_arrays_close(
portfolio2.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 1., 2., 0., 1),
(1, 2, 1., 3., 0., 0), (1, 3, 1., 2., 0., 1),
(1, 4, 1., 1., 0., 0), (2, 1, 1., 2., 0., 0),
(2, 2, 1., 3., 0., 1), (2, 3, 1., 2., 0., 0),
(2, 4, 1., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio2.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1., 0., 1.],
[1., 1., 0.],
[0., 0., 1.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio2.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[99., 101., 98.],
[99., 98., 101.],
[101., 100., 99.],
[101., 99., 100.]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_size(self):
portfolio = vbt.Portfolio.from_signals(price, entries, exits, size=[1, 2, np.inf])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1),
(1, 0, 2., 1., 0., 0), (1, 1, 2., 2., 0., 1),
(1, 2, 2., 3., 0., 0), (1, 3, 2., 2., 0., 1),
(1, 4, 2., 1., 0., 0), (2, 1, 50., 2., 0., 0),
(2, 2, 50., 3., 0., 1), (2, 3, 75., 2., 0., 0),
(2, 4, 75., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 2., 0.],
[1., 0., 50.],
[1., 2., 0.],
[0., 0., 75.],
[0., 2., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 98., 100.],
[99., 102., 0.],
[99., 96., 150.],
[101., 100., 0.],
[101., 98., 75.]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_init_capital(self):
portfolio = vbt.Portfolio.from_signals(price, entries, exits, init_capital=[1, 10, 100])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1),
(1, 0, 10., 1., 0., 0), (1, 1, 10., 2., 0., 1),
(1, 2, 6.66666667, 3., 0., 0), (1, 3, 6.66666667, 2., 0., 1),
(1, 4, 13.33333333, 1., 0., 0), (2, 1, 50., 2., 0., 0),
(2, 2, 50., 3., 0., 1), (2, 3, 75., 2., 0., 0),
(2, 4, 75., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 10., 0.],
[1., 0., 50.],
[1., 6.66666667, 0.],
[0., 0., 75.],
[0., 13.33333333, 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[0., 0., 100.],
[0., 20., 0.],
[0., 0., 150.],
[2., 13.33333333, 0.],
[2., 0., 75.]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_fees(self):
portfolio = vbt.Portfolio.from_signals(price, entries, exits, size=1, fees=[0., 0.01, np.inf])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0.e+00, 0), (0, 3, 1., 2., 0.e+00, 1),
(1, 0, 1., 1., 1.e-02, 0), (1, 1, 1., 2., 2.e-02, 1),
(1, 2, 1., 3., 3.e-02, 0), (1, 3, 1., 2., 2.e-02, 1),
(1, 4, 1., 1., 1.e-02, 0), (2, 1, 0., 2., 1.e+02, 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 98.99, 100.],
[99., 100.97, 0.],
[99., 97.94, 0.],
[101., 99.92, 0.],
[101., 98.91, 0.]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_fixed_fees(self):
portfolio = vbt.Portfolio.from_signals(price, entries, exits, size=1, fixed_fees=[0., 1., np.inf])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1),
(1, 0, 1., 1., 1., 0), (1, 1, 1., 2., 1., 1),
(1, 2, 1., 3., 1., 0), (1, 3, 1., 2., 1., 1),
(1, 4, 1., 1., 1., 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 98., 100.],
[99., 99., 100.],
[99., 95., 100.],
[101., 96., 100.],
[101., 94., 100.]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_slippage(self):
portfolio = vbt.Portfolio.from_signals(price, entries, exits, size=1, slippage=[0., 0.01, np.inf])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1),
(1, 0, 1., 1.01, 0., 0), (1, 1, 1., 1.98, 0., 1),
(1, 2, 1., 3.03, 0., 0), (1, 3, 1., 1.98, 0., 1),
(1, 4, 1., 1.01, 0., 0), (2, 1, 0., np.inf, 0., 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 98.99, 100.],
[99., 100.97, 0.],
[99., 97.94, 0.],
[101., 99.92, 0.],
[101., 98.91, 0.]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_price(self):
portfolio = vbt.Portfolio.from_signals(
price, entries, exits, size=1, entry_price=price * 0.9, exit_price=price * 1.1)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 0.9, 0., 0), (0, 3, 1., 2.2, 0., 1),
(1, 0, 1., 0.9, 0., 0), (1, 1, 1., 2.2, 0., 1),
(1, 2, 1., 2.7, 0., 0), (1, 3, 1., 2.2, 0., 1),
(1, 4, 1., 0.9, 0., 0), (2, 1, 1., 1.8, 0., 0),
(2, 2, 1., 3.3, 0., 1), (2, 3, 1., 1.8, 0., 0),
(2, 4, 1., 1.1, 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1., 0., 1.],
[1., 1., 0.],
[0., 0., 1.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99.1, 99.1, 100.],
[99.1, 101.3, 98.2],
[99.1, 98.6, 101.5],
[101.3, 100.8, 99.7],
[101.3, 99.9, 100.8]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_size_type(self):
portfolio = vbt.Portfolio.from_signals(price, entries, exits, size=1, size_type=SizeType.Shares)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 1., 2., 0., 1),
(1, 2, 1., 3., 0., 0), (1, 3, 1., 2., 0., 1),
(1, 4, 1., 1., 0., 0), (2, 1, 1., 2., 0., 0),
(2, 2, 1., 3., 0., 1), (2, 3, 1., 2., 0., 0),
(2, 4, 1., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1., 0., 1.],
[1., 1., 0.],
[0., 0., 1.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[99., 101., 98.],
[99., 98., 101.],
[101., 100., 99.],
[101., 99., 100.]
]), index=price.index, columns=entries.columns)
)
portfolio2 = vbt.Portfolio.from_signals(price, entries, exits, size=1, size_type=SizeType.Cash)
record_arrays_close(
portfolio2.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 1., 2., 0., 1),
(1, 2, 0.33333333, 3., 0., 0), (1, 3, 0.33333333, 2., 0., 1),
(1, 4, 1., 1., 0., 0), (2, 1, 0.5, 2., 0., 0),
(2, 2, 0.5, 3., 0., 1), (2, 3, 0.5, 2., 0., 0),
(2, 4, 0.5, 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio2.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1., 0., 0.5],
[1., 0.33333333, 0.],
[0., 0., 0.5],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio2.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[99., 101., 99.],
[99., 100., 100.5],
[101., 100.66666667, 99.5],
[101., 99.66666667, 100.]
]), index=price.index, columns=entries.columns)
)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_signals(price, entries, exits, size=1, size_type=SizeType.TargetShares)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_signals(price, entries, exits, size=1, size_type=SizeType.TargetCash)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_signals(price, entries, exits, size=1, size_type=SizeType.TargetValue)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_signals(price, entries, exits, size=1, size_type=SizeType.TargetPercent)
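        # Note on the four pytest.raises checks above (a reading of the test, not official docs):
        # from_signals appears to accept only absolute sizing (SizeType.Shares / SizeType.Cash);
        # the target-based size types are rejected, presumably because a signal-driven simulation
        # has no rebalancing target to converge to.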
def test_from_signals_accumulate(self):
portfolio = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Shares,
accumulate=True,
accumulate_exit_mode=AccumulateExitMode.Close)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 1., 2., 0., 0),
(0, 3, 2., 2., 0., 1), (1, 0, 1., 1., 0., 0),
(1, 1, 1., 2., 0., 1), (1, 2, 1., 3., 0., 0),
(1, 3, 1., 2., 0., 1), (1, 4, 1., 1., 0., 0),
(2, 1, 1., 2., 0., 0), (2, 2, 1., 3., 0., 1),
(2, 3, 1., 2., 0., 0), (2, 4, 1., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[2., 0., 1.],
[2., 1., 0.],
[0., 0., 1.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[97., 101., 98.],
[97., 98., 101.],
[101., 100., 99.],
[101., 99., 100.]
]), index=price.index, columns=entries.columns)
)
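        # Worked numbers for column 'a' above: with accumulate=True and Shares sizing, the
        # repeated entry on the second bar adds one more share at price 2 (cash 99 -> 97,
        # shares 1 -> 2), and AccumulateExitMode.Close then sells the whole 2-share position
        # at price 2 on the fourth bar (cash 97 + 4 -> 101), matching the frames asserted above.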
portfolio2 = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Cash,
accumulate=True,
accumulate_exit_mode=AccumulateExitMode.Close)
record_arrays_close(
portfolio2.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 0.5, 2., 0., 0),
(0, 3, 1.5, 2., 0., 1), (1, 0, 1., 1., 0., 0),
(1, 1, 1., 2., 0., 1), (1, 2, 0.33333333, 3., 0., 0),
(1, 3, 0.33333333, 2., 0., 1), (1, 4, 1., 1., 0., 0),
(2, 1, 0.5, 2., 0., 0), (2, 2, 0.5, 3., 0., 1),
(2, 3, 0.5, 2., 0., 0), (2, 4, 0.5, 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio2.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1.5, 0., 0.5],
[1.5, 0.33333333, 0.],
[0., 0., 0.5],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio2.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[98., 101., 99.],
[98., 100., 100.5],
[101., 100.66666667, 99.5],
[101., 99.66666667, 100.]
]), index=price.index, columns=entries.columns)
)
portfolio3 = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Shares,
accumulate=True,
accumulate_exit_mode=AccumulateExitMode.Reduce)
record_arrays_close(
portfolio3.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 1., 2., 0., 0),
(0, 3, 1., 2., 0., 1), (0, 4, 1., 1., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 1., 2., 0., 1),
(1, 2, 1., 3., 0., 0), (1, 3, 1., 2., 0., 1),
(1, 4, 1., 1., 0., 0), (2, 1, 1., 2., 0., 0),
(2, 2, 1., 3., 0., 1), (2, 3, 1., 2., 0., 0),
(2, 4, 1., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio3.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[2., 0., 1.],
[2., 1., 0.],
[1., 0., 1.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio3.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[97., 101., 98.],
[97., 98., 101.],
[99., 100., 99.],
[100., 99., 100.]
]), index=price.index, columns=entries.columns)
)
portfolio4 = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Cash,
accumulate=True,
accumulate_exit_mode=AccumulateExitMode.Reduce)
record_arrays_close(
portfolio4.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 0.5, 2., 0., 0),
(0, 3, 0.5, 2., 0., 1), (0, 4, 1., 1., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 0.5, 2., 0., 1),
(1, 2, 0.33333333, 3., 0., 0), (1, 3, 0.5, 2., 0., 1),
(1, 4, 1., 1., 0., 0), (2, 1, 0.5, 2., 0., 0),
(2, 2, 0.33333333, 3., 0., 1), (2, 3, 0.5, 2., 0., 0),
(2, 4, 0.66666667, 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio4.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1.5, 0.5, 0.5],
[1.5, 0.83333333, 0.16666667],
[1., 0.33333333, 0.66666667],
[0., 1.33333333, 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio4.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[98., 100., 99.],
[98., 99., 100.],
[99., 100., 99.],
[100., 99., 99.66666667]
]), index=price.index, columns=entries.columns)
)
def test_from_signals_conflicts(self):
portfolio = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Shares,
accumulate=True,
conflict_mode=ConflictMode.Exit)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 1., 2., 0., 0),
(0, 2, 2., 3., 0., 1), (1, 0, 1., 1., 0., 0),
(1, 1, 1., 2., 0., 1), (1, 2, 1., 3., 0., 0),
(1, 3, 1., 2., 0., 1), (1, 4, 1., 1., 0., 0),
(2, 1, 1., 2., 0., 0), (2, 2, 1., 3., 0., 1),
(2, 3, 1., 2., 0., 0), (2, 4, 1., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[2., 0., 1.],
[0., 1., 0.],
[0., 0., 1.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[97., 101., 98.],
[103., 98., 101.],
[103., 100., 99.],
[103., 99., 100.]
]), index=price.index, columns=entries.columns)
)
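        # Worked numbers for column 'a' above: accumulation buys one share on each of the first
        # two bars (cash 99 -> 97, shares 2); on the third bar entry and exit signals collide and
        # ConflictMode.Exit resolves in favour of the exit, selling the full 2-share position at
        # price 3 (cash 97 + 6 -> 103), which is what the asserted frames show.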
portfolio2 = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Cash,
accumulate=True,
conflict_mode=ConflictMode.Exit)
record_arrays_close(
portfolio2.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 0.5, 2., 0., 0),
(0, 2, 1.5, 3., 0., 1), (1, 0, 1., 1., 0., 0),
(1, 1, 1., 2., 0., 1), (1, 2, 0.33333333, 3., 0., 0),
(1, 3, 0.33333333, 2., 0., 1), (1, 4, 1., 1., 0., 0),
(2, 1, 0.5, 2., 0., 0), (2, 2, 0.5, 3., 0., 1),
(2, 3, 0.5, 2., 0., 0), (2, 4, 0.5, 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio2.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1.5, 0., 0.5],
[0., 0.33333333, 0.],
[0., 0., 0.5],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio2.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[98., 101., 99.],
[102.5, 100., 100.5],
[102.5, 100.66666667, 99.5],
[102.5, 99.66666667, 100.]
]), index=price.index, columns=entries.columns)
)
portfolio3 = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Shares,
accumulate=True,
conflict_mode=ConflictMode.ExitAndEntry)
record_arrays_close(
portfolio3.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 1., 2., 0., 0),
(0, 2, 1., 3., 0., 1), (0, 3, 1., 2., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 1., 2., 0., 1),
(1, 2, 1., 3., 0., 0), (1, 3, 1., 2., 0., 1),
(1, 4, 1., 1., 0., 0), (2, 1, 1., 2., 0., 0),
(2, 2, 1., 3., 0., 1), (2, 3, 1., 2., 0., 0),
(2, 4, 1., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio3.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[2., 0., 1.],
[1., 1., 0.],
[0., 0., 1.],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio3.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[97., 101., 98.],
[100., 98., 101.],
[102., 100., 99.],
[102., 99., 100.]
]), index=price.index, columns=entries.columns)
)
portfolio4 = vbt.Portfolio.from_signals(
price, entries, exits, size=1,
size_type=SizeType.Cash,
accumulate=True,
conflict_mode=ConflictMode.ExitAndEntry)
record_arrays_close(
portfolio4.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 0.5, 2., 0., 0),
(0, 2, 1.16666667, 3., 0., 1), (0, 3, 0.33333333, 2., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 1., 2., 0., 1),
(1, 2, 0.33333333, 3., 0., 0), (1, 3, 0.33333333, 2., 0., 1),
(1, 4, 1., 1., 0., 0), (2, 1, 0.5, 2., 0., 0),
(2, 2, 0.5, 3., 0., 1), (2, 3, 0.5, 2., 0., 0),
(2, 4, 0.5, 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio4.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1.5, 0., 0.5],
[0.33333333, 0.33333333, 0.],
[0., 0., 0.5],
[0., 1., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio4.cash,
pd.DataFrame(np.array([
[99., 99., 100.],
[98., 101., 99.],
[101.5, 100., 100.5],
[102.16666667, 100.66666667, 99.5],
[102.16666667, 99.66666667, 100.]
]), index=price.index, columns=entries.columns)
)
def test_from_orders(self):
portfolio = vbt.Portfolio.from_orders(price, order_size['a'])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 0.1, 2., 0., 0),
(0, 2, 1., 3., 0., 1), (0, 3, 0.1, 2., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio.shares,
pd.Series(np.array([1., 1.1, 0.1, 0., 0.]), index=price.index, name=('a', 'Price'))
)
pd.testing.assert_series_equal(
portfolio.cash,
pd.Series(np.array([99., 98.8, 101.8, 102., 102.]), index=price.index, name=('a', 'Price'))
)
portfolio2 = vbt.Portfolio.from_orders(price, order_size)
record_arrays_close(
portfolio2.orders.records_arr,
np.array([
(0, 0, 1.00000000e+00, 1., 0., 0),
(0, 1, 1.00000000e-01, 2., 0., 0),
(0, 2, 1.00000000e+00, 3., 0., 1),
(0, 3, 1.00000000e-01, 2., 0., 1),
(1, 0, 1.00000000e+00, 1., 0., 0),
(1, 1, 1.00000000e+00, 2., 0., 0),
(1, 2, 1.00000000e+00, 3., 0., 0),
(1, 3, 1.00000000e+00, 2., 0., 0),
(1, 4, 4.00000000e+00, 1., 0., 1),
(2, 0, 1.00000000e+02, 1., 0., 0),
(2, 1, 1.00000000e+02, 2., 0., 1),
(2, 2, 6.66666667e+01, 3., 0., 0),
(2, 3, 6.66666667e+01, 2., 0., 1),
(2, 4, 1.33333333e+02, 1., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio2.shares,
pd.DataFrame(np.array([
[1.00000000e+00, 1.00000000e+00, 1.00000000e+02],
[1.10000000e+00, 2.00000000e+00, 0.00000000e+00],
[1.00000000e-01, 3.00000000e+00, 6.66666667e+01],
[0.00000000e+00, 4.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 1.33333333e+02]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio2.cash,
pd.DataFrame(np.array([
[99., 99., 0.],
[98.8, 97., 200.],
[101.8, 94., 0.],
[102., 92., 133.33333333],
[102., 96., 0.]
]), index=price.index, columns=entries.columns)
)
def test_from_orders_init_capital(self):
portfolio = vbt.Portfolio.from_orders(price, order_size, init_capital=[1, 10, 100])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 2, 1., 3., 0., 1),
(1, 0, 1., 1., 0., 0), (1, 1, 1., 2., 0., 0),
(1, 2, 1., 3., 0., 0), (1, 3, 1., 2., 0., 0),
(1, 4, 4., 1., 0., 1), (2, 0, 100., 1., 0., 0),
(2, 1, 100., 2., 0., 1), (2, 2, 66.66666667, 3., 0., 0),
(2, 3, 66.66666667, 2., 0., 1), (2, 4, 133.33333333, 1., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 100.],
[1., 2., 0.],
[0., 3., 66.66666667],
[0., 4., 0.],
[0., 0., 133.33333333]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[0., 9., 0.],
[0., 7., 200.],
[3., 4., 0.],
[3., 2., 133.33333333],
[3., 6., 0.]
]), index=price.index, columns=entries.columns)
)
def test_from_orders_fees(self):
portfolio = vbt.Portfolio.from_orders(price, order_size, fees=[0., 0.01, np.inf])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0.e+00, 0), (0, 1, 0.1, 2., 0.e+00, 0),
(0, 2, 1., 3., 0.e+00, 1), (0, 3, 0.1, 2., 0.e+00, 1),
(1, 0, 1., 1., 1.e-02, 0), (1, 1, 1., 2., 2.e-02, 0),
(1, 2, 1., 3., 3.e-02, 0), (1, 3, 1., 2., 2.e-02, 0),
(1, 4, 4., 1., 4.e-02, 1), (2, 0, 0., 1., 1.e+02, 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1.1, 2., 0.],
[0.1, 3., 0.],
[0., 4., 0.],
[0., 0., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 98.99, 0.],
[98.8, 96.97, 0.],
[101.8, 93.94, 0.],
[102., 91.92, 0.],
[102., 95.88, 0.]
]), index=price.index, columns=entries.columns)
)
def test_from_orders_fixed_fees(self):
portfolio = vbt.Portfolio.from_orders(price, order_size, fixed_fees=[0., 1., np.inf])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 0.1, 2., 0., 0),
(0, 2, 1., 3., 0., 1), (0, 3, 0.1, 2., 0., 1),
(1, 0, 1., 1., 1., 0), (1, 1, 1., 2., 1., 0),
(1, 2, 1., 3., 1., 0), (1, 3, 1., 2., 1., 0),
(1, 4, 4., 1., 1., 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1.1, 2., 0.],
[0.1, 3., 0.],
[0., 4., 0.],
[0., 0., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 98., 100.],
[98.8, 95., 100.],
[101.8, 91., 100.],
[102., 88., 100.],
[102., 91., 100.]
]), index=price.index, columns=entries.columns)
)
def test_from_orders_slippage(self):
portfolio = vbt.Portfolio.from_orders(price, order_size, slippage=[0., 0.01, np.inf])
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 0.1, 2., 0., 0),
(0, 2, 1., 3., 0., 1), (0, 3, 0.1, 2., 0., 1),
(1, 0, 1., 1.01, 0., 0), (1, 1, 1., 2.02, 0., 0),
(1, 2, 1., 3.03, 0., 0), (1, 3, 1., 2.02, 0., 0),
(1, 4, 4., 0.99, 0., 1), (2, 0, 0., np.inf, 0., 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1., 1., 0.],
[1.1, 2., 0.],
[0.1, 3., 0.],
[0., 4., 0.],
[0., 0., 0.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99., 98.99, 0.],
[98.8, 96.97, 0.],
[101.8, 93.94, 0.],
[102., 91.92, 0.],
[102., 95.88, 0.]
]), index=price.index, columns=entries.columns)
)
def test_from_orders_price(self):
portfolio = vbt.Portfolio.from_orders(price, order_size, order_price=0.9 * price)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1.00000000e+00, 0.9, 0., 0),
(0, 1, 1.00000000e-01, 1.8, 0., 0),
(0, 2, 1.00000000e+00, 2.7, 0., 1),
(0, 3, 1.00000000e-01, 1.8, 0., 1),
(1, 0, 1.00000000e+00, 0.9, 0., 0),
(1, 1, 1.00000000e+00, 1.8, 0., 0),
(1, 2, 1.00000000e+00, 2.7, 0., 0),
(1, 3, 1.00000000e+00, 1.8, 0., 0),
(1, 4, 4.00000000e+00, 0.9, 0., 1),
(2, 0, 1.11111111e+02, 0.9, 0., 0),
(2, 1, 1.11111111e+02, 1.8, 0., 1),
(2, 2, 7.40740741e+01, 2.7, 0., 0),
(2, 3, 7.40740741e+01, 1.8, 0., 1),
(2, 4, 1.48148148e+02, 0.9, 0., 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[1.00000000e+00, 1.00000000e+00, 1.11111111e+02],
[1.10000000e+00, 2.00000000e+00, 0.00000000e+00],
[1.00000000e-01, 3.00000000e+00, 7.40740741e+01],
[0.00000000e+00, 4.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 1.48148148e+02]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[99.1, 99.1, 0.],
[98.92, 97.3, 200.],
[101.62, 94.6, 0.],
[101.8, 92.8, 133.33333333],
[101.8, 96.4, 0.]
]), index=price.index, columns=entries.columns)
)
def test_from_orders_size_type(self):
portfolio = vbt.Portfolio.from_orders(price, [1, 2, 3, 4, 5], size_type=SizeType.Shares)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 2., 2., 0., 0), (0, 2, 3., 3., 0., 0),
(0, 3, 4., 2., 0., 0), (0, 4, 5., 1., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio.shares,
pd.Series(np.array([ 1., 3., 6., 10., 15.]), index=price.index, name=price.name)
)
pd.testing.assert_series_equal(
portfolio.cash,
pd.Series(np.array([99., 95., 86., 78., 73.]), index=price.index, name=price.name)
)
portfolio2 = vbt.Portfolio.from_orders(price, [1, 2, 3, 4, 5], size_type=SizeType.TargetShares)
record_arrays_close(
portfolio2.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 1., 2., 0., 0),
(0, 2, 1., 3., 0., 0), (0, 3, 1., 2., 0., 0),
(0, 4, 1., 1., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio2.shares,
pd.Series(np.array([1., 2., 3., 4., 5.]), index=price.index, name=price.name)
)
pd.testing.assert_series_equal(
portfolio2.cash,
pd.Series(np.array([99., 97., 94., 92., 91.]), index=price.index, name=price.name)
)
portfolio3 = vbt.Portfolio.from_orders(price, [1, 2, 3, 4, 5], size_type=SizeType.Cash)
record_arrays_close(
portfolio3.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 1, 1., 2., 0., 0),
(0, 2, 1., 3., 0., 0), (0, 3, 2., 2., 0., 0),
(0, 4, 5., 1., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio3.shares,
pd.Series(np.array([ 1., 2., 3., 5., 10.]), index=price.index, name=price.name)
)
pd.testing.assert_series_equal(
portfolio3.cash,
pd.Series(np.array([99., 97., 94., 90., 85.]), index=price.index, name=price.name)
)
portfolio4 = vbt.Portfolio.from_orders(price, [1, 2, 3, 4, 5], size_type=SizeType.TargetCash)
record_arrays_close(
portfolio4.orders.records_arr,
np.array([
(0, 0, 99., 1., 0., 0), (0, 1, 0.5, 2., 0., 1),
(0, 2, 0.33333333, 3., 0., 1), (0, 3, 0.5, 2., 0., 1),
(0, 4, 1., 1., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio4.shares,
pd.Series(np.array([99., 98.5, 98.16666667, 97.66666667, 96.66666667]), index=price.index, name=price.name)
)
pd.testing.assert_series_equal(
portfolio4.cash,
pd.Series(np.array([1., 2., 3., 4., 5.]), index=price.index, name=price.name)
)
portfolio5 = vbt.Portfolio.from_orders(price, [1, 2, 3, 4, 5], size_type=SizeType.TargetValue)
record_arrays_close(
portfolio5.orders.records_arr,
np.array([
(0, 0, 1., 1., 0., 0), (0, 3, 1., 2., 0., 0),
(0, 4, 3., 1., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio5.shares,
pd.Series(np.array([1., 1., 1., 2., 5.]), index=price.index, name=price.name)
)
pd.testing.assert_series_equal(
portfolio5.cash,
pd.Series(np.array([99., 99., 99., 97., 94.]), index=price.index, name=price.name)
)
portfolio6 = vbt.Portfolio.from_orders(price, [0.1, 0.2, 0.3, 0.4, 0.5], size_type=SizeType.TargetPercent)
record_arrays_close(
portfolio6.orders.records_arr,
np.array([
(0, 0, 10., 1., 0., 0), (0, 1, 1., 2., 0., 0),
(0, 2, 1.1, 3., 0., 0), (0, 3, 9.68, 2., 0., 0),
(0, 4, 21.78, 1., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio6.shares,
pd.Series(np.array([10., 11., 12.1, 21.78, 43.56]), index=price.index, name=price.name)
)
pd.testing.assert_series_equal(
portfolio6.cash,
pd.Series(np.array([90., 88., 84.7, 65.34, 43.56]), index=price.index, name=price.name)
)
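        # Worked numbers for portfolio6 above (TargetPercent): starting from 100 cash, a 10%
        # target at price 1 buys 10 shares (cash 90); on the next bar the portfolio is worth 110,
        # so a 20% target means 22 in value, i.e. 11 shares, buying 1 more at price 2 (cash 88);
        # on the third bar 30% of the 121 value is 36.3, i.e. 12.1 shares, so 1.1 more are bought
        # at price 3 (cash 84.7), matching the asserted share and cash series.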
def test_from_order_func(self):
portfolio = vbt.Portfolio.from_order_func(
price,
order_func_nb,
price.values[:, None],
np.full(price.shape, 0.01)[:, None],
np.full(price.shape, 1)[:, None],
np.full(price.shape, 0.01)[:, None]
)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(0, 0, 1., 1.01, 1.0101, 0), (0, 1, 1., 1.98, 1.0198, 1),
(0, 2, 1., 3.03, 1.0303, 0), (0, 3, 1., 1.98, 1.0198, 1),
(0, 4, 1., 1.01, 1.0101, 0)
], dtype=order_dt)
)
pd.testing.assert_series_equal(
portfolio.shares,
pd.Series(np.array([1., 0., 1., 0., 1.]), index=price.index, name=price.name)
)
pd.testing.assert_series_equal(
portfolio.cash,
pd.Series(np.array([97.9799, 98.9401, 94.8798, 95.84, 93.8199]), index=price.index, name=price.name)
)
portfolio2 = vbt.Portfolio.from_order_func(
price.vbt.tile(3, keys=entries.columns),
order_func_nb,
price.vbt.tile(3).values,
np.full((price.shape[0], 3), 0.01),
np.full((price.shape[0], 3), 1),
np.full((price.shape[0], 3), 0.01)
)
record_arrays_close(
portfolio2.orders.records_arr,
np.array([
(0, 0, 1., 1.01, 1.0101, 0), (0, 1, 1., 1.98, 1.0198, 1),
(0, 2, 1., 3.03, 1.0303, 0), (0, 3, 1., 1.98, 1.0198, 1),
(0, 4, 1., 1.01, 1.0101, 0), (1, 0, 2., 1.01, 1.0202, 0),
(1, 1, 2., 1.98, 1.0396, 1), (1, 2, 2., 3.03, 1.0606, 0),
(1, 3, 2., 1.98, 1.0396, 1), (1, 4, 2., 1.01, 1.0202, 0),
(2, 0, 3., 1.01, 1.0303, 0), (2, 1, 3., 1.98, 1.0594, 1),
(2, 2, 3., 3.03, 1.0909, 0), (2, 3, 3., 1.98, 1.0594, 1),
(2, 4, 3., 1.01, 1.0303, 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio2.shares,
pd.DataFrame(np.array([
[1., 2., 3.],
[0., 0., 0.],
[1., 2., 3.],
[0., 0., 0.],
[1., 2., 3.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio2.cash,
pd.DataFrame(np.array([
[97.9799, 96.9598, 95.9397],
[98.9401, 99.8802, 100.8203],
[94.8798, 92.7596, 90.6394],
[95.84, 95.68, 95.52],
[93.8199, 92.6398, 91.4597]
]), index=price.index, columns=entries.columns)
)
portfolio3 = vbt.Portfolio.from_order_func(
price.vbt.tile(3, keys=entries.columns),
order_func_nb,
price.vbt.tile(3).values,
np.full((price.shape[0], 3), 0.01),
np.full((price.shape[0], 3), 1),
np.full((price.shape[0], 3), 0.01),
row_wise=True
)
record_arrays_close(
portfolio2.orders.records_arr,
portfolio3.orders.records_arr
)
pd.testing.assert_frame_equal(
portfolio2.shares,
portfolio3.shares
)
pd.testing.assert_frame_equal(
portfolio2.cash,
portfolio3.cash
)
@njit
def row_prep_func_nb(rc, price, fees, fixed_fees, slippage):
np.random.seed(rc.i)
w = np.random.uniform(0, 1, size=rc.target_shape[1])
return (w / np.sum(w),)
@njit
def order_func2_nb(oc, w, price, fees, fixed_fees, slippage):
current_value = oc.run_cash / price[oc.i, oc.col] + oc.run_shares
target_size = w[oc.col] * current_value
return vbt.portfolio.nb.Order(target_size - oc.run_shares, SizeType.Shares,
price[oc.i, oc.col], fees[oc.i, oc.col], fixed_fees[oc.i, oc.col],
slippage[oc.i, oc.col])
portfolio4 = vbt.Portfolio.from_order_func(
price.vbt.tile(3, keys=entries.columns),
order_func2_nb,
price.vbt.tile(3).values,
np.full((price.shape[0], 3), 0.01),
np.full((price.shape[0], 3), 1),
np.full((price.shape[0], 3), 0.01),
row_wise=True,
row_prep_func_nb=row_prep_func_nb
)
record_arrays_close(
portfolio4.orders.records_arr,
np.array([
(0, 0, 29.39915509, 1.01, 1.29693147, 0),
(0, 1, 5.97028539, 1.98, 1.11821165, 1),
(0, 2, 1.87882685, 2.97, 1.05580116, 1),
(0, 3, 1.07701246, 2.02, 1.02175565, 0),
(0, 4, 17.68302427, 1.01, 1.17859855, 0),
(1, 0, 38.31167227, 1.01, 1.38694789, 0),
(1, 1, 4.92245855, 2.02, 1.09943366, 0),
(1, 2, 41.70851928, 2.97, 2.23874302, 1),
(1, 3, 38.12586746, 2.02, 1.77014252, 0),
(1, 4, 10.7427999, 0.99, 1.10635372, 1),
(2, 0, 32.28917264, 1.01, 1.32612064, 0),
(2, 1, 32.28260453, 1.98, 1.63919557, 1),
(2, 2, 23.2426913, 3.03, 1.70425355, 0),
(2, 3, 13.60989657, 1.98, 1.26947595, 1),
(2, 4, 26.15949742, 1.01, 1.26421092, 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio4.shares,
pd.DataFrame(np.array([
[2.93991551e+01, 3.83116723e+01, 3.22891726e+01],
[2.34288697e+01, 4.32341308e+01, 6.56811360e-03],
[2.15500428e+01, 1.52561154e+00, 2.32492594e+01],
[2.26270553e+01, 3.96514790e+01, 9.63936284e+00],
[4.03100796e+01, 2.89086791e+01, 3.57988603e+01]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio4.cash,
pd.DataFrame(np.array([
[69.00992189, 59.91826312, 66.06181499],
[79.71287532, 48.87546318, 128.34217638],
[84.23718992, 170.51102243, 56.2125682],
[81.0398691, 91.72662765, 81.89068746],
[62.00141604, 101.25564583, 54.20538413]
]), index=price.index, columns=entries.columns)
)
def test_from_order_func_init_capital(self):
portfolio = vbt.Portfolio.from_order_func(
price.vbt.tile(3, keys=entries.columns),
order_func_nb,
price.vbt.tile(3).values,
np.full((price.shape[0], 3), 0.01),
np.full((price.shape[0], 3), 1),
np.full((price.shape[0], 3), 0.01),
init_capital=[1, 10, 100]
)
record_arrays_close(
portfolio.orders.records_arr,
np.array([
(1, 0, 2., 1.01, 1.0202, 0), (1, 1, 2., 1.98, 1.0396, 1),
(1, 2, 2., 3.03, 1.0606, 0), (1, 3, 2., 1.98, 1.0396, 1),
(1, 4, 2., 1.01, 1.0202, 0), (2, 0, 3., 1.01, 1.0303, 0),
(2, 1, 3., 1.98, 1.0594, 1), (2, 2, 3., 3.03, 1.0909, 0),
(2, 3, 3., 1.98, 1.0594, 1), (2, 4, 3., 1.01, 1.0303, 0)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(
portfolio.shares,
pd.DataFrame(np.array([
[0., 2., 3.],
[0., 0., 0.],
[0., 2., 3.],
[0., 0., 0.],
[0., 2., 3.]
]), index=price.index, columns=entries.columns)
)
pd.testing.assert_frame_equal(
portfolio.cash,
pd.DataFrame(np.array([
[1., 6.9598, 95.9397],
[1., 9.8802, 100.8203],
[1., 2.7596, 90.6394],
[1., 5.68, 95.52],
[1., 2.6398, 91.4597]
]), index=price.index, columns=entries.columns)
)
def test_single_params(self):
portfolio = vbt.Portfolio.from_signals(
price, entries, exits,
init_capital=init_capital[0],
freq='1 days',
year_freq='252 days',
levy_alpha=levy_alpha[0],
risk_free=risk_free[0],
required_return=required_return[0],
cutoff=cutoff[0],
factor_returns=factor_returns['a']
)
pd.testing.assert_series_equal(
portfolio.init_capital,
pd.Series(np.full(3, init_capital[0]), index=entries.columns)
)
assert portfolio.freq == day_dt
assert portfolio.year_freq == 252 * day_dt
assert portfolio.levy_alpha == levy_alpha[0]
assert portfolio.risk_free == risk_free[0]
assert portfolio.required_return == required_return[0]
assert portfolio.cutoff == cutoff[0]
pd.testing.assert_series_equal(portfolio.factor_returns, factor_returns['a'])
# indexing
assert portfolio['a'].init_capital == init_capital[0]
assert portfolio.freq == day_dt
assert portfolio.year_freq == 252 * day_dt
assert portfolio['a'].levy_alpha == levy_alpha[0]
assert portfolio['a'].risk_free == risk_free[0]
assert portfolio['a'].required_return == required_return[0]
assert portfolio['a'].cutoff == cutoff[0]
pd.testing.assert_series_equal(portfolio['a'].factor_returns, factor_returns['a'])
def test_multiple_params(self):
pd.testing.assert_series_equal(
test_portfolio.init_capital,
pd.Series(init_capital, index=entries.columns)
)
assert test_portfolio.freq == day_dt
assert test_portfolio.year_freq == 252 * day_dt
np.testing.assert_array_equal(test_portfolio.levy_alpha, np.array(levy_alpha))
np.testing.assert_array_equal(test_portfolio.risk_free, np.array(risk_free))
np.testing.assert_array_equal(test_portfolio.required_return, np.array(required_return))
np.testing.assert_array_equal(test_portfolio.cutoff, np.array(cutoff))
pd.testing.assert_frame_equal(test_portfolio.factor_returns, factor_returns)
# indexing
assert test_portfolio['a'].init_capital == init_capital[0]
assert test_portfolio['a'].freq == day_dt
assert test_portfolio['a'].year_freq == 252 * day_dt
assert test_portfolio['a'].levy_alpha == levy_alpha[0]
assert test_portfolio['a'].risk_free == risk_free[0]
assert test_portfolio['a'].required_return == required_return[0]
assert test_portfolio['a'].cutoff == cutoff[0]
pd.testing.assert_series_equal(test_portfolio['a'].factor_returns, factor_returns['a'])
def test_indexing(self):
pd.testing.assert_series_equal(
test_portfolio.iloc[:, 0].main_price,
test_portfolio.main_price.iloc[:, 0]
)
pd.testing.assert_series_equal(
test_portfolio.loc[:, 'a'].main_price,
test_portfolio.main_price.loc[:, 'a']
)
pd.testing.assert_series_equal(
test_portfolio['a'].main_price,
test_portfolio.main_price['a']
)
pd.testing.assert_frame_equal(
test_portfolio.iloc[:, [0, 1]].main_price,
test_portfolio.main_price.iloc[:, [0, 1]]
)
pd.testing.assert_frame_equal(
test_portfolio.loc[:, ['a', 'b']].main_price,
test_portfolio.main_price.loc[:, ['a', 'b']]
)
pd.testing.assert_frame_equal(
test_portfolio[['a', 'b']].main_price,
test_portfolio.main_price[['a', 'b']]
)
with pytest.raises(Exception) as e_info:
_ = test_portfolio.iloc[::2, :] # changing time not supported
_ = test_portfolio.iloc[np.arange(test_portfolio.wrapper.shape[0]), :] # won't change time
def test_records(self):
# orders
record_arrays_close(
test_portfolio.orders.records_arr,
np.array([
(0, 0, 99.00990099, 1., 0.99009901, 0),
(0, 3, 99.00990099, 2., 1.98019802, 1),
(1, 0, 198.01980198, 1., 1.98019802, 0),
(1, 1, 198.01980198, 2., 3.96039604, 1),
(1, 2, 129.39907852, 3., 3.88197236, 0),
(1, 3, 129.39907852, 2., 2.58798157, 1),
(1, 4, 253.67344106, 1., 2.53673441, 0),
(2, 1, 148.51485149, 2., 2.97029703, 0),
(2, 2, 148.51485149, 3., 4.45544554, 1),
(2, 3, 218.36094501, 2., 4.3672189, 0),
(2, 4, 218.36094501, 1., 2.18360945, 1)
], dtype=order_dt)
)
pd.testing.assert_frame_equal(test_portfolio.orders.main_price, test_portfolio.main_price)
assert test_portfolio.orders.wrapper.freq == day_dt
# trades
record_arrays_close(
test_portfolio.trades.records_arr,
np.array([
(0, 99.00990099, 0, 1., 0.99009901, 3, 2., 1.98019802, 96.03960396, 0.96039604, 1, 0),
(1, 198.01980198, 0, 1., 1.98019802, 1, 2., 3.96039604, 192.07920792, 0.96039604, 1, 1),
(1, 129.39907852, 2, 3., 3.88197236, 3, 2., 2.58798157, -135.86903245, -0.34653465, 1, 2),
(1, 253.67344106, 4, 1., 2.53673441, 4, 1., 0., -2.53673441, -0.00990099, 0, 3),
(2, 148.51485149, 1, 2., 2.97029703, 2, 3., 4.45544554, 141.08910891, 0.47029703, 1, 4),
(2, 218.36094501, 3, 2., 4.3672189, 4, 1., 2.18360945, -224.91177336, -0.50990099, 1, 5)
], dtype=trade_dt)
)
pd.testing.assert_frame_equal(test_portfolio.trades.main_price, test_portfolio.main_price)
assert test_portfolio.trades.wrapper.freq == day_dt
# positions
record_arrays_close(
test_portfolio.positions.records_arr,
np.array([
(0, 99.00990099, 0, 1., 0.99009901, 3, 2., 1.98019802, 96.03960396, 0.96039604, 1),
(1, 198.01980198, 0, 1., 1.98019802, 1, 2., 3.96039604, 192.07920792, 0.96039604, 1),
(1, 129.39907852, 2, 3., 3.88197236, 3, 2., 2.58798157, -135.86903245, -0.34653465, 1),
(1, 253.67344106, 4, 1., 2.53673441, 4, 1., 0., -2.53673441, -0.00990099, 0),
(2, 148.51485149, 1, 2., 2.97029703, 2, 3., 4.45544554, 141.08910891, 0.47029703, 1),
(2, 218.36094501, 3, 2., 4.3672189, 4, 1., 2.18360945, -224.91177336, -0.50990099, 1)
], dtype=position_dt)
)
        pd.testing.assert_frame_equal(test_portfolio.positions.main_price, test_portfolio.main_price)
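# Standalone aside (illustrative only, not part of the test suite above): pd.testing.assert_frame_equal
# is the pandas helper these tests lean on; it raises AssertionError on any mismatch in values,
# index, columns or dtype, and passes silently otherwise.
import numpy as np
import pandas as pd
left = pd.DataFrame({"a": [1.0, 2.0]})
right = pd.DataFrame({"a": np.array([1.0, 2.0])})
pd.testing.assert_frame_equal(left, right)  # silent when equal in values, index, columns and dtype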
import joblib
import pandas as pd
from hgtk import text, letter, checker
from .const import ALPHABET_LIST, CHOSUNG_LIST, JONGSUNG_LIST, JUNGSUNG_LIST, NUMBER_LIST, SPECIAL_CHARACTERS_LIST
CHOSUNG = 3
JUNGSUNG = 2
JONGSUNG = 1
class ModelByWord:
def __init__(self):
text.decompose = self.__decompose
self._model = joblib.load("./dataset/model_sgd.pkl")
self._word_list = [CHOSUNG_LIST, JUNGSUNG_LIST, JONGSUNG_LIST,
SPECIAL_CHARACTERS_LIST, NUMBER_LIST, ALPHABET_LIST]
@staticmethod
def __decompose(text, latin_filter=True, compose_code=u" "):
result = u""
for c in list(text):
if checker.is_hangul(c):
if checker.is_jamo(c):
result = result + c + compose_code
else:
result = result + "".join(letter.decompose(c)) + compose_code
else:
                if latin_filter:  # outside Hangul, include only characters within the Latin-1 range (Hangul + English)
if checker.is_latin1(c):
result = result + c + compose_code
else:
result = result + c + compose_code
return result
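        # Example of the decomposition above (illustrative; exact output depends on hgtk's jamo
        # decomposition): __decompose("안녕 a1") would yield roughly "ㅇㅏㄴ ㄴㅕㅇ   a 1 ", where
        # each Hangul syllable is split into its jamo and every character is followed by
        # compose_code, so downstream code can split the result on spaces.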
def _preprocess(self, comment):
comment_decompose = self.__decompose(comment)
removed_space_word = list(filter(lambda word: word != ' ', comment_decompose.split(' ')))
split_word = list(filter(lambda element: element != '', removed_space_word))
df_result = self._word_store_in_dataframe(split_word)
return df_result
def predict(self, comment):
data = self._preprocess(comment)
predict = self._model.predict(data)
return predict
def _word_store_in_dataframe(self, split_word):
df_list = ["cho", "jung", "jong", "special_characters", "number", "alphabet"]
temp_dict = {}
for key, word_type in zip(df_list, self._word_list):
temp_dict[key] = pd.DataFrame(0, columns=word_type, index=range(1), dtype=float)
total_letter_count = 0
for word in split_word:
temp_dict, letter_count = self._insert_dataframe(temp_dict, word)
total_letter_count += letter_count
        result = pd.concat(temp_dict, axis=1)
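# Standalone aside (hypothetical frames, not the model's real feature tables): pd.concat on a
# dict keyed by feature group, as _word_store_in_dataframe does above, produces a single row
# with MultiIndex columns of (group, symbol).
import pandas as pd
parts = {
    "cho": pd.DataFrame(0, columns=["ㄱ", "ㄴ"], index=range(1), dtype=float),
    "number": pd.DataFrame(0, columns=["0", "1"], index=range(1), dtype=float),
}
combined = pd.concat(parts, axis=1)
# combined.columns -> MultiIndex([('cho', 'ㄱ'), ('cho', 'ㄴ'), ('number', '0'), ('number', '1')])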
import os
from datetime import datetime
import pandas as pd
from django.test import TestCase
from series_tiempo_ar_api.libs.indexing import constants
from series_tiempo_ar_api.libs.indexing.constants import PANDAS_DAY, PANDAS_MONTH, PANDAS_WEEK, PANDAS_QUARTER, \
PANDAS_SEMESTER, PANDAS_YEAR
from series_tiempo_ar_api.libs.indexing.indexer.operations import index_transform
SAMPLES_DIR = os.path.join(os.path.dirname(__file__), 'samples')
class DailyPeriodicityDistributionIndexingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.short_dist = {
'data': os.path.join(SAMPLES_DIR, 'daily_periodicity.csv'),
'freq': constants.DAILY_FREQ,
'year_ago_offset': {
'daily': 365,
'week': 53,
'month': 12,
'quarter': 4,
'semester': 2,
'year': 1,
},
'columns': ["tasas_interes_call", "tasas_interes_badlar", "tasas_interes_pm"],
'index': 'dist1',
'empty_intervals': ['quarter', 'semester', 'year']
}
cls.large_dist = {
'data': os.path.join(SAMPLES_DIR, 'large_daily_periodicity.csv'),
'freq': constants.DAILY_FREQ,
'year_ago_offset': {
'daily': 365,
'week': 53,
'month': 12,
'quarter': 4,
'semester': 2,
'year': 1,
},
'columns': ["tasas_interes_call", "tasas_interes_badlar", "tasas_interes_pm"],
'index': 'dist',
'empty_intervals': []
}
cls.short_dist_data = _get_dist_data(cls.short_dist)
cls.large_dist_data = _get_dist_data(cls.large_dist)
# short dist tests
def test_short_dist_quarter_semester_year_are_empty(self):
for empty_interval_key in self.short_dist['empty_intervals']:
self.assertEqual(len(self.short_dist_data[empty_interval_key].values), 0)
def test_short_dist_daily_has_value_column(self):
self._assert_key_only_included_in(self.short_dist_data["daily"],
constants.VALUE,
0)
def test_short_dist_week_has_value_column(self):
self._assert_key_only_included_in(self.short_dist_data["week"],
constants.VALUE,
0)
def test_short_dist_month_has_value_column(self):
self._assert_key_only_included_in(self.short_dist_data["month"],
constants.VALUE,
0)
def test_short_dist_daily_has_change_column(self):
self._assert_key_only_included_in(self.short_dist_data["daily"],
constants.CHANGE,
1)
def test_short_dist_week_has_change_column(self):
self._assert_key_only_included_in(self.short_dist_data["week"],
constants.CHANGE,
1)
def test_short_dist_month_has_change_column(self):
self._assert_key_only_included_in(self.short_dist_data["month"],
constants.CHANGE,
1)
def test_short_dist_daily_has_pct_change_column(self):
self._assert_key_only_included_in(self.short_dist_data["daily"],
constants.PCT_CHANGE,
1)
def test_short_dist_week_has_pct_change_column(self):
self._assert_key_only_included_in(self.short_dist_data["week"],
constants.PCT_CHANGE,
1)
def test_short_dist_month_has_pct_change_column(self):
self._assert_key_only_included_in(self.short_dist_data["month"],
constants.PCT_CHANGE,
1)
def test_short_dist_daily_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.short_dist_data["daily"],
constants.CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["daily"])
def test_short_dist_week_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.short_dist_data["week"],
constants.CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["week"])
def test_short_dist_month_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.short_dist_data["month"],
constants.CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["month"])
def test_short_dist_daily_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.short_dist_data["daily"],
constants.PCT_CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["daily"])
def test_short_dist_week_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.short_dist_data["week"],
constants.PCT_CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["week"])
def test_short_dist_month_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.short_dist_data["month"],
constants.PCT_CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["month"])
def test_short_dist_daily_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.short_dist_data["daily"],
constants.CHANGE_BEG_YEAR,
0)
def test_short_dist_week_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.short_dist_data["week"],
constants.CHANGE_BEG_YEAR,
0)
def test_short_dist_month_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.short_dist_data["month"],
constants.CHANGE_BEG_YEAR,
0)
def test_short_dist_daily_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.short_dist_data["daily"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_short_dist_week_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.short_dist_data["week"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_short_dist_month_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.short_dist_data["month"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_short_dist_daily_has_correct_change_value(self):
self._assert_value_is_correct(self.short_dist_data["daily"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_short_dist_week_has_correct_change_value(self):
self._assert_value_is_correct(self.short_dist_data["week"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_short_dist_month_has_correct_change_value(self):
self._assert_value_is_correct(self.short_dist_data["month"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_short_dist_daily_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.short_dist_data["daily"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_week_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.short_dist_data["week"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_month_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.short_dist_data["month"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_daily_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.short_dist_data["daily"],
constants.CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["daily"],
self.short_dist['year_ago_offset']["daily"],
lambda x, y: x - y)
def test_short_dist_week_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.short_dist_data["week"],
constants.CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["week"],
self.short_dist['year_ago_offset']["week"],
lambda x, y: x - y)
def test_short_dist_month_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.short_dist_data["month"],
constants.CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["month"],
self.short_dist['year_ago_offset']["month"],
lambda x, y: x - y)
def test_short_dist_daily_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.short_dist_data["daily"],
constants.PCT_CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["daily"],
self.short_dist['year_ago_offset']["daily"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_week_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.short_dist_data["week"],
constants.PCT_CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["week"],
self.short_dist['year_ago_offset']["week"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_month_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.short_dist_data["month"],
constants.PCT_CHANGE_YEAR_AGO,
self.short_dist['year_ago_offset']["month"],
self.short_dist['year_ago_offset']["month"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_daily_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.short_dist_data["daily"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_short_dist_week_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.short_dist_data["week"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_short_dist_month_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.short_dist_data["month"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_short_dist_daily_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.short_dist_data["daily"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_week_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.short_dist_data["week"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_short_dist_month_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.short_dist_data["month"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
# long dist tests
def test_large_dist_quarter_semester_year_are_empty(self):
for empty_interval_key in self.large_dist['empty_intervals']:
self.assertEqual(len(self.large_dist_data[empty_interval_key].values), 0)
def test_large_dist_daily_has_value_column(self):
self._assert_key_only_included_in(self.large_dist_data["daily"],
constants.VALUE,
0)
def test_large_dist_week_has_value_column(self):
self._assert_key_only_included_in(self.large_dist_data["week"],
constants.VALUE,
0)
def test_large_dist_month_has_value_column(self):
self._assert_key_only_included_in(self.large_dist_data["month"],
constants.VALUE,
0)
def test_large_dist_quarter_has_value_column(self):
self._assert_key_only_included_in(self.large_dist_data["quarter"],
constants.VALUE,
0)
def test_large_dist_semester_has_value_column(self):
self._assert_key_only_included_in(self.large_dist_data["semester"],
constants.VALUE,
0)
def test_large_dist_year_has_value_column(self):
self._assert_key_only_included_in(self.large_dist_data["year"],
constants.VALUE,
0)
def test_large_dist_daily_has_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["daily"],
constants.CHANGE,
1)
def test_large_dist_week_has_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["week"],
constants.CHANGE,
1)
def test_large_dist_month_has_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["month"],
constants.CHANGE,
1)
def test_large_dist_quarter_has_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["quarter"],
constants.CHANGE,
1)
def test_large_dist_semester_has_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["semester"],
constants.CHANGE,
1)
def test_large_dist_year_has_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["year"],
constants.CHANGE,
1)
def test_large_dist_daily_has_pct_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["daily"],
constants.PCT_CHANGE,
1)
def test_large_dist_week_has_pct_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["week"],
constants.PCT_CHANGE,
1)
def test_large_dist_month_has_pct_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["month"],
constants.PCT_CHANGE,
1)
def test_large_dist_quarter_has_pct_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["quarter"],
constants.PCT_CHANGE,
1)
def test_large_dist_semester_has_pct_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["semester"],
constants.PCT_CHANGE,
1)
def test_large_dist_year_has_pct_change_column(self):
self._assert_key_only_included_in(self.large_dist_data["year"],
constants.PCT_CHANGE,
1)
def test_large_dist_daily_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["daily"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["daily"])
def test_large_dist_month_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["month"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["month"])
def test_large_dist_quarter_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["quarter"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["quarter"])
def test_large_dist_semester_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["semester"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["semester"])
def test_large_dist_year_has_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["year"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["year"])
def test_large_dist_daily_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["daily"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["daily"])
def test_large_dist_month_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["month"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["month"])
def test_large_dist_quarter_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["quarter"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["quarter"])
def test_large_dist_semester_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["semester"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["semester"])
def test_large_dist_year_has_pct_change_year_ago_column(self):
self._assert_key_only_included_in(self.large_dist_data["year"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["year"])
def test_large_dist_daily_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["daily"],
constants.CHANGE_BEG_YEAR,
0)
def test_large_dist_month_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["month"],
constants.CHANGE_BEG_YEAR,
0)
def test_large_dist_quarter_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["quarter"],
constants.CHANGE_BEG_YEAR,
0)
def test_large_dist_semester_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["semester"],
constants.CHANGE_BEG_YEAR,
0)
def test_large_dist_year_has_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["year"],
constants.CHANGE_BEG_YEAR,
0)
def test_large_dist_daily_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["daily"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_large_dist_month_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["month"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_large_dist_quarter_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["quarter"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_large_dist_semester_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["semester"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_large_dist_year_has_pct_change_beg_of_year_column(self):
self._assert_key_only_included_in(self.large_dist_data["year"],
constants.PCT_CHANGE_BEG_YEAR,
0)
def test_large_dist_daily_has_correct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["daily"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_large_dist_week_has_correct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["week"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_large_dist_month_has_correct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["month"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_large_dist_quarter_has_correct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["quarter"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_large_dist_semester_has_correct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["semester"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_large_dist_year_has_correct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["year"],
constants.CHANGE,
1,
1,
lambda x, y: x - y)
def test_large_dist_daily_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["daily"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_week_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["week"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_month_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["month"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_quarter_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["quarter"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_semester_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["semester"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_year_has_correct_pct_change_value(self):
self._assert_value_is_correct(self.large_dist_data["year"],
constants.PCT_CHANGE,
1,
1,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_daily_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["daily"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["daily"],
self.large_dist['year_ago_offset']["daily"],
lambda x, y: x - y)
def test_large_dist_month_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["month"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["month"],
self.large_dist['year_ago_offset']["month"],
lambda x, y: x - y)
def test_large_dist_quarter_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["quarter"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["quarter"],
self.large_dist['year_ago_offset']["quarter"],
lambda x, y: x - y)
def test_large_dist_semester_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["semester"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["semester"],
self.large_dist['year_ago_offset']["semester"],
lambda x, y: x - y)
def test_large_dist_year_has_correct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["year"],
constants.CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["year"],
self.large_dist['year_ago_offset']["year"],
lambda x, y: x - y)
def test_large_dist_daily_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["daily"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["daily"],
self.large_dist['year_ago_offset']["daily"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_month_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["month"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["month"],
self.large_dist['year_ago_offset']["month"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_quarter_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["quarter"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["quarter"],
self.large_dist['year_ago_offset']["quarter"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_semester_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["semester"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["semester"],
self.large_dist['year_ago_offset']["semester"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_year_has_correct_pct_change_a_year_ago_value(self):
self._assert_value_is_correct(self.large_dist_data["year"],
constants.PCT_CHANGE_YEAR_AGO,
self.large_dist['year_ago_offset']["year"],
self.large_dist['year_ago_offset']["year"],
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_daily_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["daily"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_large_dist_month_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["month"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_large_dist_quarter_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["quarter"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_large_dist_semester_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["semester"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_large_dist_year_has_correct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["year"],
constants.CHANGE_BEG_YEAR,
lambda x, y: x - y)
def test_large_dist_daily_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["daily"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_month_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["month"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_quarter_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["quarter"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_semester_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["semester"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
def test_large_dist_year_has_correct_pct_change_beg_of_year_value(self):
self._assert_beg_of_year_value_is_correct(self.large_dist_data["year"],
constants.PCT_CHANGE_BEG_YEAR,
lambda x, y: (x - y) / y if y != 0 else 0)
def _assert_key_only_included_in(self, _col, _key, _from):
for row_index in range(0, min(_from, len(_col))):
self.assertNotIn(_key, _col[row_index]["_source"])
for row_index in range(_from, len(_col)):
self.assertIn(_key, _col[row_index]["_source"])
def _assert_value_is_correct(self, col, _key, _from, _offset, _function):
for row_index in range(_from, len(col)):
relative_index = row_index - _offset
actual_values = col[row_index]["_source"]
actual_value = actual_values[constants.VALUE]
relative_value = col[relative_index]["_source"][constants.VALUE]
expected_value = _function(actual_value, relative_value)
self.assertAlmostEqual(expected_value, actual_values[_key])
def _assert_year_ago_value_is_correct(self, data, _dict_keys, _key, _from, _offset, _function):
for dict_key in _dict_keys:
col = data[dict_key]
for row_index in range(_from[dict_key], len(col)):
relative_index = row_index - _offset[dict_key]
actual_values = col[row_index]["_source"]
actual_value = actual_values[constants.VALUE]
relative_value = col[relative_index]["_source"][constants.VALUE]
expected_value = _function(actual_value, relative_value)
self.assertAlmostEqual(expected_value, actual_values[_key])
def _assert_beg_of_year_value_is_correct(self, col, _key, _function):
has_beg_value = False
beg_of_year_value = 0
for row in col:
actual_values = row["_source"]
actual_date = datetime.strptime(actual_values["timestamp"], '%Y-%m-%d').date()
actual_value = actual_values[constants.VALUE]
if actual_date.month == 1 and actual_date.day == 1:
has_beg_value = True
beg_of_year_value = actual_value
expected_value = _function(actual_value, beg_of_year_value) if has_beg_value else 0
self.assertAlmostEqual(expected_value, actual_values[_key])
def _get_dist_data(dist):
df = _get_data_frame(dist["data"], dist['freq'], dist['columns'])
col = df[df.columns[0]]
series_id = col.name
index = dist['index']
data = dict()
data["daily"] = index_transform(col, lambda x: x.mean(), index, series_id, PANDAS_DAY, 'avg')
data["week"] = index_transform(col, lambda x: x.mean(), index, series_id, PANDAS_WEEK, 'avg')
data["month"] = index_transform(col, lambda x: x.mean(), index, series_id, PANDAS_MONTH, 'avg')
data["quarter"] = index_transform(col, lambda x: x.mean(), index, series_id, PANDAS_QUARTER, 'avg')
data["semester"] = index_transform(col, lambda x: x.mean(), index, series_id, PANDAS_SEMESTER, 'avg')
data["year"] = index_transform(col, lambda x: x.mean(), index, series_id, PANDAS_YEAR, 'avg')
return data
def _get_data_frame(distribution_data, freq, columns):
df = _read_catalog_csv(distribution_data)
data = df.values
new_index = pd.date_range(df.index[0], df.index[-1], freq=freq)
# Check for business-day series: fall back to business-day frequency when the daily range has more dates than the data
if freq == constants.DAILY_FREQ and new_index.size > df.index.size:
new_index = pd.date_range(df.index[0],
df.index[-1],
freq=constants.BUSINESS_DAILY_FREQ)
return pd.DataFrame(index=new_index, data=data, columns=columns)
def _read_catalog_csv(distribution_data):
return | pd.read_csv(distribution_data, index_col="indice_tiempo") | pandas.read_csv |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import itertools
import warnings
from collections import defaultdict
from typing import Callable, Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
clone as clone_estimator,
)
from sklearn.utils import check_random_state as sklearn_check_random_state
from ..utils import column_or_1d, convert_to_tensor_or_dataframe
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ... import opcodes, tensor as mt
from ...core import OutputType, get_output_types, recursive_tile
from ...core.context import Context
from ...core.operand import OperandStage
from ...dataframe.core import DATAFRAME_TYPE
from ...dataframe.utils import parse_index
from ...deploy.oscar.session import execute
from ...serialization.serializables import (
AnyField,
BoolField,
DictField,
Int8Field,
Int64Field,
Float32Field,
TupleField,
FunctionField,
ReferenceField,
FieldTypes,
)
from ...tensor.core import TENSOR_CHUNK_TYPE
from ...tensor.random import RandomStateField
from ...tensor.utils import gen_random_seeds
from ...typing import TileableType
from ...utils import has_unknown_shape
from ..operands import LearnOperand, LearnOperandMixin, LearnShuffleProxy
from ..utils.shuffle import LearnShuffle
def _extract_bagging_io(io_list: Iterable, op: LearnOperand, output: bool = False):
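# Unpack a flat list of bagging inputs/outputs into the slots
# (samples, labels, weights, feature_indices); the op's with_labels /
# with_weights / with_feature_indices flags decide which slots are present.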
if not isinstance(io_list, Iterable):
io_list = [io_list]
input_iter = iter(io_list)
out = [
next(input_iter),
next(input_iter) if op.with_labels else None,
next(input_iter) if op.with_weights else None,
next(input_iter) if output and op.with_feature_indices else None,
]
return out
def _get_by_iloc(x, idx, axis=0):
if hasattr(x, "iloc"):
item_getter = x.iloc
else:
item_getter = x
if axis == 0:
return item_getter[idx]
else:
return item_getter[:, idx]
def _concat_on_axis(data_list, axis=0, out_chunk=None):
if isinstance(out_chunk, TENSOR_CHUNK_TYPE):
return np.concatenate(data_list, axis=axis)
else:
return pd.concat(data_list, axis=axis)
def _concat_by_row(row, out_chunk=None):
arr = np.empty((1,), dtype=object)
arr[0] = _concat_on_axis(row.tolist(), axis=0, out_chunk=out_chunk)
return arr
def _set_random_states(estimator, random_state=None):
random_state = sklearn_check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == "random_state" or key.endswith("__random_state"):
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
if to_set:
estimator.set_params(**to_set)
def _make_estimator(estimator, random_state=None):
"""Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone_estimator(estimator)
if random_state is not None:
_set_random_states(estimator, random_state)
return estimator
class BaggingSample(LearnShuffle, LearnOperandMixin):
_op_type_ = opcodes.BAGGING_SHUFFLE_SAMPLE
n_estimators: int = Int64Field("n_estimators")
max_samples = AnyField("max_samples")
max_features = AnyField("max_features")
bootstrap: bool = BoolField("bootstrap")
bootstrap_features: bool = BoolField("bootstrap_features")
random_state = RandomStateField("random_state")
sample_random_state = RandomStateField("sample_random_state")
feature_random_state = RandomStateField("feature_random_state")
reducer_ratio: float = Float32Field("reducer_ratio")
n_reducers: int = Int64Field("n_reducers", default=None)
column_offset: int = Int64Field("column_offset", default=None)
chunk_shape: Tuple[int] = TupleField("chunk_shape", FieldTypes.int64)
with_labels: bool = BoolField("with_labels")
with_weights: bool = BoolField("with_weights")
with_feature_indices: bool = BoolField("with_feature_indices")
def __init__(
self,
max_samples: Union[int, float] = 1.0,
max_features: Union[int, float] = 1.0,
bootstrap: bool = True,
bootstrap_features: bool = False,
random_state: np.random.RandomState = None,
reducer_ratio: float = 1.0,
**kw,
):
super().__init__(
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
max_samples=max_samples,
max_features=max_features,
reducer_ratio=reducer_ratio,
random_state=random_state,
**kw,
)
if self.random_state is None:
self.random_state = np.random.RandomState()
@property
def output_limit(self) -> int:
if self.stage != OperandStage.map:
return 1 + self.with_labels + self.with_weights + self.with_feature_indices
return 1
def __call__(
self,
in_sample: TileableType,
in_labels: Optional[TileableType] = None,
in_weights: Optional[TileableType] = None,
):
self._output_types = get_output_types(in_sample, in_labels, in_weights)
self.with_labels = in_labels is not None
self.with_weights = in_weights is not None
axis_keep_shape = [
isinstance(self.max_samples, float) and self.max_samples == 1.0,
isinstance(self.max_features, float) and self.max_features == 1.0,
]
self.with_feature_indices = not axis_keep_shape[1] or self.bootstrap_features
if self.with_feature_indices:
self._output_types += (OutputType.tensor,)
new_shape = tuple(
s if keep_shape else np.nan
for s, keep_shape in zip(in_sample.shape, axis_keep_shape)
)
kws = []
data_params = in_sample.params
data_params["shape"] = new_shape
kws.append(data_params)
if in_labels is not None:
labels_params = in_labels.params
labels_params["shape"] = (new_shape[0],)
kws.append(labels_params)
if in_weights is not None:
weights_params = in_weights.params
weights_params["shape"] = (new_shape[0],)
kws.append(weights_params)
if self.with_feature_indices:
feature_params = {
"shape": (self.n_estimators, new_shape[1]),
"dtype": np.dtype(int),
}
kws.append(feature_params)
inputs = [in_sample]
if in_labels is not None:
inputs.append(in_labels)
if in_weights is not None:
inputs.append(in_weights)
return self.new_tileables(inputs, kws=kws)
@classmethod
def _scatter_samples(
cls,
max_samples: Union[int, float],
nsplits: Tuple[int],
random_state: np.random.RandomState,
n_estimators: int,
) -> np.ndarray:
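# Spread the expected number of sampled rows over the row chunks by drawing
# from a multinomial over relative chunk sizes; returns an
# (n_estimators, n_chunks) array of per-chunk sample counts.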
nsp_array = np.array(nsplits)
dim_size = nsp_array.sum()
if isinstance(max_samples, int):
expect_sample_count = max_samples
else:
expect_sample_count = int(max_samples * nsp_array.sum())
if expect_sample_count == dim_size:
return np.array([list(nsplits)] * n_estimators)
split_probs = nsp_array / dim_size
return random_state.multinomial(
expect_sample_count, split_probs, size=n_estimators
)
@classmethod
def tile(cls, op: "BaggingSample"):
in_sample, in_labels, in_weights, _ = _extract_bagging_io(
op.inputs, op, output=False
)
out_data, out_labels, out_weights, out_feature_indices = _extract_bagging_io(
op.outputs, op, output=True
)
# make sure all shapes are computed
if (
has_unknown_shape(in_sample)
or (in_labels is not None and has_unknown_shape(in_labels))
or (in_weights is not None and has_unknown_shape(in_weights))
):
yield
to_tile = []
if in_labels is not None:
in_labels = in_labels.rechunk({0: in_sample.nsplits[0]})
to_tile.append(in_labels)
if in_weights is not None:
in_weights = in_weights.rechunk({0: in_sample.nsplits[0]})
to_tile.append(in_weights)
# tile rechunks
if to_tile:
tiled = yield from recursive_tile(to_tile)
tiled_iter = iter(tiled)
if in_labels is not None:
in_labels = next(tiled_iter)
if in_weights is not None:
in_weights = next(tiled_iter)
random_seeds = [
gen_random_seeds(n, op.random_state) for n in in_sample.chunk_shape
]
axis_keep_shape = [
isinstance(op.max_samples, float)
and op.max_samples == 1.0
and not op.bootstrap,
isinstance(op.max_features, float)
and op.max_features == 1.0
and not op.bootstrap_features,
]
n_reducers = (
op.n_reducers
if op.n_reducers is not None
else max(1, int(in_sample.chunk_shape[0] * op.reducer_ratio))
)
# TODO: implement sampling without replacement
map_chunks = []
max_samples_splits = cls._scatter_samples(
op.max_samples, in_sample.nsplits[0], op.random_state, op.n_estimators
)
max_features_splits = cls._scatter_samples(
op.max_features, in_sample.nsplits[1], op.random_state, op.n_estimators
)
column_cum_offset = np.concatenate([[0], np.cumsum(in_sample.nsplits[1])])
for chunk in in_sample.chunks:
new_op = op.copy().reset_key()
new_op.random_state = None
new_op.sample_random_state = np.random.RandomState(
random_seeds[0][chunk.index[0]]
)
new_op.feature_random_state = np.random.RandomState(
random_seeds[1][chunk.index[1]]
)
new_op.stage = OperandStage.map
new_op.max_samples = max_samples_splits[:, chunk.index[0]]
new_op.max_features = max_features_splits[:, chunk.index[1]]
new_op.n_reducers = n_reducers
new_op.column_offset = int(column_cum_offset[chunk.index[1]])
if chunk.index[0] != 0:
new_op.with_feature_indices = False
if chunk.index[1] != in_sample.chunk_shape[1] - 1:
new_op.with_weights = False
new_op.with_labels = False
params = chunk.params
params["shape"] = tuple(
s if keep_shape else np.nan
for s, keep_shape in zip(chunk.shape, axis_keep_shape)
)
input_chunks = [chunk]
if new_op.with_labels:
input_chunks.append(in_labels.cix[chunk.index[0]])
if new_op.with_weights:
input_chunks.append(in_weights.cix[chunk.index[0]])
map_chunks.append(new_op.new_chunk(input_chunks, **params))
shuffle_op = LearnShuffleProxy(output_types=[OutputType.tensor]).new_chunk(
map_chunks, dtype=np.dtype(int), shape=()
)
remain_reducers = op.n_estimators % n_reducers
reduce_data_chunks = []
reduce_labels_chunks = []
reduce_weights_chunks = []
reduce_feature_chunks = []
for idx in range(n_reducers):
new_op = op.copy().reset_key()
new_op.random_state = None
new_op.stage = OperandStage.reduce
new_op.chunk_shape = in_sample.chunk_shape
new_op.n_estimators = op.n_estimators // n_reducers
if remain_reducers:
remain_reducers -= 1
new_op.n_estimators += 1
if new_op.n_estimators == 0:
continue
kws = []
data_params = out_data.params
data_params["index"] = (idx, 0)
data_params["shape"] = (np.nan, out_data.shape[1])
kws.append(data_params)
if op.with_labels:
labels_params = out_labels.params
labels_params["index"] = (idx,)
labels_params["shape"] = (np.nan,)
kws.append(labels_params)
if op.with_weights:
weights_params = out_weights.params
weights_params["index"] = (idx,)
weights_params["shape"] = (np.nan,)
kws.append(weights_params)
if op.with_feature_indices:
feature_params = {
"index": (idx, 0),
"shape": (new_op.n_estimators, out_feature_indices.shape[1]),
"dtype": np.dtype(int),
}
kws.append(feature_params)
chunks = new_op.new_chunks([shuffle_op], kws=kws)
(
data_chunk,
labels_chunk,
weights_chunk,
feature_chunk,
) = _extract_bagging_io(chunks, op, output=True)
reduce_data_chunks.append(data_chunk)
if labels_chunk is not None:
reduce_labels_chunks.append(labels_chunk)
if weights_chunk is not None:
reduce_weights_chunks.append(weights_chunk)
if feature_chunk is not None:
reduce_feature_chunks.append(feature_chunk)
new_op = op.copy().reset_key()
kws = [
{
"chunks": reduce_data_chunks,
"nsplits": ((np.nan,) * len(reduce_data_chunks), (out_data.shape[1],)),
**out_data.params,
}
]
if op.with_labels:
kws.append(
{
"chunks": reduce_labels_chunks,
"nsplits": ((np.nan,) * len(reduce_data_chunks),),
**out_labels.params,
}
)
if op.with_weights:
kws.append(
{
"chunks": reduce_weights_chunks,
"nsplits": ((np.nan,) * len(reduce_data_chunks),),
**out_weights.params,
}
)
if op.with_feature_indices:
estimator_nsplit = tuple(c.op.n_estimators for c in reduce_data_chunks)
kws.append(
{
"chunks": reduce_feature_chunks,
"nsplits": (estimator_nsplit, (out_feature_indices.shape[1],)),
**out_feature_indices.params,
}
)
return new_op.new_tileables(op.inputs, kws=kws)
@classmethod
def _gen_sample_indices(
cls,
max_range: int,
size: int,
random_state: np.random.RandomState,
with_replacement: bool = False,
):
if not with_replacement:
result = random_state.choice(np.arange(max_range), size, False)
else:
result = random_state.randint(0, max_range - 1, size)
result.sort()
return result
@classmethod
def _execute_map(cls, ctx, op: "BaggingSample"):
in_sample, in_labels, in_weights, _ = _extract_bagging_io(
op.inputs, op, output=False
)
in_sample_data = ctx[in_sample.key]
in_labels_data = ctx[in_labels.key] if op.with_labels else None
in_weights_data = ctx[in_weights.key] if op.with_weights else None
out_samples = op.outputs[0]
remains = op.n_estimators % op.n_reducers
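# Assign estimators to reducers: the first `remains` reducers take one extra
# estimator each so that all n_estimators are covered.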
reducer_iters = [
itertools.repeat(idx, 1 + op.n_estimators // op.n_reducers)
for idx in range(remains)
]
reducer_iters += [
itertools.repeat(idx, op.n_estimators // op.n_reducers)
for idx in range(remains, op.n_reducers)
]
reducer_iter = itertools.chain(*reducer_iters)
result_store = defaultdict(lambda: ([], [], [], []))
for est_id in range(op.n_estimators):
sampled_data = in_sample_data
sampled_labels = in_labels_data
sampled_weights = in_weights_data
if op.max_samples[est_id] != in_sample_data.shape[0]:
sample_indices = cls._gen_sample_indices(
in_sample_data.shape[0],
op.max_samples[est_id],
op.sample_random_state,
op.bootstrap,
)
sampled_data = _get_by_iloc(sampled_data, sample_indices)
if sampled_labels is not None:
sampled_labels = _get_by_iloc(sampled_labels, sample_indices)
if sampled_weights is not None:
sampled_weights = _get_by_iloc(sampled_weights, sample_indices)
if op.max_features[est_id] != in_sample_data.shape[1]:
feature_indices = cls._gen_sample_indices(
in_sample_data.shape[1],
op.max_features[est_id],
op.feature_random_state,
op.bootstrap_features,
)
sampled_data = _get_by_iloc(sampled_data, feature_indices, axis=1)
if not op.with_feature_indices:
feature_indices = None
else:
feature_indices = None
samples, labels, weights, feature_idx_array = result_store[
next(reducer_iter)
]
samples.append(sampled_data)
if sampled_labels is not None:
labels.append(sampled_labels)
if sampled_weights is not None:
weights.append(sampled_weights)
if feature_indices is not None:
feature_idx_array.append(feature_indices + op.column_offset)
for (
reducer_id,
(
samples,
labels,
weights,
feature_idx_array,
),
) in result_store.items():
ctx[out_samples.key, (reducer_id, 0)] = tuple(
samples + labels + weights + feature_idx_array
)
@classmethod
def _execute_reduce(cls, ctx, op: "BaggingSample"):
out_data, out_labels, out_weights, out_feature_indices = _extract_bagging_io(
op.outputs, op, output=True
)
input_keys = op.inputs[0].op.source_keys
input_idxes = op.inputs[0].op.source_idxes
sample_holder = [
np.empty(op.chunk_shape, dtype=object) for _ in range(op.n_estimators)
]
labels_holder = (
[np.empty(op.chunk_shape[0], dtype=object) for _ in range(op.n_estimators)]
if op.with_labels
else None
)
weights_holder = (
[np.empty(op.chunk_shape[0], dtype=object) for _ in range(op.n_estimators)]
if op.with_weights
else None
)
feature_indices_holder = (
[np.empty(op.chunk_shape[1], dtype=object) for _ in range(op.n_estimators)]
if op.with_feature_indices
else None
)
for input_key, input_idx in zip(input_keys, input_idxes):
add_feature_index = input_idx[0] == 0
add_label_weight = input_idx[1] == op.chunk_shape[1] - 1
chunk_data = ctx[input_key, out_data.index]
num_groups = 1
if add_feature_index and op.with_feature_indices:
# contains feature indices
num_groups += 1
if add_label_weight: # contains label or weight
num_groups += int(op.with_weights) + int(op.with_labels)
sample_count = len(chunk_data) // num_groups
assert len(chunk_data) % num_groups == 0
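# chunk_data is laid out as num_groups equally sized blocks: per-estimator
# samples first, then (optionally) labels, weights and feature indices.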
group_iter = (
chunk_data[i * sample_count : (i + 1) * sample_count]
for i in range(num_groups)
)
for data_idx, sample in enumerate(next(group_iter)):
sample_holder[data_idx][input_idx] = sample
if add_label_weight:
if op.with_labels:
for data_idx, label in enumerate(next(group_iter)):
labels_holder[data_idx][input_idx[0]] = label
if op.with_weights:
for data_idx, weight in enumerate(next(group_iter)):
weights_holder[data_idx][input_idx[0]] = weight
if add_feature_index and op.with_feature_indices:
for data_idx, feature_index in enumerate(next(group_iter)):
feature_indices_holder[data_idx][input_idx[1]] = feature_index
data_results: List[Optional[np.ndarray]] = [None] * len(sample_holder)
for est_idx, sample_mat in enumerate(sample_holder):
row_chunks = np.apply_along_axis(
_concat_by_row, axis=0, arr=sample_mat, out_chunk=out_data
)
data_results[est_idx] = _concat_on_axis(
row_chunks[0].tolist(), axis=1, out_chunk=out_data
)
ctx[out_data.key] = tuple(data_results)
for out, holder in zip(
(out_labels, out_weights, out_feature_indices),
(labels_holder, weights_holder, feature_indices_holder),
):
if out is None:
continue
results: List[Optional[np.ndarray]] = [None] * len(holder)
for est_idx, labels_vct in enumerate(holder):
results[est_idx] = _concat_on_axis(labels_vct.tolist(), out_chunk=out)
if holder is feature_indices_holder:
ctx[out.key] = np.stack(results)
else:
ctx[out.key] = tuple(results)
@classmethod
def execute(cls, ctx, op: "BaggingSample"):
if op.stage == OperandStage.map:
cls._execute_map(ctx, op)
else:
cls._execute_reduce(ctx, op)
class BaggingSampleReindex(LearnOperand, LearnOperandMixin):
_op_type_ = opcodes.BAGGING_SHUFFLE_REINDEX
n_estimators: int = Int64Field("n_estimators")
feature_indices: TileableType = ReferenceField("feature_indices", default=None)
start_col_index: int = Int64Field("start_col_index", 0)
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
if self.feature_indices is not None:
self.feature_indices = inputs[-1]
def __call__(self, data: TileableType, feature_indices: TileableType = None):
self._output_types = get_output_types(data)
inputs = [data]
self.feature_indices = feature_indices
params = data.params
if feature_indices is not None:
inputs.append(feature_indices)
params["shape"] = (data.shape[0], np.nan)
if isinstance(data, DATAFRAME_TYPE):
params["index_value"] = parse_index( | pd.Index([], dtype=np.int64) | pandas.Index |
import pandas as pa
import json
import datetime as dt
dfc = pa.read_json('./data/clients.json')
dft = pa.read_json('./data/tarifs.json')
dfv = pa.read_json('./data/vehicules.json')
def enregistrer_json(df, path):
"""
Save a dataframe to a JSON file.
in:
df: dataframe to save
path: path of the JSON file to create
"""
json_df = json.loads(df.to_json(orient="records"))
f = open(path, 'w')
json.dump(json_df, f, indent=2)
f.close()
def vehicules_libres(df):
"""
Return the available vehicles.
in:
df: dataframe of the vehicle database
return:
dataframe containing the vehicles that are neither rented nor reserved
"""
mask = df['date_debut'] == ''
return df[mask]
def km_ok(df, id, km):
"""
Check whether the mileage entered when closing
the rental is valid.
in:
df: dataframe of the vehicle database
id: identification number of the vehicle in question
km: mileage entered when closing the rental
return: boolean
(True if the mileage is valid, False otherwise)
"""
mask = df['id'] == id
kil = df[mask].iloc[0]['kilometrage']
return kil < km
def vehicules_loues(dfv):
"""
Return the rented or reserved vehicles.
in:
dfv: dataframe of the vehicle database
return: dataframe containing the rented or reserved vehicles
"""
mask = dfv['date_debut'] != ''
d = dfv[mask]
return d
def annuler_location(dfc, dfv, id):
"""
Free a reserved vehicle (cancel the booking).
in:
dfc: dataframe of the client database
dfv: dataframe of the vehicle database
id: identification number of the vehicle
"""
mask = dfv['id']==id
dfv.loc[mask, ['date_debut', 'date_fin']] = ['', '']
mask = dfc['id_vehicule']==id
dfc.loc[mask, ['id_vehicule', 'prix_location']] = [-1, 0]
def terminer_location(dfc, dfv, id, km):
"""
Free a vehicle at the end of its rental.
in:
dfc: dataframe of the client database
dfv: dataframe of the vehicle database
id: identification number of the vehicle
km: mileage reported at the end of the rental
"""
if km_ok(dfv, id, km):
mask = dfv['id']==id
dfv.loc[mask, ['date_debut', 'date_fin', 'kilometrage']] = ['', '', km]
mask = dfc['id_vehicule']==id
dfc.loc[mask, ['id_vehicule', 'prix_location']] = [-1, 0]
def ajouter_vehicule(dfv, t, mark, mod, carb, gam, km):
"""
Add a vehicle to the database.
in:
dfv: dataframe of the vehicle database
t, mark, mod, carb, gam, km: type, make, model, fuel, range and mileage
of the vehicle to add (the identification number is generated automatically)
"""
id = dfv['id'][len(dfv)-1] + 1
dfv.loc[dfv.shape[0]] = [id, t, mark, mod, carb, gam, km, '', '']
def retirer_vehicule(dfv, id):
"""
Remove a vehicle from the database.
in:
dfv: dataframe of the vehicle database
id: identification number of the vehicle to remove
"""
mask = dfv['id'] != id
return(dfv[mask])
def export_bdd(df, path_csv):
"""
Export a database to CSV format.
in:
df: dataframe of the database to export
path_csv: path where the CSV file is saved
"""
df.to_csv(path_csv, sep=',')
def import_bdd(path_csv, path_json):
df = pa.read_csv(path_csv, sep=';')
enregistrer_json(df, path_json)
global dfc, dft, dfv
dfc = pa.read_json('data/clients.json')
dft = pa.read_json('data/tarifs.json')
dfv = pa.read_json('data/vehicules.json')
def ajouter_client(dfc, nom, prenom, age, num_permis):
"""
Add a client to the database.
in:
dfc: dataframe of the client database
nom: last name of the client to add
prenom: first name of the client to add
age: age of the client to add
num_permis: driving licence number of the client to add
"""
dfc.loc[dfc.shape[0]] = [nom, prenom, age, num_permis, -1, 0]
def retirer_client(dfc, num_permis):
"""
Remove a client from the database.
in:
dfc: dataframe of the client database
num_permis: driving licence number of the client to remove
"""
mask = dfc['num_permis'] != num_permis
return(dfc[mask])
def changer_tarif(dft, gamme, t, prix, assur, caut):
"""
Change the rates of a vehicle range.
in:
dft: dataframe of the rates database
gamme: name of the range whose rates are being changed
t: vehicle type the new rates apply to
prix: new price to set
assur: new insurance amount to set
caut: new deposit to set
"""
mask = (dft.gamme==gamme) & (dft.type==t)
dft.loc[mask, ['prix', 'assurance', 'caution']] = [prix, assur, caut]
def modifier_véhicule(dfv, id, type, mark, mod, carb, gamme, kilo):
mask = dfv['id'] == id
dfv.loc[mask, ['type', 'marque', 'modele', 'carburant', 'gamme', 'kilometrage']] = [type, mark, mod, carb, gamme, kilo]
def modifier_client(dfc, num, nom, prenom, age):
mask = dfc['num_permis'] == num
dfc.loc[mask, ['nom', 'prenom', 'age']] = [nom, prenom, age]
def louer(dfv, dfc, num_permis, id, date_debut, date_fin, prix):
mask = dfv["id"] == id
dfv.loc[mask, ["date_debut", "date_fin"]] = [date_debut, date_fin]
mask = dfc["num_permis"] == num_permis
dfc.loc[mask, ["id_vehicule", "prix_location"]] = [id, prix]
def calculer_prix(dft, date_debut, date_fin, gamme, choix_assu):
L_debut = date_debut.split('-')
L_fin = date_fin.split('-')
debut = dt.date(int(L_debut[2]), int(L_debut[1]), int(L_debut[0]))
fin = dt.date(int(L_fin[2]), int(L_fin[1]), int(L_fin[0]))
duree = (fin-debut).days
mask = (dft.gamme==gamme)
prix, assurance = int(dft[mask]['prix']), int(dft[mask]['assurance'])
if choix_assu:
return duree*(prix+assurance)
else :
return duree*prix
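# Example (sketch): pricing and booking a rental. The licence number, vehicle id,
# dates and 'citadine' range below are hypothetical values, not data from the files.
# prix = calculer_prix(dft, '01-03-2022', '05-03-2022', 'citadine', True)
# louer(dfv, dfc, 'B123456', 2, '01-03-2022', '05-03-2022', prix)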
# display personal information
def InformationPersonnel(dfc):
return(list(dfc["nom"] + " " + dfc["prenom"]))
def InformationPersonnelClientReserver(dfc):
mask = dfc["id_vehicule"] != -1
return(list(dfc[mask]["nom"] + " " + dfc[mask]["prenom"]))
def aff_client(dfc):
return dfc.to_string(index=False)
def aff_info_client(dfc, nom_prenom):
mask = (dfc.nom==nom_prenom[0]) & (dfc.prenom==nom_prenom[1])
return dfc[mask].values[0].tolist()
def aff_vehicule(dfv):
return dfv.to_string(index=False)
def aff_vehicule_id(dfv, id):
mask = dfv["id"] == id
return dfv[mask].values[0].tolist()
# Display the rates to the user
def aff_tarifs():
return dft.to_string(index=False)
def aff_reservation(dfc, dfv):
index = ['Nom', 'Prenom', "date_debut", "date_fin", "id_vehicule", "gamme", "modele", "type", "prix"]
df_loc = pa.DataFrame([], columns=index)
mask_c = dfc["id_vehicule"] != -1
dfc_loc = dfc[mask_c]
for i in dfc_loc.values.tolist():
mask_v = dfv["id"] == i[4]
dfv_loc = dfv[mask_v]
lv_loc = dfv_loc.values.tolist()[0]
data = [i[0], i[1], lv_loc[7], lv_loc[8], lv_loc[0], lv_loc[5], lv_loc[3], lv_loc[1], i[5]]
df = | pa.DataFrame([data], columns=index) | pandas.DataFrame |
import pandas as pd
class NeuralNetwork:
def __init__(self, input_width, input_height, classes):
self.input_width = input_width
self.input_height = input_height
self.classes = classes
self.model = {}
def save(self, model_path, history_path):
self.model.save(model_path, overwrite=True)
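# Note: self.history is assumed to be set by a training method that is not
# shown in this snippet.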
history_frame = | pd.DataFrame.from_dict(self.history.history) | pandas.DataFrame.from_dict |
# <NAME>, 20307118
"""
This program creates a Patient Management System in which patients admitted to a hospital's ICU are assigned a
unique ID, severity status between 0 - 3 and noted whether they are covid positive or not. A user can add new patients,
transfer patients from hospital to hospital as long as there is space available, discharge patients and update the
severity status of a patient.
"""
import csv
import random
import string
import pandas as pd
from hospitals import Hospital
from patients import Patient
class PatientManagementSystem:
"""
PatientManagementSystem holds a dictionary of the hospitals and all the functions that are needed for the
Patient Management Program.
"""
def __init__(self):
self.hospitals = {"kingston": Hospital("Kingston", 10, 0, {}),
"hamilton": Hospital("Hamilton", 13, 0, {}),
"toronto": Hospital("Toronto", 20, 0, {})}
def get_values_or_keys(self, option):
"""
get_values_or_keys returns a list of all patients with either the Patient classes or the patient ids based on
if option = "keys" or "values"
:param option: str
:return: List of Patient or List of str
"""
data = []
if option == "values":
data = list(self.hospitals.get("kingston").get_patients().values()) + \
list(self.hospitals.get("hamilton").get_patients().values()) + \
list(self.hospitals.get("toronto").get_patients().values())
if option == "keys":
data = list(self.hospitals.get("kingston").get_patients().keys()) + \
list(self.hospitals.get("hamilton").get_patients().keys()) + \
list(self.hospitals.get("toronto").get_patients().keys())
return data
def read_in_csv(self):
"""
reads in the initial_hospital_state.csv file into the hospitals dictionary of a PatientManagementSystem object
:return: None
"""
try:
with open('initial_hospital_state.csv', 'r') as csv_file:
patient_reader = csv.reader(csv_file, delimiter=',')
next(patient_reader) # skip header
for row in patient_reader:
# make a new Patient object
new_patient = Patient(row[1], row[2], row[3], row[4])
# add Patient object to the correct hospital
self.hospitals.get(row[2].lower()).add_patients(row[1], new_patient)
except Exception:
print("There was a problem with opening initial_hospital_state.csv, the system is currently empty.")
def write_out_csv(self):
"""
writes out to final_hospital_state.csv from the hospitals dictionary of a PatientManagementSystem object
:return: None
"""
with open('final_hospital_state.csv', 'w') as csv_file:
patient_writer = csv.writer(csv_file, delimiter=',', lineterminator='\n')
# all existing ids
all_patients = self.get_values_or_keys("values")
fieldnames = ["", "Patient_ID", "Hospital", "Status", "Covid_Positive"] # header
patient_writer.writerow(fieldnames)
patient_number = 0
for patient in all_patients:
patient_writer.writerow([str(patient_number)] + patient.get_info()) # write patient info to csv file
patient_number += 1
def addPatient(self, hospital_name, sev_status, covid_positive):
"""
addPatient generates a new unique id across all the hospitals and creates a new Patient using this newly
created key, sev_status and covid_positive value. This Patient is then assigned to the Hospital that
corresponds with hospital_name. When this method is called, the Hospital should have available space or
a new patient cannot be admitted.
:param hospital_name: str
:param sev_status: str
:param covid_positive: str
:return: None
"""
# all existing ids
all_patients = self.get_values_or_keys("keys")
# generating new unique id
random_id = str(random.randint(0, 9)) \
+ str(random.randint(0, 9)) \
+ str(random.randint(0, 9)) \
+ random.choice(string.ascii_letters)
while random_id in all_patients:
random_id = str(random.randint(0, 9)) + str(random.randint(0, 9)) + str(random.randint(0, 9)) \
+ random.choice(string.ascii_letters)
# create new Patient object
patient = Patient(random_id, hospital_name.capitalize(), sev_status, covid_positive)  # use the chosen hospital rather than a hard-coded one
# assign Patient to appropriate hospital
self.hospitals.get(hospital_name.lower()).add_patients(random_id, patient)
print("Patient successfully added to the {} hospital.".format(hospital_name))
def transferPatient(self, patient_id, new_hospital_name):
"""
transferPatient will transfer a patient from their current hospital to another hospital by moving the Patient
object from the current Hospital object it is stored in to another Hospital object. The current hospital
will discharge the patient from their hospital. When this method is called, the Patient object must have
a severity status value of less than 3 to be transferred.
:param patient_id: str
:param new_hospital_name: str
:return: None
"""
curr_hospital = self.find_patient_hospital(patient_id) # get the current hospital of the patient
new_hospital = self.hospitals.get(new_hospital_name.lower()) # get the new hospital of the patient
patient = curr_hospital.discharge_patient(patient_id) # discharges patient from old hospital
# transfer to new hospital
new_hospital.add_patients(patient_id, patient)
print("Patient transferred successfully to the {} hospital.".format(new_hospital_name))
def dischargePatient(self, patient_id):
"""
dischargePatient removes the patient from the hospital they are admitted to by removing the Patient object
that is stored in that Hospital object. When this method is called the patient must have a severity status
of 0.
:param patient_id: str
:return: None
"""
hospital = self.find_patient_hospital(patient_id) # find the patient's current hospital
# remove the Patient object from the Hospital object by calling Hospital object's discharge_patient() method
hospital.discharge_patient(patient_id)
print("Patient {} has been discharged from the {} hospital.".format(patient_id,
hospital.get_name().capitalize()))
def updateStatus(self, patient_id, new_status):
"""
updateStatus changes the severity status of the patient by changing the stored status value in the
Patient object
:param patient_id: str
:param new_status: str
:return: None
"""
hospital = self.find_patient_hospital(patient_id) # get the hospital of the patient
patient = hospital.get_patient(patient_id) # get the patient object from the Hospital object
if patient.get_status() == new_status: # if the new status of the patient is the same as the current one
print("The patient already has a status of {}".format(new_status))
else:
patient.update_status(new_status)
print("The status of {} has been successfully updated.".format(patient_id))
def hospital_availability(self):
"""
hospital_availability iterates through all the Hospital objects and appends the names of those that have
space available to take new patients by using the available_beds() method from the Hospital class. The
list of strings is then returned.
:return: List of Str
"""
available = []
for hospital in self.hospitals.values(): # iterate Hospital objects
if hospital.available_beds() > 0: # determine if beds are available at the current Hospital
available.append(hospital.get_name().lower())
return available
def get_patient(self):
"""
get_patient is used to get the user_id of a patient from the user (used for transferring, discharging or
updating a Patient object). The method checks if the user_id inputted exists in any of the hospitals
and then returns it if it does, otherwise the user is asked to enter an existing patient id
:return: str
"""
patient_id = input()
# find patients' hospital if patient exists
patient_hospital = self.find_patient_hospital(patient_id.lower())
while patient_hospital == False: # keep asking until a valid id is entered
print("This patient ID does not exist, please make sure the ID "
"consists of 3 numbers followed by a letter.")
patient_id = input()
# check if a Patient object with this id exists
patient_hospital = self.find_patient_hospital(patient_id.lower())
return patient_id
def find_patient_hospital(self, patient_id):
"""
find_patient_hospital uses the string patient_id to find the Hospital of the patient and returns the Hospital
object if its found, otherwise it will return False.
:param patient_id: str
:return: Hospital or Bool
"""
# returns the kingston hospital
if patient_id in self.hospitals.get("kingston").get_patients().keys():
return self.hospitals.get("kingston")
# returns the hamilton hospital
if patient_id in self.hospitals.get("hamilton").get_patients().keys():
return self.hospitals.get("hamilton")
# returns the toronto hospital
if patient_id in self.hospitals.get("toronto").get_patients().keys():
return self.hospitals.get("toronto")
return False # patient id doesnt exist
def get_hospital_choice(self, available_hospitals):
"""
get_hospital_choice is a method that is used to interact with the user and get the name of the hospital that
they will perform an action with. It will list all hospitals with the number of available beds and only
let the user choose from those hospitals. Returns the a string with the name of the chosen hospital
:param available_hospitals: List of str
:return: str
"""
# list all the Hospitals
for hospital in available_hospitals:
print("{} -> Beds available: {}".format(
hospital.capitalize(), self.hospitals[hospital].available_beds()))
choice = input("\nType in the name of the hospital you want to choose here: ")
# checks if the input is one of the names of the hospitals available
while choice.lower() not in available_hospitals:
print("Please type the name of one of the hospitals from the available hospitals!")
choice = input("\nPlease try again: ")
return choice
def make_rows_df(self, max_length):
"""
make_rows_df returns a list of (list of str) which is a list of patient ids for each hospital used to create
a dataframe
e.g.
["123f", "345d", "654h"]
Kingston Hamilton Toronto
123f 345d 654h
:param max_length: int
:return: list of (list of str)
"""
# add all patient ids to the patients list to build each row of the table; a patient id is appended as long as
# it exists at that index, otherwise an empty string is appended
patients = []
for i in range(max_length):
temp = []
if i < len(self.hospitals.get("kingston").get_patients()):
temp.append(list(self.hospitals.get("kingston").get_patients().keys())[i])
else:
temp.append("")
if i < len(self.hospitals.get("hamilton").get_patients()):
temp.append(list(self.hospitals.get("hamilton").get_patients().keys())[i])
else:
temp.append("")
if i < len(self.hospitals.get("toronto").get_patients()):
temp.append(list(self.hospitals.get("toronto").get_patients().keys())[i])
else:
temp.append("")
if len(temp) > 0:
patients.append(temp)
temp = []
return patients
def print_patients(self):
"""
print_patients prints all the patient ids under each Hospital in a table format using the pandas module
for the user to see the current patients.
:return: None
"""
# finding the greatest number of patients in any Hospital
max_length = max([len(self.hospitals.get("kingston").get_patients()),
len(self.hospitals.get("hamilton").get_patients()),
len(self.hospitals.get("toronto").get_patients())])
# add all patient ids to the patients list to build each row of the table; a patient id is appended as long as
# it exists at that index, otherwise an empty string is appended
patients = self.make_rows_df(max_length)
# create a pandas dataframe to represent the table
df = | pd.DataFrame(patients, columns=["Kingston", "Hamilton", "Toronto"]) | pandas.DataFrame |
from functools import partial
import json
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from solarforecastarbiter.io import utils
# data for test Dataframe
TEST_DICT = {'value': [2.0, 43.9, 338.0, -199.7, 0.32],
'quality_flag': [1, 1, 9, 5, 2]}
DF_INDEX = pd.date_range(start=pd.Timestamp('2019-01-24T00:00'),
freq='1min',
periods=5,
tz='UTC', name='timestamp')
DF_INDEX.freq = None
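# freq is cleared so the fixture index compares equal to indexes returned by
# the functions under test, which carry no frequency information.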
TEST_DATA = pd.DataFrame(TEST_DICT, index=DF_INDEX)
EMPTY_SERIES = pd.Series(dtype=float)
EMPTY_TIMESERIES = pd.Series([], name='value', index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
EMPTY_DATAFRAME = pd.DataFrame(dtype=float)
EMPTY_TIME_DATAFRAME = pd.DataFrame([], index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
TEST_DATAFRAME = pd.DataFrame({
'25.0': [0.0, 1, 2, 3, 4, 5],
'50.0': [1.0, 2, 3, 4, 5, 6],
'75.0': [2.0, 3, 4, 5, 6, 7]},
index=pd.date_range(start='20190101T0600',
end='20190101T1100',
freq='1h',
tz='America/Denver',
name='timestamp')).tz_convert('UTC')
@pytest.mark.parametrize('dump_quality,default_flag,flag_value', [
(False, None, 1),
(True, 2, 2)
])
def test_obs_df_to_json(dump_quality, default_flag, flag_value):
td = TEST_DATA.copy()
if dump_quality:
del td['quality_flag']
converted = utils.observation_df_to_json_payload(td, default_flag)
converted_dict = json.loads(converted)
assert 'values' in converted_dict
values = converted_dict['values']
assert len(values) == 5
assert values[0]['timestamp'] == '2019-01-24T00:00:00Z'
assert values[0]['quality_flag'] == flag_value
assert isinstance(values[0]['value'], float)
def test_obs_df_to_json_no_quality():
td = TEST_DATA.copy()
del td['quality_flag']
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_obs_df_to_json_no_values():
td = TEST_DATA.copy().rename(columns={'value': 'val1'})
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_forecast_series_to_json():
series = pd.Series([0, 1, 2, 3, 4], index=pd.date_range(
start='2019-01-01T12:00Z', freq='5min', periods=5))
expected = [{'value': 0.0, 'timestamp': '2019-01-01T12:00:00Z'},
{'value': 1.0, 'timestamp': '2019-01-01T12:05:00Z'},
{'value': 2.0, 'timestamp': '2019-01-01T12:10:00Z'},
{'value': 3.0, 'timestamp': '2019-01-01T12:15:00Z'},
{'value': 4.0, 'timestamp': '2019-01-01T12:20:00Z'}]
json_out = utils.forecast_object_to_json(series)
assert json.loads(json_out)['values'] == expected
def test_json_payload_to_observation_df(observation_values,
observation_values_text):
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_json_payload_to_forecast_series(forecast_values,
forecast_values_text):
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
def test_empty_payload_to_obsevation_df():
out = utils.json_payload_to_observation_df({'values': []})
assert set(out.columns) == {'value', 'quality_flag'}
assert isinstance(out.index, pd.DatetimeIndex)
def test_empty_payload_to_forecast_series():
out = utils.json_payload_to_forecast_series({'values': []})
assert isinstance(out.index, pd.DatetimeIndex)
def test_null_json_payload_to_observation_df():
observation_values_text = b"""
{
"_links": {
"metadata": ""
},
"observation_id": "OBSID",
"values": [
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
observation_values = pd.DataFrame({
'value': pd.Series([None, None], index=ind, dtype=float),
'quality_flag': pd.Series([1, 1], index=ind)
})
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_null_json_payload_to_forecast_series():
forecast_values_text = b"""
{
"_links": {
"metadata": ""
},
"forecast_id": "OBSID",
"values": [
{
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
forecast_values = pd.Series([None, None], index=ind, dtype=float,
name='value')
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
@pytest.mark.parametrize('label,exp,start,end', [
('instant', TEST_DATA, None, None),
(None, TEST_DATA, None, None),
('ending', TEST_DATA.iloc[1:], None, None),
('beginning', TEST_DATA.iloc[:-1], None, None),
pytest.param('er', TEST_DATA, None, None,
marks=pytest.mark.xfail(raises=ValueError)),
# start/end outside data
('ending', TEST_DATA, pd.Timestamp('20190123T2300Z'), None),
('beginning', TEST_DATA, None, pd.Timestamp('20190124T0100Z')),
# more limited
('ending', TEST_DATA.iloc[2:], pd.Timestamp('20190124T0001Z'), None),
('beginning', TEST_DATA.iloc[:-2], None,
pd.Timestamp('20190124T0003Z')),
('instant', TEST_DATA.iloc[1:-1], pd.Timestamp('20190124T0001Z'),
pd.Timestamp('20190124T0003Z')),
])
def test_adjust_timeseries_for_interval_label(label, exp, start, end):
start = start or pd.Timestamp('2019-01-24T00:00Z')
end = end or pd.Timestamp('2019-01-24T00:04Z')
out = utils.adjust_timeseries_for_interval_label(
TEST_DATA, label, start, end)
pdt.assert_frame_equal(exp, out)
def test_adjust_timeseries_for_interval_label_no_tz():
test_data = TEST_DATA.tz_localize(None)
label = None
start = pd.Timestamp('2019-01-24T00:00Z')
end = pd.Timestamp('2019-01-24T00:04Z')
with pytest.raises(ValueError):
utils.adjust_timeseries_for_interval_label(
test_data, label, start, end)
def test_adjust_timeseries_for_interval_label_no_tz_empty():
test_data = pd.DataFrame()
label = None
start = pd.Timestamp('2019-01-24T00:00Z')
end = | pd.Timestamp('2019-01-24T00:04Z') | pandas.Timestamp |
import operator
import re
import numpy as np
import pandas as pd
import utils
def get_sites_from_kd_dict(transcript_id, sequence, kd_dict, overlap_dist):
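# Slide a 12-mer window along the padded sequence, look up each window in kd_dict,
# and when two hits are within overlap_dist of each other keep only the one with
# the lower (stronger-affinity) log kd.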
if len(sequence) < 9:
return pd.DataFrame(None)
mir_info = {
'prev_loc': -100,
'prev_seq': '',
'prev_kd': 100,
'keep_kds': [],
'keep_locs': [],
'keep_seqs': []
}
pad_seq = 'XXX' + sequence + 'XXX'
seq = 'A' + pad_seq[:11]
# iterate through 12mers in the sequence
for loc, nt in enumerate(pad_seq[11:]):
seq = seq[1:] + nt
if seq in kd_dict:
new_kd = kd_dict[seq]
# if new site is too close to previous site, take site with higher affinity
if (loc - mir_info['prev_loc']) <= overlap_dist:
if new_kd < mir_info['prev_kd']:
mir_info['keep_kds'][-1] = new_kd
mir_info['keep_locs'][-1] = loc
mir_info['keep_seqs'][-1] = seq
mir_info['prev_loc'] = loc
mir_info['prev_kd'] = new_kd
# print('replace')
else:
# print('skipped')
continue
else:
# print('added')
mir_info['keep_kds'].append(new_kd)
mir_info['keep_locs'].append(loc)
mir_info['keep_seqs'].append(seq)
mir_info['prev_loc'] = loc
mir_info['prev_kd'] = new_kd
all_sites = pd.DataFrame({
'transcript': transcript_id,
'12mer': mir_info['keep_seqs'],
'log_kd': mir_info['keep_kds'],
'loc': mir_info['keep_locs']
})
return all_sites
def get_sites_from_kd_dict_improved(transcript_id, sequence, kd_dict, overlap_dist):
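# Same sliding-window scan as above, but every kd_dict hit is kept;
# overlapping sites are not collapsed here.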
if len(sequence) < 9:
return pd.DataFrame(None)
pad_seq = 'XXX' + sequence + 'XXX'
seq = 'A' + pad_seq[:11]
all_sites = []
# iterate through 12mers in the sequence
for loc, nt in enumerate(pad_seq[11:]):
seq = seq[1:] + nt
if seq in kd_dict:
new_kd = kd_dict[seq]
all_sites.append([seq, new_kd, loc])
if len(all_sites) == 0:
return | pd.DataFrame(None) | pandas.DataFrame |
import os
import time
import pickle
import argparse
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
###################
# Global parameters
# arguments parser
parser = argparse.ArgumentParser()
parser.add_argument("--freq", type=str, default="W", help="Temporal resolution: W for weekly, or M for monthly")
parser.add_argument("--n_trees", type=int, default=10, help="Number of decision trees in Random Forest ensemble: 10, 100")
parser.add_argument("--subset_jjas", type=bool, default=False, help="True for subseting only June, July, August, and September. False for considering the entire timeseries")
parser.add_argument("--only_t2m_mean", type=bool, default=True, help="True for considering only mean temperature. False for considering mean, min, max temperatures")
parser.add_argument("--start", type=int, default=0, help="The first index to start data collection over available glaciers")
parser.add_argument("--stop", type=int, default=-1, help="The last index to finalize data collection over available glaciers")
args = parser.parse_args()
freq = args.freq
n_trees = args.n_trees
subset_jjas = args.subset_jjas
only_t2m_mean = args.only_t2m_mean
start = args.start
stop = args.stop
###################
if freq == "M":
freq_prefix = "monthly"
elif freq == "W":
freq_prefix = "weekly"
if subset_jjas:
subset_prefix = "JJAS"
else:
subset_prefix = "full"
# Dir for results
results_path = f"../results/{freq_prefix}_{subset_prefix}_domain/"
results_path_models = f"../results/{freq_prefix}_{subset_prefix}_domain/trained_models/"
results_path_simulations = f"../results/{freq_prefix}_{subset_prefix}_domain/simulations/"
if not os.path.exists(results_path):
os.mkdir(results_path)
if not os.path.exists(results_path_models):
os.mkdir(results_path_models)
if not os.path.exists(results_path_simulations):
os.mkdir(results_path_simulations)
# Defining sources of data
# RGI IDs
glacier_ids = np.load("../data/misc/glacier_IDs.npy", allow_pickle=True)
# TSLs
tsl_store = | pd.read_pickle("../data/tsl/TSLs.pkl") | pandas.read_pickle |
"""
Collect information about famous artists on Wikipedia.
"""
import threading
import requests
import re
import codecs
import os
import json
from datetime import datetime
from time import sleep
from copy import copy
from glob import glob
from bs4 import BeautifulSoup
import pandas as pd
import pyarrow
g_mutex = threading.Condition()
artist_queue = {'artist': []}
class Artist:
def __init__(self, name):
self.name = name
self.born = None
self.died = None
self.nationality = None
self.pic_url = None
self.desc = None
self.wiki_url = self.get_wiki_link()
def get_wiki_link(self):
base_url = 'https://en.wikipedia.org/wiki/'
wiki_link = base_url + self.name.replace(' ', '_')
return wiki_link
class Crawler:
def __init__(self, artist_list, threadnum):
self.artist_list = artist_list
self.threadnum = threadnum
self.threadpool = []
def craw(self):
i = 0
while i < len(self.artist_list):
tid = 0
while tid < self.threadnum and i+tid < len(self.artist_list):
self.download(self.artist_list[i+tid], tid)
tid += 1
i += tid
for thread in self.threadpool:
thread.join(30)
def download(self, artist, tid):
crawthread = CrawlerThread(artist, tid)
self.threadpool.append(crawthread)
crawthread.start()
class CrawlerThread(threading.Thread):
def __init__(self, artist, tid):
threading.Thread.__init__(self)
self.artist = Artist(artist)
self.tid = tid
self.url = self.artist.wiki_url
def run(self):
global g_mutex
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
}
try:
r = requests.get(self.url, headers=headers)
except requests.exceptions.ConnectionError:
sleep(60)
r = requests.get(self.url, headers=headers)
if r.status_code == 429:
sleep(60)
r = requests.get(self.url, headers=headers)
if r.status_code == 404:
return
if not self.filter_artist(r.text):
return
try:
self.parse_wiki(r.text)
except Exception as e:
print("ParseException: ", e, self.url)
return
g_mutex.acquire()
print("Thread", self.tid, " is crawling ", self.url)
self.save_to_queue()
g_mutex.release()
def filter_artist(self, wiki_html_content):
soup = BeautifulSoup(wiki_html_content, features="html.parser")
# filter out famous artists according to the number of references
refer_num = 0
try:
references = soup.findAll('div', attrs={'class': 'reflist'})
if len(references) > 1:
l1 = references[0].find('ol', attrs={'class': 'references'}).findAll('li')
l2 = references[1].find('ol', attrs={'class': 'references'}).findAll('li')
refer_num = len(l1) + len(l2)
else:
l1 = references[0].find('ol', attrs={'class': 'references'}).findAll('li')
refer_num = len(l1)
except Exception as e:
print("FilterException", e, self.url)
return False
if refer_num < 5:
return False
return True
def parse_wiki(self, wiki_html_content):
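# Pull the lead paragraphs, the infobox portrait link, and the Born/Died/
# Nationality rows from the artist's Wikipedia page.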
soup = BeautifulSoup(wiki_html_content, features="html.parser")
text = soup.find('div', attrs={'class': 'mw-parser-output'}).findAll('p')
desc = text[1].text + text[2].text
attributes = soup.find('table', attrs={'class': 'infobox'}).findAll('tr')
img = attributes[1].a['href']
born = attributes[2].text.replace('Born', '')
for attr in attributes[3:]:
if 'Died' in attr.text:
self.artist.died = attr.text.replace('Died', '')
if 'Nationality' in attr.text:
self.artist.nationality = attr.text.replace('Nationality', '')
self.artist.born = born
self.artist.desc = desc.strip('\n')
self.artist.pic_url = self.url + '#/media/' + re.match('.*(File.*)', img)[1]
print(self.artist.__dict__)
def save_to_queue(self):
global artist_queue
artist_queue['artist'].append(self.artist.__dict__)
# --------------------------------------------------------------------------------------------
headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',}
request_continue = True
file_artists_txt = 'artists.txt'
file_artists_parquet = '{}.parquet.gzip'
html_fetch_count = 0
# def udf_wiki_request(name):
# global html_fetch_count
# global request_continue
# if request_continue is False:
# return ""
# url = 'https://en.wikipedia.org/wiki/'
# url = url + name.replace(' ', '_')
# try:
# r = requests.get(url, headers=headers)
# if r.status_code == 200:
# r = r.text
# elif r.status_code == 404:
# r = "404"
# elif r.status_code == 301:
# r = "301"
# elif r.status_code == 429:
# request_continue = False
# return ""
# else:
# request_continue = False
# return ""
# except requests.exceptions.ConnectionError:
# request_continue = False
# return ""
# html_fetch_count += 1
# return r
def load_artists():
if os.path.exists(file_artists_parquet):
r_df = pd.read_parquet(file_artists_parquet)
print("{} [System]: {} artists in parquet file loaded.".format(datetime.now(), r_df.shape[0]))
else:
# artist names with special characters will be ignored.
with codecs.open(file_artists_txt, 'r', 'utf-8') as f:
artist_list = f.read().replace("'", '').strip('[]').split(', ')
artist_set = set(artist_list)
artist_set.remove("Unknown")
r_df = pd.DataFrame(list(artist_set), columns=['name'])
r_df['html'] = ''
r_df.to_parquet(file_artists_parquet, compression='gzip')
print("{} [System]: {} artists reads in.".format(datetime.now(), r_df.shape[0]))
return r_df
# in_df = pd.merge(in_df, t_df, on="name", how="outer", suffixes=('_',''))
#
# print(in_df.head(1))
# in_df = in_df.merge(t_df, on='name', how='outer')
# # pd.option_context('display.max_rows', None)
# print(in_df.to_string())
# t_df = in_df[in_df['html'] == ''].head(30)
# def run(self):
# global g_mutex
#
# try:
# r = requests.get(self.url, headers=headers)
# except requests.exceptions.ConnectionError:
# sleep(60)
# r = requests.get(self.url, headers=headers)
#
# if r.status_code == 429:
# sleep(60)
# r = requests.get(self.url, headers=headers)
# if r.status_code == 404:
# return
#
# if not self.filter_artist(r.text):
# return
# try:
# self.parse_wiki(r.text)
# except Exception as e:
# print("ParseException: ", e, self.url)
# return
# g_mutex.acquire()
# print("Thread", self.tid, " is crawling ", self.url)
# self.save_to_queue()
# g_mutex.release()
def udf_filter_artists(name):
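# Keep names that are purely alphabetic once spaces are removed, contain at
# least one space, and have fewer than three spaces (two- or three-word names).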
return name.replace(" ", "").isalpha() and (name.count(' ') < 3) and (' ' in name)
class Spider:
def __init__(self):
self.df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from yellowbrick.model_selection import feature_importances
# ===============================================================================================#
# Classification Models Class
# ===============================================================================================#
class Classification():
"""
This class is for performing classification algorithms such as Logistic Regression, Decision Tree, Random Forest, and SVM.
Parameters
----------
model_type: 'Logistic Regression', 'Decision Tree', 'Random Forest', 'SVM'
the type of classification algorithm you would like to apply
x_train: dataframe
the independent variables of the training data
x_val: dataframe
the independent variables of the validation data
y_train: series
the target variable of the training data
y_val: series
the target variable of the validation data
"""
def __init__(self, model_type, x_train, x_val, y_train, y_val):
self.model_type = model_type
self.x_train = x_train
self.y_train = y_train
self.x_val = x_val
self.y_val = y_val
self.scores_table = pd.DataFrame()
self.feature_importances = | pd.DataFrame() | pandas.DataFrame |
import streamlit as st
import pandas as pd
import plotly.express as px
st.set_page_config(
page_title="Personal Spending Dashboard", page_icon="💰",
)
st.write("""\
# Streamlit Personal Spending Dashboard 💰
Built for [#30DaysOfStreamlit](https://share.streamlit.io/streamlit/30days) Day 4 (a day late 😄) with ❤️ from [Gar's Bar](https://tech.gerardbentley.com/)
""")
with st.expander("What's This?"):
st.write("""\
Check out the example bank dataset or upload your own bank / debit card / credit card / spending spreadsheet!
Analyze your total and average spending over each month / week / day / year / quarter.
See what the minimum / maximum / total number of purchases were in each period.
If your data has a description / name / category column, view how many times you've made those purchases.
Or use it on other roughly timeseries aggregated univariate data!
""")
upload_data = st.file_uploader(
"Bank / Credit Card Spreadsheet", type=["csv", "xls", "xlsx", "xlsm"]
)
if upload_data is None:
st.info(
"No File uploaded. Using example data from a [Kaggle Dataset](https://www.kaggle.com/datasets/apoorvwatsky/bank-transaction-data). Upload a CSV to use your own data!"
)
upload_data = open("data/bank.xlsx", mode="rb")
separator = ","
use_sample = True
else:
separator = st.text_input(
"CSV Delimiter",
value=",",
max_chars=1,
help="How your CSV values are separated (doesn't matter for excel)",
)
use_sample = False
@st.experimental_memo
def read_csv_or_excel(data, sep):
try:
raw_df = pd.read_csv(data, sep=sep)
except UnicodeDecodeError:
try:
raw_df = | pd.read_excel(data) | pandas.read_excel |
import os
import pandas as pd
import numpy as np
from datetime import datetime
import seaborn as sns
import re
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
class trainModel:
def setConstants(self, model_id):
        # Path to the new-property (AD_NewDisk) data table
        self.newdisk_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDisk.csv'
        # Path to the property-attributes data table
        self.property_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_Property.csv'
        # Path to the address data table
        self.address_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDiskAddress.csv'
        # Path to the listing data
        self.data_path = os.path.dirname(os.path.realpath(__file__)) + '/data/'
        self.model_dir = os.path.dirname(os.path.realpath(__file__)) + '/cache/model_%s/' % (model_id)
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        # Path to the per-estate median price file
        self.medprice_path = self.model_dir + '/medprice.csv'
        # Path to the district (area) label-encoding file
        self.arealabel_path = self.model_dir + '/arealabel.csv'
        # Path to the plate (sub-district) label-encoding file
        self.platelabel_path = self.model_dir + '/platelabel.csv'
        # Path to the ring-road (inner/middle/outer) label-encoding file
        self.modulelabel_path = self.model_dir + 'modulelabel.csv'
        # Path to the cached model file
self.cache_path = self.model_dir + '/model.txt'
def setParams(self, model_id):
from ..models import models_logs
model = models_logs.objects.get(id=model_id)
self.beginDate = model.startMonth.strftime('%Y-%m')
self.endDate = model.endMonth.strftime('%Y-%m')
self.objective = model.objective
self.metric = model.metric
self.learning_rate = model.learning_rate
self.feature_fraction = model.feature_fraction
self.bagging_fraction = model.bagging_fraction
self.max_depth = model.max_depth
self.num_leaves = model.num_leaves
self.bagging_freq = model.bagging_freq
self.min_data_in_leaf = model.min_data_in_leaf
self.min_gain_to_spilt = model.min_gain_to_split
self.lambda_l1 = model.lambda_l1
self.lambda_l2 = model.lambda_l2
self.verbose = model.verbose
def name_filter(self, name):
"""小区名正则过滤"""
n = re.compile('\(|\(|一期|二期').split(name)[0]
n = re.sub(r'\(.*?\)', '', re.sub(r'\(.*?\)', '', n))
n = n.strip('*0123456789(())')
n = n.split('第')[0]
return n
def address_filter(self, address):
"""小区地址清洗"""
n = re.compile(',|,|、').split(address)[0]
n = re.sub(r'\(.*?\)', '', re.sub(r'\(.*?\)', '', n))
n = n.strip('*0123456789')
return n
def time_map(self, time):
if type(time) == str:
split_char = '/' if '/' in time else '-'
return int(time.split(split_char)[0])
return None
def floor_map(self, floor):
        # Floor mapping
return list(pd.cut(floor, [0, 3, 6, 9, np.inf], labels=['低层', '多层', '小高层', '高层']))
def make_coordinates(self, data):
coors = []
# for i in tqdm(data):
for i in data:
if type(i) == str and i != '公寓' and i != '商业' and i != '其它':
coors.append(i.split(','))
else:
coors.append([None, None])
coors = pd.DataFrame(coors, columns=['loc_x', 'loc_y'])
# coors=pd.DataFrame([coor.split(',') for coor in all_df.Coordinates],columns=['loc_x','loc_y'],index=all_df.index)
coors = coors.astype(float)
return coors
def load_guapai(self, name, month):
"""读取挂牌数据"""
# 训练模型,使用本地数据,提高效率。
with open(os.path.join(self.data_path, name, '挂牌.txt'), encoding='utf-8') as f:
l = []
for i in f.readlines():
l.append(i.split('\t'))
df = pd.DataFrame(l)
drop_col = [0, 15, 16, 18, 19, 20, 21]
if len(df.columns) == 23:
drop_col.append(22)
            df.drop(drop_col, axis=1, inplace=True)  # drop unused columns
df.columns = ['area', 'address', 'name', 'price', 'unit_price', 'acreage', 'room_type', 'all_floor',
'floor',
'shore', 'house_type', 'fitment', 'time', 'Source', 'house_trait']
df['month'] = month
print('load %s' % name)
return df
def load_data(self):
"""加载训练数据"""
print('加载挂牌训练数据...')
cache_path = os.path.dirname(os.path.realpath(__file__)) + '/cache/guapai_%s-%s.hdf' % (self.beginDate, self.endDate)
if os.path.exists(cache_path):
# 加载缓存
meta_df = pd.read_hdf(cache_path, 'meta')
all_df = pd.read_hdf(cache_path, 'data')
else:
# pool = Pool()
# files=[i for i in os.listdir(data_path) if os.path.dirname(os.path.realpath(__file__))+'' not in i]
files = np.unique(
[datetime.strftime(x, '%Y-%m') for x in list(pd.date_range(start=self.beginDate, end=self.endDate))])
# files = sorted(files)
# dfs = [pool.apply_async(load_guapai, (name, month)) for month, name in enumerate(files)]
# pool.close()
# pool.join()
# dfs = [i.get() for i in dfs]
dfs = []
for month, name in enumerate(files):
dfs.append(self.load_guapai(name, str(month)))
print('共加载%s个月份的挂牌数据...' % len(dfs))
all_df = pd.concat(dfs, ignore_index=True)
            # Get coordinate (longitude/latitude) information
newdisk_df = pd.read_csv(self.newdisk_path, usecols=['NewDiskID', 'PropertyID', 'NewDiskName', 'Coordinates'])
            # newdisk_df = tools.read_basic_table('AD_NewDisk')  (for model training we use local data and no longer read from the database)
newdisk_df.rename(columns={'NewDiskName': 'name'}, inplace=True)
            # Get plate and ring-road (Module) information
property_df = pd.read_csv(self.property_path, usecols=['PropertyID', 'Area', 'Plate', 'Module', 'HousingName'])
property_df.rename(columns={'Area': 'area', 'HousingName': 'name'}, inplace=True)
            # Get estate address information
address_df = pd.read_csv(self.address_path, usecols=['RoadLaneNo', 'NewDiskID'])
address_df.rename(columns={'RoadLaneNo': 'address'}, inplace=True)
# merge them
meta_df = pd.merge(newdisk_df, property_df.drop('name', axis=1), on='PropertyID', how='left')
# meta_df=pd.merge(meta_df,address_df,on='NewDiskID',how='left')
            # Clean residential-complex names
index = meta_df.name.notnull()
meta_df.loc[index, 'name'] = meta_df.loc[index, 'name'].apply(self.name_filter)
all_df.name = all_df.name.apply(self.name_filter)
address_df.address = address_df.address.apply(self.address_filter)
all_df.address = all_df.address.apply(self.address_filter)
            # Convert numeric columns from str to float
numerical_columns = ['price', 'unit_price', 'acreage', 'all_floor', 'floor']
all_df[numerical_columns] = all_df[numerical_columns].astype(float)
all_df['No'] = range(all_df.shape[0])
address_match = pd.merge(all_df[['No', 'address']], address_df, on='address', how='inner')
name_match = pd.merge(all_df[['No', 'name', 'area']], meta_df[['name', 'area', 'NewDiskID']],
on=['name', 'area'],
how='inner')
match = pd.concat((address_match[['No', 'NewDiskID']], name_match[['No', 'NewDiskID']]), ignore_index=True)
match.drop_duplicates(keep='first', inplace=True)
match = match.sort_values('No')
all_df = all_df.loc[match.No]
all_df['NewDiskID'] = match.NewDiskID.values
all_df.drop('No', axis=1, inplace=True)
all_df = pd.merge(all_df, meta_df[['NewDiskID', 'Coordinates', 'Plate', 'Module']], on='NewDiskID',
how='left')
meta_df.to_hdf(cache_path, 'meta')
all_df.to_hdf(cache_path, 'data')
return meta_df, all_df
def preprocess(self, all_df):
"""特征预处理"""
print('清洗挂牌数据...')
cache_path = os.path.dirname(os.path.realpath(__file__)) + '/cache/feats_%s-%s.hdf' % (self.beginDate, self.endDate)
if os.path.exists(cache_path):
all_df = pd.read_hdf(cache_path, 'data')
else:
            # Correct the acreage
acreage_log = np.log(all_df.acreage)
mean = acreage_log.mean()
std = acreage_log.std()
i = acreage_log[(acreage_log <= mean + 2 * std) & (acreage_log >= mean - 1 * std)].index
sns.set({'figure.figsize': (8, 4)})
sns.boxplot(all_df.loc[i].acreage)
all_df.loc[i].acreage.describe()
all_df = all_df.loc[i]
            # Correct the unit price
unit_price_log = np.log1p(all_df.unit_price)
mean = unit_price_log.mean()
std = unit_price_log.std()
i = unit_price_log[(unit_price_log <= mean + 3 * std) & (unit_price_log >= mean - 3.2 * std)].index
sns.set({'figure.figsize': (8, 4)})
sns.boxplot(all_df.loc[i].unit_price)
all_df.loc[i].unit_price.describe()
all_df = all_df.loc[i]
            # Fix the total price
            # Fix unit errors in the total price (values recorded in 10,000s)
all_df.loc[all_df.price <= 10000, 'price'] *= 10000
            # Distribution of the price discrepancy
            anomaly_price = np.abs(all_df.unit_price * all_df.acreage - all_df.price)
            anomaly_price_index = anomaly_price[anomaly_price > 100000].index # too large a discrepancy marks an outlier
            # Simply drop the anomalous samples
all_df.drop(anomaly_price_index, axis=0, inplace=True)
            # Ring road (Module)
all_df.loc[all_df[all_df.Module == '所有'].index, 'Module'] = '内环内'
# sorted_module = all_df[['unit_price', 'Module']].groupby('Module').median().sort_values('unit_price')
# i = pd.Series(range(0, sorted_module.shape[0]), index=sorted_module.index)
# all_df.Module = all_df.Module.map(i.to_dict())
            # Floor mapping
            all_df.loc[all_df.floor < 0, 'floor'] = np.nan
            # Map floors into bands
all_df['floor_section'] = self.floor_map(all_df.floor)
            # Orientation ("shore") factor
            # '暂无' ("not available yet") is the default for missing values
all_df.shore.replace({'暂无数据': '暂无', ' ': '暂无', '': '暂无'}, inplace=True)
sorted_shore = all_df[['unit_price', 'shore']].groupby('shore').mean().sort_values('unit_price')
i = pd.Series(range(0, sorted_shore.shape[0]), index=sorted_shore.index)
all_df.shore = all_df.shore.map(i.to_dict())
            # House type
all_df.loc[all_df[(all_df.house_type == '其它') | (all_df.house_type == '工厂')].index, 'house_type'] = '公寓'
sorted_house_type = all_df[['house_type', 'unit_price']].groupby('house_type').median().sort_values(
'unit_price')
i = pd.Series(range(0, sorted_house_type.shape[0]), index=sorted_house_type.index)
i.to_dict()
all_df.house_type = all_df.house_type.map(i.to_dict())
            # Fitment (renovation) status
            default_fit = '暂无' # fill value for missing fields
all_df.fitment.replace({'': default_fit, '暂无数据': default_fit, '豪华装': '豪装', '其他': default_fit}, inplace=True)
all_df.fitment = all_df.fitment.apply(lambda x: x.strip('修'))
sorted_fitment = all_df[['fitment', 'unit_price']].groupby('fitment').median().sort_values('unit_price')
i = pd.Series(range(0, sorted_fitment.shape[0]), index=sorted_fitment.index)
all_df.fitment = all_df.fitment.map(i.to_dict())
            # Room layout
            r = re.compile('室|厅|厨|卫') # regex to extract room/hall/kitchen/bath counts
l = [map(int, r.split(i)[:-1]) for i in all_df.room_type]
room_type_df = pd.DataFrame(l, index=all_df.index, columns=['室', '厅', '厨', '卫'])
all_df = pd.concat((all_df, room_type_df), axis=1)
            # Time: convert the build year to a building age
all_df.time = all_df.time.apply(lambda x: self.time_map(x)).astype(int)
all_df.time = all_df.time.apply(lambda x: min(2018 - x, 100) if 0 < x <= 2018 else None)
            # Coordinates (longitude/latitude)
coors = self.make_coordinates(all_df.Coordinates.values)
all_df.index = coors.index
all_df = pd.concat((all_df, coors), axis=1).drop('Coordinates', axis=1)
            # Cache the feature matrix
all_df = all_df[all_df.unit_price.notnull()]
all_df.to_hdf(cache_path, 'data')
print('共有%d条训练数据' % all_df.shape[0])
return all_df
def train_model(self, x_train, y_train):
# * LightGBM
# cache_path = os.path.dirname(os.path.realpath(__file__))+'/cache/model_%s-%s_%s.txt' % (beginDate, endDate, x_train.shape[1])
if os.path.exists(self.cache_path):
print('使用缓存中的模型,不再训练...')
gbm = lgb.Booster(model_file=self.cache_path)
else:
print('开始模型训练...')
            # Set the model parameters
# params = {
# 'objective': 'regression',
# 'metric': 'mse',
# 'learning_rate': 0.2,
# 'feature_fraction': 0.6,
# 'bagging_fraction': 0.6,
# 'max_depth': 14,
# 'num_leaves': 220,
# 'bagging_freq': 5,
# 'min_data_in_leaf': 10,
# 'min_gain_to_split': 0,
# 'lambda_l1': 1,
# 'lambda_l2': 1,
# 'verbose': 0,
# }
params = {
'objective': self.objective,
'metric': self.metric,
'learning_rate': self.learning_rate,
'feature_fraction': self.feature_fraction,
'bagging_fraction': self.bagging_fraction,
'max_depth': self.max_depth,
'num_leaves': self.num_leaves,
'bagging_freq': self.bagging_freq,
'min_data_in_leaf': self.min_data_in_leaf,
'min_gain_to_split': self.min_gain_to_spilt,
'lambda_l1': self.lambda_l1,
'lambda_l2': self.lambda_l2,
'verbose': self.verbose,
}
lgb_train = lgb.Dataset(x_train, y_train, categorical_feature=['area', 'Plate', 'Module', 'floor_section'])
gbm = lgb.train(params, lgb_train, num_boost_round=750)
gbm.save_model(self.cache_path)
return gbm
def make_train_set(self, all_df):
        '''Compute per-unit prices.'''
        # Training set
x_train = all_df[
['acreage', 'all_floor', 'floor', 'time', 'NewDiskID',
'area', 'Plate', 'Module', 'floor_section', 'loc_x', 'loc_y']]
        # Compute the median housing price for each estate
med_price = pd.concat((x_train.NewDiskID, all_df.unit_price), axis=1)
med_price = med_price.groupby('NewDiskID', as_index=False)['unit_price'].agg({'median': 'mean'})
med_price.to_csv(self.medprice_path, index=False)
x_train = pd.merge(x_train, med_price, on='NewDiskID', how='left')
        # Convert categorical variables to integers for LightGBM training
        area_le = LabelEncoder() # major district (area)
arealabel_name = pd.unique(x_train.area)
x_train.area = area_le.fit_transform(x_train.area)
arealabel = pd.DataFrame({"area": arealabel_name, "label": area_le.transform(arealabel_name)})
        arealabel.to_csv(self.arealabel_path) # record the label mapping so that prediction later reuses the same labels
        plate_le = LabelEncoder() # plate (sub-district)
platelabel_name = | pd.unique(x_train.Plate) | pandas.unique |
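# Illustrative sketch (standalone; district names are invented): how a fitted
# LabelEncoder's classes can be stored as a label table and reused later, so that
# prediction applies exactly the same area -> integer mapping as training.
import pandas as pd
from sklearn.preprocessing import LabelEncoder
train_areas = pd.Series(['Pudong', 'Minhang', 'Xuhui', 'Pudong'])
area_encoder = LabelEncoder()
encoded_train = area_encoder.fit_transform(train_areas)
# A persistable mapping table, analogous to arealabel.csv above.
area_labels = pd.DataFrame({'area': area_encoder.classes_,
                            'label': range(len(area_encoder.classes_))})
# At prediction time, map new values through the saved table instead of refitting.
lookup = dict(zip(area_labels['area'], area_labels['label']))
encoded_new = pd.Series(['Xuhui', 'Pudong']).map(lookup)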
# Copyright 2021 AI Singapore. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import pandas as pd
from rarity.data_loader import CSVDataLoader, DataframeLoader
# add this in the conftest.py under tests folder
from selenium.webdriver.chrome.options import Options
def pytest_setup_options():
options = Options()
# added mainly for integration test in gitlab-ci to resolve
# (unknown error: DevToolsActivePort file doesn't exist)
# (The process started from chrome location /usr/bin/google-chrome is no longer running,
# so ChromeDriver is assuming that Chrome has crashed.)
# solution reference => https://github.com/plotly/dash/issues/1420
options.add_argument('--no-sandbox')
return options
@pytest.fixture
def csv_loader_single_modal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_single_modal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls_multi():
SAMPLE_DATA_DIR = './tests/sample_data/classification/multiclass/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Multiclass-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_single_modal_reg():
DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6]], columns=['x1', 'x2', 'x3'])
DF_Y_TRUE = pd.DataFrame([[22.6], [36.6]], columns=['actual'])
DF_Y_PRED_1 = pd.DataFrame([[22.2], [35.0]], columns=['pred'])
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Regression'
data_loader = DataframeLoader(DF_FEATURES,
DF_Y_TRUE,
df_yPred_ls=[DF_Y_PRED_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_single_modal_cls():
DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6], [0.3, 2.3, 5.2]], columns=['x1', 'x2', 'x3'])
DF_Y_TRUE = pd.DataFrame([[0], [1], [1]], columns=['actual'])
DF_Y_PRED_1 = | pd.DataFrame([[0.38, 0.62], [0.86, 0.14], [0.78, 0.22]], columns=['0', '1']) | pandas.DataFrame |
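# Illustrative sketch (standalone, invented values): the shape of the inputs the
# DataframeLoader fixtures above assemble -- a feature frame, a one-column ground
# truth frame and one class-probability frame per model, all aligned row by row.
import pandas as pd
toy_features = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6]], columns=['x1', 'x2', 'x3'])
toy_y_true = pd.DataFrame([[0], [1]], columns=['actual'])
toy_y_pred = pd.DataFrame([[0.7, 0.3], [0.2, 0.8]], columns=['0', '1'])
assert len(toy_features) == len(toy_y_true) == len(toy_y_pred)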
import pandas as pd
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
def categorical_round(data, cols):
data[:, cols] = np.round(data[:, cols])
return data
def vec_translate(a, my_dict):
return np.vectorize(my_dict.__getitem__)(a)
def categorical_frequency_mapping(data, columns):
"""Sort the """
inv_mappings = {}
for col in columns:
unique_values, counts = np.unique(data[:, col], return_counts=True)
sorted_values = unique_values[np.argsort(-counts)]
mapping = {k: v for k, v in zip(unique_values, sorted_values)}
data[:, col] = vec_translate(data[:, col], mapping)
inv_mappings[col] = {k: v for k, v in zip(sorted_values, unique_values)}
return data, inv_mappings
def categorical_frequency_inverse_mapping(data, columns, inv_mappings):
for col in columns:
data[:, col] = vec_translate(data[:, col], inv_mappings[col])
data = categorical_round(data, columns)
return data
def encode_one_hot(df, cols):
categorical_data = | pd.DataFrame(df[:, cols], columns=cols) | pandas.DataFrame |
"""
A set of classes for aggregation of TERA data sources into common formats.
"""
from rdflib import Graph, Namespace, Literal, URIRef, BNode
from rdflib.namespace import RDF, OWL, RDFS
UNIT = Namespace('http://qudt.org/vocab/unit#')
import pandas as pd
import validators
import glob
import math
from tqdm import tqdm
import warnings
import copy
import tera.utils as ut
nan_values = ['nan', float('nan'),'--','-X','NA','NC',-1,'','sp.', -1,'sp,','var.','variant','NR','sp','ssp','ssp.','ssp,']
class DataObject:
def __init__(self, namespace = 'http://www.example.org/', verbose = True, name = 'Data Object'):
"""
Base class for aggregation of data.
Parameters
----------
namespace : str
Base URI for the data set.
verbose : bool
"""
self.graph = Graph()
self.namespace = Namespace(namespace)
self.name = name
self.verbose = verbose
def __add__(self, other):
c = copy.deepcopy(self)
c.graph += other.graph
return c
def __str__(self):
return self.name
def __dict__(self):
return {
'namespace':self.namespace,
'num_triples':len(self.graph)
}
def __del__(self):
self.graph = Graph()
def save(self, path):
"""Save graph to file.
Parameters
----------
path : str
ex: file.nt
"""
self.graph.serialize(path, format=path.split('.').pop(-1))
def replace(self, converted):
"""Replace old entities with new in data object.
        Useful after converting between datasets.
Parameters
----------
converted : list
list of (old, new) tuples.
"""
if len(converted) < 1:
warnings.warn('Empty mapping list.')
return
tmp = set()
for old, new in converted:
triples = self.graph.triples((old,None,None))
tmp |= set([(new,p,o) for _,p,o in triples])
triples = self.graph.triples((None, None, old))
tmp |= set([(s,p,new) for s,p,_ in triples])
self.graph.remove((old,None,None))
self.graph.remove((None,None,old))
for t in tmp:
self.graph.add(t)
def apply_func(self, func, dataframe, cols, sub_bar=False):
pbar = None
if self.verbose and not sub_bar:
pbar = tqdm(total=len(dataframe.index),desc=self.name)
for row in zip(*[dataframe[c] for c in cols]):
func(row)
if pbar: pbar.update(1)
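# Minimal usage sketch (illustrative only, not part of the module's API): a bare
# DataObject is just an rdflib Graph plus a base namespace, so triples can be added
# to it directly. The URI fragments below are invented.
def _dataobject_sketch():
    obj = DataObject(namespace='http://www.example.org/', name='Sketch')
    taxon = obj.namespace['taxon/1']
    obj.graph.add((taxon, RDF.type, obj.namespace['Taxon']))
    obj.graph.add((taxon, RDFS.label, Literal('example taxon')))
    return len(obj.graph)  # 2 triples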
class Taxonomy(DataObject):
def __init__(self,
namespace = 'https://www.ncbi.nlm.nih.gov/taxonomy/',
name = 'NCBI Taxonomy',
verbose = True,
directory = None):
"""
Aggregation of the NCBI Taxonomy.
Parameters
----------
directory : str
Path to data set. Downloaded from ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/new_taxdump/new_taxdump.zip
"""
super(Taxonomy, self).__init__(namespace, verbose, name)
if directory:
self._load_ncbi_taxonomy(directory)
self.verbose = verbose
def _add_subproperties(self, uri, pref = False):
        self.graph.add((uri,RDFS.subPropertyOf,RDFS.label))
        if pref:
            self.graph.add((uri,RDFS.subPropertyOf,URIRef('http://www.w3.org/2004/02/skos/core#prefLabel')))
def _load_ncbi_taxonomy(self, directory):
self._load_hierarchy(directory+'nodes.dmp')
self._load_divisions(directory+'division.dmp')
self._load_names(directory+'names.dmp')
self._add_domain_and_range_triples()
self._add_disjoint_axioms()
def _load_hierarchy(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2,4], names=['child','parent','rank','division'], na_values = nan_values, dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
c,p,r,d = row
c = self.namespace['taxon/'+str(c)]
rc = r
r = r.replace(' ','_')
if r != 'no_rank':
self.graph.add((c, self.namespace['rank'], self.namespace['rank/'+r]))
self.graph.add((self.namespace['rank/'+r], RDFS.label, Literal(rc)))
self.graph.add((self.namespace['rank/'+r], RDF.type, self.namespace['Rank']))
p = self.namespace['taxon/'+str(p)]
d = str(d).replace(' ','_')
d = self.namespace['division/'+str(d)]
if r == 'species': #species are treated as instances
self.graph.add((c,RDF.type, p))
self.graph.add((c, RDF.type, d))
else:
self.graph.add((c,RDFS.subClassOf, p))
self.graph.add((c, RDFS.subClassOf, d))
self.apply_func(func, df, ['child','parent','rank','division'])
def _load_names(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2,3], names=['taxon','name','unique_name','name_type'],na_values = nan_values,dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
c,n,un,nt = row
c = self.namespace['taxon/'+str(c)]
n = Literal(n)
un = Literal(un)
if len(un) > 0:
self.graph.add((c, self.namespace['uniqueName'], un))
self._add_subproperties(self.namespace['uniqueName'], pref=True)
if len(n) > 0:
ntl = Literal(nt)
nt = self.namespace[nt.replace(' ','_')]
self._add_subproperties(nt,pref=False)
self.graph.add((c,nt,n))
self.graph.add((nt,RDFS.label,ntl))
self.graph.add((nt,RDFS.domain,self.namespace['Taxon']))
self.apply_func(func, df, ['taxon','name','unique_name','name_type'])
def _load_divisions(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2], names=['division','acronym','name'], na_values = nan_values, dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
d,a,n = row
d = self.namespace['division/'+str(d)]
self.graph.add((d,RDF.type,self.namespace['Division']))
self.graph.add((d,RDFS.label,Literal(n)))
#self.graph.add((d,RDFS.label,Literal(a)))
self.apply_func(func, df, ['division','acronym','name'])
def _add_domain_and_range_triples(self):
self.graph.add((self.namespace['rank'],RDFS.domain,self.namespace['Taxon']))
self.graph.add((self.namespace['rank'],RDFS.range,self.namespace['Rank']))
def _add_disjoint_axioms(self):
for d in [self.namespace['division/1'], #Invertebrates
self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/0'], #Bacteria
                            OWL.disjointWith,d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/1'], #Invertebrates
                            OWL.disjointWith,d))
for d in [self.namespace['division/4'], #Plants and Fungi
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/2'], #Mammals
                            OWL.disjointWith,d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/3'], #Phages
                            OWL.disjointWith,d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/4'], #Plants and Fungi
                            OWL.disjointWith,d))
for d in [self.namespace['division/1']]: #Invertebrates
self.graph.add((self.namespace['division/5'], #Primates
                            OWL.disjointWith,d))
for d in [self.namespace['division/1']]: #Invertebrates
self.graph.add((self.namespace['division/6'], #Rodents
                            OWL.disjointWith,d))
for d in [self.namespace['division/1'], #Invertebrates
self.namespace['division/0'], #Bacteria
self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/9'], #Viruses
                            OWL.disjointWith,d))
class Traits(DataObject):
def __init__(self,
namespace = 'https://eol.org/pages/',
name = 'EOL Traits',
verbose = True,
directory = None):
"""
Encyclopedia of Life Traits.
Parameters
----------
directory : str
Path to data set. See https://opendata.eol.org/dataset/all-trait-data-large
"""
super(Traits, self).__init__(namespace, verbose, name)
if directory:
self._load_eol_traits(directory)
def _load_eol_traits(self, directory):
self._load_traits(directory+'trait_bank/traits.csv')
self._load_desc(directory+'trait_bank/terms.csv')
for f in glob.glob(directory+'eol_rels/*.csv'):
self._load_eol_subclasses(f)
def _load_traits(self, path):
df = pd.read_csv(path, sep=',', usecols=['page_id','predicate','value_uri'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
s,p,o = row
s = self.namespace[s]
try:
val = validators.url(o)
o = URIRef(o)
except TypeError:
o = Literal(o)
val = True
if validators.url(s) and validators.url(p) and val:
self.graph.add((URIRef(s),URIRef(p),o))
self.apply_func(func, df, ['page_id','predicate','value_uri'])
def _load_literal_traits(self,path):
df = pd.read_csv(path, sep=',', usecols=['page_id','predicate','measurement','units_uri'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
s,p,o,u = row
s = self.namespace[s]
try:
o = Literal(o)
u = URIRef(u)
bnode = BNode()
self.graph.add((bnode,RDF.value,o))
self.graph.add((bnode,UNIT.units,u))
self.graph.add((URIRef(s),URIRef(p),bnode))
except TypeError:
pass
        self.apply_func(func, df, ['page_id','predicate','measurement','units_uri'])
def _load_desc(self, path):
df = pd.read_csv(path, sep=',', usecols=['uri','name'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
uri,name = row
if validators.url(uri) and name:
self.graph.add((URIRef(uri),RDFS.label,Literal(name)))
self.apply_func(func, df, ['uri','name'])
def _load_eol_subclasses(self, path):
try:
try:
df = pd.read_csv(path,sep=',',usecols=['child','parent'],na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
except ValueError:
df = pd.read_csv(path,sep=',',header=None,na_values = nan_values, dtype=str)
df.columns = ['parent','child']
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
except FileNotFoundError as e:
print(e,path)
def func(row):
c,p = row
if validators.url(c) and validators.url(p):
c,p = URIRef(c),URIRef(p)
self.graph.add((c,RDFS.subClassOf,p))
self.apply_func(func, df, ['child','parent'])
class Effects(DataObject):
def __init__(self,
namespace = 'https://cfpub.epa.gov/ecotox/',
name = 'Ecotox Effects',
verbose = True,
directory = None):
"""
Ecotox effects data aggregation.
Parameters
----------
directory : str
Path to data set. Downloaded from ftp://newftp.epa.gov/ecotox/ecotox_ascii_12_12_2019.exe
"""
super(Effects, self).__init__(namespace, verbose, name)
self._load_effect_data(directory + 'tests.txt', directory + 'results.txt')
def _load_effect_data(self, tests_path, results_path):
tests = pd.read_csv(tests_path, sep='|', dtype = str, na_values = nan_values)
tests.dropna(inplace=True, subset=['test_id',
'test_cas',
'species_number'])
tests.fillna(inplace=True, value='missing')
tests = tests.apply(lambda x: x.str.strip())
results = pd.read_csv(results_path, sep='|', dtype = str, na_values = nan_values)
results.dropna(inplace=True, subset=['test_id','endpoint','conc1_mean','conc1_unit','effect'])
results.fillna(inplace=True, value='missing')
results = results.apply(lambda x: x.str.strip())
def test_func(row):
test_id, cas_number, species_number, stdm, stdu, habitat, lifestage, age, ageunit, weight, weightunit = row
#must be included
t = self.namespace['test/'+str(test_id)]
s = self.namespace['taxon/'+str(species_number)]
c = self.namespace['cas/'+str(cas_number)]
self.graph.add((t, RDF.type, self.namespace['Test']))
self.graph.add((t, self.namespace['species'], s))
self.graph.add((t, self.namespace['chemical'], c))
for v,u,p in zip([stdm,age,weight],[stdu,ageunit,weightunit],['studyDuration','organismAge','organismWeight']):
if v != 'missing':
b = BNode()
self.graph.add( (b, RDF.value, Literal(v)) )
if u != 'missing':
u = ut.unit_parser(u)
if u:
self.graph.add( (b, UNIT.units, UNIT[u]) )
self.graph.add( (t, self.namespace[p], b) )
if habitat != 'missing':
self.graph.add((t, self.namespace['organismHabitat'],self.namespace['habitat/'+habitat]))
if lifestage != 'missing':
self.graph.add((t, self.namespace['organismLifestage'],self.namespace['lifestage/'+lifestage]))
def results_func(row):
test_id, endpoint, conc, conc_unit, effect = row
t = self.namespace['test/'+str(test_id)]
r = BNode()
ep = self.namespace['endpoint/'+str(endpoint)]
ef = self.namespace['effect/'+str(effect)]
self.graph.add((r,self.namespace['endpoint'],ep))
self.graph.add((r,self.namespace['effect'],ef))
b = BNode()
conc = ''.join(filter(str.isdigit, conc))
if conc:
self.graph.add( (b, RDF.value, Literal(conc)) )
if conc_unit != 'missing':
u = ut.unit_parser(conc_unit)
if u:
self.graph.add( (b, UNIT.units, UNIT[u]) )
self.graph.add( (r, self.namespace['concentration'], b) )
self.graph.add( (t, self.namespace['hasResult'],r) )
self.apply_func(test_func, tests, ['test_id',
'test_cas',
'species_number',
'study_duration_mean',
'study_duration_unit',
'organism_habitat',
'organism_lifestage',
'organism_age_mean',
'organism_age_unit',
'organism_init_wt_mean',
'organism_init_wt_unit'])
self.apply_func(results_func, results, ['test_id','endpoint','conc1_mean','conc1_unit','effect'])
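# Illustrative sketch of the blank-node measurement pattern used in the result
# triples above: each measured quantity is a BNode carrying its numeric value
# (rdf:value) and its unit. The URI fragment and values are invented, and the unit
# term is a placeholder rather than a verified QUDT identifier.
def _measurement_sketch():
    g = Graph()
    ns = Namespace('https://cfpub.epa.gov/ecotox/')
    result, measurement = BNode(), BNode()
    g.add((measurement, RDF.value, Literal('10.5')))
    g.add((measurement, UNIT.units, UNIT['MilligramPerLiter']))  # placeholder unit
    g.add((result, ns['concentration'], measurement))
    return g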
class EcotoxTaxonomy(DataObject):
def __init__(self,
namespace = 'https://cfpub.epa.gov/ecotox/',
name = 'Ecotox Taxonomy',
verbose = True,
directory = None):
"""
Ecotox taxonomy aggregation.
Parameters
----------
directory : str
Path to dataset. Downloaded from ftp://newftp.epa.gov/ecotox/ecotox_ascii_12_12_2019.exe
"""
super(EcotoxTaxonomy, self).__init__(namespace, verbose, name)
self._load_taxa(directory + 'validation/species.txt' )
self._load_synonyms(directory + 'validation/species_synonyms.txt')
self._load_hierarchy(directory + 'validation/species.txt')
self._add_subproperties()
self._add_domain_and_range_triples()
self._add_disjoint_axioms()
def _add_subproperties(self):
        self.graph.add((self.namespace['latinName'],RDFS.subPropertyOf,RDFS.label))
        self.graph.add((self.namespace['latinName'],RDFS.subPropertyOf,URIRef('http://www.w3.org/2004/02/skos/core#prefLabel')))
        self.graph.add((self.namespace['commonName'],RDFS.subPropertyOf,RDFS.label))
def _load_taxa(self, path):
df = pd.read_csv(path, sep='|', usecols=['species_number','common_name','latin_name','ecotox_group'], dtype = str, na_values = nan_values)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
s, cn, ln, group = row
s = self.namespace['taxon/'+s]
group = str(group).replace(' ','')
names = group.split(',')
tmp = group.split(',')
group_uri = [self.namespace['group/'+gr.replace('\W','')] for gr in tmp]
for gri,n in zip(group_uri,names):
if len(n) < 1: continue
self.graph.add((s, self.namespace['ecotoxGroup'], gri))
self.graph.add((gri, RDFS.label, Literal(n)))
if cn:
self.graph.add((s, self.namespace['commonName'], Literal(cn)))
if ln:
self.graph.add((s, self.namespace['latinName'], Literal(ln)))
self.apply_func(func, df, ['species_number','common_name','latin_name','ecotox_group'])
def _add_disjoint_axioms(self):
base = Namespace('https://cfpub.epa.gov/ecotox/group/')
for d in ['Worms',
'Algae',
'Insects/Spiders',
'Trees',
'Mammals',
'Fish',
'Reptiles',
'Moss',
'Ferns',
'Fungi',
'Crustaceans',
'Flowers',
'Shrubs']:
self.graph.add((base['Birds'],
                            OWL.disjointWith,
base[d]))
for d in ['Insects/Spiders',
'Trees',
'Moss',
'Ferns',
'Fungi']:
self.graph.add((base['Amphibians'],
                            OWL.disjointWith,
base[d]))
for d in ['Insects/Spiders',
'Trees',
'Moss',
'Ferns',
'Fungi',
'Mammals',
'Vertebrates',
'Reptiles',
'Crustaceans']:
self.graph.add((base['Algae'],
                            OWL.disjointWith,
base[d]))
for d in ['Trees',
'Moss',
'Ferns',
'Fungi',
'Fish',
'Mammals',
'Vertebrates']:
self.graph.add((base['Invertebrates'],
                            OWL.disjointWith,
base[d]))
for d in ['Birds',
'Trees',
'Moss',
'Ferns',
'Fungi',
'Mammals',
'Vertebrates',
'Fish']:
self.graph.add((base['Insects/Spiders'],
                            OWL.disjointWith,
base[d]))
for d in ['Birds',
'Trees',
'Moss',
'Ferns',
'Fungi',
'Mammals',
'Vertebrates',
'Fish']:
self.graph.add((base['Trees'],
                            OWL.disjointWith,
base[d]))
for d in ['Birds',
'Trees',
'Moss',
'Ferns',
'Fungi',
'Invertebrates',
'Fish',
'Flowers',
'Crustaceans']:
self.graph.add((base['Mammals'],
                            OWL.disjointWith,
base[d]))
for d in ['Birds',
'Trees',
'Moss',
'Ferns',
'Fungi',
'Mammals',
'Flowers',
'Crustaceans']:
self.graph.add((base['Fish'],
                            OWL.disjointWith,
base[d]))
for d in ['Trees',
'Moss',
'Ferns',
'Fungi',
'Mammals',
'Fish',
'Insects/Spiders',
'Crustaceans',
'Flowers']:
self.graph.add((base['Reptiles'],
                            OWL.disjointWith,
base[d]))
for d in ['Mammals',
'Fish',
'Crustaceans',
'Insects/Spiders',
'Worms',
'Birds']:
self.graph.add((base['Moss'],
                            OWL.disjointWith,
base[d]))
for d in ['Mammals',
'Fish',
'Crustaceans',
'Insects/Spiders',
'Worms',
'Birds']:
self.graph.add((base['Ferns'],
                            OWL.disjointWith,
base[d]))
for d in ['Mammals',
'Fish',
'Vertebrates',
'Invertebrates',
'Crustaceans',
'Insects/Spiders',
'Worms',
'Birds']:
self.graph.add((base['Fungi'],
                            OWL.disjointWith,
base[d]))
for d in ['Mammals',
'Fish',
'Vertebrates',
'Insects/Spiders',
'Worms',
'Birds']:
self.graph.add((base['Crustaceans'],
                            OWL.disjointWith,
base[d]))
def _load_synonyms(self, path):
df = pd.read_csv(path, sep='|', dtype = str, na_values = nan_values)
df.dropna(inplace=True, subset=['species_number','latin_name'])
df = df.apply(lambda x: x.str.strip())
def func(row):
s, ln = row
s = self.namespace['taxon/'+s]
self.graph.add((s, self.namespace['synonym'], Literal(ln)))
self.apply_func(func, df, ['species_number','latin_name'])
def _load_hierarchy(self, path):
ks = ['species_number',
'genus',
'family',
'tax_order',
'class',
'superclass',
'subphylum_div',
'phylum_division',
'kingdom']
df = pd.read_csv(path, usecols=ks, sep= '|', dtype = str, na_values = nan_values)
df.dropna(inplace=True, subset=['species_number'])
df = df.apply(lambda x: x.str.replace('\W',''))
def func(row):
sn, *lineage = row
for k,l in zip(['species']+ks[1:],lineage):
rank = k
if not pd.isnull(l):
break
rank = self.namespace['rank/'+rank]
self.graph.add((rank,RDF.type,self.namespace['Rank']))
lineage = [self.namespace['taxon/'+str(l).strip()] for l in lineage if not | pd.isnull(l) | pandas.isnull |
"""
.. module:: croprows_geo.py
:synopsis: Geospatial operations for croprows
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import croprows_globals as crglobals
import croprows_utils as crutils
from croprows_fileutils import saveResultXMLFile
import imutils_boundingrect as imboundrect
import pandas as pd
import geopandas as gpd
import json
import geojson
import fiona
from shapely.geometry import shape
from shapely.wkt import dumps, loads
from shapely.geometry import Point
from shapely.geometry.polygon import LineString, LinearRing, Polygon
from shapely.ops import nearest_points
import sys
import os
from os import walk
import time
import numpy as np
from numpy import zeros
import cv2
import math
import re
import itertools
from joblib import Parallel, delayed
import multiprocessing
from multiprocessing import active_children, Pool, freeze_support
from scipy.sparse.csgraph import connected_components
fiona.drvsupport.supported_drivers['kml'] = 'rw' # enable KML support which is disabled by default
fiona.drvsupport.supported_drivers['KML'] = 'rw' # enable KML support which is disabled by default
#inspired in shapely-extending-line-feature
#see: https://stackoverflow.com/questions/33159833/shapely-extending-line-feature?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
def getExtrapoledLine(startPoint,endPoint):
"""
    getExtrapoledLine. Creates a line extrapolated in the startPoint->endPoint direction.
:param startPoint: (Point) start point.
:param endPoint: (Point) end point.
:returns LineString: (LineString) line.
"""
a = (startPoint[0]-crglobals.EXTRAPOL_RATIO*(endPoint[0]-startPoint[0]), startPoint[1]-crglobals.EXTRAPOL_RATIO*(endPoint[1]-startPoint[1]))
b = (startPoint[0]+crglobals.EXTRAPOL_RATIO*(endPoint[0]-startPoint[0]), startPoint[1]+crglobals.EXTRAPOL_RATIO*(endPoint[1]-startPoint[1]))
return LineString([a,b])
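# Illustrative check of the extrapolation formula above, with an assumed ratio of 10
# (the real value comes from crglobals.EXTRAPOL_RATIO). Start (0, 0) and end (1, 0)
# stretch to (-10, 0) and (10, 0): the segment is extended on both sides of the
# start point along the original direction.
def _extrapolation_sketch(ratio=10.0):
    start, end = (0.0, 0.0), (1.0, 0.0)
    a = (start[0] - ratio * (end[0] - start[0]), start[1] - ratio * (end[1] - start[1]))
    b = (start[0] + ratio * (end[0] - start[0]), start[1] + ratio * (end[1] - start[1]))
    return LineString([a, b])  # LINESTRING (-10 0, 10 0)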
def extendAllLinesInParallel(tilesDirName,epsgValue,iCols,jRows,maskVectorFile,seedValue):
"""
extendAllLinesInParallel.
:param tilesDirName: (String) tile directory name.
    :param epsgValue: (String) code for the spatial reference system.
:param iCols: (int) current column.
:param jRows: (int) current row.
:param maskVectorFile: (String) mask file name.
:param seedValue: (int) seed for crop rows orientation.
:returns none: None.
"""
crutils.printLogMsg(crglobals.SEPARATOR_MSG)
crutils.printLogMsg(crglobals.WORKER_MSG+"extendAllLinesInParallel -> Processing Tile %s , %s " % (str(iCols), str(jRows) ) )
processName = multiprocessing.current_process().name
crutils.printLogMsg(crglobals.START_MSG+'Process name: %s ' % (processName))
crutils.printLogMsg(crglobals.START_MSG+"Parent processs: %s" % (str( os.getppid() )) )
crutils.printLogMsg(crglobals.START_MSG+"Process id: %s" % (str( os.getpid() )) )
crutils.printLogMsg(crglobals.CHECK_MSG+'tilesDirName: %s' % (tilesDirName))
crutils.printLogMsg(crglobals.OK_MSG+'EPSG: %s' % (epsgValue))
crutils.printLogMsg(crglobals.OK_MSG+'iCols: %s' % (iCols))
crutils.printLogMsg(crglobals.OK_MSG+'jRows: %s' % (jRows))
dirNameVectorResults = os.path.join(tilesDirName,crglobals.VECTORDIR)
dirNameVectorObjResults = os.path.join(dirNameVectorResults,crglobals.OBJDIR)
crutils.printLogMsg(crglobals.OK_MSG+'Vector dir: %s' % (dirNameVectorResults))
crutils.printLogMsg(crglobals.OK_MSG+'Vector dir Obj: %s' % (dirNameVectorObjResults))
crutils.printLogMsg(crglobals.OK_MSG+'Mask File: %s' % (maskVectorFile))
boundsVectorFile = crglobals.PICNAME+"-"+crglobals.COLPREFIX+str(iCols)+"-"+crglobals.ROWPREFIX+str(jRows)+crglobals.GEOJSON_EXT
linesVectorFile = crglobals.PICNAME+"-"+crglobals.COLPREFIX+str(iCols)+"-"+crglobals.ROWPREFIX+str(jRows)+"_"+crglobals.VECTORLINES+crglobals.GEOJSON_EXT
crutils.printLogMsg(crglobals.OK_MSG+"Bounds File : %s" % (boundsVectorFile))
crutils.printLogMsg(crglobals.OK_MSG+"Lines File: %s" % (linesVectorFile))
crutils.printLogMsg(crglobals.CHECK_MSG+'File %s is correct !' % (linesVectorFile) )
extendLinesGeom(iCols,jRows,epsgValue,dirNameVectorResults,dirNameVectorObjResults,boundsVectorFile,linesVectorFile,maskVectorFile,seedValue)
crutils.printLogMsg(crglobals.SEPARATOR_MSG)
return 1
def extendLinesGeom(col,row,epsgValue,dirNameVectorResults,dirNameVectorObjResults,boundsVectorFile,linesVectorFile,maskVectorFile,seedValue):
"""
extendLinesGeom.
:param col: (int) current column.
:param row: (int) current row.
:param dirNameVectorResults: (String) vector dir path.
:param boundsVectorFile: (String) bounds file.
:param linesVectorFile: (String) lines file.
:param maskVectorFile: (String) mask file.
:returns none: (None) nothing.
"""
linesExtendedFileName = crglobals.PICNAME+"-"+crglobals.COLPREFIX+str(col)+"-"+crglobals.ROWPREFIX+str(row)+"_"+crglobals.VECTORLINES+"_ext.shp"
#os.path.join(
if (os.path.exists( os.path.join(dirNameVectorResults, boundsVectorFile) )) and (os.path.exists( os.path.join(dirNameVectorObjResults,linesVectorFile) )) :
crutils.printLogMsg(crglobals.CHECK_MSG+'Exists: %s' % (os.path.join(dirNameVectorResults,boundsVectorFile)))
crutils.printLogMsg(crglobals.CHECK_MSG+'Exists: %s' % (os.path.join(dirNameVectorObjResults,linesVectorFile)))
boxGeoDataFrame = gpd.GeoDataFrame.from_file(os.path.join(dirNameVectorResults,boundsVectorFile))#dirNameVectorResults+"/"+boundsVectorFile)
try:
linesGeoDataFrame = gpd.GeoDataFrame.from_file(os.path.join(dirNameVectorObjResults,linesVectorFile))#dirNameVectorObjResults+"/"+linesVectorFile)
maskGeoDataFrame = gpd.GeoDataFrame.from_file(maskVectorFile)
extendProcessing(boxGeoDataFrame,linesGeoDataFrame,epsgValue,dirNameVectorObjResults,linesExtendedFileName,maskGeoDataFrame,col,row,seedValue)
except KeyError as exception:
crutils.printLogMsg('Bounds file: %s' % (boundsVectorFile))
crutils.printLogMsg('Lines file: %s' % (linesVectorFile))
crutils.printLogMsg(crglobals.FAIL_MSG+'Geometry not found')
else:
crutils.printLogMsg(crglobals.FAIL_MSG+'Lines or bounds file does not exist %s' % (os.path.join(dirNameVectorObjResults,linesVectorFile)))
return 1
def extendProcessing(boxGeoDataFrame,linesGeoDataFrame,epsgValue,dirNameVectorObjResults,linesExtendedFileName,vectorMask,col,row,seedValue):
"""
extendProcessing.
    :param boxGeoDataFrame: (GeoDataFrame) tile bounding box.
    :param linesGeoDataFrame: (GeoDataFrame) detected crop-row lines.
:param epsgValue: (int) spatial reference system
:param dirNameVectorObjResults: (String) vector objects folder
:param linesExtendedFileName: (String) lines extended file.
:param seedValue: (int) seed for crop rows orientation.
:returns none: None.
"""
crsEpsgId = {'init': 'epsg:'+str(epsgValue)}
longLinesArray = []
idLongLinesArray = []
cuttedLineArray = []
newidcutedline = []
newobjdistances = []
distanceLinear = []
index = []
flagCounter = 0
externalPoint = Point( (0,0) )
#Extrapolate lines
for x in range (0 , len(linesGeoDataFrame.geometry)):
linea_bx= (list(linesGeoDataFrame.geometry[x].coords))
extrapoledLine = getExtrapoledLine(*linea_bx[-2:])
idLongLinesArray.append(x)
longLinesArray.append(extrapoledLine)
dataFrameLongLines = pd.DataFrame({'id': idLongLinesArray})
longLinesGeoDataFrame=gpd.GeoDataFrame(dataFrameLongLines, crs=crsEpsgId, geometry=longLinesArray)
crutils.printLogMsg(crglobals.DONE_MSG+'Generated long lines !')
dataFrameLineCuttedByBox=(longLinesGeoDataFrame.intersection(boxGeoDataFrame.geometry.iloc[0]))
geoDataFrameLinesCuttedByBox=gpd.GeoDataFrame(crs=crsEpsgId, geometry=dataFrameLineCuttedByBox)
crutils.printLogMsg(crglobals.DONE_MSG+'Cut long lines by bounds !')
#############################################
### TEST #change 06-06-2018
#Get the convex hull lines
convexHullFromMask = vectorMask.convex_hull.iloc[0]
x, y = convexHullFromMask.exterior.xy
pointsConvexHullFromMaskArray = np.array(list(zip(x,y)))
minBBoxRect= imboundrect.minimum_bounding_rectangle(pointsConvexHullFromMaskArray)
polygonOMBB = Polygon([minBBoxRect[0], minBBoxRect[1], minBBoxRect[2] , minBBoxRect[3]])
#cut lines by ombb
#longLinesGeoDataFrame
dataFrameLineCuttedByMask=(geoDataFrameLinesCuttedByBox.intersection(polygonOMBB))
#############################################
#change 06-06-2018
#dataFrameLineCuttedByMask=(geoDataFrameLinesCuttedByBox.intersection(vectorMask.geometry.iloc[0]))
geoDataFrameLineCuttedByMask=gpd.GeoDataFrame(crs=crsEpsgId, geometry=dataFrameLineCuttedByMask)
crutils.printLogMsg(crglobals.DONE_MSG+'Line clipping by mask!')
#################################
#if cutlinedk[0].length > 0:
# angle=crutils.getAzimuth( (cutlinedk[0].coords[0][0]) , (cutlinedk[0].coords[0][1]) , (cutlinedk[0].coords[1][0]) , (cutlinedk[0].coords[1][1]) )
# anglep =(angle+270)
# xp = (np.min(box.geometry.bounds.minx)) + np.sin(np.deg2rad(anglep)) * 10
# yp = (np.max(box.geometry.bounds.maxy)) + np.cos(np.deg2rad(anglep)) * 10
# externalPoint = Point( ( xp,yp ) )
#################################
#print(str(anglep))
#print(cutlinedk[0].centroid.x)
#print(cutlinedk[0].centroid.y)
#print('--------------ANGULO -------------------')
#print(angle)
#print('--------------ANGULO PERPEN-------------------')
#xp = (cutlinedk[0].centroid.x) + np.sin(np.deg2rad(anglep)) * 20
#yp = (cutlinedk[0].centroid.y) + np.cos(np.deg2rad(anglep)) * 20
#print( 'POINT( %s %s )' % ( xp,yp))
#####
#TODO: Order id by spatial criteria
#####
#line1 = LineString([(np.min(box.geometry.bounds.minx), np.min(box.geometry.bounds.miny)),
# (np.max(box.geometry.bounds.maxx), np.min(box.geometry.bounds.miny))])
crutils.printLogMsg(crglobals.DONE_MSG+'Calculate distance by seed criteria : %s ' % (str(seedValue) ))
projectDistance = 100
if(seedValue==1):
pnt0Calc = (LineString([(np.min(boxGeoDataFrame.geometry.bounds.minx), np.max(boxGeoDataFrame.geometry.bounds.maxy)), (np.max(boxGeoDataFrame.geometry.bounds.maxx), np.max(boxGeoDataFrame.geometry.bounds.maxy))])).centroid
elif(seedValue==2):
pnt0Calc = (LineString([(np.min(boxGeoDataFrame.geometry.bounds.minx), np.max(boxGeoDataFrame.geometry.bounds.maxy)), (np.min(boxGeoDataFrame.geometry.bounds.minx), np.min(boxGeoDataFrame.geometry.bounds.miny))])).centroid
elif(seedValue==3):
if dataFrameLineCuttedByMask[0].length > 0:
angle=crutils.getAzimuth( (dataFrameLineCuttedByMask[0].coords[0][0]) , (dataFrameLineCuttedByMask[0].coords[0][1]) , (dataFrameLineCuttedByMask[0].coords[1][0]) , (dataFrameLineCuttedByMask[0].coords[1][1]) )
anglep =(angle+270)
xp = (np.min(boxGeoDataFrame.geometry.bounds.minx)) + np.sin(np.deg2rad(anglep)) * projectDistance
yp = (np.max(boxGeoDataFrame.geometry.bounds.maxy)) + np.cos(np.deg2rad(anglep)) * projectDistance
externalPoint = Point( ( xp,yp ) )
pnt0Calc = externalPoint
#Point((np.min(boxGeoDataFrame.geometry.bounds.minx), np.max(boxGeoDataFrame.geometry.bounds.maxy)))
elif(seedValue==4):
if dataFrameLineCuttedByMask[0].length > 0:
angle=crutils.getAzimuth( (dataFrameLineCuttedByMask[0].coords[0][0]) , (dataFrameLineCuttedByMask[0].coords[0][1]) , (dataFrameLineCuttedByMask[0].coords[1][0]) , (dataFrameLineCuttedByMask[0].coords[1][1]) )
anglep =(angle+270)
xp = (np.max(boxGeoDataFrame.geometry.bounds.maxx)) + np.sin(np.deg2rad(anglep)) * projectDistance
yp = (np.max(boxGeoDataFrame.geometry.bounds.maxy)) + np.cos(np.deg2rad(anglep)) * projectDistance
externalPoint = Point( ( xp,yp ) )
pnt0Calc = externalPoint
#pnt0Calc = Point((np.max(boxGeoDataFrame.geometry.bounds.maxx), np.max(boxGeoDataFrame.geometry.bounds.maxy)))
boxminxmaypoint = pnt0Calc
crutils.printLogMsg(crglobals.DONE_MSG+'%s chosen for distance calculation' % (boxminxmaypoint) )
for x in range (0 , len(geoDataFrameLineCuttedByMask.geometry)):
if geoDataFrameLineCuttedByMask.geometry.geom_type[x] == 'LineString':
if(len(list(geoDataFrameLineCuttedByMask.geometry[x].coords)))==2:
linea_bx= LineString(list(geoDataFrameLineCuttedByMask.geometry[x].coords))
if(linea_bx.length > crglobals.MINLINEDISTANCE ):
index.append(flagCounter)
flagCounter +=1
#newidcutedline.append(x)
cuttedLineArray.append(linea_bx)
distanceLinear.append(linea_bx.length)
#print('centroid')
#print(linea_bx.centroid)
distanceplin = boxminxmaypoint.distance(linea_bx.centroid)
newobjdistances.append(distanceplin)
sortedDistance = np.argsort(newobjdistances).astype('int')
idByGeo = [x for _,x in sorted(zip(sortedDistance,index))]
#Sort Distances
newObjDistancesSorted= np.sort(newobjdistances)
    # Remove adjacent duplicates and duplicated lines
newObjDistancesSorted = crutils.removeAdjacentsInArray(newObjDistancesSorted)
crutils.printLogMsg(crglobals.DONE_MSG+'Removed adjacents and duplicate lines !')
#print('distances: %s ' % (newobjdistances) )
#print('---------->distances kk: %s ' % (newObjDistancesSorted) )
    ## Remove lines that are too close to each other
pairsdistances = zip([0]+newObjDistancesSorted, newObjDistancesSorted)
distancesFiltered = [pair[1] for pair in pairsdistances if abs(pair[0]-pair[1]) >= crglobals.MINCROPROWDISTANCE ]
#distancesFiltered = [pair[1] for pair in pairsdistances if abs(pair[0]-pair[1]) >= crglobals.MINCROPROWDISTANCE and abs(pair[0]-pair[1]) <= crglobals.MAXCROPROWDISTANCE ]
#remove
#add 3-9-2018
#pairsdistances2 = zip([0]+distancesFiltered, distancesFiltered)
#distancesFiltered = [pair2[1] for pair2 in pairsdistances2 if abs(pair2[0]-pair2[1]) <= crglobals.MAXCROPROWDISTANCE ]
#distancesFiltered.append(newObjDistancesSorted[len(newObjDistancesSorted)])
crutils.printLogMsg(crglobals.DONE_MSG+'Removed closing lines by proximity MIN : %s units ' % ( str(crglobals.MINCROPROWDISTANCE)) )
#crutils.printLogMsg(crglobals.DONE_MSG+'Removed closing lines by proximity MAX : %s units ' % ( str(crglobals.MAXCROPROWDISTANCE)) )
#print('new x: %s ' % (distancesFiltered) )
getIndexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
    # Look up the original index of each filtered distance
k=[]
flagCounter3=0
for x in distancesFiltered:
#print(distancesFiltered[i])
#print(getIndexes(distancesFiltered[i],newobjdistances))
k.append(getIndexes(distancesFiltered[flagCounter3],newobjdistances)[0])
flagCounter3=flagCounter3+1
#Reindex lines filtered
index2 = []
flagCounter2 = 0
m=[]
j=0
for x in k:
m.append(cuttedLineArray[k[j]])
index2.append(flagCounter2)
flagCounter2 +=1
j=j+1
sortdistance2 = np.argsort(distancesFiltered).astype('int')
idByGeo2 = [x for _,x in sorted(zip(sortdistance2,index2))]
crutils.printLogMsg(crglobals.DONE_MSG+'Re-indexing candidate lines !')
#Fix distances substracting projectDistance
arrayDistances = np.array(distancesFiltered)
fixdist = arrayDistances - projectDistance
crutils.printLogMsg(crglobals.DONE_MSG+'Distances fixed !')
dataFrameFixedLines = pd.DataFrame({ 'id': k , 'col': str(col) , 'row': str(row) , 'colrow': str(col)+'_'+str(row) })
geoDataFrameFixedLines=gpd.GeoDataFrame(dataFrameFixedLines, crs=crsEpsgId, geometry=m)
geoDataFrameFixedLines.dropna()
extfile = os.path.join(dirNameVectorObjResults,linesExtendedFileName)#dirNameVectorObjResults+'/'+linesExtendedFileName
if(len(geoDataFrameFixedLines.values)>0):
#ddkfhmm.to_file(driver = 'ESRI Shapefile', filename=str(extfile))
crutils.printLogMsg(crglobals.DONE_MSG+'Writing file line extended and clipped: %s ' % (extfile))
geoDataFrameFixedLines.to_file(driver = 'ESRI Shapefile', filename=str(extfile))
else:
crutils.printLogMsg(crglobals.FAIL_MSG+'Invalid geometry skip file writing: %s ' % (extfile))
return 1
#"[Done] Lines extended and clipped by mask"
def mergeAllLines(dirPathLines,epsgValue):
"""
mergeAllLines.
:param dirPathLines: (String) path.
:param epsgValue: (int) epsg code.
:returns none: None.
"""
crutils.printLogMsg(crglobals.SEPARATOR_MSG)
lineask = []
for file in os.listdir(dirPathLines):
if file.endswith(".shp"):
if file.startswith("mosaic"):
filesm = os.path.join(dirPathLines, file)
crutils.printLogMsg(crglobals.OK_MSG+'Reading file: %s ' % (filesm))
lineask.append( gpd.GeoDataFrame.from_file(filesm) )
mergedLinesArray=[]
a=0
for i in lineask:
mergedLinesArray.append(lineask[a])
a+=1
result = | pd.concat(mergedLinesArray,axis=0) | pandas.concat |
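# Illustrative sketch (standalone): merging the per-tile line layers boils down to
# concatenating GeoDataFrames that share a CRS, as mergeAllLines does above. The
# geometries and the EPSG code below are invented.
import geopandas as gpd
import pandas as pd
from shapely.geometry import LineString
tile_a = gpd.GeoDataFrame({'id': [0]}, geometry=[LineString([(0, 0), (1, 1)])], crs='EPSG:32618')
tile_b = gpd.GeoDataFrame({'id': [1]}, geometry=[LineString([(1, 1), (2, 0)])], crs='EPSG:32618')
merged = gpd.GeoDataFrame(pd.concat([tile_a, tile_b], ignore_index=True), crs=tile_a.crs)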
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from textwrap import dedent
from parameterized import parameterized
import numpy as np
from numpy import nan
import pandas as pd
from zipline._protocol import handle_non_market_minutes, BarData
from zipline.assets import Asset, Equity
from zipline.errors import (
HistoryInInitialize,
HistoryWindowStartsBeforeData,
)
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.testing import (
create_minute_df_for_asset,
str_to_seconds,
MockDailyBarReader,
)
import zipline.testing.fixtures as zf
OHLC = ['open', 'high', 'low', 'close']
OHLCP = OHLC + ['price']
ALL_FIELDS = OHLCP + ['volume']
class WithHistory(zf.WithCreateBarData, zf.WithDataPortal):
TRADING_START_DT = TRADING_ENV_MIN_DATE = START_DATE = pd.Timestamp(
'2014-01-03',
tz='UTC',
)
TRADING_END_DT = END_DATE = pd.Timestamp('2016-01-29', tz='UTC')
SPLIT_ASSET_SID = 4
DIVIDEND_ASSET_SID = 5
MERGER_ASSET_SID = 6
HALF_DAY_TEST_ASSET_SID = 7
SHORT_ASSET_SID = 8
# asset1:
# - 2014-03-01 (rounds up to TRADING_START_DT) to 2016-01-29.
# - every minute/day.
# asset2:
# - 2015-01-05 to 2015-12-31
# - every minute/day.
# asset3:
# - 2015-01-05 to 2015-12-31
# - trades every 10 minutes
# SPLIT_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - splits on 2015-01-05 and 2015-01-06
# DIVIDEND_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - dividends on 2015-01-05 and 2015-01-06
# MERGER_ASSET
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - merger on 2015-01-05 and 2015-01-06
@classmethod
def init_class_fixtures(cls):
super().init_class_fixtures()
cls.trading_days = cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT
)
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.ASSET3 = cls.asset_finder.retrieve_asset(3)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
cls.DIVIDEND_ASSET_SID,
)
cls.MERGER_ASSET = cls.asset_finder.retrieve_asset(
cls.MERGER_ASSET_SID,
)
cls.HALF_DAY_TEST_ASSET = cls.asset_finder.retrieve_asset(
cls.HALF_DAY_TEST_ASSET_SID,
)
cls.SHORT_ASSET = cls.asset_finder.retrieve_asset(
cls.SHORT_ASSET_SID,
)
@classmethod
def make_equity_info(cls):
jan_5_2015 = pd.Timestamp('2015-01-05', tz='UTC')
day_after_12312015 = pd.Timestamp('2016-01-04', tz='UTC')
return pd.DataFrame.from_dict(
{
1: {
'start_date': pd.Timestamp('2014-01-03', tz='UTC'),
'end_date': cls.TRADING_END_DT,
'symbol': 'ASSET1',
'exchange': "TEST",
},
2: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET2',
'exchange': "TEST",
},
3: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET3',
'exchange': "TEST",
},
cls.SPLIT_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'SPLIT_ASSET',
'exchange': "TEST",
},
cls.DIVIDEND_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'DIVIDEND_ASSET',
'exchange': "TEST",
},
cls.MERGER_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'MERGER_ASSET',
'exchange': "TEST",
},
cls.HALF_DAY_TEST_ASSET_SID: {
'start_date': pd.Timestamp('2014-07-02', tz='UTC'),
'end_date': day_after_12312015,
'symbol': 'HALF_DAY_TEST_ASSET',
'exchange': "TEST",
},
cls.SHORT_ASSET_SID: {
'start_date': pd.Timestamp('2015-01-05', tz='UTC'),
'end_date': pd.Timestamp('2015-01-06', tz='UTC'),
'symbol': 'SHORT_ASSET',
'exchange': "TEST",
}
},
orient='index',
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
])
@classmethod
def make_mergers_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.MERGER_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.MERGER_ASSET_SID,
}
])
@classmethod
def make_dividends_data(cls):
return pd.DataFrame([
{
# only care about ex date, the other dates don't matter here
'ex_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'amount': 2.0,
'sid': cls.DIVIDEND_ASSET_SID,
},
{
'ex_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'amount': 4.0,
'sid': cls.DIVIDEND_ASSET_SID,
}],
columns=[
'ex_date',
'record_date',
'declared_date',
'pay_date',
'amount',
'sid'],
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT,
),
)
def verify_regular_dt(self, idx, dt, mode, fields=None, assets=None):
if mode == 'daily':
freq = '1d'
else:
freq = '1m'
cal = self.trading_calendar
equity_cal = self.trading_calendars[Equity]
def reindex_to_primary_calendar(a, field):
"""
Reindex an array of prices from a window on the NYSE
calendar by the window on the primary calendar with the same
dt and window size.
"""
if mode == 'daily':
dts = cal.sessions_window(dt, -9)
# `dt` may not be a session on the equity calendar, so
# find the next valid session.
equity_sess = equity_cal.minute_to_session_label(dt)
equity_dts = equity_cal.sessions_window(equity_sess, -9)
elif mode == 'minute':
dts = cal.minutes_window(dt, -10)
equity_dts = equity_cal.minutes_window(dt, -10)
output = pd.Series(
index=equity_dts,
data=a,
).reindex(dts)
# Fill after reindexing, to ensure we don't forward fill
# with values that are being dropped.
if field == 'volume':
return output.fillna(0)
elif field == 'price':
return output.fillna(method='ffill')
else:
return output
fields = fields if fields is not None else ALL_FIELDS
assets = assets if assets is not None else [self.ASSET2, self.ASSET3]
bar_data = self.create_bardata(
simulation_dt_func=lambda: dt,
)
check_internal_consistency(
bar_data, assets, fields, 10, freq
)
for field in fields:
for asset in assets:
asset_series = bar_data.history(asset, field, 10, freq)
base = MINUTE_FIELD_INFO[field] + 2
if idx < 9:
missing_count = 9 - idx
present_count = 9 - missing_count
if field in OHLCP:
if asset == self.ASSET2:
# asset2 should have some leading nans
np.testing.assert_array_equal(
np.full(missing_count, np.nan),
asset_series[0:missing_count]
)
# asset2 should also have some real values
np.testing.assert_array_equal(
np.array(range(base,
base + present_count + 1)),
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 should be NaN the entire time
np.testing.assert_array_equal(
np.full(10, np.nan),
asset_series
)
elif field == 'volume':
if asset == self.ASSET2:
# asset2 should have some zeros (instead of nans)
np.testing.assert_array_equal(
np.zeros(missing_count),
asset_series[0:missing_count]
)
# and some real values
np.testing.assert_array_equal(
np.array(
range(base, base + present_count + 1)
) * 100,
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 is all zeros, no volume yet
np.testing.assert_array_equal(
np.zeros(10),
asset_series
)
else:
# asset3 should have data every 10 minutes
# construct an array full of nans, put something in the
# right slot, and test for comparison
position_from_end = ((idx + 1) % 10) + 1
# asset3's baseline data is 9 NaNs, then 11, then 9 NaNs,
# then 21, etc. for idx 9 to 19, value_for_asset3 should
# be a baseline of 11 (then adjusted for the individual
# field), thus the rounding down to the nearest 10.
value_for_asset3 = (((idx + 1) // 10) * 10) + \
MINUTE_FIELD_INFO[field] + 1
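            # Worked example of the formula above: at idx 12,
            # (((12 + 1) // 10) * 10) + MINUTE_FIELD_INFO[field] + 1
            # = 10 + offset + 1, i.e. the baseline of 11 plus the per-field offset.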
if field in OHLC:
asset3_answer_key = np.full(10, np.nan)
asset3_answer_key[-position_from_end] = \
value_for_asset3
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
),
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'volume':
asset3_answer_key = np.zeros(10)
asset3_answer_key[-position_from_end] = \
value_for_asset3 * 100
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
) * 100,
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'price':
# price is always forward filled
# asset2 has prices every minute, so it's easy
if asset == self.ASSET2:
# at idx 9, the data is 2 to 11
np.testing.assert_array_equal(
reindex_to_primary_calendar(
range(idx - 7, idx + 3),
field=field,
),
asset_series
)
if asset == self.ASSET3:
# Second part begins on the session after
# `position_from_end` on the NYSE calendar.
second_begin = (
dt - equity_cal.day * (position_from_end - 1)
)
# First part goes up until the start of the
# second part, because we forward-fill.
first_end = second_begin - cal.day
first_part = asset_series[:first_end]
second_part = asset_series[second_begin:]
decile_count = ((idx + 1) // 10)
# in our test data, asset3 prices will be nine
# NaNs, then ten 11s, ten 21s, ten 31s...
if len(second_part) >= 10:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
elif decile_count == 1:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
np.testing.assert_array_equal(
np.array([11] * len(second_part)),
second_part
)
else:
np.testing.assert_array_equal(
np.array([decile_count * 10 - 9] *
len(first_part)),
first_part
)
np.testing.assert_array_equal(
np.array([decile_count * 10 + 1] *
len(second_part)),
second_part
)
def check_internal_consistency(bar_data, assets, fields, bar_count, freq):
if isinstance(assets, Asset):
asset_list = [assets]
else:
asset_list = assets
if isinstance(fields, str):
field_list = [fields]
else:
field_list = fields
multi_field_dict = {
asset: bar_data.history(asset, field_list, bar_count, freq)
for asset in asset_list
}
multi_asset_dict = {
field: bar_data.history(asset_list, field, bar_count, freq)
for field in fields
}
panel = bar_data.history(asset_list, field_list, bar_count, freq)
for field in field_list:
# make sure all the different query forms are internally
# consistent
for asset in asset_list:
series = bar_data.history(asset, field, bar_count, freq)
np.testing.assert_array_equal(
series,
multi_asset_dict[field][asset]
)
np.testing.assert_array_equal(
series,
multi_field_dict[asset][field]
)
np.testing.assert_array_equal(
series,
panel[field][asset]
)
# each minute's OHLCV data has a consistent offset for each field.
# for example, the open is always 1 higher than the close, the high
# is always 2 higher than the close, etc.
MINUTE_FIELD_INFO = {
'open': 1,
'high': 2,
'low': -1,
'close': 0,
'price': 0,
'volume': 0, # unused, later we'll multiply by 100
}
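# Worked example of the offsets above: a minute bar whose close/price is 10 has
# open 11, high 12, low 9, and volume 10 * 100 = 1000 (the test data scales the
# baseline value by 100 for volume).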
class MinuteEquityHistoryTestCase(WithHistory,
zf.WithMakeAlgo,
zf.ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
DATA_PORTAL_FIRST_TRADING_DAY = zf.alias('TRADING_START_DT')
@classmethod
def make_equity_minute_bar_data(cls):
equities_cal = cls.trading_calendars[Equity]
data = {}
sids = {2, 5, cls.SHORT_ASSET_SID, cls.HALF_DAY_TEST_ASSET_SID}
for sid in sids:
asset = cls.asset_finder.retrieve_asset(sid)
data[sid] = create_minute_df_for_asset(
equities_cal,
asset.start_date,
asset.end_date,
start_val=2,
)
data[1] = create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2014-01-03', tz='utc'),
pd.Timestamp('2016-01-29', tz='utc'),
start_val=2,
)
asset2 = cls.asset_finder.retrieve_asset(2)
data[asset2.sid] = create_minute_df_for_asset(
equities_cal,
asset2.start_date,
equities_cal.previous_session_label(asset2.end_date),
start_val=2,
minute_blacklist=[
pd.Timestamp('2015-01-08 14:31', tz='UTC'),
pd.Timestamp('2015-01-08 21:00', tz='UTC'),
],
)
# Start values are crafted so that the thousands place are equal when
# adjustments are applied correctly.
# The splits and mergers are defined as 4:1 then 2:1 ratios, so the
# prices approximate that adjustment by quartering and then halving
# the thousands place.
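        # Worked example: the 2015-01-05 prices start at 8000; applying the first
        # adjustment gives 8000 * 0.25 = 2000 and the second gives 2000 * 0.5 = 1000,
        # matching the raw start_val used for 2015-01-06 and 2015-01-07 below.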
data[cls.MERGER_ASSET_SID] = data[cls.SPLIT_ASSET_SID] = pd.concat((
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-05', tz='UTC'),
pd.Timestamp('2015-01-05', tz='UTC'),
start_val=8000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-06', tz='UTC'),
pd.Timestamp('2015-01-06', tz='UTC'),
start_val=2000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-07', tz='UTC'),
pd.Timestamp('2015-01-07', tz='UTC'),
start_val=1000),
create_minute_df_for_asset(
equities_cal,
| pd.Timestamp('2015-01-08', tz='UTC') | pandas.Timestamp |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
import matplotlib.patheffects as pe
import h5py
from .sanker import Sanker
import imageio
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
for x in range(0, self.T_o):
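      # pick the water-year label used to index the annual bank reports: October
      # onward uses the current calendar year, earlier months use the prior year,
      # and September 30 rolls forward to the new water-year label.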
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
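      # leapcount equals yearcount % 4 throughout the loop, so February is given
      # 29 days whenever the year is divisible by 4 (century exceptions such as
      # 1900 are not handled here).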
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
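  # Example usage of the loader above (hypothetical file path and scenario id;
  # assumes an HDF5 results file whose groups are named 's<N>' and carry a
  # 'columns' attribute, as read in get_results_sensitivity_number):
  #   vis = Visualizer(districts, privates, cities, contracts, banks, leius)
  #   vis.get_results_sensitivity_number('results/results.hdf5', '0', 10, 1905, 1)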
def set_figure_params(self):
self.figure_params = {}
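    # figure_params is a nested dictionary keyed first by figure type and then by
    # plot name; each leaf holds the column names, labels, unit conversions, and
    # layout options that the plotting methods below read.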
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedence Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
    self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', 'Don Pedro', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
    self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', 'Don Pedro', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
    self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation', 'Recharge of Uncontrolled Flood Spills']
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
    self.figure_params['district_water_use']['physical']['color map'] = 'YlGnBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
if use_cdf:
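        # Exceedance curve: for 100 evenly spaced thresholds between 0 and the axis
        # maximum, plot each threshold against the fraction of days whose value
        # exceeds it.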
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
else:
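        # Density branch: fit a Gaussian kernel density estimate to the annual
        # totals and shade the estimated probability density over the plotting range.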
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
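    # sri_cutoffs bins the September 30 (end of water year) Sacramento River Index
    # forecast into the five standard water-year types: Wet, Above Normal,
    # Below Normal, Dry, and Critical.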
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
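    # each entry below is a monthly total, indexed by months elapsed since October
    # of the first simulated water year, tracked separately for every bank and district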
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
      numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
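          # the delivery columns are cumulative within each water year (Oct-Sep):
          # October's month-end value is used as-is, and every later month is
          # differenced against the prior month-end value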
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
            #attribute inleiu deliveries for irrigation to district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
            #attribute inleiu deliveries for irrigation to district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
            #if classifying by physical location, attribute to district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
          ##Pumping for inleiu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
            #if classifying by physical location, attribute to district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries
for contract in self.contract_list:
delivery_name = district.name + '_' + contract.name + '_delivery'
recharge_contract_name = district.name + '_' + contract.name + '_recharged'
flood_irr_name = district.name + '_' + contract.name + '_flood_irrigation'
flood_name = district.name + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_name].values[0]
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
      ##Pumping (daily values aggregated into monthly totals)
if pumping_name in self.values:
annual_pumping = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[district.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
annual_pumping += self.values.loc[self.index[x], pumping_name]
self.total_pumping[district.name][-1] += annual_pumping
#Get values for any private entities within the district
for private_name in self.private_list:
private = private_name.name
if district.key in self.private_districts[private]:
inleiu_name = private + '_' + district.key + '_inleiu_irrigation'
          inleiu_recharge_name = private + '_' + district.key + '_inleiu_recharge'
direct_recover_name = private + '_' + district.key + '_recover_banked'
indirect_surface_name = private + '_' + district.key + '_exchanged_SW'
indirect_ground_name = private + '_' + district.key + '_exchanged_GW'
inleiu_pumping_name = private + '_' + district.key + '_leiupumping'
pumping_name = private + '_' + district.key + '_pumping'
recharge_name = private + '_' + district.key + '_' + district.key + '_recharged'
for year_num in range(0, self.number_years - 1):
year_str = str(year_num + self.starting_year + 1)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years - 1:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year + 1)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attibute inleiu deliveries for irrigation to district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attibute inleiu deliveries for irrigation to district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to district recieving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumnping for inleiu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
#if classifying by phyiscal location, to district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = private + '_' + district.key + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = private + '_' + district.key + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries (minus deliveries made for recharge (accouted for above)
for contract in self.contract_list:
delivery_name = private + '_' + district.key + '_' + contract.name + '_delivery'
recharge_contract_name = private + '_' + district.key + '_' + contract.name + '_recharged'
flood_irr_name = private + '_' + district.key + '_' + contract.name + '_flood_irrigation'
flood_name = private + '_' + district.key + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[ | pd.DatetimeIndex([date_string_current]) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(NullFrequencyError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other - td, expected)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = | pd.to_timedelta('00:00:02') | pandas.to_timedelta |
import pathlib
import tempfile
import ftplib
import numpy
import pandas
from unittest import mock
import pytest
import numpy.testing as nptest
import pandas.testing as pdtest
from cloudside import asos
from cloudside.tests import get_test_file
@pytest.fixture
def fake_rain_data():
rain_raw = [
0.0,
1.0,
2.0,
3.0,
4.0,
4.0,
4.0,
4.0,
4.0,
4.0,
4.0,
4.0,
0.0,
0.0,
0.0,
0.0,
0.0,
5.0,
5.0,
5.0,
5.0,
5.0,
5.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
2.0,
3.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
daterange = pandas.date_range(
start="2001-01-01 11:55", end="2001-01-01 15:50", freq=asos.FIVEMIN
)
return pandas.Series(rain_raw, index=daterange)
@pytest.fixture
def asos_metar():
teststring = (
"24229KPDX PDX20170108090014901/08/17 09:00:31 5-MIN KPDX 081700Z "
"10023G35KT 7SM -FZRA OVC065 00/M01 A2968 250 96 -1400 080/23G35 RMK "
"AO2 PK WND 10035/1654 P0005 I1000 T00001006"
)
return asos.MetarParser(teststring, strict=False)
def retr_error(cmd, action):
raise ftplib.error_perm
def test_MetarParser_datetime(asos_metar):
expected = pandas.Timestamp(year=2017, month=1, day=8, hour=9, minute=0, second=31)
assert asos_metar.datetime == expected
def test_MetarParser_asos_dict(asos_metar):
result = asos_metar.asos_dict()
# the "dict" rounds down the timestamp to the nearest 5 min
dateval = pandas.Timestamp(year=2017, month=1, day=8, hour=9, minute=0, second=0)
expected = asos.Obs(
datetime=dateval,
raw_precipitation=0.05,
temperature=0.0,
dew_point=-0.6,
wind_speed=23.0,
wind_direction=100,
air_pressure=250.0,
sky_cover=1.0,
)
assert result == expected
@pytest.mark.parametrize(
("exists", "force", "call_count"),
[(True, True, 1), (True, False, 0), (False, True, 1), (False, False, 1)],
)
@pytest.mark.parametrize("datestr", ["2016-01-01", "1999-01-01"])
@mock.patch("ftplib.FTP")
def test__fetch_file(ftp, exists, force, call_count, datestr):
ts = pandas.Timestamp("2016-01-01")
with tempfile.TemporaryDirectory() as rawdir:
std_path = pathlib.Path(rawdir).joinpath(f"64010KPDX{ts.year}01.dat")
if exists:
std_path.touch()
if ts.year == 1999 and call_count == 1:
expected_path = None
else:
expected_path = std_path
if expected_path is None:
ftp.retrlines.side_effect = retr_error
dst_path = asos._fetch_file("KPDX", ts, ftp, rawdir, force_download=force)
assert dst_path == expected_path
assert ftp.retrlines.call_count == call_count
@mock.patch.object(ftplib.FTP, "retrlines")
@mock.patch.object(ftplib.FTP, "login")
def test_fetch_files(ftp_login, ftp_retr):
with tempfile.TemporaryDirectory() as rawdir:
raw_paths = asos.fetch_files(
"KPDX", "1999-10-01", "2000-02-01", "<EMAIL>", rawdir
)
assert isinstance(raw_paths, filter)
assert all([(isinstance(rp, pathlib.Path) or (rp is None)) for rp in raw_paths])
assert ftp_login.called_once_with("<EMAIL>")
assert ftp_retr.call_count == 5
@pytest.mark.parametrize(("all_na", "expected"), [(False, 55), (True, 0)])
def test__find_reset_time(fake_rain_data, all_na, expected):
if all_na:
fake_rain_data.loc[:] = numpy.nan
result = asos._find_reset_time(fake_rain_data)
assert result == expected
def test_process_precip(fake_rain_data):
precip = fake_rain_data.to_frame("raw_precip")
result = asos._process_precip(precip, 55, "raw_precip")
expected = numpy.array(
[
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
nptest.assert_array_almost_equal(result, expected)
def test_parse_file():
datpath = pathlib.Path(get_test_file("sample_asos.dat"))
csvpath = pathlib.Path(get_test_file("sample_asos.csv"))
result = asos.parse_file(datpath)
expected = (
pandas.read_csv(csvpath, parse_dates=True, index_col=["datetime"])
.resample("5min")
.asfreq()
)
pdtest.assert_frame_equal(
result.fillna(-9999).sort_index(axis="columns"),
expected.fillna(-9999).sort_index(axis="columns"),
)
@mock.patch("ftplib.FTP")
@mock.patch("cloudside.validate.unique_index")
@mock.patch("cloudside.asos._fetch_file")
@mock.patch("cloudside.asos.parse_file", return_value= | pandas.Series([1, 2, 3]) | pandas.Series |
# -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from valuation_estimation import factor_valuation_estimation
from vision.db.signletion_engine import get_fin_consolidated_statements_pit, get_fundamentals, query
from vision.table.industry_daily import IndustryDaily
from vision.table.fin_cash_flow import FinCashFlow
from vision.table.fin_balance import FinBalance
from vision.table.fin_income import FinIncome
from vision.table.fin_indicator import FinIndicator
from vision.table.fin_indicator_ttm import FinIndicatorTTM
from vision.table.fin_income_ttm import FinIncomeTTM
from vision.table.fin_cash_flow_ttm import FinCashFlowTTM
from vision.db.signletion_engine import *
from vision.table.valuation import Valuation
from vision.table.industry import Industry
from vision.table.stk_daily_price import SkDailyPrice
from data.sqlengine import sqlEngine
from utilities.sync_util import SyncUtil
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url, methods=[
{'packet': 'valuation_estimation.factor_valuation_estimation', 'class': 'FactorValuationEstimation'}]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。
:param days:
:param trade_date: 当前交易日
:param n:
:return:
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
def _func_sets(self, method):
# 私有函数和保护函数过滤
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))
def loading_data(self, trade_date):
"""
获取基础数据
按天获取当天交易日所有股票的基础数据
:param trade_date: 交易日
:return:
"""
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
engine = sqlEngine()
trade_date_pre = self.get_trade_date(trade_date, 1, days=30)
trade_date_1y = self.get_trade_date(trade_date, 1)
trade_date_3y = self.get_trade_date(trade_date, 3)
trade_date_4y = self.get_trade_date(trade_date, 4)
trade_date_5y = self.get_trade_date(trade_date, 5)
# report data
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
balance_report = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
[FinBalance.total_assets,
], dates=[trade_date])
if len(balance_report) <= 0 or balance_report is None:
balance_report = pd.DataFrame({'security_code': [], 'total_assets': []})
for column in columns:
if column in list(balance_report.keys()):
balance_report = balance_report.drop(column, axis=1)
balance_report = balance_report.rename(columns={
'total_assets': 'total_assets_report', # 资产总计
})
# valuation_report_sets = pd.merge(indicator_sets, balance_report, how='outer', on='security_code')
# MRQ data
cash_flow_mrq = engine.fetch_fundamentals_pit_extend_company_id(FinCashFlow,
[FinCashFlow.cash_and_equivalents_at_end,
], dates=[trade_date])
if len(cash_flow_mrq) <= 0 or cash_flow_mrq is None:
cash_flow_mrq = pd.DataFrame({'security_code': [], 'cash_and_equivalents_at_end': []})
for column in columns:
if column in list(cash_flow_mrq.keys()):
cash_flow_mrq = cash_flow_mrq.drop(column, axis=1)
cash_flow_mrq = cash_flow_mrq.rename(columns={
'cash_and_equivalents_at_end': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额
})
balance_mrq = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
[FinBalance.longterm_loan, # 短期借款
FinBalance.total_assets, # 资产总计
FinBalance.shortterm_loan, # 短期借款
FinBalance.equities_parent_company_owners,
# 归属于母公司股东权益合计
], dates=[trade_date])
if len(balance_mrq) <= 0 or balance_mrq is None:
balance_mrq = pd.DataFrame(
{'security_code': [], 'longterm_loan': [], 'total_assets': [], 'shortterm_loan': [],
'equities_parent_company_owners': []})
for column in columns:
if column in list(balance_mrq.keys()):
balance_mrq = balance_mrq.drop(column, axis=1)
balance_mrq = balance_mrq.rename(columns={
'shortterm_loan': 'shortterm_loan', # 短期借款
'longterm_loan': 'longterm_loan', # 长期借款
'total_assets': 'total_assets', # 资产总计
'equities_parent_company_owners': 'equities_parent_company_owners', # 归属于母公司股东权益合计
})
valuation_mrq = pd.merge(cash_flow_mrq, balance_mrq, on='security_code')
indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIndicator,
[FinIndicator.np_cut,
], dates=[trade_date])
for col in columns:
if col in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(col, axis=1)
# indicator_sets = indicator_sets.rename(columns={'EBIT': 'ebit_mrq'})
valuation_mrq = pd.merge(indicator_sets, valuation_mrq, how='outer', on='security_code')
income_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIncome,
[FinIncome.income_tax, # 所得税
], dates=[trade_date])
for col in columns:
if col in list(income_sets.keys()):
income_sets = income_sets.drop(col, axis=1)
valuation_mrq = pd.merge(income_sets, valuation_mrq, how='outer', on='security_code')
cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(FinCashFlow,
[FinCashFlow.fixed_assets_depreciation,
# 固定资产折旧
FinCashFlow.intangible_assets_amortization,
# 无形资产摊销
FinCashFlow.fix_intan_other_asset_acqui_cash,
# 购建固定资产、无形资产和其他...
FinCashFlow.defferred_expense_amortization,
# 长期待摊费用摊销
FinCashFlow.borrowing_repayment, # 偿还债务支付的现金
FinCashFlow.cash_from_borrowing, # 取得借款收到的现金
FinCashFlow.cash_from_bonds_issue,
# 发行债券所收到的现金
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(col, axis=1)
valuation_mrq = pd.merge(cash_flow_sets, valuation_mrq, how='outer', on='security_code')
balance_sets = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
[FinBalance.shortterm_loan,
FinBalance.total_current_assets, # 流动资产合计
FinBalance.total_current_liability, # 流动负债合计
], dates=[trade_date])
for col in columns:
if col in list(balance_sets.keys()):
balance_sets = balance_sets.drop(col, axis=1)
valuation_mrq = pd.merge(balance_sets, valuation_mrq, how='outer', on='security_code')
balance_sets_pre = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
[FinBalance.total_current_assets, # 流动资产合计
FinBalance.total_current_liability,
# 流动负债合计
], dates=[trade_date_pre])
for col in columns:
if col in list(balance_sets_pre.keys()):
balance_sets_pre = balance_sets_pre.drop(col, axis=1)
balance_sets_pre = balance_sets_pre.rename(columns={
'total_current_assets': 'total_current_assets_pre',
'total_current_liability': 'total_current_liability_pre',
})
valuation_mrq = pd.merge(balance_sets_pre, valuation_mrq, how='outer', on='security_code')
# TTM data
# 总市值合并到TTM数据中,
cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(FinCashFlowTTM,
[FinCashFlowTTM.net_operate_cash_flow,
], dates=[trade_date])
if len(cash_flow_ttm_sets) <= 0 or cash_flow_ttm_sets is None:
cash_flow_ttm_sets = pd.DataFrame({'security_code': [], 'net_operate_cash_flow': []})
for column in columns:
if column in list(cash_flow_ttm_sets.keys()):
cash_flow_ttm_sets = cash_flow_ttm_sets.drop(column, axis=1)
cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={
'net_operate_cash_flow': 'net_operate_cash_flow', # 经营活动现金流量净额
})
indicator_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIndicatorTTM,
[FinIndicatorTTM.np_cut,
], dates=[trade_date_1y])
if len(indicator_ttm_sets) <= 0 or indicator_ttm_sets is None:
indicator_ttm_sets = pd.DataFrame({'security_code': [], 'np_cut': []})
for column in columns:
if column in list(indicator_ttm_sets.keys()):
indicator_ttm_sets = indicator_ttm_sets.drop(column, axis=1)
income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIncomeTTM,
[FinIncomeTTM.net_profit,
FinIncomeTTM.np_parent_company_owners,
FinIncomeTTM.total_operating_revenue,
FinIncomeTTM.operating_revenue,
FinIncomeTTM.total_profit,
], dates=[trade_date])
if len(income_ttm_sets) <= 0 or income_ttm_sets is None:
income_ttm_sets = pd.DataFrame(
{'security_code': [], 'net_profit': [], 'np_parent_company_owners': [], 'total_operating_revenue': [],
'operating_revenue': [], 'total_profit': []})
for column in columns:
if column in list(income_ttm_sets.keys()):
income_ttm_sets = income_ttm_sets.drop(column, axis=1)
income_ttm_sets = income_ttm_sets.rename(columns={
'total_profit': 'total_profit', # 利润总额 ttm
'net_profit': 'net_profit', # 净利润
'np_parent_company_owners': 'np_parent_company_owners', # 归属于母公司所有者的净利润
'total_operating_revenue': 'total_operating_revenue', # 营业总收入
'operating_revenue': 'operating_revenue', # 营业收入
})
income_ttm_sets_3 = engine.fetch_fundamentals_pit_extend_company_id(FinIncomeTTM,
[FinIncomeTTM.np_parent_company_owners,
], dates=[trade_date_3y])
if len(income_ttm_sets_3) <= 0 or income_ttm_sets_3 is None:
income_ttm_sets_3 = pd.DataFrame({'security_code': [], 'np_parent_company_owners': []})
for column in columns:
if column in list(income_ttm_sets_3.keys()):
income_ttm_sets_3 = income_ttm_sets_3.drop(column, axis=1)
income_ttm_sets_3 = income_ttm_sets_3.rename(columns={
'np_parent_company_owners': 'np_parent_company_owners_3', # 归属于母公司所有者的净利润
})
income_ttm_sets_5 = engine.fetch_fundamentals_pit_extend_company_id(FinIncomeTTM,
[FinIncomeTTM.np_parent_company_owners,
], dates=[trade_date_5y])
if len(income_ttm_sets_5) <= 0 or income_ttm_sets_5 is None:
income_ttm_sets_5 = pd.DataFrame({'security_code': [], 'np_parent_company_owners': []})
for column in columns:
if column in list(income_ttm_sets_5.keys()):
income_ttm_sets_5 = income_ttm_sets_5.drop(column, axis=1)
income_ttm_sets_5 = income_ttm_sets_5.rename(columns={
'np_parent_company_owners': 'np_parent_company_owners_5', # 归属于母公司所有者的净利润
})
valuation_ttm_sets = pd.merge(cash_flow_ttm_sets, income_ttm_sets, how='outer', on='security_code')
valuation_ttm_sets = pd.merge(valuation_ttm_sets, indicator_ttm_sets, how='outer', on='security_code')
valuation_ttm_sets = | pd.merge(valuation_ttm_sets, income_ttm_sets_3, how='outer', on='security_code') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 05 09:54:42 2020
@author: Andres
"""
import os
import numpy as np
import pandas as pd
from Modules.IDF_Func import Wilches
from Modules.IDF_Func import Pulgarin
from Modules.IDF_Func import VargasDiazGranados
from Modules.IDF_Func import IDEAM
from Modules.Utils import Listador
from Modules import Read
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as colors
################################ INPUT IDF #################################
Path_series = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Datos/Series/'))
Path_IDF = os.path.abspath(os.path.join(os.path.dirname(__file__), 'IDF'))
def MaxAnual(Esta, Path_series, window=None):
"""
Read estation to extract the anual max value
"""
Dat = Read.EstacionCSV_np(Esta, Esta.split('.csv')[0],Path_series)
if window is not None:
Dat = Dat.rolling(f'{window}D').sum()
Dat = Dat.dropna()
Max = Dat.groupby(lambda y : y.year).max()
return Max[~np.isnan(Max.values)].values.ravel()/24
def GraphIDF(Int, duration, frecuency, cmap_name='jet', name='IDF', pdf=True, png=False, PathFigs=Path_IDF,):
"""
Graph of month diurnal cycles
INPUTS
Int : 2D array with the Intesity [mm/hour] with shape=(len(durations), len(frecuency))
duration : 1D array with durations [min]
frecuency : 1D array with reuturn periods [years]
cmap_name : color map name
name : stringo for save the figure
Path : abtolute Path to save files
"""
# define some random data that emulates your indeded code:
NCURVES = len(frecuency)
plt.close('all')
fig = plt.figure(figsize=(9.6,6))
ax = fig.add_subplot(111)
cNorm = colors.Normalize(vmin=0, vmax=NCURVES)
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=plt.get_cmap(cmap_name))
lines = []
for idx in range(NCURVES):
line = Int[:, idx]
colorVal = scalarMap.to_rgba(idx)
colorText = str(frecuency[idx])+' years'
retLine, = ax.plot(duration,line, linewidth=2,
color=colorVal,
label=colorText)
lines.append(retLine)
#added this to get the legend to work
handles,labels = ax.get_legend_handles_labels()
# # Shrink current axis by 20%
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width*1.0, box.height])
ax.legend(handles, labels, loc='center right', bbox_to_anchor=(1, 0.5),
fancybox=False, shadow=False)
ax.set_xlabel('Duration [minutes]', fontsize=16)
ax.set_ylabel('Intensity [mm/hour]', fontsize=16)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
if pdf == True:
plt.savefig(os.path.join(PathFigs, name+'.pdf'), format='pdf', transparent=True)
if png == True:
plt.savefig(os.path.join(PathFigs, name+'.png'), transparent=True)
elif png == True:
plt.savefig(os.path.join(PathFigs, name+'.png'), transparent=True)
else:
print("Graph not saved. To save it at least one of png or pdf parameters must be True.")
Estaciones = Listador(Path_series, final='.csv')
# Return periods
Tr = np.array([2.33, 5, 10, 25, 50, 100, 200, 500, 1000])
theta = -0.82
#
for i in range(len(Estaciones)):
Name = Estaciones[i].split('.csv')[0]
data = MaxAnual(Estaciones[i], Path_series)
dP, Idq = Pulgarin(data, Tr, theta)
GraphIDF(Idq, dP, Tr, cmap_name='jet', name=Name+'IDF', pdf=True, png=False, PathFigs=Path_IDF,)
IDF = pd.DataFrame(Idq, index=dP, columns=Tr)
IDF.to_csv(os.path.join(Path_IDF,Estaciones[i]))
# Compare Wilches with VargasDiazGranados
Tr = np.array([2,3,5,10,25,50,100])
# Nombre = '<NAME> [25021340]'
# Nombre = '<NAME> [25021470]'
Nombre = '<NAME> [29015020]'
# IDEAM params
C1 = [6798.308,9998.576,14882.323,23468.705,39184.485,55085.160,75025.218]
X0 = [27.895,32.735,37.828,43.764,50.544,55.091,59.234]
C2 = [1.112,1.159,1.207,1.263,1.325,1.366,1.403]
# data = MaxAnual(Nombre+'.csv', Path_series)
#
# from math import factorial as fact
# x = np.sort(data)
# n = len(x)
# ks = np.array([0, 1])
# Mk = np.zeros(len(ks))
#
# for j in range(len(ks)):
# k = ks[j]
# N = fact(n-1)/(fact(k)*fact(n-1-k))
# Mi = 0.
# for i in range(n-k):
# Ni = fact(n-(i+1))/(fact(k)*fact(n-(i+1)-k))
# # print (Ni)
# Mi = x[i]*Ni
# Mk[j] = Mk[j] + Mi
# Mk[j] = (1./n)*Mk[j]/N
#
# alphad = (Mk[0] - 2.*Mk[1])/np.log(2.)
# mud = Mk[0] - 0.5772*alphad
#
# y = -np.log(-np.log(1.-1./Tr))
#
# ################################ Duration ##############################
#
# dlim1 = 1440
# dlim2 = 60
# dlim3 = 5
# d1 = np.arange(dlim2, dlim1, 5)
# d2 = np.arange(dlim3, ddlim2, 2.5)
#
# # IDF curves
# Idq1 = np.zeros((len(d1), len(Tr)))
# Idq2 = np.zeros((len(d2), len(Tr)))
#
# for i in range(len(Tr)):
# Idq1[:,i] = (mud + alphad*y[i])*(d1/1440.)**theta
# Idq2[:,i] = Idq1[0,i]*((32.4/d2**0.75) - (30.00/d2))
#
# d = np.hstack((d2, d1))
# Idq = np.vstack((Idq2, Idq1))
#
# X1 = Idq1[0,:]
# X2 = mud + alphad*y
for w in np.arange(1,8):
data = MaxAnual(Nombre+'.csv', Path_series, window=w)
Name = f'{Nombre}_{w}days_'
dP, Idq = Pulgarin(data, Tr, theta)
dV, IdV = VargasDiazGranados(data, Tr, Region=2)
dI, Ida = IDEAM(Tr, X0,C1,C2)
DiP = Ida-Idq
DiV = Ida-IdV
GraphIDF(Idq, dP, Tr, cmap_name='jet', name=Name+'IDF_Pulgarin', pdf=True, png=False, PathFigs=Path_IDF,)
GraphIDF(IdV, dV, Tr, cmap_name='jet', name=Name+'IDF_Vargas', pdf=True, png=False, PathFigs=Path_IDF,)
GraphIDF(Ida, dI, Tr, cmap_name='jet', name=Name+'IDF_IDEAM', pdf=True, png=False, PathFigs=Path_IDF,)
GraphIDF(DiV, dV, Tr, cmap_name='jet', name=Name+'IDF_DifVargas', pdf=True, png=False, PathFigs=Path_IDF,)
GraphIDF(DiP, dV, Tr, cmap_name='jet', name=Name+'IDF_DifPulgarin',pdf=True, png=False, PathFigs=Path_IDF,)
IDF_P = pd.DataFrame(Idq, index=dP, columns=Tr)
IDF_V = pd.DataFrame(IdV, index=dV, columns=Tr)
IDF_A = | pd.DataFrame(Ida, index=dI, columns=Tr) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
#%%
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N)) ** 2 # 0 to 15 point radii
plt.scatter(x, y, s=area, c=colors, alpha=0.5)
plt.show()
#%%
X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-")
plt.plot(X, S, color="red", linewidth=2.5, linestyle="-")
plt.xlim(X.min() * 1.1, X.max() * 1.1)
plt.xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi],
[r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
plt.ylim(C.min() * 1.1, C.max() * 1.1)
plt.yticks([-1, 0, +1],
[r'$-1$', r'$0$', r'$+1$'])
plt.show()
#%%
import numpy as np
import pandas as pd
np.ndarray
Ellipsis
slice
import matplotlib.pyplot as plt
#% matplotlib inline
#%%
### Import data
# Always good to set a seed for reproducibility
SEED = 222
np.random.seed(SEED)
df = | pd.read_csv('data/input.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from scripts.utils import remove_outliers, linear_regression
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.preprocessing import normalize
#from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import torch
lan = "all"
if lan == "es":
language = "Spanish"
elif lan == "fr":
language = "French"
elif lan == "all":
language = "French & Spanish"
# Load time taken to translate and calculate sentence length
if lan == "all":
es = pd.read_csv("data/un-timed-sentences/en-es.processed", sep='\t')
fr = pd.read_csv("data/un-timed-sentences/en-fr.processed", sep='\t')
wpd = pd.concat([es,fr], axis=0, ignore_index=True).drop_duplicates()
else:
wpd = pd.read_csv("data/un-timed-sentences/en-"+lan+".processed", sep='\t').drop_duplicates()
words=[]
for i in wpd.index:
words.append(len(wpd['Segment'][i].split()))
wpd["words"] = words
# Filter empty sentences (with only serial number)
time = wpd.loc[~wpd['Segment'].str.contains("^\s*\S*[0-9]\S*\s*$"), ['Time-to-edit', 'words']].reset_index(drop=True)
time.columns= ["time (ms)", "words"]
""" TER - words per day"""
# Load TER scores
if lan == "all":
es = pd.read_csv("data/un-timed-sentences/en-es-gs.score", header=None, sep='\t')
fr = pd.read_csv("data/un-timed-sentences/en-fr-gs.score", header=None, sep='\t')
ter = pd.concat([es,fr], axis=0, ignore_index=True)
else:
ter = pd.read_csv("data/un-timed-sentences/en-"+lan+"-gs.score", header=None, sep='\t')
ter.columns = ["score"]
# Join important columns to single dataframe
df = pd.concat([ter, time], axis=1)
# Calculate translation rate (and normalise)
#df['perms'] = df['words'] / df['time (ms)'] # words per ms
df['spw'] = (df['time (ms)'])/1000 / df['words'] # seconds per word
#df['rate'] = (df['perms'] - df['perms'].min()) / (df['perms'].max() - df['perms'].min())
# Remove perfect translations
dft = df.loc[df['score'] != 0]
# Remove outliers
dfr = remove_outliers(df, 'spw', lq=0.05, uq=0.95)
# Correlation
print(dfr.corr().round(3)['score'])
# Quantiles
def quantiles(df):
""" Output distribution of each quantile in the data set. """
q1 = df.loc[df['perms'] <= df['perms'].quantile(0.25)]
q2 = df.loc[(df['perms'] >= df['perms'].quantile(0.25)) & (df['perms'] <= df['perms'].quantile(0.50))]
q3 = df.loc[(df['perms'] >= df['perms'].quantile(0.50)) & (df['perms'] <= df['perms'].quantile(0.75))]
q4 = df.loc[df['perms'] >= df['perms'].quantile(0.75)]
q_corr={}
q_df={1:q1, 2:q2, 3:q3, 4:q4}
for q in range(1,5):
q_corr[q] = q_df[q].corr()['score']
qcor_df = pd.DataFrame.from_dict(q_corr)
qcor_df.columns=['q1', 'q2', 'q3', 'q4']
print(qcor_df.round(3))
#dfr = dfr.loc[dfr['spw'] < 8] # filter out extreme cases
dfr = dfr.loc[dfr['score'] <= 1.0]
dfr = dfr.loc[dfr['words'] <= 90]
# scatter plots
plt.scatter(dfr['spw'], dfr['score'])
plt.xlabel("seconds per word")
plt.ylabel("TER")
#plt.xlim([min(df['spw'])-0.0001, max(df['spw'])+0.0001])
#plt.scatter(q3['perms'], q3['score'])
c, m = np.polynomial.polynomial.polyfit(dfr['spw'], dfr['score'], 1)
y_pred = m*dfr['spw'] + c
residuals = dfr['score'] - y_pred
median_error = abs(residuals).median()
MAE = mean_absolute_error(dfr['score'], y_pred) # mean absolute error
plt.plot(np.unique(dfr['spw']), np.poly1d(np.polyfit(dfr['spw'], dfr['score'], 1))(np.unique(dfr['spw'])), 'k--')
x1 = np.linspace(min(dfr['spw']), max(dfr['spw']))
y1 = m*x1 + c
plt.plot(x1, y1+MAE, 'r--') # confidence intervals (bestfit +/- MAE)
plt.plot(x1, y1-MAE, 'r--')
plt.show()
plt.figure()
plt.scatter(dfr['words'], dfr['score'])
#plt.plot(np.unique(dfr['words']), np.poly1d(np.polyfit(dfr['words'], dfr['score'], 1))(np.unique(dfr['words'])), 'r--')
plt.xlabel("Sentence length (words)")
plt.ylabel("TER")
plt.title("Timed Sentences - %s" % language)
plt.show()
plt.figure()
plt.scatter(dfr['words'], dfr['time (ms)'])
plt.plot(np.unique(dfr['words']), np.poly1d(np.polyfit(dfr['words'], dfr['time (ms)'], 1))(np.unique(dfr['words'])), 'k--')
plt.xlabel("Sentence length (words)")
plt.ylabel("Time taken to translate (ms)")
plt.title("Timed Sentences - %s" % language)
#plt.show()
# Line of best fit and distance from each point to the line
c, m = np.polynomial.polynomial.polyfit(dfr['words'], dfr['time (ms)'], 1)
y_pred = m*dfr['words'] + c
residuals = dfr['time (ms)'] - y_pred
median_error = abs(residuals).median()
MAE = mean_absolute_error(dfr['time (ms)'], y_pred) # mean absolute error
x1 = np.linspace(min(dfr['words']), max(dfr['words']))
y1 = m*x1 + c
plt.plot(x1, y1+MAE, 'r--') # confidence intervals (bestfit +/- MAE)
plt.plot(x1, y1-MAE, 'r--')
plt.show()
pos_res = residuals.loc[residuals > MAE] # points above the line
neg_res = residuals.loc[residuals < -MAE] # points below the line
# Load biber dimension and select useful dimensions
if lan == "all":
es = pd.read_csv("data/un-timed-sentences/en-es-biber.en", sep='\t')
fr = pd.read_csv("data/un-timed-sentences/en-fr-biber.en", sep='\t')
biber = pd.concat([es,fr], axis=0, ignore_index=True)
else:
biber = | pd.read_csv("data/un-timed-sentences/en-"+lan+"-biber.en", sep='\t') | pandas.read_csv |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
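        # small helper: an n x n frame of zeros whose row and column labels are
        # zero-padded strings, giving every label a predictable width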
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
                    # Beyond the max_columns boundary, but no expansion
                    # since the width is not exceeded
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
                    # exceeding the vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
        # default settings: no truncation even though the frame exceeds min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
            # when min_rows is None, it follows the value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
            # when min_rows is set higher than max_rows, the smaller of the two is used
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
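        # rows should render to equal widths even when values contain non-ascii bytes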
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
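        # rendering a frame whose column name contains a non-ascii character should not raise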
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
        # exponent width is platform-dependent, as above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
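        # values around 1e10 should keep their exponent in the repr
        # ("+10", or "+010" on platforms that print three-digit exponents)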
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
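# helper: builds a small dict of string Series (uniform and varying element lengths)
# for use in the Series formatting tests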
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = | Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"]) | pandas.Series |
# -*- coding: utf-8 -*-
from constants import EXECUTED_OFFENDERS_URL, ARCHIVE_HTML_FILENAME, DATAFRAME_FILENAME
from bs4 import BeautifulSoup
from datetime import datetime
import pandas as pd
import requests
import feather
def get_executed_offenders_html():
response = requests.get(EXECUTED_OFFENDERS_URL)
if response.status_code != 200:
# TODO: account for non-OK responses
pass
return response.content
def get_executed_offenders_from_file(html_file=ARCHIVE_HTML_FILENAME):
with open(html_file, 'r', encoding='latin-1') as fd:
raw_html_content = fd.read()
return raw_html_content
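# Parse the archived Executed Offenders HTML table into a list of dicts, one per table row.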
def get_executed_offenders_data():
raw_html_content = get_executed_offenders_from_file()
soup = BeautifulSoup(raw_html_content, 'html.parser')
table = soup.find('table', {'class': 'os'})
rows = table.findAll('tr')
    # data rows (everything after the header row)
data_rows = rows[1:]
# populate the data
data = []
for dr in data_rows:
executed_offenders_data = dict()
raw_row_data = dr.findAll('td')
# execution number
executed_offenders_data['execution_no'] = int(raw_row_data[0].text)
# offender information link
executed_offenders_data['offender_information_link'] = \
raw_row_data[1].find('a', href=True).get('href')
# last statement link
executed_offenders_data['last_statement_link'] = \
raw_row_data[2].find('a', href=True).get('href')
# last name
executed_offenders_data['last_name'] = raw_row_data[3].text
# first name
executed_offenders_data['first_name'] = raw_row_data[4].text
# TDCJ number
executed_offenders_data['tdcj_no'] = raw_row_data[5].text
# age
executed_offenders_data['age'] = int(raw_row_data[6].text)
# date
executed_offenders_data['date'] = datetime.strptime(
raw_row_data[7].text, '%m/%d/%Y')
# race
executed_offenders_data['race'] = raw_row_data[8].text
# county
executed_offenders_data['county'] = raw_row_data[9].text
data.append(executed_offenders_data)
return data
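# For reference, each record returned above looks roughly like this
# (field values are purely illustrative, not real data):
#   {'execution_no': 1, 'offender_information_link': '...', 'last_statement_link': '...',
#    'last_name': 'Doe', 'first_name': 'John', 'tdcj_no': '999999', 'age': 40,
#    'date': datetime(2000, 1, 1), 'race': '...', 'county': '...'}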
def get_last_statements_data(eo_data):
"""
`eo_data` stands for 'Executed Offenders Data'.
"""
last_statements_data = []
for eo in eo_data:
last_statement_link = eo.get('last_statement_link')
if last_statement_link is None:
continue
response = requests.get(last_statement_link)
soup = BeautifulSoup(response.content, 'html.parser')
# extract body text
body_text = [
p.text.strip() for p in
soup.find('div', {'id': 'body'}).findAll('p')
]
# extract last statement
last_statement = "\n".join(
body_text[body_text.index('Last Statement:') + 1:]
)
last_statements_data.append(
{
'execution_no': eo['execution_no'],
'last_statement': last_statement,
}
)
return last_statements_data
def get_offender_information_data(eo_data):
pass
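# (stub: presumably intended to follow each offender_information_link and scrape the
#  detail pages; left unimplemented)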
def get_all_data():
executed_offenders_data = get_executed_offenders_data()
last_statements_data = get_last_statements_data(executed_offenders_data)
return {
'executed_offenders': executed_offenders_data,
'last_statements': last_statements_data,
}
def get_df(eo_data, ls_data):
# executed offenders
eo_df = | pd.DataFrame(eo_data) | pandas.DataFrame |
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, \
load_robot_execution_failures
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from glob import glob
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from tsfresh.transformers import RelevantFeatureAugmenter
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters
settings = ComprehensiveFCParameters()
from tsfresh import extract_features
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from tsfresh.feature_selection.relevance import calculate_relevance_table
from pca import PCAForPandas
from dtwnn import KnnDtw
from boruta import BorutaPy
import copy
import time
import sys
import csv
import matplotlib.colors as mcolors
# adjust for testing, but the full run requires 10 stratified sample folds
num_folds = 10
# tell pandas to consider infinity as a missing value (for filtering)
pd.options.mode.use_inf_as_na = True
# record our overall start time for time delta display in log messages
mark = time.time()
# return value to indicate that the test for a fold failed and should be ignored
ignore_this_fold = {
'rfc': -1,
'ada': -1,
'rfc_count': -1,
'ada_count': -1,
}
# read both the TEST and TRAIN files for a particular
# dataset into a single set, then partition the data
# and label into X and y DataFrames
def get_combined_raw_dataset(root_path: str):
    name = root_path.split('/')[2]
    raw_train = pd.read_csv(root_path + name + '_TRAIN.tsv', delimiter='\t', header=None)
    raw_test = pd.read_csv(root_path + name + '_TEST.tsv', delimiter='\t', header=None)
    # pd.concat is the non-deprecated equivalent of DataFrame.append here
    combined = pd.concat([raw_train, raw_test])
    v = combined.reset_index().drop(['index'], axis=1)
    X = v.iloc[:,1:]
    y = v.iloc[:,:1]
    return (X, y)
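# Hypothetical usage, assuming the UCR-style layout ./data/<Name>/<Name>_TRAIN.tsv:
#   X, y = get_combined_raw_dataset('./data/Coffee/')
#   X holds one row per series (values only); y is a single-column frame of class labels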
# convert a raw dataframe into the vertically oriented
# format that tsfresh requires for feature extraction
def raw_to_tsfresh(X, y):
ids = []
values = []
ys = []
indices = []
for id, row in X.iterrows():
c = (y.loc[[id], :]).iloc[0][0]
ys.append(int(c))
indices.append(id)
first = True
for v in row:
if (not first):
ids.append(id)
values.append(float(v))
first = False
d = { 'id': ids, 'value': values }
return (pd.DataFrame(data=d), pd.Series(data=ys, index=indices))
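# Shape of the conversion above, with made-up values: a wide row (id=0, [v0, v1, v2])
# becomes long rows (id=0, value=v1) and (id=0, value=v2). Note that the `first` flag
# skips the first value of every row, and the returned Series maps each original row
# index to its integer class label.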
# helper function to filter features out of a dataframe given
# a calculated tsfresh relevance table (R)
def filter_features(df, R):
for id, row in R.iterrows():
if (row['relevant'] == False):
df = df.drop([row['feature']], axis=1)
return df
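# (R above is the relevance table produced by tsfresh's calculate_relevance_table;
#  rows flagged relevant == False name the feature columns that get dropped)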
# calculate the accuracy rate of a prediction
def accuracy_rate(predicted, actual):
correct = 0
for p, a in zip(predicted, actual):
if (p == a):
correct += 1
return correct / len(predicted)
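# e.g. accuracy_rate([1, 2, 2], [1, 2, 3]) would return 2/3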
# a single place to configure our RFC and ADA classifiers:
def build_rfc():
return RandomForestClassifier()
def build_ada():
return AdaBoostClassifier()
# Perform the standard FRESH algorithm
def perform_fresh(X_train, y_train, X_test, y_test):
log('Processing fresh')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
extracted_train = extracted_train.dropna(axis='columns')
# We run FRESH and its variants first at the default fdr level of 0.05,
# but if it returns 0 features (why?) then we lower the value and try
# again.
filtered_train = None
for fdr in [0.05, 0.01, 0.005, 0.001, 0.00001]:
log('Using ' + str(fdr))
R = calculate_relevance_table(extracted_train, y_train.squeeze(), fdr_level=fdr)
filtered_train = filter_features(extracted_train, R)
if (filtered_train.shape[1] > 0):
break
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = filter_features(extracted_test, R)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Safely executes a feature-based fold run, catching any
# exceptions so that we simply ignore this failed fold. This
# was added to make FRESH and its variants more robust, as
# sometimes a single fold out of 10 in FRESH would fail as
# the algorithm (even at low fdr settings) would report zero
# relevant features
def run_safely(f, X_train, y_train, X_test, y_test):
    try:
        return f(X_train, y_train, X_test, y_test)
    except Exception:
        # catch Exception rather than a bare except so KeyboardInterrupt still propagates
        return ignore_this_fold
# FRESH variant with PCA run on the extracted relevant features
def perform_fresh_pca_after(X_train, y_train, X_test, y_test):
log('Processing fresh_pca_after')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
    # For some reason, tsfresh extracts features that contain NaN,
    # Infinity or None. This breaks the PCA step. To avoid this, we
    # drop columns that contain these values. I know of nothing else to do here.
extracted_train = extracted_train.dropna(axis='columns')
filtered_train = None
# execute at different fdr levels to try to make FRESH more robust
for fdr in [0.05, 0.01, 0.005, 0.001]:
R = calculate_relevance_table(extracted_train, y_train.squeeze(), fdr_level=fdr)
filtered_train = filter_features(extracted_train, R)
if (filtered_train.shape[1] > 0):
break
# Perform PCA on the filtered set of features
pca_train = PCAForPandas(n_components=0.95, svd_solver='full')
filtered_train = pca_train.fit_transform(filtered_train)
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = filter_features(extracted_test, R)
filtered_test = pca_train.transform(filtered_test)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# FRESH variant that runs PCA before the filtering step
def perform_fresh_pca_before(X_train, y_train, X_test, y_test):
log('Processing fresh_pca_before')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
# For some reason, tsfresh is extracting features that contain Nan,
# Infinity or None. This breaks the PCA step. To avoid this, we
# drop columns that contain these values.
extracted_train = extracted_train.dropna(axis='columns')
# Perform PCA on the complete set of extracted features
pca_train = PCAForPandas(n_components=0.95, svd_solver='full')
extracted_train = pca_train.fit_transform(extracted_train)
filtered_train = extracted_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = pca_train.transform(extracted_test)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# The Boruta-based feature-extraction algorithm
def perform_boruta(X_train, y_train, X_test, y_test):
log('Processing boruta')
rf = build_rfc()
feat_selector = BorutaPy(rf, n_estimators='auto', perc=90, verbose=2, random_state=0)
feat_selector.fit(X_train.values, y_train.values)
X_filtered = feat_selector.transform(X_train.values)
X_test_filtered = feat_selector.transform(X_test.values)
trained_model = rf.fit(X_filtered, y_train.squeeze().values)
rfc_predicted = list(map(lambda v: int(v), rf.predict(X_test_filtered)))
actual = y_test.squeeze().tolist()
bdt = build_ada()
trained_model = bdt.fit(X_filtered, y_train.squeeze().values)
ada_predicted = list(map(lambda v: int(v), bdt.predict(X_test_filtered)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(rf.estimators_),
'ada_count': len(bdt.estimators_),
}
# LDA
def perform_lda(X_train, y_train, X_test, y_test):
log('Processing lda')
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
y_test = y_test.values
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
lda = LDA()
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
rf = build_rfc()
trained_model = rf.fit(X_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), rf.predict(X_test)))
actual = y_test.squeeze().tolist()
bdt = build_ada()
trained_model = bdt.fit(X_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(X_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(rf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Take the extracted features from FRESH and use them unfiltered
# to make a prediction
def perform_unfiltered(X_train, y_train, X_test, y_test):
log('Processing unfiltered')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction only
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_train = extracted_train.dropna(axis='columns')
extracted_test = extracted_test.dropna(axis='columns')
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(extracted_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(extracted_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(extracted_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(extracted_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Nearest Neighbors with Dynamic Time Warping
def perform_dtw_nn(X_train, y_train, X_test, y_test):
log('Processing dtw_nn')
m = KnnDtw(n_neighbors=1, max_warping_window=10)
m.fit(X_train.values, y_train.values)
predicted, proba = m.predict(X_test.values)
actual = y_test.squeeze().tolist()
return accuracy_rate(predicted, actual), 0
# A simple majority vote classifier
def perform_trivial(X_train, y_train, X_test, y_test):
log('Processing trivial')
counts = {}
    for v in y_train.squeeze():  # iterate over the label values, not the DataFrame column names
if v not in counts:
counts[v] = 1
else:
counts[v] = counts.get(v) + 1
m = -1
majority = None
for k in counts:
v = counts.get(k)
if (v > m):
m = v
majority = k
predicted = np.full(len(y_test.squeeze().values), majority)
actual = y_test.squeeze().tolist()
return accuracy_rate(predicted, actual)
# Process a single test/train fold
def process_fold(X_train, y_train, X_test, y_test):
    # FRESH and its variants
fresh = run_safely(perform_fresh, X_train, y_train, X_test, y_test)
fresh_b = run_safely(perform_fresh_pca_before, X_train, y_train, X_test, y_test)
fresh_a = run_safely(perform_fresh_pca_after, X_train, y_train, X_test, y_test)
unfiltered = run_safely(perform_unfiltered, X_train, y_train, X_test, y_test)
# The other two feature-based approaches
boruta = run_safely(perform_boruta, X_train, y_train, X_test, y_test)
lda = run_safely(perform_lda, X_train, y_train, X_test, y_test)
# Shape based DTW_NN and the majority vote classifier
dtw = perform_dtw_nn(X_train, y_train, X_test, y_test)
trivial = perform_trivial(X_train, y_train, X_test, y_test)
return ({
'Boruta_ada': boruta.get('ada'),
'Boruta_rfc': boruta.get('rfc'),
'DTW_NN': dtw[0],
'FRESH_PCAa_ada': fresh_a.get('ada'),
'FRESH_PCAa_rfc': fresh_a.get('rfc'),
'FRESH_PCAb_ada': fresh_b.get('ada'),
'FRESH_PCAb_rfc': fresh_b.get('rfc'),
'FRESH_ada': fresh.get('ada'),
'FRESH_rfc': fresh.get('rfc'),
'LDA_ada': lda.get('ada'),
'LDA_rfc': lda.get('rfc'),
'ada': unfiltered.get('ada'),
'rfc': unfiltered.get('rfc'),
'trivial': trivial,
}, {
'Boruta_ada': boruta.get('ada_count'),
'Boruta_rfc': boruta.get('rfc_count'),
'DTW_NN': dtw[1],
'FRESH_PCAa_ada': fresh_a.get('ada_count'),
'FRESH_PCAa_rfc': fresh_a.get('rfc_count'),
'FRESH_PCAb_ada': fresh_b.get('ada_count'),
        'FRESH_PCAb_rfc': fresh_b.get('rfc_count'),
'FRESH_ada': fresh.get('ada_count'),
'FRESH_rfc': fresh.get('rfc_count'),
'LDA_ada': lda.get('ada_count'),
'LDA_rfc': lda.get('rfc_count'),
'ada': unfiltered.get('ada_count'),
'rfc': unfiltered.get('rfc_count'),
'trivial': 0,
})
# Complete processing of one data set. Does 10-fold cross-validation
# extraction and classification
def process_data_set(root_path: str):
combined_X, combined_y = get_combined_raw_dataset(root_path)
skf = StratifiedKFold(n_splits=num_folds)
skf.get_n_splits(combined_X, combined_y)
total_acc = 0
results = []
fold = 1
for train_index, test_index in skf.split(combined_X, combined_y):
log('Processing fold ' + str(fold))
X_train, X_test = combined_X.iloc[train_index], combined_X.iloc[test_index]
y_train, y_test = combined_y.iloc[train_index], combined_y.iloc[test_index]
results.append(process_fold(X_train, y_train, X_test, y_test))
fold += 1
# For this dataset, averages is a map from the name of the
# pipeline (e.g. Boruta_rfc) to the average of all folds,
# similar for std_devs
averages, std_devs, counts = calc_statistics(results)
return averages, std_devs, counts
# Calculates the mean, std_dev and average counts of the
# results
def calc_statistics(results):
averages = {}
std_devs = {}
counts = {}
for k in results[0][0]:
values = []
for r in results:
f = r[0]
if (f.get(k) != -1):
values.append(f.get(k))
averages[k] = np.mean(values)
std_devs[k] = np.std(values)
for k in results[0][1]:
values = []
for r in results:
f = r[1]
if (f.get(k) != -1):
values.append(f.get(k))
counts[k] = np.mean(values)
return averages, std_devs, counts
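# For reference, `results` passed into calc_statistics is a list with one
# (accuracies, counts) tuple per fold, e.g. (illustrative values only):
#   [({'FRESH_rfc': 0.92, 'DTW_NN': 0.88, ...}, {'FRESH_rfc': 100, 'DTW_NN': 0, ...}), ...]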
# dump contents of array of strings to a file
def out_to_file(file: str, lines):
f = open(file, 'w')
for line in lines:
f.write(line + '\n')
f.close()
# log our progress.
def log(message):
elapsed = str(round(time.time() - mark, 0))
    f = open('./log.txt', 'a')  # append, so earlier progress messages are kept
f.write('[' + elapsed.rjust(15, '0') + '] ' + message + '\n')
f.close()
# Output the captured results to the various tsv output files
def output_results(results):
header = 'dataset'
first = results.get(next(iter(results)))[0]
for k in first:
header = header + '\t' + k
# averages
lines = [header]
for r in results:
line = r
aves = results.get(r)[0]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./averages.tsv', lines)
# std_devs
lines = [header]
for r in results:
line = r
aves = results.get(r)[1]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./std_devs.tsv', lines)
# counts
lines = [header]
for r in results:
line = r
aves = results.get(r)[2]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./counts.tsv', lines)
def get_dataset_dirs():
return glob("./data/*/")
# builds a (X, y) DataFrame pair of a random time series with
# a binary label and specified number of samples and length
def build_random_ts(num_samples, length_of_ts):
data = {}
labels = []
for s in range (0, num_samples):
labels.append(np.random.choice([1, 2]))
data['y'] = labels
for col in range(0, length_of_ts):
key = 'feature_' + str(col + 1)
values = []
for s in range (0, num_samples):
values.append(np.random.normal())
data[key] = values
df = pd.DataFrame.from_dict(data)
X = df.iloc[:,1:]
y = df.iloc[:,:1]
return (X, y)
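# Illustrative (hypothetical) usage, kept as a comment:
#   X, y = build_random_ts(50, 20)   # X: 50 rows x 20 features, y: 50 labels in {1, 2}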
# Dump the current snapshot of results to a given output filename
def capture_timing_result(f, results):
lines = []
for r in results:
values = results.get(r)
line = r
for v in values:
line = line + '\t' + str(v)
lines.append(line)
out_to_file(f, lines)
# Perform the full timing test first for fixed number of
# samples and then a fixed length of time series
def perform_timing_test():
log('performing timing test')
# The collection of tests that we run
tests = [
('Boruta', perform_boruta),
('DTW_NN', perform_dtw_nn),
('FRESH', perform_fresh),
('FRESH_PCAa', perform_fresh_pca_after),
('FRESH_PCAb', perform_fresh_pca_before),
('LDA', perform_lda),
('Full_X', perform_unfiltered)
]
# keep the number of samples constant
constant_samples_results = {}
for test in tests:
constant_samples_results[test[0]] = []
for length in [100, 1000, 2000]:
log('running 1000 samples and ' + str(length) + ' length')
X, y = build_random_ts(1000, length)
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, y)
train_index, test_index = next(skf.split(X, y))
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
for test in tests:
mark = time.time()
try:
test[1](X_train, y_train, X_test, y_test)
except:
log(test[0] + ' ERROR')
constant_samples_results.get(test[0]).append(time.time() - mark)
capture_timing_result('./fixed_samples.tsv', constant_samples_results)
# keep the length constant
constant_length_results = {}
for test in tests:
constant_length_results[test[0]] = []
for num_samples in [100, 1000, 2000]:
        log('running 1000 length and ' + str(num_samples) + ' samples')
X, y = build_random_ts(num_samples, 1000)
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, y)
train_index, test_index = next(skf.split(X, y))
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
for test in tests:
mark = time.time()
try:
test[1](X_train, y_train, X_test, y_test)
except:
log(test[0] + ' ERROR')
constant_length_results.get(test[0]).append(time.time() - mark)
capture_timing_result('./fixed_length.tsv', constant_length_results)
def load_and_plot(filename, out, title, colormap, vmax):
df = pd.read_csv(filename, delimiter='\t')
datasets = df['dataset'].tolist()
algorithms = list(df.columns.values)[1:]
data = df.iloc[:,1:].values
create_heatmap(out, data, datasets, algorithms, title, colormap, vmax)
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
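# Illustrative (hypothetical) usage with matplotlib's colour converter, kept as a comment:
#   c = mcolors.ColorConverter().to_rgb
#   blue_red = make_colormap([c('blue'), c('white'), 0.5, c('white'), c('red')])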
def create_boxplot(data, algorithms):
fig = plt.figure(1, figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# rectangular box plot
bplot1 = ax.boxplot(data,
vert=True, # vertical box alignment
patch_artist=True, # fill with color
labels=algorithms) # will be used to label x-ticks
ax.set_title('Used Features')
# fill with colors
colors = ['pink', 'orange', 'darkgoldenrod', 'olive', 'green', 'lightseagreen', 'seagreen', 'lightgreen', 'deepskyblue', 'orchid', 'hotpink', 'palevioletred']
for patch, color in zip(bplot1['boxes'], colors):
patch.set_facecolor(color)
# adding horizontal grid lines
ax.yaxis.grid(True)
plt.setp(ax.get_xticklabels(), rotation=90, ha="right")
ax.set_xlabel('Algorithm')
ax.set_ylabel('Used feature counts')
plt.savefig('./results/counts.png')
def create_heatmap(out, data, row_labels, col_labels, title, colormap, vmax, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, cmap=colormap, vmin=0, vmax=vmax, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
plt.gcf().subplots_adjust(bottom=0.25)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
ax.tick_params(axis='both', which='major', labelsize=6)
ax.tick_params(axis='both', which='minor', labelsize=6)
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=90, ha="right")
plt.title(title)
# Turn spines off and create white grid.
#for edge, spine in ax.spines.items():
# spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.6, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.6, minor=True)
ax.grid(which="minor", color="k", linestyle='-', linewidth=0.5)
ax.tick_params(which="minor", bottom=False, left=False)
f = plt.savefig(out)
plt.clf()
return im, cbar
def generate_boxplot():
    df = pd.read_csv('./results/counts.tsv', delimiter='\t')
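    # Presumed continuation (an assumption -- the source is truncated here),
    # mirroring how load_and_plot() above unpacks the TSV before plotting:
    algorithms = list(df.columns.values)[1:]
    data = df.iloc[:, 1:].values
    create_boxplot(data, algorithms)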
#! /usr/bin python
#------------------------------------------------------------------------------
# PROGRAM: plot_ncc_stripes_panels-all.py
#------------------------------------------------------------------------------
# Version 0.1
# 2 December, 2021
# <NAME>
# https://patternizer.github.io
# patternizer AT gmail DOT com
# michael DOT a DOT taylor AT uea DOT ac DOT uk
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# IMPORT PYTHON LIBRARIES
#------------------------------------------------------------------------------
# Dataframe libraries:
import numpy as np
import pandas as pd
import xarray as xr
# Datetime libraries:
from datetime import datetime
import nc_time_axis
import cftime
from cftime import num2date, DatetimeNoLeap
# Plotting libraries:
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt; plt.close('all')
import matplotlib.colors as mcolors
from matplotlib.cm import ScalarMappable
from matplotlib import rcParams
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Statistics libraries:
from scipy import stats
# Silence library version notifications
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SETTINGS:
#------------------------------------------------------------------------------
fontsize = 10
nsmooth = 2 # years
cbar_max = 6.0
barwidthfraction = 1.0
t_start = -66000000
t_end = 2200
use_timemask = True
use_logarithm = False
use_log10_scale = False
use_data_cmax = False
use_dark_theme = True
use_smoothing = True
use_overlay_axis = True
use_overlay_timeseries = True
use_overlay_colorbar = True
plot_climate_timeseries = True
plot_climate_bars = False
plot_climate_stripes = True
plot_climate_line = True
#projectionstr = 'RCP3pd'
#projectionstr = 'RCP45'
#projectionstr = 'RCP6'
#projectionstr = 'RCP85'
#projectionstr = 'SSP119'
#projectionstr = 'SSP126'
#projectionstr = 'SSP245'
projectionstr = 'SSP370'
#projectionstr = 'SSP585'
baselinestr = 'baseline_1851_1900'
#baselinestr = 'baseline_1961_1990'
#baselinestr = 'baseline_1971_2000'
titlestr = 'Global mean anomaly, 65.5 Myr BCE - 2200 CE: ' + projectionstr
pathstr = 'DATA/'
pages2kstr = 'PAGES2k.txt'
hadcrut5str = 'HadCRUT5.csv'
fairstr = 'fair' + '_' + projectionstr.lower() + '.csv'
lovarstr = 'variability_realisation0.txt'
hivarstr = 'variability_realisation1.txt'
paleostr = 'paleo_data_compilation.xls'
pages2k_file = pathstr + pages2kstr
hadcrut5_file = pathstr + hadcrut5str
fair_file = pathstr + fairstr
lo_var_file = pathstr + lovarstr
hi_var_file = pathstr + hivarstr
paleo_file = pathstr + paleostr
ipcc_rgb_txtfile = np.loadtxt("DATA/temp_div.txt") # IPCC AR6 temp div colormap file
cmap = mcolors.LinearSegmentedColormap.from_list('colormap', ipcc_rgb_txtfile) # ipcc_colormap
#cmap = plt.cm.get_cmap('RdBu_r')
#cmap = plt.cm.get_cmap('bwr')
#------------------------------------------------------------------------------
# DARK THEME
#------------------------------------------------------------------------------
if use_dark_theme == True:
matplotlib.rcParams['text.usetex'] = False
# rcParams['font.family'] = ['DejaVu Sans']
# rcParams['font.sans-serif'] = ['Avant Garde']
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Avant Garde', 'Lucida Grande', 'Verdana', 'DejaVu Sans' ]
plt.rc('text',color='white')
plt.rc('lines',color='white')
plt.rc('patch',edgecolor='white')
plt.rc('grid',color='lightgray')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
plt.rc('axes',edgecolor='lightgray')
plt.rc('figure',facecolor='black')
plt.rc('figure',edgecolor='black')
plt.rc('savefig',edgecolor='black')
plt.rc('savefig',facecolor='black')
else:
matplotlib.rcParams['text.usetex'] = False
# rcParams['font.family'] = ['DejaVu Sans']
# rcParams['font.sans-serif'] = ['Avant Garde']
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Avant Garde', 'Lucida Grande', 'Verdana', 'DejaVu Sans' ]
plt.rc('text',color='black')
plt.rc('lines',color='black')
plt.rc('patch',edgecolor='black')
plt.rc('grid',color='lightgray')
plt.rc('xtick',color='black')
plt.rc('ytick',color='black')
plt.rc('axes',labelcolor='black')
plt.rc('axes',facecolor='white')
plt.rc('axes',edgecolor='black')
plt.rc('figure',facecolor='white')
plt.rc('figure',edgecolor='white')
plt.rc('savefig',edgecolor='white')
plt.rc('savefig',facecolor='white')
# Calculate current time
now = datetime.now()
currentdy = str(now.day).zfill(2)
currentmn = str(now.month).zfill(2)
currentyr = str(now.year)
titletime = str(currentdy) + '/' + currentmn + '/' + currentyr
#-----------------------------------------------------------------------------
# LOAD: PAGES2k (via <NAME> with thanks) --> df_pages2k
# NB: convert time to year.decimal
#-----------------------------------------------------------------------------
# FORMAT:
# Year CE | raw instrumental target data | reconstruction ensemble 50th | 2.5th | 97.5th percentiles |
# 31-year butterworth filtered instrumental target data | 31-year butterworth filtered reconstruction 50th |
# 2.5th | 97.5th percentiles
nheader = 5
f = open(pages2k_file)
lines = f.readlines()
years = [] # [0001,2000]
obs = []
for i in range(nheader,len(lines)):
words = lines[i].split()
year = words[0].zfill(4)
val = (len(words)-1)*[None]
for j in range(len(val)):
try: val[j] = float(words[j+1])
except:
pass
years.append(year)
obs.append(val)
f.close()
obs = np.array(obs)
t_pages2k = xr.cftime_range(start=years[0], periods=len(years), freq='A', calendar='gregorian')[0:1849]
ts_pages2k_instr = pd.to_numeric(obs[:,1][0:1849], errors='coerce')
ts_pages2k_recon = pd.to_numeric(obs[:,5][0:1849], errors='coerce')
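# Presumed continuation (an assumption; the dump is truncated here): the parsed
# series would next be assembled into the df_pages2k dataframe referred to above,
# e.g.:
#   df_pages2k = pd.DataFrame({'t': t_pages2k, 'ts': ts_pages2k_recon})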
import re
import warnings
import numpy as np
import pandas as pd
from Amplo.Utils import clean_keys
class DataProcesser:
def __init__(self,
target: str = None,
float_cols: list = None,
int_cols: list = None,
date_cols: list = None,
cat_cols: list = None,
include_output: bool = False,
missing_values: str = 'interpolate',
outlier_removal: str = 'clip',
z_score_threshold: int = 4,
version: int = 1,
verbosity: int = 0,
):
"""
Preprocessing Class. Cleans a dataset into a workable format.
        Deals with outliers, missing values, duplicate rows, data types (floats, categorical and
        dates), NaNs and infinities.
        Parameters
        ----------
        target str: Column name of target variable
        float_cols list: Float columns, parsed to floats
        int_cols list: Integer columns, parsed to integers
        date_cols list: Date columns, all parsed to pd.datetime format
        cat_cols list: Categorical columns. Currently all one-hot encoded.
        include_output bool: Whether to keep the target column in the returned data
        missing_values str: How to deal with missing values ('remove_rows', 'remove_cols', 'interpolate', 'mean' or 'zero')
        outlier_removal str: How to deal with outliers ('clip', 'quantiles', 'z-score' or 'none')
        z_score_threshold int: If outlier_removal='z-score', the threshold is adaptable, default=4.
        version int: Versioning the output files
        verbosity int: Verbosity of the printed progress messages
"""
# Tests
mis_values_algo = ['remove_rows', 'remove_cols', 'interpolate', 'mean', 'zero']
assert missing_values in mis_values_algo, \
'Missing values algorithm not implemented, pick from {}'.format(', '.join(mis_values_algo))
out_rem_algo = ['quantiles', 'z-score', 'clip', 'none']
assert outlier_removal in out_rem_algo, \
'Outlier Removal algorithm not implemented, pick from {}'.format(', '.join(out_rem_algo))
# Arguments
self.version = version
self.includeOutput = include_output
self.target = target if target is None else re.sub("[^a-z0-9]", '_', target.lower())
self.float_cols = [] if float_cols is None else [re.sub('[^a-z0-9]', '_', fc.lower()) for fc in float_cols]
self.int_cols = [] if int_cols is None else [re.sub('[^a-z0-9]', '_', ic.lower()) for ic in int_cols]
self.num_cols = self.float_cols + self.int_cols
self.cat_cols = [] if cat_cols is None else [re.sub('[^a-z0-9]', '_', cc.lower()) for cc in cat_cols]
self.date_cols = [] if date_cols is None else [re.sub('[^a-z0-9]', '_', dc.lower()) for dc in date_cols]
if self.target in self.num_cols:
self.num_cols.remove(self.target)
# Algorithms
self.missing_values = missing_values
self.outlier_removal = outlier_removal
self.z_score_threshold = z_score_threshold
# Fitted Settings
self.dummies = {}
self._q1 = None
self._q3 = None
self._means = None
self._stds = None
# Info for Documenting
self.is_fitted = False
self.verbosity = verbosity
self.removedDuplicateRows = 0
self.removedDuplicateColumns = 0
self.removedOutliers = 0
self.imputedMissingValues = 0
self.removedConstantColumns = 0
def fit_transform(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Fits this data cleaning module and returns the transformed data.
Parameters
----------
data [pd.DataFrame]: Input data
Returns
-------
data [pd.DataFrame]: Cleaned input data
"""
if self.verbosity > 0:
print('[AutoML] Data Cleaning Started, ({} x {}) samples'.format(len(data), len(data.keys())))
# Clean Keys
data = clean_keys(data)
# Remove Duplicates
data = self.remove_duplicates(data)
# Infer data-types
self.infer_data_types(data)
# Convert data types
data = self.convert_data_types(data, fit_categorical=True)
# Remove outliers
data = self.remove_outliers(data, fit=True)
# Remove missing values
data = self.remove_missing_values(data)
# Remove Constants
data = self.remove_constants(data)
# Convert integer columns
data = self.convert_float_int(data)
# Clean target
data = self.clean_target(data)
# Finish
self.is_fitted = True
if self.verbosity > 0:
print('[AutoML] Processing completed, ({} x {}) samples returned'.format(len(data), len(data.keys())))
return data
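    # Illustrative (hypothetical) usage of the processor, kept as a comment:
    #   dp = DataProcesser(target='label', missing_values='interpolate', outlier_removal='clip')
    #   cleaned_train = dp.fit_transform(train_df)
    #   cleaned_new = dp.transform(new_df)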
def transform(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Function that takes existing settings (including dummies), and transforms new data.
Parameters
----------
data [pd.DataFrame]: Input data
Returns
-------
data [pd.DataFrame]: Cleaned input data
"""
assert self.is_fitted, "Transform only available for fitted objects, run .fit_transform() first."
# Clean Keys
data = clean_keys(data)
# Impute columns
data = self._impute_columns(data)
# Remove duplicates
data = self.remove_duplicates(data, rows=False)
# Convert data types
data = self.convert_data_types(data, fit_categorical=False)
# Remove outliers
data = self.remove_outliers(data, fit=False)
# Remove missing values
data = self.remove_missing_values(data)
# Convert integer columns
data = self.convert_float_int(data)
return data
def get_settings(self) -> dict:
"""
Get settings to recreate fitted object.
"""
assert self.is_fitted, "Object not yet fitted."
return {
'num_cols': self.num_cols,
'float_cols': self.float_cols,
'int_cols': self.int_cols,
'date_cols': self.date_cols,
'cat_cols': self.cat_cols,
'missing_values': self.missing_values,
'outlier_removal': self.outlier_removal,
'z_score_threshold': self.z_score_threshold,
'_means': None if self._means is None else self._means.to_json(),
'_stds': None if self._stds is None else self._stds.to_json(),
'_q1': None if self._q1 is None else self._q1.to_json(),
'_q3': None if self._q3 is None else self._q3.to_json(),
'dummies': self.dummies,
'fit': {
'imputed_missing_values': self.imputedMissingValues,
'removed_outliers': self.removedOutliers,
'removed_constant_columns': self.removedConstantColumns,
'removed_duplicate_rows': self.removedDuplicateRows,
'removed_duplicate_columns': self.removedDuplicateColumns,
}
}
def load_settings(self, settings: dict) -> None:
"""
Loads settings from dictionary and recreates a fitted object
"""
self.num_cols = settings['num_cols'] if 'num_cols' in settings else []
self.float_cols = settings['float_cols'] if 'float_cols' in settings else []
self.int_cols = settings['int_cols'] if 'int_cols' in settings else []
self.cat_cols = settings['cat_cols'] if 'cat_cols' in settings else []
self.date_cols = settings['date_cols'] if 'date_cols' in settings else []
self.missing_values = settings['missing_values'] if 'missing_values' in settings else []
self.outlier_removal = settings['outlier_removal'] if 'outlier_removal' in settings else []
self.z_score_threshold = settings['z_score_threshold'] if 'z_score_threshold' in settings else []
self._means = None if settings['_means'] is None else pd.read_json(settings['_means'])
self._stds = None if settings['_stds'] is None else pd.read_json(settings['_stds'])
self._q1 = None if settings['_q1'] is None else pd.read_json(settings['_q1'])
self._q3 = None if settings['_q3'] is None else pd.read_json(settings['_q3'])
self.dummies = settings['dummies'] if 'dummies' in settings else {}
self.is_fitted = True
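    # Illustrative (hypothetical) round-trip, e.g. to persist a fitted processor
    # and rebuild it later, kept as a comment:
    #   settings = fitted.get_settings()
    #   restored = DataProcesser()
    #   restored.load_settings(settings)
    #   restored.transform(new_df)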
def infer_data_types(self, data: pd.DataFrame):
"""
In case no data types are provided, this function infers the most likely data types
"""
if len(self.cat_cols) == len(self.num_cols) == len(self.date_cols) == 0:
# First cleanup
data = data.infer_objects()
# Remove target from columns
if not self.includeOutput and self.target is not None and self.target in data:
data = data.drop(self.target, axis=1)
# Iterate through keys
for key in data.keys():
# Integer
if pd.api.types.is_integer_dtype(data[key]):
self.int_cols.append(key)
# Float
if pd.api.types.is_float_dtype(data[key]):
self.float_cols.append(key)
# Datetime
if pd.api.types.is_datetime64_any_dtype(data[key]):
self.date_cols.append(key)
# Booleans
if pd.api.types.is_bool_dtype(data[key]):
self.int_cols.append(key)
# Strings / Objects
if pd.api.types.is_object_dtype(data[key]):
is_date = data[key].astype('str').apply(pd.to_datetime, errors='coerce').isna().sum() < 0.3 * \
len(data)
if is_date:
self.date_cols.append(key)
elif data[key].nunique() < 100:
self.cat_cols.append(key)
else:
forced_numeric = pd.to_numeric(data[key], errors='coerce')
if forced_numeric.isna().sum() < len(data) / 25:
self.float_cols.append(key)
# Set num cols for reverse compatibility
self.num_cols = self.int_cols + self.float_cols
# Check if float keys are secretly not integers
for key in self.float_cols:
forced_int = pd.to_numeric(data[key].fillna(0), errors='coerce', downcast='integer')
            if pd.api.types.is_integer_dtype(forced_int):
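                # Presumed continuation (an assumption; the source is truncated
                # here): the column really holds integers, so it would be
                # re-labelled from float_cols to int_cols, e.g.:
                #   self.int_cols.append(key)
                #   self.float_cols.remove(key)  # ideally on a copy, to avoid mutating the list being iterated
                pass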
from __future__ import print_function
from shallow import ShallowLearner
from deep import DeepLearner
import pandas as pd
import numpy as np
import plots
import os
def main():
"""Displays a CLI to display the execution process and results of the various classifiers."""
print("This is the 'Big Data' summative assignment for Z0954757.")
print()
    # Perform analysis for the Shallow Learning Initial Investigation
shallow_initial = raw_input(
"Would you like to run the initial investigations for the shallow learning approaches (Estimated time to complete: 3 minutes)? (y/n)"
)
if 'y' in shallow_initial.lower():
# Create an instance of the ShallowLearner class
shall = ShallowLearner()
# Get the data for use in the shallow appraches
shall.get_data(os.path.join('datasets','news_ds.csv'))
# Try the first approach
first_results = shall.first_approach()
print(first_results)
# Try the second approach
second_results = shall.second_approach()
print(second_results)
# Try the third approach
third_results = shall.third_approach()
print(third_results)
# Try the fourth approach
fourth_results = shall.fourth_approach()
print(fourth_results)
# Perform analysis for the Shallow Learning Further Investigations
shallow_further = raw_input(
"Would you like to run the further investigations for the shallow learning approaches? (y/n)"
)
if 'y' in shallow_further.lower():
load_data = raw_input(
"Type 'load' to load pre-existing data or nothing to regenerate the data (Estimated time to regenerate: 90 minutes)"
)
if 'load' in load_data.lower():
#Load data from csv files.
plots.plot_grid_search(os.path.join('saves','ThirdApproachVariations.csv'), ' Third Approach - TF-IDF Grid Search Optimisation')
plots.plot_grid_search(os.path.join('saves','FourthApproachVariations.csv'), ' Fourth Approach - N-gram (1,2) Grid Search Optimisation')
else:
print("Regenerating data.")
# Create an instance of the ShallowLearner class
shall = ShallowLearner()
# Get the data for use in the shallow appraches
shall.get_data(os.path.join('datasets','news_ds.csv'))
# Create arrays of test values for splits and max features.
splits = np.arange(0.2, 1, 0.2)
max_feats = np.arange(1000, 21000, 2000)
print("Test splits: ", splits)
print("Test maximum features: ", max_feats)
# Intialise a dictionary to collect the results.
third_results_dict = {
'splits' : [],
'no feats' : [],
'Accuracy': [],
'Precision' : [],
'Recall':[],
'F1':[]
}
print("Varying splits and max features for approach three.")
for test_split in splits:
print("Testing at split: ", test_split)
for features in max_feats:
print("Testing at max features: ", features)
results = shall.third_approach(split=test_split, no_features=features)
third_results_dict['splits'].append(test_split)
third_results_dict['no feats'].append(features)
third_results_dict['Accuracy'].append(results['Accuracy'])
third_results_dict['Precision'].append(results['Precision'])
third_results_dict['Recall'].append(results['Recall'])
third_results_dict['F1'].append(results['F1'])
third_results_df = pd.DataFrame(third_results_dict)
third_results_df.to_csv(os.path.join('saves','ThirdApproachVariationsRegen.csv'))
# Vary n-gram format in approach four
print("Varying n-gram range for approach four.")
n_gram_ranges = [(1,1),(2,2), (3,3), (1,2), (1,3)]
fourth_n_gram_results_dict = {
'n_gram_range' : [],
'Accuracy': [],
'Precision' : [],
'Recall':[],
'F1':[]
}
for n_range in n_gram_ranges:
print("Testing n gram range: ", n_range)
results = shall.fourth_approach(n_range)
fourth_n_gram_results_dict['n_gram_range'].append(n_range)
fourth_n_gram_results_dict['Accuracy'].append(results['Accuracy'])
fourth_n_gram_results_dict['Precision'].append(results['Precision'])
fourth_n_gram_results_dict['Recall'].append(results['Recall'])
fourth_n_gram_results_dict['F1'].append(results['F1'])
            fourth_n_gram_results_df = pd.DataFrame(fourth_n_gram_results_dict)
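            # Presumed continuation (an assumption, mirroring the third-approach
            # block above): persist the n-gram results for later inspection, e.g.:
            #   fourth_n_gram_results_df.to_csv(os.path.join('saves', 'FourthApproachVariationsRegen.csv'))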
# Script that parses the Site of Interest annotations from a MAPS XML file
# Developed with data from MAPS Viewer 3.6
import xml.etree.ElementTree as ET
import csv
import ast
import numpy as np
import pandas as pd
import math
from pathlib import Path
import logging
import copy
import multiprocessing
import unicodedata
import random
class XmlParsingFailed(Exception):
def __init__(self, message):
logging.error(message)
class MapsXmlParser:
"""XML Parser class that processes MAPS XMLs and reveals the image tiles belonging to each annotation
This class takes XML files from the Thermo MAPS Application, extracts the location of all its annotations and
compares it to the location of all the image tiles. This gets a bit more complicated, because tiles only have a
relative position in pixels within their layers and those layers are (potentially) turned at arbitrary angles
relative to the global coordinate system (which is in meters).
It is important that the functions are run in the correct order to extract & process this information. Therefore,
use it by initializing the class and the calling parser.parse_xml().
Args:
project_folder (str): The path to the project folder of the MAPS project, containing the XML file, as a string
name_of_highmag_layer (str): Name of the image layer in MAPS for which tiles containing annotations should
be found. Defaults to 'highmag'
use_unregistered_pos (bool): Whether to use the unregistered position of the tiles (where MAPS thinks it
acquired them => True, default) or a calculated position (e.g. through stitching in MAPS) should be used for
the tiles (False)
stitch_radius (int): The number of images in each direction from the tile containing the annotation should be
stitched. Parser doesn't do the stitching, just extracts relevant information about neighboring tiles.
Defaults to 1 => 3x3 images would be stitched.
Attributes:
project_folder_path (str): The path to the project folder of the MAPS project, containing the XML file, as a
string
layers (dict): Contains information about the layers (squares/acquisitions). The keys are local paths to
the metadata about the layer and the values are dictionaries again. Each layer contains the information
about the stage position of the center of the layer (in meters, StagePosition_center_x &
StagePosition_center_y), about the rotation of the layer relative to the global stage positions (in degrees,
rotation), the number of columns of tiles (images) in the layer (columns), the vertical & horizontal overlap
between the tiles (overlapVertical, overlapHorizontal), the number of rows of tiles (images) in the layer
(rows), the horizontal field width of each tile (its width in meters, tileHfw), the horizontal field width
of the whole layer (its width in meters, totalHfw), the name of the layer (layer_name), the vertical field
width of each tile (its width in meters, tileVfw), and the global stage position of the corner of the layer
(in meters, StagePosition_corner_x & StagePosition_corner_y)
tiles (dict): Contains information about individual tiles. The keys are the combined layer name & filename of
the tile and the values are a dictionary again. Each tile contains the information about what layer it
belongs to (layer, key to the layer dict), the path to the image as a Path variable (img_path), its
filename, the name of the layer (layer_name) and its relative position x & y within that layer
(RelativeTilePosition_x & RelativeTilePosition_y)
annotations (dict): Contains the information about all the annotations. The keys are the names of the
annotations (MAPS enforces uniqueness), its values are a dictionary containing the StagePosition_x &
StagePosition_y positions of the annotation (in m => global coordinate system for the experiment)
annotation_tiles (dict): Contains the relevant output of the XML parsing. The keys are the names of the
annotation. The values are the key to the corresponding layer in the layer dict (layer), the path to the
image as a Path variable (img_path), its filename, the name of the layer (layer_name) and the relative
position x & y of the tile within that layer (RelativeTilePosition_x & RelativeTilePosition_y), the
absolute stage position of the annotation (Annotation_StagePosition_x and Annotation_StagePosition_y), the
position of the annotation within the tile image (in pixels, Annotation_tile_img_position_x &
Annotation_tile_img_position_y), a list of the surrounding tile names (surrounding_tile_names) and a list of
booleans of whether each of the surrounding tiles exist, including the tile itself (surrounding_tile_exists)
stitch_radius (int): The number of images in each direction from the tile containing the annotation should be
stitched.
pixel_size (float): The pixel size of the acquisition in the name_of_highmag_layer [in meters]
img_height (int): The height of the highmag image in pixels
img_width (int): The width of the highmag image in pixels
"""
def __init__(self, project_folder: str, name_of_highmag_layer: str = 'highmag', use_unregistered_pos: bool = True,
stitch_radius: int = 1):
self.project_folder_path = Path(project_folder)
self.layers = {}
self.tiles = {}
self.annotations = {}
self.annotation_tiles = {}
self.stitch_radius = stitch_radius
self.pixel_size = 0.0
# Usage of width and height may be switched, as Talos images are always square and
# I couldn't test non-square images
self.img_height = 0
self.img_width = 0
# Internal variables
self._name_of_highmag_layer = name_of_highmag_layer
xml_file_name = 'MapsProject.xml'
xml_file_path = self.project_folder_path / xml_file_name
self._xml_file = self.load_xml(xml_file_path)
self._tile_names = []
self._tile_center_stage_positions = []
if use_unregistered_pos:
self._position_to_extract = 'UnalignedPosition'
else:
self._position_to_extract = 'CalculatedPosition'
@staticmethod
def load_xml(xml_file_path):
"""Loads and returns the MAPS XML File
Args:
xml_file_path (Path): Path to the XML file as a pathlib path
Returns:
root: The root of the XML file parsed with xml.etree.ElementTree
"""
try:
root = ET.parse(xml_file_path).getroot()
except FileNotFoundError:
raise XmlParsingFailed("Can't find the MAPS XML File at the location {}".format(xml_file_path))
return root
def parse_xml(self):
"""Run function for the class
parse_xml calls all the necessary functions of the class in the correct order to parse the XML file. Call this
function after initializing a class object, then access the results via the annotation_tiles variable that
contains the relevant output of the XML parsing
Returns:
dict: annotation_tiles
"""
self.extract_layers_and_annotations()
self.get_relative_tile_locations()
self.calculate_absolute_tile_coordinates()
self.find_annotation_tile()
self.determine_surrounding_tiles()
return self.annotation_tiles
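    # Illustrative (hypothetical) usage of the class, kept as a comment:
    #   parser = MapsXmlParser('/path/to/maps_project', name_of_highmag_layer='highmag')
    #   annotation_tiles = parser.parse_xml()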
@staticmethod
def convert_windows_pathstring_to_path_object(string_path):
"""Converts a windows path string to a path object
Some paths are provided in the XML file and the metadata as Windows paths. This function creates pathlib Path
objects out of them.
Args:
string_path (str): String of a Windows path containing double backslashes
Returns:
path: Path object of the string_path
"""
folders = string_path.split('\\')
path = Path()
for folder in folders:
path = path / folder
return path
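    # e.g. (illustrative): 'LayersData\\Layer_1\\Tiles' -> Path('LayersData/Layer_1/Tiles')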
def convert_img_path_to_local_path(self, img_path):
"""Converts a local path of the microscope computer to a path of the image in the project folder
MAPS saves the image paths of the local storage of the image files. In our setup, this path starts with
'D:\\ProjectName', even though the files aren't actually on the D drive anymore but were copied to a share, into
the project_folder_path. This function strips 'D:\\ProjectName' away from the path and returns a Path object for
the location of the images on the share.
Args:
img_path (str): Original path to the images on the microscope computer, starting with 'D:\\ProjectName'
Returns:
path: Path object of the corrected path on the share
"""
folders = img_path.split('\\')
path = self.project_folder_path
try:
layersdata_index = folders.index('LayersData')
except ValueError:
raise XmlParsingFailed('Could not find the folder LayersData that should contain the raw data in the '
'filepath for the iamge_files: {}'.format(img_path))
for folder in folders[layersdata_index:]:
path = path / folder
return path
@staticmethod
def create_logger(log_file_path, multiprocessing_logger: bool = False):
""" Returns a logger and creates it if it doesn't yet exist
Gets the correct logger for normal or multiprocessing. If it already exists, just returns this logger. If it
doesn't exist yet, sets it up correctly.
Args:
log_file_path (str): Path to the log file
multiprocessing_logger (bool): Whether a multiprocessing logger is needed. Defaults to False.
Returns:
logger: Logger object that has the correct handlers
"""
if multiprocessing_logger:
logger = multiprocessing.get_logger()
logger.setLevel(logging.INFO)
logger.propagate = True
else:
logger = logging.getLogger(__name__)
# If the logger doesn't have any handlers yet, add them. Otherwise, just return the logger
if not len(logger.handlers):
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
# Create file logging handler
fh = logging.FileHandler(log_file_path)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Create console logging handler
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
return logger
def extract_layers_and_annotations(self, check_for_annotations: bool = True):
"""Extract the information about all the layers in the high magnification acquisition layers and the annotations
Go through the XML file and submit all LayerGroups for processing. The LayerGroups contain both the Layer with
the _name_of_highmag_layer (=> the actual images) as well as the Annotation Layers containing the annotations.
At the end of processing, check whether the self.layers and the self.annotations dictionaries were filled. If
one of them was not filled, the parser could not find the highmag images or the annotations and raises an
exception.
Args:
check_for_annotations (bool): Whether the parsing should check for the existence of annotations and throw an
error if none are found. Defaults to True, thus checking for annotations.
"""
# Extract the information about all the layers (high magnification acquisition layers)
for xml_category in self._xml_file:
if xml_category.tag.endswith('LayerGroups'):
for sub_category in xml_category:
if sub_category.tag.endswith('LayerGroup'):
self._process_layer_group(sub_category)
# If the parsing did not find any tiles, raise an error
if not self.layers:
raise XmlParsingFailed('Parsing the XML File did not find any tiles (images). Therefore, cannot map '
'annotations')
if check_for_annotations and not self.annotations:
raise XmlParsingFailed('No annotations were found on tiles. Are there annotations in the MAPS project? '
'Are those annotations on the highmag tiles? If so, there may be an issue with the '
'calculation of the positions of the annotations. Check whether '
'find_annotation_tile gave any warnings.')
def _process_layer_group(self, layer_group):
"""Recursively go through the LayerGroup and send TileLayer & AnnotationLayer for processing
Goes through a given LayerGroup. If there are nested LayerGroups, it calls the function recursively. If a Layer
is a TileLayer and its name is the name of the highmag layer, it sends it for processing to
self._process_tile_layer. If a Layer is a Annotation Layer (independent of whether it's in a Layer Group that
has the highmag name or not), it is sent for processing in self._extract_annotation_locations.
Args:
layer_group: Part of the XML object that contains the information for a LayerGroup
"""
log_file_path = str(self.project_folder_path / (self.project_folder_path.name + '.log'))
logger = self.create_logger(log_file_path)
for ggc in layer_group:
# Get the path to the metadata xml files for all the highmag layers,
# the pixel size and the StagePosition of the layers
if ggc.tag.endswith('displayName') and ggc.text == self._name_of_highmag_layer:
logger.info('Extracting images from {} layers'.format(ggc.text))
for highmag in layer_group:
if highmag.tag.endswith('Layers'):
for layer in highmag:
# Check if this layers is a TileLayer or any other kind of layers.
# Only proceed to process TileLayers
layer_type = layer.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
if layer_type == 'TileLayer':
self._process_tile_layer(layer)
elif layer_type == 'LayerGroup':
# If there are nested LayerGroups, recursively call the function again
# with this LayerGroup
self._process_layer_group(layer)
elif layer_type == 'AnnotationLayer':
self._extract_annotation_locations(layer)
else:
logger.warning('XML Parser does not know how to deal with {} Layers and '
'therefore does not parse them'.format(layer_type))
else:
if ggc.tag.endswith('Layers'):
for layer in ggc:
layer_type = layer.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
if layer_type == 'AnnotationLayer':
self._extract_annotation_locations(layer)
elif layer_type == 'LayerGroup':
# If there are nested LayerGroups, recursively call the function again
# with this LayerGroup
self._process_layer_group(layer)
def _process_tile_layer(self, layer):
"""Extracts all necessary information of a highmag Tile Layer and saves it to self.layers
Args:
layer: Part of the XML object that contains the information for a TileLayer
"""
layer_type = layer.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
assert (layer_type == 'TileLayer')
for layer_content in layer:
if layer_content.tag.endswith('metaDataLocation'):
metadata_location = layer_content.text
self.layers[metadata_location] = {}
try:
for layer_content in layer:
if layer_content.tag.endswith('totalHfw'):
# noinspection PyUnboundLocalVariable
self.layers[metadata_location]['totalHfw'] = float(list(layer_content.attrib.values())[1])
if layer_content.tag.endswith('tileHfw'):
# noinspection PyUnboundLocalVariable
self.layers[metadata_location]['tileHfw'] = float(list(layer_content.attrib.values())[1])
if layer_content.tag.endswith('overlapHorizontal'):
# noinspection PyUnboundLocalVariable
self.layers[metadata_location]['overlapHorizontal'] = float(layer_content[0].text) / 100.
if layer_content.tag.endswith('overlapVertical'):
# noinspection PyUnboundLocalVariable
self.layers[metadata_location]['overlapVertical'] = float(layer_content[0].text) / 100.
if layer_content.tag.endswith('rotation'):
# noinspection PyUnboundLocalVariable
self.layers[metadata_location]['rotation'] = float(list(layer_content.attrib.values())[1])
if layer_content.tag.endswith('rows'):
# noinspection PyUnboundLocalVariable
self.layers[metadata_location]['rows'] = int(layer_content.text)
if layer_content.tag.endswith('columns'):
# noinspection PyUnboundLocalVariable
self.layers[metadata_location]['columns'] = int(layer_content.text)
if layer_content.tag.endswith('scanResolution'):
for scanres_info in layer_content:
if scanres_info.tag.endswith('height'):
height = int(scanres_info.text)
if self.img_height == height or self.img_height == 0:
self.img_height = height
else:
raise Exception(
'Image height needs to be constant for the whole {} layer. It was {} before and '
'is {} in the current layer'.format(self._name_of_highmag_layer,
self.img_height, height))
if scanres_info.tag.endswith('width'):
width = int(scanres_info.text)
if self.img_width == width or self.img_width == 0:
self.img_width = width
else:
raise Exception(
'Image width needs to be constant for the whole {} layer. It was {} before and '
'is {} in the current layer'.format(self._name_of_highmag_layer,
self.img_width, width))
if layer_content.tag.endswith('pixelSize'):
pixel_size = float(layer_content.attrib['Value'])
if self.pixel_size == pixel_size or self.pixel_size == 0:
self.pixel_size = pixel_size
else:
raise Exception('Pixel size needs to be constant for the whole {} layer. It was {} before and '
'is {} in the current layer'.format(self._name_of_highmag_layer,
self.pixel_size, pixel_size))
if layer_content.tag.endswith('StagePosition'):
for positon_info in layer_content:
if positon_info.tag == '{http://schemas.datacontract.org/2004/07/Fei.Applications.SAL}x':
# noinspection PyUnboundLocalVariable
self.layers[metadata_location][
'StagePosition_center_x'] = float(positon_info.text)
elif positon_info.tag == '{http://schemas.datacontract.org/2004/07/Fei.Applications.SAL}y':
# noinspection PyUnboundLocalVariable
self.layers[metadata_location][
'StagePosition_center_y'] = float(positon_info.text)
except NameError:
raise XmlParsingFailed("Can't find the metaDataLocation in the MAPS XML File")
def _extract_annotation_locations(self, annotation_layer):
"""Extract annotation metadata from the XML file and saves them to the self.annotations dictionary
Only Sites Of Interest Annotations are processed. Areas of Interest are ignored. Gets the
x & y position of the annotation & the annotation name and saves them in the annotations dictionary.
The keys are the names of the annotations (MAPS enforces uniqueness), its values are a dictionary containing the
StagePosition_x & StagePosition_y positions of the annotation (in m => global coordinate system for the
experiment)
"""
for potential_annotation_content in annotation_layer:
# Only check Sites Of Interest, not Area of Interest. Both are Annotation Layers, but Areas of Interest
# have the isArea value as true
if potential_annotation_content.tag.endswith('isArea') and potential_annotation_content.text == 'false':
for annotation_content in annotation_layer:
if annotation_content.tag.endswith('RealDisplayName'):
annotation_name = self.create_valid_name(annotation_content.text)
self.annotations[annotation_name] = {}
try:
for annotation_content in annotation_layer:
if annotation_content.tag.endswith('StagePosition'):
for a in annotation_content:
if a.tag == '{http://schemas.datacontract.org/2004/07/Fei.Applications.SAL}x':
# noinspection PyUnboundLocalVariable
self.annotations[annotation_name]['StagePosition_x'] = float(a.text)
elif a.tag == '{http://schemas.datacontract.org/2004/07/Fei.Applications.SAL}y':
# noinspection PyUnboundLocalVariable
self.annotations[annotation_name]['StagePosition_y'] = float(a.text)
except NameError:
raise XmlParsingFailed("Can't find the Annotations Names in the MAPS XML File")
def get_relative_tile_locations(self):
"""Read in all the metadata files for the different layers to get the relative tile positions
Each layer has its own metadata XML file that contains the relative positions of all the tiles in the layer.
This function goes through all of them, extracts the information and saves it to the tiles dictionary.
The keys are the combined layer name & filename of the tile and the values are a dictionary again. Each tile
contains the information about what layer it belongs to (layer, key to the layer dict), the path to the image as
a Path variable (img_path), its filename, the name of the layer (layer_name) and its relative position x & y
within that layer (RelativeTilePosition_x & RelativeTilePosition_y)
"""
metadata_filename = 'StitchingData.xml'
for metadata_location in self.layers:
tile_image_folder_path = ''
metadata_path = self.project_folder_path.joinpath(
self.convert_windows_pathstring_to_path_object(metadata_location)) / metadata_filename
try:
metadata_root = ET.parse(metadata_path).getroot()
except FileNotFoundError:
log_file_path = str(self.project_folder_path / (self.project_folder_path.name + '.log'))
logger = self.create_logger(log_file_path)
                logger.warning('Could not find the Metadata file for layer {}. Skipping it. If this layer '
                               'contained annotations, those cannot be stitched afterwards.'.format(metadata_path))
continue
for metadata_child in metadata_root:
if metadata_child.tag.endswith('tileSet'):
for metadata_grandchild in metadata_child:
if metadata_grandchild.tag.endswith('TileImageFolder'):
tile_image_folder_path = metadata_grandchild.text
current_layer = tile_image_folder_path.split('\\')[-1]
self.layers[metadata_location]['layer_name'] = current_layer
if metadata_grandchild.tag.endswith('_tileCollection'):
for ggc in metadata_grandchild:
if ggc.tag.endswith('_innerCollection'):
for gggc in ggc:
for keyvalue in gggc:
if keyvalue.tag.endswith('Value'):
for value in keyvalue:
if value.tag.endswith('ImageFileName'):
# noinspection PyUnboundLocalVariable
tile_name = current_layer + '_' + value.text
self.tiles[tile_name] = {'layers': metadata_location,
'img_path': self.convert_img_path_to_local_path(
tile_image_folder_path),
'filename': value.text,
'layer_name': current_layer}
for value in keyvalue:
if value.tag.endswith('PositioningDetails'):
for positioning_detail in value:
if positioning_detail.tag.endswith(
self._position_to_extract):
for position in positioning_detail:
if position.tag == '{http://schemas.datacontract.' \
'org/2004/07/System.Drawing}x':
# noinspection PyUnboundLocalVariable
self.tiles[tile_name][
'RelativeTilePosition_x'] = float(
position.text)
elif position.tag == '{http://schemas.' \
'datacontract.org/2004/07/' \
'System.Drawing}y':
# noinspection PyUnboundLocalVariable
self.tiles[tile_name][
'RelativeTilePosition_y'] = float(
position.text)
def calculate_absolute_tile_coordinates(self):
"""Calculate the absolute stage positions of all tiles based on their relative positions
Calculate the absolute stage position of the center of each tile based on the relative tile positions, the
rotation of the layer and the absolute stage position of the center of the layer. The resulting position is
saved to the _tile_center_stage_positions and the corresponding tile name to the _tile_names list.
"""
for current_layer_key in self.layers:
current_layer = self.layers[current_layer_key]
# Unsure if it's height/width or width/height because they are always the same on Talos
current_layer['tileVfw'] = self.img_height / self.img_width * current_layer['tileHfw']
horizontal_field_width = (current_layer['columns'] - 1) * current_layer['tileHfw'] * (
1 - current_layer['overlapHorizontal']) + current_layer['tileHfw']
vertical_field_width = (current_layer['rows'] - 1) * current_layer['tileVfw'] * (
1 - current_layer['overlapHorizontal']) + current_layer['tileVfw']
relative_0_x = current_layer['StagePosition_center_x'] - math.sin(
current_layer['rotation'] / 180 * math.pi) * vertical_field_width / 2 + math.cos(
current_layer['rotation'] / 180 * math.pi) * horizontal_field_width / 2
relative_0_y = current_layer['StagePosition_center_y'] - math.cos(
current_layer['rotation'] / 180 * math.pi) * vertical_field_width / 2 - math.sin(
current_layer['rotation'] / 180 * math.pi) * horizontal_field_width / 2
relative_0 = np.array([relative_0_x, relative_0_y])
self.layers[current_layer_key]['StagePosition_corner_x'] = relative_0[0]
self.layers[current_layer_key]['StagePosition_corner_y'] = relative_0[1]
for current_tile_name in self.tiles:
current_tile = self.tiles[current_tile_name]
if current_tile['layers'] == current_layer_key:
relative_x_stepsize = np.array([self.pixel_size * math.cos(current_layer['rotation']
/ 180 * math.pi),
self.pixel_size * math.sin(current_layer['rotation']
/ 180 * math.pi)])
relative_y_stepsize = np.array([self.pixel_size * math.sin(current_layer['rotation']
/ 180 * math.pi),
self.pixel_size * math.cos(current_layer['rotation']
/ 180 * math.pi)])
# absolute_tile_pos is the position of the corner of the tile in absolute Stage Position
# coordinates, the tile_center_stage_position are the Stage Position coordinates of the tile
absolute_tile_pos = relative_0 + current_tile['RelativeTilePosition_x'] * relative_x_stepsize \
+ current_tile['RelativeTilePosition_y'] * relative_y_stepsize
tile_center_stage_position = absolute_tile_pos + self.img_width / 2 * relative_x_stepsize \
+ self.img_height / 2 * relative_y_stepsize
self._tile_center_stage_positions.append(tile_center_stage_position)
self._tile_names.append([current_layer['layer_name'], current_tile_name])
# noinspection PyTypeChecker
def find_annotation_tile(self):
"""Find the image tile in which each annotation is
Based on the absolute stage position of the annotations and the calculated stage positions of the center of the
tiles, this function calculates the tiles within which all annotation are and saves this information to the
annotation_tiles dictionary. The keys are the names of the annotation. The values are the key to the
corresponding layer in the layer dict (layer), the path to the image as a Path variable (img_path), its
filename, the name of the layer (layer_name) and the relative position x & y of the tile within that layer
(RelativeTilePosition_x & RelativeTilePosition_y), the absolute stage position of the annotation
(Annotation_StagePosition_x and Annotation_StagePosition_y), the position of the annotation within the tile
image (in pixels, Annotation_tile_img_position_x & Annotation_tile_img_position_y).
The surrounding_tile_names and surrounding_tile_exists are added in determine_surrounding_tiles
"""
# If the min distance is smaller than the diagonal distance to the edge of the image from its center,
# we have found the closest tile and the annotation is within that tile.
# If the distance is larger than the diagonal distance to the edge, the annotation is not inside of any tile
# and thus shouldn't be exported
log_file_path = str(self.project_folder_path / (self.project_folder_path.name + '.log'))
logger = self.create_logger(log_file_path)
distance_threshold = np.square(self.img_height / 2 * self.pixel_size) \
+ np.square(self.img_width / 2 * self.pixel_size)
for annotation_name in self.annotations:
a_coordinates = np.array([self.annotations[annotation_name]['StagePosition_x'],
self.annotations[annotation_name]['StagePosition_y']])
distance_map = np.square(np.array(self._tile_center_stage_positions) - a_coordinates)
quadratic_distance = distance_map[:, 0] + distance_map[:, 1]
tile_index = np.argmin(quadratic_distance)
current_tile = self.tiles[self._tile_names[tile_index][1]]
if quadratic_distance[tile_index] < distance_threshold:
self.annotation_tiles[annotation_name] = copy.deepcopy(current_tile)
self.annotation_tiles[annotation_name]['pixel_size'] = self.pixel_size
self.annotation_tiles[annotation_name]['Annotation_StagePosition_x'] = \
self.annotations[annotation_name]['StagePosition_x']
self.annotation_tiles[annotation_name]['Annotation_StagePosition_y'] = \
self.annotations[annotation_name]['StagePosition_y']
# Calculate the position of the fork within the image
distance_to_center = (np.array(self._tile_center_stage_positions[tile_index]) - a_coordinates)
# Calculation of annotation position is complicated, because of image rotation.
rotation = self.layers[self.annotation_tiles[annotation_name]['layers']]['rotation']
relative_x_stepsize = np.array([self.pixel_size * math.cos(rotation / 180 * math.pi),
self.pixel_size * math.sin(rotation / 180 * math.pi)])
relative_y_stepsize = np.array([self.pixel_size * math.sin(rotation / 180 * math.pi),
self.pixel_size * math.cos(rotation / 180 * math.pi)])
# Calculation based on the solution for the linear algebra problem Ax=b solved with Wolfram Alpha for x,
# A being the relative step_sizes, x the x & y shifts & b being the distance to center.
# This solution works for relative_y_stepsize[1] * relative_x_stepsize[0] !=
# relative_x_stepsize[1] * relative_y_stepsize[0] and relative_y_stepsize[1] != 0
# This generally seems to hold up for all rotations I have tried
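                # Written out, the calculation below is Cramer's rule for the 2x2 system
                #   [relative_x_stepsize relative_y_stepsize] @ [x_shift, y_shift]^T = distance_to_center
                # with the step-size vectors as columns; this note only restates the code that follows,
                # it is not an independent derivation.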
try:
x_shift = (relative_y_stepsize[1] * distance_to_center[0] - distance_to_center[1] *
relative_y_stepsize[0]) / (relative_y_stepsize[1] * relative_x_stepsize[0] -
relative_x_stepsize[1] * relative_y_stepsize[0])
y_shift = (relative_x_stepsize[1] * distance_to_center[0] - distance_to_center[1] *
relative_x_stepsize[0]) / (relative_x_stepsize[1] * relative_y_stepsize[0] -
relative_y_stepsize[1] * relative_x_stepsize[0])
except ZeroDivisionError:
logger.warning('Formula for the calculation of the annotation position within the image '
'does not work for these parameters, a rotation of {} leads to divison by 0. The '
'annotation marker is placed in the middle of the image because the location '
'could not be calculated'.format(rotation))
x_shift = 0
y_shift = 0
annotation_img_position = [int(round(self.img_height / 2 - x_shift)),
int(round(self.img_width / 2 - y_shift))]
self.annotation_tiles[annotation_name]['Annotation_tile_img_position_x'] = annotation_img_position[0]
self.annotation_tiles[annotation_name]['Annotation_tile_img_position_y'] = annotation_img_position[1]
else:
logger.warning('Annotation {} is not within any of the tiles and will be ignored'
.format(annotation_name))
def determine_surrounding_tiles(self):
"""Checks whether each annotation tile has surrounding tiles to be stitched with it
        For each annotation tile, it checks whether the surrounding tiles in a given stitch_radius exist. It saves a
        boolean list of their existence (including its own existence) to surrounding_tile_exists and a list of the
        surrounding tile filenames to surrounding_tile_names in the annotation_tiles dictionary
"""
# Take in the center tile and determine the names & existence of stitch_radius tiles around it.
# In default stitch-radius = 1, it searches for the 8 tiles surrounding the center tile
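        # Hypothetical example of the naming pattern assumed below: for a center tile named
        # 'Tile_003-005-000000_0-000.tif' and stitch_radius=1, the loops build the 9 names
        # 'Tile_002-004-...', 'Tile_002-005-...', ..., 'Tile_004-006-...' (including the center
        # tile itself) and check each one for existence on disk.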
for annotation_name in self.annotation_tiles:
center_filename = self.annotation_tiles[annotation_name]['filename']
self.annotation_tiles[annotation_name]['surrounding_tile_names'] = []
self.annotation_tiles[annotation_name]['surrounding_tile_exists'] = []
x = int(center_filename[5:8])
y = int(center_filename[9:12])
for i in range(-self.stitch_radius, self.stitch_radius + 1):
for j in range(-self.stitch_radius, self.stitch_radius + 1):
# Create the filenames
new_filename = center_filename[:5] + f'{x + i:03}' + '-' + f'{y + j:03}' + center_filename[12:]
self.annotation_tiles[annotation_name]['surrounding_tile_names'].append(new_filename)
# Check whether those files exist
img_path = self.annotation_tiles[annotation_name]['img_path'] / new_filename
if img_path.is_file():
self.annotation_tiles[annotation_name]['surrounding_tile_exists'].append(True)
else:
self.annotation_tiles[annotation_name]['surrounding_tile_exists'].append(False)
@staticmethod
def save_annotation_tiles_to_csv(annotation_tiles, base_header, csv_path, batch_size=0):
"""Saves the information about all annotations to a csv file
Goes through the annotation_tiles dictionary and saves it to a csv file. Overwrites any existing file in
the same location. Can write everything into one csv file (default) or into multiple batches of a given size
Args:
annotation_tiles (dict): annotation tiles dictionary with a structure like self.annotation_tiles
base_header (list): list of strings that will be headers but will not contain any content
csv_path (Path): pathlib Path to the csv file that will be created. Must end in .csv
batch_size (int): The size of each batch. Defaults to 0. If it's 0, everything is saved into one csv file.
Otherwise, the dataframe is divided into batches of batch_size and saved to separate csv files
Returns:
list: list of all the paths to the saved csv files
"""
assert (str(csv_path).endswith('.csv'))
csv_files = []
# Initialize empty csv file (or csv files if batch mode is used)
base_header_2 = ['Image'] + base_header
# If there are annotations, save them. Otherwise, log a warning.
if annotation_tiles:
header_addition = list(list(annotation_tiles.values())[0].keys())
if batch_size == 0:
csv_header = pd.DataFrame(columns=base_header_2 + header_addition)
csv_header.to_csv(str(csv_path), index=False)
csv_files.append(str(csv_path))
else:
nb_batches = int(math.ceil(len(annotation_tiles.keys()) / batch_size))
for j in range(nb_batches):
csv_batch_path = str(csv_path)[:-4] + '_{}.csv'.format(f'{j:05}')
                    csv_header = pd.DataFrame(columns=base_header_2 + header_addition)
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from decimal import Decimal
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.testing.utils import ReusedSQLTestCase
class ReshapeTest(ReusedSQLTestCase):
def test_get_dummies(self):
for data in [pd.Series([1, 1, 1, 2, 2, 1, 3, 4]),
# pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category'),
# pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4], categories=[4, 3, 2, 1])),
pd.DataFrame({'a': [1, 2, 3, 4, 4, 3, 2, 1],
# 'b': pd.Categorical(list('abcdabcd')),
'b': list('abcdabcd')})]:
exp = pd.get_dummies(data)
ddata = koalas.from_pandas(data)
res = koalas.get_dummies(ddata)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_object(self):
df = pd.DataFrame({'a': [1, 2, 3, 4, 4, 3, 2, 1],
# 'a': pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),
'b': list('abcdabcd'),
# 'c': pd.Categorical(list('abcdabcd')),
'c': list('abcdabcd')})
ddf = koalas.from_pandas(df)
# Explicitly exclude object columns
exp = pd.get_dummies(df, columns=['a', 'c'])
res = koalas.get_dummies(ddf, columns=['a', 'c'])
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.b)
res = koalas.get_dummies(ddf.b)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df, columns=['b'])
res = koalas.get_dummies(ddf, columns=['b'])
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_date_datetime(self):
df = pd.DataFrame({'d': [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
datetime.date(2019, 1, 1)],
'dt': [datetime.datetime(2019, 1, 1, 0, 0, 0),
datetime.datetime(2019, 1, 1, 0, 0, 1),
datetime.datetime(2019, 1, 1, 0, 0, 0)]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.d)
res = koalas.get_dummies(ddf.d)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.dt)
res = koalas.get_dummies(ddf.dt)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_boolean(self):
df = pd.DataFrame({'b': [True, False, True]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
        exp = pd.get_dummies(df.b)
# Author: <NAME> <<EMAIL>>
from time import time
from datetime import datetime
import os, sys
import numpy as np
from scipy.stats.mstats import gmean
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as hc
import pandas as pd
import pickle
import gensim
import spacy
import scispacy
from sklearn import linear_model
from sklearn.manifold import TSNE
import glob
import re
#plotting tools
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.ticker as ticker
from matplotlib import transforms
from mpl_toolkits.mplot3d import Axes3D
from wordcloud import WordCloud
from cycler import cycler
import seaborn as sns
"""
The :mod:`model_utilities` module contains many functions that are useful for processing and graphing the topic modeling results.
It also includes the SilentPrinting and Timing classes, which are used with the 'with' statement. SilentPrinting stops printing to
the terminal, but is unable to silence MALLET.
"""
def plot_model_comparison(paths, x_column, y_columns, x_label, y_label, graph_title, show=True, fig_save_path=None, csv_save_path=None):
    # Use this to combine multiple CompareModels csv outputs into one file with the mean and standard deviation
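    # Hypothetical usage sketch (file names and column labels are assumptions):
    #   plot_model_comparison(
    #       paths=['compare_models_run1.csv', 'compare_models_run2.csv'],
    #       x_column='Number of Topics', y_columns=['Coherence'],
    #       x_label='Number of Topics', y_label='Coherence score',
    #       graph_title='Model comparison', show=False,
    #       fig_save_path='comparison.png', csv_save_path='comparison_mean_sd.csv')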
# Main variables
data_dict = {}
mean_sd_dict = {}
x_data = None
# Setup data_dict y_column keys
for column in y_columns:
data_dict[column] = {}
# Read each file in paths
for path in paths:
df = pd.read_csv(path)
# Setup data_dict x keys and values if not yet done
if x_data is None:
x_data = df[x_column].tolist()
for column in y_columns:
for x in x_data:
data_dict[column][x] = []
# Add file's data to list in data_dict
for column in y_columns:
data = df[column].tolist()
for x in x_data:
data_dict[column][x].append(data.pop(0))
# Calculate mean and Standard deviation for each y value
for y_column in data_dict:
mean_sd_dict[y_column] = {'X':[], 'MEAN':[], 'STDV':[]}
for x in data_dict[y_column]:
mean_sd_dict[y_column]['X'].append(x)
mean_sd_dict[y_column]['MEAN'].append(np.mean(data_dict[y_column][x]))
mean_sd_dict[y_column]['STDV'].append(np.std(data_dict[y_column][x]))
# Plot graph of x VS y with standard deviation for error bars
plt.figure(figsize=(12, 8))
for y_column in mean_sd_dict:
plt.errorbar(mean_sd_dict[y_column]['X'], mean_sd_dict[y_column]['MEAN'],
yerr=mean_sd_dict[y_column]['STDV'], label=y_column,
marker='o', markersize=5, capsize=5, markeredgewidth=1)
plt.title(graph_title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(title='Models', loc='best')
# Saving figure if fig_save_path is entered
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
# Saving a CSV file of the means and standard deviations if csv_save_path is entered
if csv_save_path is not None:
dataframe_dict= {}
for y_column in y_columns:
dataframe_dict[x_column] = mean_sd_dict[y_column]['X']
dataframe_dict[" ".join([y_column, "MEAN"])] = mean_sd_dict[y_column]['MEAN']
dataframe_dict[" ".join([y_column, "STDV"])] = mean_sd_dict[y_column]['STDV']
data = pd.DataFrame.from_dict(dataframe_dict)
data.to_csv(csv_save_path, index=False)
if show:
plt.show()
plt.close() # Closes and deletes graph to free up memory
def dominant_doc_topic_df(model, nlp_data, num_keywords=10):
topics_df = pd.DataFrame()
for i, row_list in enumerate(model[nlp_data.gensim_lda_input()]):
row = row_list[0] if model.per_word_topics else row_list
row = sorted(row, key=lambda x:(x[1]), reverse=True)
for j, (topic_num, prop_topic) in enumerate(row):
if j==0:
wp = model.show_topic(topic_num, topn=num_keywords)
topic_keywords = ", ".join([word for word, prop in wp])
topics_df = topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
topics_df.columns = ["Dominant Topic", "Contribution", "Topic Keywords"]
contents = pd.Series(nlp_data.get_token_text())
topics_df = pd.concat([topics_df, contents], axis=1)
topics_df = topics_df.reset_index()
topics_df.columns = ["Document", "Dominant Topic", "Contribution", "Topic Keywords", "Document Tokens"]
topics_df["Document"] += 1
topics_df["Dominant Topic"] = 1 + topics_df["Dominant Topic"].astype(int)
return topics_df
def best_doc_for_topic(dom_top_df):
sorted_df = pd.DataFrame()
dom_top_df_grouped = dom_top_df.groupby('Dominant Topic')
for i, grp in dom_top_df_grouped:
sorted_df = pd.concat([sorted_df, grp.sort_values(['Contribution'], ascending=False).head(1)], axis=0)
sorted_df.reset_index(drop=True, inplace=True)
sorted_df.columns = ["Best Document", "Topic Number", "Contribution", "Topic Keywords", "Document Tokens"]
sorted_df = sorted_df[["Topic Number", "Contribution", "Topic Keywords", "Best Document", "Document Tokens"]]
return sorted_df
def plot_doc_token_counts_old(dom_top_df=None, nlp_data=None, show=True, fig_save_path=None, bins=None):
if dom_top_df is not None:
doc_lens = [len(doc) for doc in dom_top_df["Document Tokens"]]
if nlp_data is not None:
doc_lens = np.array(nlp_data.sklearn_lda_input().sum(axis=1)).flatten()
fig = plt.figure(figsize=(12,7), dpi=160)
plt.hist(doc_lens, bins = 500, color='navy')
# Prints texts on the graph at x=400
x = 400
plt.text(x, 120, "Documents")
text = plt.text(x, 110, "Total Tokens")
plt.text(x, 100, "Mean")
plt.text(x, 90, "Median")
plt.text(x, 80, "Stdev")
plt.text(x, 70, "1%ile")
plt.text(x, 60, "99%ile")
#This is for offsetting the data so it will appear even
canvas = fig.canvas
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
# This prints the statistics
plt.text(x, 120, " : " + str(len(doc_lens)), transform=t)
plt.text(x, 110, " : " + str(np.sum(doc_lens)), transform=t)
plt.text(x, 100, " : " + str(round(np.mean(doc_lens), 1)), transform=t)
plt.text(x, 90, " : " + str(round(np.median(doc_lens), 1)), transform=t)
plt.text(x, 80, " : " + str(round(np.std(doc_lens),1)), transform=t)
plt.text(x, 70, " : " + str(np.quantile(doc_lens, q=0.01)), transform=t)
plt.text(x, 60, " : " + str(np.quantile(doc_lens, q=0.99)), transform=t)
plt.gca().set(xlim=(0, 500), ylabel='Number of Documents', xlabel='Document Token Count')
plt.tick_params(size=16)
plt.xticks(np.linspace(0,500,11))
plt.title('Distribution of Document Token Counts', fontdict=dict(size=22))
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_doc_token_counts(dom_top_df=None, nlp_data=None, show=True, fig_save_path=None, bins=None):
if dom_top_df is not None:
doc_lens = [len(doc) for doc in dom_top_df["Document Tokens"]]
if nlp_data is not None:
doc_lens = np.array(nlp_data.sklearn_lda_input().sum(axis=1)).flatten()
if bins is None:
bins = 50 * math.ceil(max(doc_lens)/50)
if max(doc_lens) - np.quantile(doc_lens, q=0.99) < bins * 0.2:
bins += 50 * math.ceil((bins*0.25)/50)
bin_list = [i+1 for i in range(bins)]
fig = plt.figure(figsize=(12,7), dpi=160)
plt.hist(doc_lens, bins = bin_list, color='navy', rwidth=None)
# Prints texts on the graph at position x
x = 0.79
t = fig.transFigure
plt.text(x, 0.88, "Documents", transform=t)
text = plt.text(x, 0.85, "Total Tokens", transform=t)
plt.text(x, 0.82, "Mean", transform=t)
plt.text(x, 0.79, "Median", transform=t)
plt.text(x, 0.76, "Stdev", transform=t)
plt.text(x, 0.73, "1%ile", transform=t)
plt.text(x, 0.70, "99%ile", transform=t)
#This is for offsetting the data so it will appear even
canvas = fig.canvas
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
# This prints the statistics
plt.text(x, 0.88, " : " + str(len(doc_lens)), transform=t)
plt.text(x, 0.85, " : " + str(np.sum(doc_lens)), transform=t)
plt.text(x, 0.82, " : " + str(round(np.mean(doc_lens), 1)), transform=t)
plt.text(x, 0.79, " : " + str(round(np.median(doc_lens), 1)), transform=t)
plt.text(x, 0.76, " : " + str(round(np.std(doc_lens),1)), transform=t)
plt.text(x, 0.73, " : " + str(np.quantile(doc_lens, q=0.01)), transform=t)
plt.text(x, 0.70, " : " + str(np.quantile(doc_lens, q=0.99)), transform=t)
plt.gca().set(xlim=(0, bins), ylabel='Number of Documents', xlabel='Document Token Count')
plt.tick_params(size=16)
#plt.xticks(np.linspace(0,500,11))
plt.title('Distribution of Document Token Counts', fontdict=dict(size=22))
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def create_wordcloud(topic, model, nlp_data, seed=100, num_w=20, fig_dpi=400, topic_names=None,
show=True, fig_save_path=None, colormap='tab10', horizontal_pref=0.8):
cloud = WordCloud(background_color='white', width=1000, height=1000, max_words=num_w, colormap=colormap,
prefer_horizontal=horizontal_pref, random_state=seed)
topics = model.show_topics(num_topics=-1, num_words=num_w, formatted=False)
cloud.generate_from_frequencies(dict(topics[topic-1][1]), max_font_size=300)
plt.figure(figsize=(2,2), dpi=fig_dpi)
plt.imshow(cloud)
    # `topic` is treated as a 1-based topic number (see the topics[topic-1] lookup above)
    if topic_names is None:
        plt.title('Topic {}'.format(topic), fontdict=dict(size=16), pad=10)
    else:
        plt.title(topic_names[topic], fontdict=dict(size=16), pad=10)
plt.axis('off')
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
    if show:  # The displayed figure looks rough, but the saved file looks good.
plt.show()
plt.close()
def create_multi_wordclouds(n_topics, n_horiz, model, nlp_data, seed=100, num_w=20, fig_dpi=400, topic_names=None, title_font=15,
show=True, fig_save_path=None, colormap='tab10', horizontal_pref=0.8):
if isinstance(n_topics, int):
topics_list = list(range(n_topics))
else:
topics_list = [i-1 for i in n_topics]
n_topics = len(topics_list)
cloud = WordCloud(background_color='white', width=1000, height=1000, max_words=num_w, colormap=colormap,
prefer_horizontal=horizontal_pref, random_state=seed)
topics = model.show_topics(num_topics=-1, num_words=num_w, formatted=False)
x_len = n_horiz
y_len = math.ceil(n_topics/n_horiz)
fig, axes = plt.subplots(y_len, x_len, figsize=(2*x_len,2*y_len), dpi=fig_dpi,
sharex=True, sharey=True, squeeze=False, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
if i < n_topics:
fig.add_subplot(ax)
topic_words = dict(topics[topics_list[i]][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
if topic_names is None:
plt.gca().set_title('Topic {}'.format(topics_list[i]+1), fontdict=dict(size=title_font), pad=10)
else:
plt.gca().set_title(topic_names[topics_list[i]+1], fontdict=dict(size=title_font), pad=10)
plt.gca().axis('off')
else:
fig.add_subplot(ax)
plt.gca().axis('off')
#plt.suptitle('Topic Wordclouds', fontdict=dict(size=16))
plt.axis('off')
plt.margins(x=0, y=0)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def color_doc_topics(model, doc, nlp_data, max_chars=120, dpi=150, show=True, fig_save_path=None, topics=5, min_phi=None,
topic_names=None, incl_perc=False, highlight=False, highlight_topic_names=False):
# The output file looks better than show
colors = [color for name, color in mcolors.TABLEAU_COLORS.items()]
if topics > 10: # There are only 10 colors so the max is 10. Change above to add more colors for more topics
topics = 10
    # This is for the lemmatization step
doc_prep = gensim.utils.simple_preprocess(str(doc), deacc=True, min_len=2, max_len=30)
    # This is for processing the string while retaining the original characters, since simple_preprocess removes punctuation and accents.
    # It splits the string by ' ', individually processes the chunks into tokens, and finds their locations in the string.
    # Finally a list is made with strings that directly translate to tokens, preserving the non-token strings.
doc_raw_split = str(doc).split()
doc_raw_word_list = []
raw_token_dict = {}
for string_piece in doc_raw_split:
tokens = gensim.utils.simple_preprocess(str(string_piece), deacc=True, min_len=1, max_len=30)
working_string = gensim.utils.deaccent(string_piece.lower())
output_string = string_piece
for token in tokens:
if token in working_string:
start_index = working_string.find(token)
end_index = start_index + len(token)
front_part = output_string[:start_index]
token_part = output_string[start_index:end_index]
output_string = output_string[end_index:]
working_string = working_string[end_index:]
if len(front_part) > 0:
doc_raw_word_list.append(front_part)
raw_token_dict[front_part] = False
doc_raw_word_list.append(token_part)
raw_token_dict[token_part] = token
if len(output_string) > 0: # This saves strings that do not become tokens, False prevents them from being in the wordset
doc_raw_word_list.append(output_string)
raw_token_dict[output_string] = False
# This is for finding all index locations of the tokens within the original raw string list
wordset = set([raw_token_dict[word] for word in raw_token_dict.keys() if raw_token_dict[word]])
doc_index_dict = {}
for word in wordset:
word_indexes = [i for i, w in enumerate(doc_raw_word_list) if raw_token_dict[w] == word]
doc_index_dict[word] = word_indexes
token_index_dict = {}
token_list = []
    # This is for lemmatization of the text and linking each lemma to its original token index locations
nlp = spacy.load(nlp_data.spacy_lib, disable=['parser','ner'])
allowed_postags = ['NOUN', 'ADJ', 'VERB','ADV']
for word in doc_prep:
if word not in nlp_data.stopwords:
token = nlp(word)[0]
if token.pos_ in allowed_postags and token.lemma_ not in ['-PRON-']:
token_list.append(token.lemma_)
if token.lemma_ in token_index_dict:
token_index_dict[token.lemma_] = list(set(token_index_dict[token.lemma_] + doc_index_dict[word]))
else:
token_index_dict[token.lemma_] = doc_index_dict[word]
for token in token_index_dict:
token_index_dict[token] = sorted(set(token_index_dict[token]))
# This processes the n-grams based on the model's n-gram settings and combines index locations for the n-gram
processed_tokens = nlp_data.process_ngrams_([token_list])[0]
final_token_dict = {}
for token in processed_tokens:
if token not in final_token_dict:
final_token_dict[token] = []
split_tokens = token.split('_')
for split_token in split_tokens:
final_token_dict[token].append(token_index_dict[split_token].pop(0))
# This is where the text is processed by the model and the top n models are saved
topic_perc, wordid_topics, wordid_phivalues = model.get_document_topics(
nlp_data.gensim_lda_input([" ".join(processed_tokens)])[0], per_word_topics=True,
minimum_probability=0.001, minimum_phi_value=min_phi)
topic_perc_sorted = sorted(topic_perc, key=lambda x:(x[1]), reverse=True)
top_topics = [topic[0] for i, topic in enumerate(topic_perc_sorted) if i < topics]
top_topics_color = {top_topics[i]:i for i in range(len(top_topics))}
word_dom_topic = {}
# This links the individual word lemmas to its best topic within available topics
for wd, wd_topics in wordid_topics:
for topic in wd_topics:
if topic in top_topics:
word_dom_topic[model.id2word[wd]] = topic
break
# Links the index location to a color
index_color_dict = {}
for token in final_token_dict:
if token in word_dom_topic:
for i in final_token_dict[token]:
index_color_dict[i] = top_topics_color[word_dom_topic[token]]
# this is for assembling the individual lines of the graph based on character length and position of punctuation
add_lines = math.ceil(len(top_topics_color)/5)
last_index = len(doc_raw_word_list) - 1
line_len = 0
line_num = 0
doc_raw_lines = [[]]
no_space_list = [".", ",", ")", ":", "'"]
for i, word in enumerate(doc_raw_word_list):
word_len = len(word)
if line_len + word_len < max_chars or (word in no_space_list and line_len <= max_chars):
if word == '(':
if i != last_index:
if (line_len + word_len + len(doc_raw_word_list[i+1]) + 1 >= max_chars
and doc_raw_word_list[i+1] not in no_space_list):
line_num += 1
line_len = 0
doc_raw_lines.append([])
else:
line_num += 1
line_len = 0
doc_raw_lines.append([])
line_len += word_len + 1
doc_raw_lines[line_num].append(i)
line_num += 1
# This creates the figure and subplots
lines = line_num + add_lines
fig, axes = plt.subplots(lines + 1, 1, figsize=(math.ceil(max_chars/8), math.ceil(lines/2)), dpi=dpi,
squeeze=True, constrained_layout=True)
axes[0].axis('off')
plt.axis('off')
indent = 0
# This is the loop for drawing the text
for i, ax in enumerate(axes):
t = ax.transData
canvas = ax.figure.canvas
if i > add_lines:
x = 0.06
line = i - add_lines - 1
for index in doc_raw_lines[line]:
word = doc_raw_word_list[index]
if word[-1] == "(":
pass
elif index != last_index:
if doc_raw_word_list[index+1][0] not in no_space_list:
word = word + " "
if index in index_color_dict:
color = colors[index_color_dict[index]]
else:
color = 'black'
if highlight:
bbox=dict(facecolor=color, edgecolor=[0,0,0,0], pad=0, boxstyle='round')
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color='black',
transform=t, fontweight=700)
if color != 'black':
text.set_bbox(bbox)
else:
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
elif i < add_lines:
x = 0.06
if i == 0:
word = "Topics: "
color = 'black'
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
indent = ex.width
else:
color = 'black'
text = ax.text(x, 0.5, "", horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=indent, units='dots')
for num, index in enumerate(range(i*5, len(top_topics))):
if num < 5:
if topic_names is None:
word = "Topic {}, ".format(top_topics[index]+1)
else:
word = topic_names[top_topics[index]+1] + ", "
if incl_perc:
topic_perc_dict = dict(topic_perc_sorted)
word = "{:.1f}% ".format(topic_perc_dict[top_topics[index]]*100) + word
color = colors[top_topics_color[top_topics[index]]]
if highlight_topic_names:
bbox=dict(facecolor=color, edgecolor=[0,0,0,0], pad=0, boxstyle='round')
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color='black',
transform=t, fontweight=700)
if color != 'black':
text.set_bbox(bbox)
else:
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
else:
ax.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.suptitle('Document Colored by Top {} Topics'.format(topics),
fontsize=22, y=0.95, fontweight=700)
    # This saves and/or shows the plot. Note: the saved file looks better than the drawn plot
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def docs_per_topic(model, nlp_data=None, doc_list=None, corpus=None):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
num_topics = model.num_topics
dominant_topics = []
topic_percantages = []
for i, corp in enumerate(corpus):
topic_perc, wordid_topics, wordidphvalues = model.get_document_topics(
corp, per_word_topics=True)
dominant_topic = sorted(topic_perc, key = lambda x: x[1], reverse=True)[0][0]
dominant_topics.append((i, dominant_topic))
topic_percantages.append(topic_perc)
df = pd.DataFrame(dominant_topics, columns=['Document', 'Dominant Topic'])
docs_by_dom_topic = df.groupby('Dominant Topic').size()
df_docs_by_dom_topic = docs_by_dom_topic.to_frame().reset_index()
df_docs_by_dom_topic.columns = ['Dominant Topic', 'Document Count']
present_topics = df_docs_by_dom_topic['Dominant Topic'].tolist()
absent_topics = [i for i in range(num_topics) if i not in present_topics]
add_rows = {'Dominant Topic':absent_topics, 'Document Count':[]}
for t in absent_topics:
add_rows['Document Count'].append(0)
if len(absent_topics) > 0:
df_add_rows = pd.DataFrame(add_rows)
df_docs_by_dom_topic = df_docs_by_dom_topic.append(df_add_rows, ignore_index=True)
df_docs_by_dom_topic.sort_values('Dominant Topic', inplace=True)
df_docs_by_dom_topic['Dominant Topic'] += 1
topic_weight_doc = pd.DataFrame([dict(t) for t in topic_percantages])
df_topic_weight_doc = topic_weight_doc.sum().to_frame().reset_index()
df_topic_weight_doc.columns = ['Topic', 'Document Weight']
present_topics = df_topic_weight_doc['Topic'].tolist()
absent_topics = [i for i in range(num_topics) if i not in present_topics]
add_rows = {'Topic':absent_topics, 'Document Weight':[]}
for t in absent_topics:
add_rows['Document Weight'].append(0.0)
if len(absent_topics) > 0:
df_add_rows = pd.DataFrame(add_rows)
df_topic_weight_doc = df_topic_weight_doc.append(df_add_rows, ignore_index=True)
df_topic_weight_doc['Topic'] += 1
df_topic_weight_doc.sort_values('Topic', inplace=True)
df_topic_weight_doc.reset_index(drop=True, inplace=True)
return df_docs_by_dom_topic, df_topic_weight_doc
def doc_topics_per_time(model, nlp_data, year_res=5, df=None, data_column=None, year_column=None, year_list=None,
year_start=None, year_end=None):
if df is not None:
data = nlp_data.process_new_corpus(df[data_column].tolist())['gensim']
year_list = df[year_column]
elif year_list is not None:
data = nlp_data.gensim_lda_input()
else:
print("No year/data given")
return None
grouped_df = pd.DataFrame(list(zip(data, year_list)), columns=['data', 'year']).groupby('year')
year_doc_dict = {}
for year, group in grouped_df:
if year_start is None:
year_doc_dict[int(year)] = group['data'].tolist()
elif year >= year_start:
year_doc_dict[int(year)] = group['data'].tolist()
years = sorted(year_doc_dict.keys())
final_year_doc_dict = {}
if year_start is None:
year_start = years[0]
if year_end is None:
year_end = years[-1]
all_years = list(range(year_start, year_end+1))
for year in all_years:
if year not in years:
final_year_doc_dict[year] = []
else:
final_year_doc_dict[year] = year_doc_dict[year]
years = sorted(final_year_doc_dict.keys())
intervals = {}
year_range = []
years_label = None
num_years = len(years)
num_intervals = math.ceil(num_years / year_res)
print("Number of years: {} \nNumber of intervals: {}".format(num_years, num_intervals))
n = year_res
for i in range(num_intervals):
index = i*n
year_range = [years[index] + num for num in range(year_res)]
if index + year_res <= num_years:
years_label = str(years[index]) + " to " + str(years[index + n - 1])
else:
years_label = str(years[index]) + " to " + str(years[-1])
intervals[years_label] = []
for year in year_range:
if year in years:
intervals[years_label].extend(final_year_doc_dict[year])
master_dict_tn = {}
master_dict_tw = {}
for key in intervals:
print("Processing {} docs from {}...".format(len(intervals[key]), key))
df_topic_num, df_topic_weights = docs_per_topic(model, corpus=intervals[key])
master_dict_tn['Topic'] = df_topic_num['Dominant Topic'].tolist()
master_dict_tn[key] = df_topic_num['Document Count'].tolist()
master_dict_tw['Topic'] = df_topic_weights['Topic'].tolist()
master_dict_tw[key] = df_topic_weights['Document Weight'].tolist()
df_doc_counts_by_year = pd.DataFrame(master_dict_tn)
df_doc_weights_by_year = pd.DataFrame(master_dict_tw)
return df_doc_counts_by_year, df_doc_weights_by_year
def plot_doc_topics_per_time(df_data, n_topics, n_horiz=5, fig_dpi=150, ylabel=None, xlabel=None, topic_names=None, show=True,
fig_save_path=None, relative_val=True, x_val=None, xtick_space=None, xmintick_space=None, hide_x_val=True,
df_data2=None, relative_val2=True, ylabel2=None, colors=['tab:blue', 'tab:orange'], linear_reg=False):
    # df_data needs to be one of the dataframes output by doc_topics_per_time, or a dataframe whose first column holds the topic numbers and is labeled 'Topic'
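    # Hypothetical usage sketch (assumes the count/weight dataframes returned by doc_topics_per_time):
    #   df_counts, df_weights = doc_topics_per_time(model, nlp_data, year_res=5, year_list=years)
    #   df_lr = plot_doc_topics_per_time(df_counts, n_topics=model.num_topics,
    #                                    ylabel='Proportion of documents', xlabel='Interval',
    #                                    linear_reg=True)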
columns = list(df_data.columns)[1:]
column_totals = df_data.loc[:,columns[0]:].sum(axis=0)
column_totals_list = list(column_totals)
graphs = {}
graphs2 = {}
if isinstance(n_topics, int):
topics_list = list(range(1, n_topics + 1))
else:
        topics_list = sorted(n_topics)
for topic in topics_list:
data = df_data.loc[df_data['Topic'] == topic, columns[0]:]
data2 = None
plot2 = False
if relative_val:
data = data / column_totals_list
data.fillna(0, inplace=True)
graphs[topic] = data.values.flatten().tolist()
else:
graphs[topic] = data.values.flatten().tolist()
if df_data2 is not None:
data2 = df_data2.loc[df_data2['Topic'] == topic, columns[0]:]
plot2 = True
if relative_val2:
data2 = data2 / column_totals_list
graphs2[topic] = data2.values.flatten().tolist()
else:
graphs2[topic] = data2.values.flatten().tolist()
# Plotting
x_len = n_horiz
y_len = math.ceil(len(topics_list)/n_horiz)
if x_val is None:
x_val = list(range(1, len(columns)+1))
diff_axis = False
if not relative_val == relative_val2:
diff_axis = True
ax2_list = []
fig, axes = plt.subplots(y_len, x_len, figsize=(2*x_len, 1.5*y_len), dpi=fig_dpi,
sharex=True, sharey=True, squeeze=False, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
if i < n_topics:
ax.plot(x_val, graphs[topics_list[i]], color=colors[0])
if plot2 and diff_axis:
ax2 = ax.twinx()
ax2_list.append(ax2)
ax2_list[0].get_shared_y_axes().join(*ax2_list)
ax2.plot(x_val, graphs2[topics_list[i]], color=colors[1])
if (i + 1) % x_len > 0 and (i + 1) != len(topics_list):
ax2.set_yticklabels([])
elif plot2:
ax.plot(x_val, graphs2[topics_list[i]], color=colors[1])
if topic_names is not None:
ax.title.set_text(topic_names[i+1])
else:
ax.title.set_text('Topic {}'.format(topics_list[i]))
if xtick_space is not None: ax.xaxis.set_major_locator(ticker.MultipleLocator(xtick_space))
if xmintick_space is not None: ax.xaxis.set_minor_locator(ticker.MultipleLocator(xmintick_space))
if hide_x_val:ax.set_xticklabels([])
for label in ax.get_xticklabels():
label.set_rotation(45)
label.set_ha('right')
else:
ax.axis('off')
if plot2 and diff_axis and False:
print(len(ax2_list))
ax2_list[0].get_shared_y_axes().join(*ax2_list)
#plt.tight_layout()
if xlabel is not None:
fig.text(0.5, 0, xlabel, ha='center', va='top', fontsize=14)
if ylabel is not None:
fig.text(0, 0.5, ylabel, ha='right', va='center', fontsize=14, rotation=90)
if ylabel2 is not None and plot2 and diff_axis:
fig.text(1, 0.5, ylabel2, ha='left', va='center', fontsize=14, rotation=90)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
if linear_reg:
x = np.array(range(len(columns))).reshape(-1, 1)
lr_dict = {
'Topic':[],
'Coefficient':[],
'R^2':[]
}
for topic in graphs:
lin_reg_mod = linear_model.LinearRegression()
lin_reg_mod.fit(x, graphs[topic])
if topic_names is not None:
lr_dict['Topic'].append(topic_names[topic])
else:
lr_dict['Topic'].append(topic)
lr_dict['Coefficient'].append(lin_reg_mod.coef_[0])
lr_dict['R^2'].append(lin_reg_mod.score(x, graphs[topic]))
df_lr = pd.DataFrame(lr_dict)
return df_lr
def graph(x, y, title=None, x_label=None, y_label=None, show=False, fig_save_path=None):
plt.figure(figsize=(4,3), dpi=300)
plt.plot(x, y, marker='.')
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xticks(rotation=30, ha='right')
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def graph_multi(x_list, y_list, label_list, legend=None, legend_params={'loc':'best'}, title=None, x_label=None, y_label=None, show=False, fig_save_path=None):
plt.figure(figsize=(4,3), dpi=300)
for i, label in enumerate(label_list):
plt.plot(x_list[i], y_list[i], label=label, marker='.')
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(title=legend, **legend_params)
plt.xticks(rotation=30, ha='right')
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_tsne_doc_cluster3d(model, nlp_data, doc_list=None, corpus=None, min_tw=None, marker_size=1, seed=2020,
show_topics=False, custom_titles=None, show=True, fig_save_path=None):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
n_topics = model.num_topics
topic_weights= {}
for i in range(n_topics):
topic_weights[i] = []
for i, row_list in enumerate(model.get_document_topics(corpus)):
temp_dict = {t:w for t, w in row_list}
for topic in range(n_topics):
if topic in temp_dict:
topic_weights[topic].append(temp_dict[topic])
else:
topic_weights[topic].append(0)
    arr = pd.DataFrame(topic_weights)
from pycorda import Node
from datetime import datetime
import matplotlib
from matplotlib import pyplot
import pandas as pd
import chart_studio, chart_studio.plotly as py, plotly.graph_objs as go
from sklearn import linear_model as lm
# Format for timestamp string is YYYY-MM-DD HH:MM:SS.FFF
def plot_time_series(timestamp_column, title=None):
"""Plots time series for a given sequence of timestamps
Parameters
----------
timestamp_column : iterable object
iterable of timestamp strings in the %Y-%m-%d %H:%M:%S.%f format
title : str, optional
figure title
"""
dt_list = [datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f') for timestamp in timestamp_column]
dates = matplotlib.dates.date2num(dt_list)
fig, ax = pyplot.subplots()
if title is not None:
ax.set_title(title)
ax.plot_date(dates, [0]*len(dates))
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M:%S.%f')
fig.autofmt_xdate()
def plot_ids(ids, fontsize, title=None):
"""Plots IDs as labelled equally spaced points
Parameters
----------
ids : iterable object
iterable of ID strings
fontsize : int
font size of point labels
title : str, optional
figure title
"""
sorted_ids = sorted(ids)
n = len(ids)
points = range(n)
fig, ax = pyplot.subplots()
if title is not None:
ax.set_title(title)
ax.scatter(points, [0]*n)
for i, txt in enumerate(sorted_ids):
ax.annotate(txt, (points[i], 0.001), ha='center', fontsize=fontsize)
ax.set_xlim(-0.5, min(5, n))
class Plotter(object):
"""Plotter object for plotting data obtained from a database node
tbname_ts methods will plot time series for table TBNAME. After choosing which plots
to create by calling the relevant methods, use the show method to
display the plots.
"""
def __init__(self, node):
"""
Parameters
----------
node: pycorda.Node
node used to gather data for display
"""
self.node = node
def publish_timeseries_fungible_qty_plotly(self, contract, user,api_key):
chart_studio.tools.set_credentials_file(username=user,api_key=api_key)
vault_states = self.node.get_vault_states()
vault_states = vault_states[vault_states.CONTRACT_STATE_CLASS_NAME==contract]
vault_fungible_states = self.node.get_vault_fungible_states()
df = vault_states.merge(vault_fungible_states)[['RECORDED_TIMESTAMP','QUANTITY']]
        df['RECORDED_TIMESTAMP'] = pd.to_datetime(df['RECORDED_TIMESTAMP'])
"""
This script makes the figures associated with the Dune Aspect Ratio paper.
Figures 1 and 6 were made in Illustrator!
<NAME>, 5/6/2020
"""
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import statsmodels.api as sm
import datetime as dt
import pandas as pd
import numpy as np
import os
# Set general variables
stretches = [-60, -40, -20, 1, 20, 40, 60]
stretch_labels = ['1.6', '1.4', '1.2', '1.0', '0.8', '0.6', '0.4']
experiments = ['Toes Aligned', 'Crests Aligned', 'Heels Aligned', 'Fenced']
# Setup the figure style
font = {
'fontname': 'Arial',
'fontweight': 'normal',
'fontsize': 14}
figure_dpi = 300
figure_cm = 8
figure_inches = figure_cm * 0.393701
figure_size = (figure_inches, figure_inches)
line_width = 0.75
edge_color = 'black'
stretch_cmap = plt.cm.cividis_r(np.linspace(0, 1, len(stretches)))
duration_cmap = plt.cm.viridis_r(np.linspace(0, 1, len([1, 12, 18, 24, 36, 48])))
# Set paths
FIGURE_DIR = os.path.join('..', 'Figures')
DATA_DIR = os.path.join('..', 'Data')
"""
Functions for Machine Learning
"""
def run_grid_search_tree(X, y, cv=5):
"""
Run a grid search with cross-validation
and print out the results. This is for
    a Random Forest (tree-based) regression for dune volume
change based on initial beach width and
aspect ratio. Return the best model
"""
# Set a dictionary of parameters to check
params = {
'clf__max_depth': range(1, 33),
'clf__min_samples_split': np.arange(start=0.1, stop=0.6, step=0.1),
'clf__min_samples_leaf': np.arange(start=0.1, stop=0.6, step=0.1),
}
# Scale the data
pipe = Pipeline([
('scale', StandardScaler()),
('clf', RandomForestRegressor(random_state=0))])
# Setup the grid search
clf = GridSearchCV(estimator=pipe,
param_grid=params,
n_jobs=-1,
refit=True,
cv=cv,
verbose=True)
# Run the grid search
clf.fit(X, y)
return clf
def run_grid_search_svr(X, y, cv=5):
"""
Run a grid search with cross-validation
and print out the results. This is for
a support vector regression for dune volume
change based on initial beach width and
aspect ratio. Return the best model
"""
# Set a dictionary of parameters to check
params = {
'clf__C': np.logspace(-6, 3, 10),
'clf__epsilon': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
'clf__kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
'clf__degree': [1, 2, 3, 4]
}
# Scale the data
pipe = Pipeline([
('scale', StandardScaler()),
('clf', SVR(kernel='linear', C=1))])
# Setup the grid search
clf = GridSearchCV(estimator=pipe,
param_grid=params,
n_jobs=-1,
refit=True,
cv=cv,
verbose=True)
# Run the grid search
clf.fit(X, y)
return clf
"""
Functions to load and format data
"""
def load_field_data(year):
"""
Load morphometrics measured from field data
for Bogue Banks for the given year
"""
# Set a path to the data
fname = os.path.join(DATA_DIR, f'Morphometrics for Bogue Banks {year}.csv')
# Load the data into a dataframe
df = pd.read_csv(fname, delimiter=',', header=0)
# Add a column for the dune shape
df['Ratio'] = (df['yCrest'] - df['yToe']) / (df['xToe'] - df['xHeel'])
return df
def load_volume_loss(experiment, backup=False):
"""
Calculate the volume loss for XBeach runs as a magnitude and proportion
"""
# Set the stretches
dune_stretches = [-60, -40, -20, 1, 20, 40, 60]
storm_stretches = [1, 12, 18, 24, 36, 48]
# Set empty lists
d_volume, d_volume_proportion, beach_width, beach_slope, o_volume = [], [], [], [], []
# Set an empty dict
volume_time = dict()
# Loop through the stretches
for dune in dune_stretches:
for weather in storm_stretches:
# Load the data
data_fname = os.path.join(DATA_DIR,
'XBeach Morphometrics',
experiment,
f'Dune Complexity {dune} {weather} Morphometrics.csv')
data = pd.read_csv(data_fname)
# Pull out the overwash volume column before removing NaNs
overwash = data['Overwash Volume'].dropna()
# Remove NaNs
            data = pd.read_csv(data_fname)
"""Sky brightnes approzimation using Zernike polynomials
The form and notation used here follow:
<NAME>., <NAME>., <NAME>., <NAME>. & VSIA
Standards Taskforce Members. Vision science and its
applications. Standards for reporting the optical aberrations of
eyes. J Refract Surg 18, S652-660 (2002).
"""
# imports
from math import factorial
import logging
import os
import warnings
from glob import glob
from functools import lru_cache
import numpy as np
import pandas as pd
from numexpr import NumExpr
from sklearn.linear_model import LinearRegression
import scipy.optimize
from scipy.interpolate import interp1d
import palpy
import healpy
import rubin_sim.utils as utils
from rubin_sim.data import get_data_dir
# constants
logging.basicConfig(format="%(asctime)s %(message)s")
LOGGER = logging.getLogger(__name__)
TELESCOPE = utils.Site("LSST")
SIDEREAL_TIME_SAMPLES_RAD = np.radians(np.arange(361, dtype=float))
BANDS = ("u", "g", "r", "i", "z", "y")
# exception classes
# interface functions
def fit_pre(npy_fname, npz_fname, *args, **kwargs):
"""Fit Zernike coefficients to a pre-computed data set
Parameters
----------
npy_fname : `str`
        File name of the SkyBrightnessPre <MJD>_<MJD>.npy file
npz_fname : `str`
        File name of the SkyBrightnessPre <MJD>_<MJD>.npz file
other arguments are passed to the ZernikeSky constructor.
Returns
-------
zernike_coeffs : `pd.DataFrame`
A DataFrame with the coefficients, indexed by band and mjd.
"""
# Load the pre-computed data
npz = np.load(npz_fname, allow_pickle=True)
npz_hdr = npz["header"][()]
npz_data = npz["dict_of_lists"][()]
pre_sky = np.load(npy_fname, allow_pickle=True)
mjds = npz_data["mjds"]
alt = npz_hdr["alt"]
az = npz_hdr["az"]
zernike_coeffs_by_band = []
zernike_sky = ZernikeSky(*args, **kwargs)
for band in pre_sky.dtype.fields.keys():
LOGGER.info("Starting %s band", band)
zernike_coeff_arrays = []
for mjd_idx, mjd in enumerate(mjds):
zernike_coeff_arrays.append(
zernike_sky.fit_coeffs(alt, az, pre_sky[band][mjd_idx], mjd)
)
if mjd_idx % 1000 == 0:
msg = f"Finished {mjd_idx*100.0/float(len(mjds)):.2f}%"
LOGGER.debug(msg)
zernike_coeffs_by_band.append(
pd.DataFrame(
zernike_coeff_arrays,
columns=np.arange(len(zernike_coeff_arrays[0])),
index=pd.MultiIndex.from_arrays(
[np.full_like(mjds, band, dtype=type(band)), mjds],
names=["band", "mjd"],
),
)
)
zernike_coeffs = pd.concat(zernike_coeffs_by_band)
return zernike_coeffs
def bulk_zernike_fit(data_dir, out_fname, *args, **kwargs):
"""Fit Zernike coeffs to all SkyBrightnessPre files in a directory.
Parameters
----------
data_dir : `str`
Name of the directory in which to look for SkyBrightnessPre
data files.
out_fname: `str`
Name of the file in which to save fit coefficients.
other arguments are passed to the ZernikeSky constructor.
Returns
-------
zernike_coeffs : `pd.DataFrame`
A DataFrame with the coefficients, indexed by band and mjd.
"""
zernike_coeff_batches = []
for npz_fname in glob(os.path.join(data_dir, "?????_?????.npz")):
LOGGER.info("Processing %s", npz_fname)
npy_fname = os.path.splitext(npz_fname)[0] + ".npy"
zernike_coeff_batch = fit_pre(npy_fname, npz_fname, *args, **kwargs)
zernike_coeff_batches.append(zernike_coeff_batch)
zernike_coeffs = pd.concat(zernike_coeff_batches)
zernike_coeffs.sort_index(level="mjd", inplace=True)
if out_fname is not None:
zernike_coeffs.to_hdf(out_fname, "zernike_coeffs", complevel=6)
zernike_sky = ZernikeSky(*args, **kwargs)
zernike_metadata = pd.Series(
{"order": zernike_sky.order, "max_zd": zernike_sky.max_zd}
)
zernike_metadata.to_hdf(out_fname, "zernike_metadata")
return zernike_coeffs
# classes
class ZernikeSky:
"""Zernike sky approximator.
Parameters
----------
order : `int`, optional
The order of the Zernike polynomial to use. Default is 6.
nside : `int`, optional
The nside of the healpix array to pre-compute Zernike Z terms for.
Default is 32.
max_zd : `float`, optional
The maximum zenith distance, in degrees. This value will correspond
to rho=1 in the Thibos et al. (2002) notation.
Default is 67.
    dtype : `type`, optional
The numpy type to use for all calculations. Default is `np.float64`.
"""
def __init__(self, order=6, nside=32, max_zd=67, dtype=np.float64):
self.order = order
self.dtype = dtype
self.nside = nside
# Sets the value of zd where rho (radial coordinate of the
# unit disk in which Zernike polynomials are orthogonal) = 1
self.max_zd = max_zd
# a list of functions to calculate big Z given rho, phi,
# following eqn 1 of Thibos et al. (2002). The jth element of
# the list returns the jth Z, following the indexing
# convertions of Thibos et al. eqn 4.
#
# Should switch to using functools.cached_property in python 3.8
self._z_function = self._build_z_functions()
# A function that calculates the full Zernike approximation,
# taking rho and phi as arguments.
#
# numexpr can only compile functions with a limited number of
# arguments. If the order is too high, sum the terms
# separately
if order <= 7:
self._zern_function = self._build_zern_function()
else:
self._zern_function = self._compute_sky_by_sum
# big Z values for all m,n at all rho, phi in the
# pre-defined healpix coordinate, following eqn 1 of Thibos et
# al. (2002) The array returned should be indexed with j,
# Should switch to using functools.cached_property in python 3.8
self.healpix_z = self._compute_healpix_z()
self._interpolate_healpix_z = interp1d(
SIDEREAL_TIME_SAMPLES_RAD, self.healpix_z, axis=0, kind="nearest"
)
# A pd.DataFrame of zernike coeffs, indexed by mjd, providing the
# Zernike polynomial coefficients for the approximation of the
# sky at that time. That is, self._coeffs[5, 3] is the
# j=3 coefficient of the approximation of the sky at
# mjd=self.mjds[5], where j is defined as in Thibos et al. eqn 4.
self._coeffs = pd.DataFrame()
def load_coeffs(self, fname, band):
"""Load Zernike coefficients from a file.
Parameters
----------
fname : `str`
The file name of the hdf5 file with the Zernike coeffs.
band : `str`
The band to load.
"""
zernike_metadata = pd.read_hdf(fname, "zernike_metadata")
assert self.order == zernike_metadata["order"]
assert self.max_zd == zernike_metadata["max_zd"]
all_zernike_coeffs = pd.read_hdf(fname, "zernike_coeffs")
self._coeffs = all_zernike_coeffs.loc[band]
self._coeff_calc_func = interp1d(
self._coeffs.index.values, self._coeffs.values, axis=0
)
def compute_sky(self, alt, az, mjd=None):
"""Estimate sky values
Parameters
----------
alt : `np.ndarray`, (N)
An array of altitudes above the horizon, in degrees
az : `np.ndarray`, (N)
An array of azimuth coordinates, in degrees
mjd : `float`
The time (floating point MJD) at which to estimate the sky.
Returns
-------
`np.ndarray` (N) of sky brightnesses (mags/asec^2)
"""
rho = self._calc_rho(alt)
phi = self._calc_phi(az)
result = self._zern_function(rho, phi, *tuple(self.coeffs(mjd)))
return result
def _compute_sky_by_sum(self, rho, phi, *coeffs):
z = self._compute_z(rho, phi)
if len(z.shape) == 2:
result = np.sum(np.array(coeffs) * z, axis=1)
else:
result = np.sum(np.array(coeffs) * z)
return result
def compute_healpix(self, hpix, mjd=None):
"""Estimate sky values
Parameters
----------
hpix : `int`, (N)
Array of healpix indexes of the desired coordinates.
mjd : `float`
The time (floating point MJD) at which to estimate the sky.
Returns
-------
`np.ndarray` (N) of sky brightnesses (mags/asec^2)
"""
interpolate_healpix_z = self._interpolate_healpix_z
gmst = palpy.gmst(mjd)
mjd_healpix_z = interpolate_healpix_z(gmst)
# mjd_healpix_z = self.healpix_z[int(np.degrees(gmst))]
if hpix is None:
result = np.sum(self.coeffs(mjd) * mjd_healpix_z, axis=1)
else:
result = np.sum(self.coeffs(mjd) * mjd_healpix_z[hpix], axis=1)
return result
def coeffs(self, mjd):
"""Zerinke coefficients at a time
Parameters
----------
mjd : `float`
The time (floating point MJD) at which to estimate the sky.
Returns
-------
`np.ndarray` of Zernike coefficients following the OSA/ANSI
indexing convention described in Thibos et al. (2002).
"""
if len(self._coeffs) == 1:
these_coeffs = self._coeffs.loc[mjd]
else:
calc_these_coeffs = self._coeff_calc_func
these_coeffs = calc_these_coeffs(mjd)
return these_coeffs
def fit_coeffs(self, alt, az, sky, mjd, min_moon_sep=10, maxdiff=False):
"""Fit Zernike coefficients to a set of points
Parameters
----------
alt : `np.ndarray`, (N)
An array of altitudes above the horizon, in degrees
az : `np.ndarray`, (N)
An array of azimuth coordinates, in degrees
sky : `np.ndarray`, (N)
An array of sky brightness values (mags/asec^2)
mjd : `float`
The time (floating point MJD) at which to estimate the sky.
maxdiff : `bool`
Minimize the maximum difference between the estimate and data,
rather than the default RMS.
"""
# Do not fit too close to the moon
alt_rad, az_rad = np.radians(alt), np.radians(az)
gmst_rad = palpy.gmst(mjd)
lst_rad = gmst_rad + TELESCOPE.longitude_rad
moon_ra_rad, moon_decl_rad, moon_diam = palpy.rdplan(
mjd, 3, TELESCOPE.longitude_rad, TELESCOPE.latitude_rad
)
moon_ha_rad = lst_rad - moon_ra_rad
moon_az_rad, moon_el_rad = palpy.de2h(
moon_ha_rad, moon_decl_rad, TELESCOPE.latitude_rad
)
moon_sep_rad = palpy.dsepVector(
np.full_like(az_rad, moon_az_rad),
np.full_like(alt_rad, moon_el_rad),
az_rad,
alt_rad,
)
moon_sep = np.degrees(moon_sep_rad)
rho = self._calc_rho(alt)
phi = self._calc_phi(az)
good_points = np.logical_and(rho <= 1.0, moon_sep > min_moon_sep)
rho = rho[good_points]
phi = phi[good_points]
sky = sky[good_points]
alt = alt[good_points]
az = az[good_points]
num_points = len(alt)
assert len(az) == num_points
assert len(sky) == num_points
z = np.zeros((num_points, self._number_of_terms), dtype=self.dtype)
for j in np.arange(self._number_of_terms):
compute_z = self._z_function[j]
z[:, j] = compute_z(rho, phi)
# If the points being fit were evenly distributed across the sky,
# we might be able to get away with a multiplication rather than
# a linear regression, but we might be asked to fit masked data
zern_fit = LinearRegression(fit_intercept=False).fit(z, sky)
fit_coeffs = zern_fit.coef_
if maxdiff:
def max_abs_diff(test_coeffs):
max_resid = np.max(
np.abs(np.sum(test_coeffs * z, axis=1) - sky)
)
return max_resid
min_fit = scipy.optimize.minimize(max_abs_diff, fit_coeffs)
fit_coeffs = min_fit.x
self._coeffs = pd.DataFrame(
[fit_coeffs],
columns=np.arange(len(fit_coeffs)),
index=pd.Index([mjd], name="mjd"),
)
return fit_coeffs
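    # The default (maxdiff=False) branch above is an ordinary least-squares fit
    # with no intercept; a NumPy-only sketch of the same solve would be:
    #   fit_coeffs, *_ = np.linalg.lstsq(z, sky, rcond=None)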
def _compute_healpix_z(self):
# Compute big Z values for all m,n at all rho, phi in the
# pre-defined healpix coordinate, following eqn 1 of Thibos et
# al. (2002) The array returned should be indexed with j,
# following the conventions of eqn 4.
sphere_npix = healpy.nside2npix(self.nside)
sphere_ipix = np.arange(sphere_npix)
ra, decl = healpy.pix2ang(self.nside, sphere_ipix, lonlat=True)
num_st = len(SIDEREAL_TIME_SAMPLES_RAD)
healpix_z = np.full(
[num_st, sphere_npix, self._number_of_terms], np.nan
)
for st_idx, gmst_rad in enumerate(SIDEREAL_TIME_SAMPLES_RAD):
lst_rad = gmst_rad + TELESCOPE.longitude_rad
ha_rad = lst_rad - np.radians(ra)
az_rad, alt_rad = palpy.de2hVector(
ha_rad, np.radians(decl), TELESCOPE.latitude_rad
)
sphere_az, sphere_alt = np.degrees(az_rad), np.degrees(alt_rad)
            # We only need the half sphere above the horizon
visible_ipix = sphere_ipix[sphere_alt > 0]
alt, az = sphere_alt[visible_ipix], sphere_az[visible_ipix]
rho = self._calc_rho(alt)
phi = self._calc_phi(az)
healpix_z[st_idx, visible_ipix] = self._compute_z(rho, phi)
return healpix_z
def _compute_horizan_healpix_z(self):
# Compute big Z values for all m,n at all rho, phi in the
# pre-defined healpix coordinate, following eqn 1 of Thibos et
# al. (2002) The array returned should be indexed with j,
# following the conventions of eqn 4.
sphere_npix = healpy.nside2npix(self.nside)
sphere_ipix = np.arange(sphere_npix)
sphere_az, sphere_alt = healpy.pix2ang(
self.nside, sphere_ipix, lonlat=True
)
        # We only need the half sphere above the horizon
ipix = sphere_ipix[sphere_alt > 0]
alt, phi_deg = sphere_alt[ipix], sphere_az[ipix]
rho = self._calc_rho(alt)
rho, phi = (90.0 - alt) / self.max_zd, np.radians(phi_deg)
healpix_z = self._compute_z(rho, phi)
return healpix_z
def _compute_z(self, rho, phi):
# Compute big Z values for all m,n at rho, phi
# following eqn 1 of Thibos et al. (2002)
# The array returned should be indexed with j,
# following the conventions of eqn 4.
try:
npix = len(rho)
z = np.zeros((npix, self._number_of_terms), dtype=self.dtype)
for j in np.arange(self._number_of_terms):
compute_z = self._z_function[j]
z[:, j] = compute_z(rho, phi)
except TypeError:
z = np.zeros(self._number_of_terms, dtype=self.dtype)
for j in np.arange(self._number_of_terms):
compute_z = self._z_function[j]
z[j] = compute_z(rho, phi)
return z
def _build_z_functions(self):
z_functions = []
for j in np.arange(self._number_of_terms):
z_functions.append(self._make_z_function(j))
return z_functions
def _build_zern_function(self):
coeffs = [f"c{j}" for j in np.arange(self._number_of_terms)]
expression = ""
for j, coeff in enumerate(coeffs):
zern_z_expr = self._make_z_expression(j)
if zern_z_expr == "(1)":
term = f"{coeff}"
else:
term = f"{coeff}*({zern_z_expr})"
if expression == "":
expression = term
else:
expression += f" + {term}"
arg_types = []
if expression.find("rho") >= 0:
arg_types.append(("rho", self.dtype),)
if expression.find("phi") >= 0:
arg_types.append(("phi", self.dtype),)
for coeff in coeffs:
arg_types.append((coeff, self.dtype),)
arg_types = tuple(arg_types)
zern_function = NumExpr(expression, arg_types)
return zern_function
@property
def _number_of_terms(self):
n_terms = np.sum(np.arange(self.order) + 1)
return n_terms
def _make_r_expression(self, m, n):
if (n - m) % 2 == 1:
return 0
assert n >= m
assert m >= 0
m = int(m)
n = int(n)
num_terms = 1 + (n - m) // 2
expression = "("
for k in range(num_terms):
# From eqn 2 of Thibos et al. (2002)
coeff = (((-1) ** k) * factorial(n - k)) / (
factorial(k)
* factorial(int((n + m) / 2 - k))
* factorial(int((n - m) / 2 - k))
)
assert coeff == int(coeff)
coeff = int(coeff)
power = n - 2 * k
if len(expression) > 1:
expression += " + "
if power == 0:
expression += f"{coeff}"
elif power == 1:
expression += f"{coeff}*rho"
else:
expression += f"{coeff}*rho**{power}"
expression += ")"
return expression
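    # Example of the generated string (worked by hand from eqn 2): for m=0, n=2
    # this returns "(2*rho**2 + -1)", i.e. the radial term R_2^0(rho) = 2*rho**2 - 1.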
def _make_z_expression(self, j=None, mprime=None, n=None):
if j is None:
assert mprime is not None
assert n is not None
else:
assert mprime is None
assert n is None
# From eqn 5 in Thibos et al. (2002)
n = np.ceil((-3 + np.sqrt(9 + 8 * j)) / 2).astype(int)
# From eqn 6 in Thibos et al. (2002)
mprime = 2 * j - n * (n + 2)
m = np.abs(mprime)
r = self._make_r_expression(m, n)
# From eqn. 3 of Thibos et al. 2002, again
delta = 1 if m == 0 else 0
big_nsq = 2 * (n + 1) / (1 + delta)
assert int(big_nsq) == big_nsq
big_nsq = int(big_nsq)
if mprime == 0:
expression = f"sqrt({big_nsq})*{r}"
elif mprime > 0:
expression = f"sqrt({big_nsq})*{r}*cos({m}*phi)"
elif mprime < 0:
expression = f"sqrt({big_nsq})*{r}*sin({m}*phi)"
else:
assert False
return expression
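    # Example (worked by hand from eqns 3-6): j=4 gives n=2, m'=0, so the
    # returned expression is "sqrt(3)*(2*rho**2 + -1)", the standard defocus term.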
def _make_z_function(self, j=None, mprime=None, n=None):
expression = self._make_z_expression(j, mprime, n)
arg_types = []
if expression.find("rho") >= 0:
arg_types.append(("rho", self.dtype),)
if expression.find("phi") >= 0:
arg_types.append(("phi", self.dtype),)
arg_types = tuple(arg_types)
raw_z_function = NumExpr(expression, arg_types)
# Create functions with dummy arguments so that
# terms that do not require both phi and rho can
# still accept them, such that all z_functions
# can be called in the same way.
if len(arg_types) == 0:
def z_function(rho=None, phi=None):
return raw_z_function()
elif len(arg_types) == 1:
def z_function(rho, phi=None):
return raw_z_function(rho)
else:
z_function = raw_z_function
return z_function
def _calc_rho(self, alt):
zd = 90.0 - alt
if np.isscalar(alt) and zd > self.max_zd:
return np.nan
rho = zd / self.max_zd
if not np.isscalar(alt):
rho[zd > self.max_zd] = np.nan
return rho
def _calc_phi(self, az):
phi = np.radians(az)
return phi
class SkyBrightnessPreData:
"""Manager for raw pre-computed sky brightness data
Parameters
----------
base_fname : `str`
Base name for data files to load.
bands: `List` [`str`]
Name of bands to read.
pre_data_dir : `str`
Name of source directory for pre-computed sky brightness data.
max_num_mjds : `int`
If there are more than this number of MJDs in the requested
data files, sample this many out of the total.
"""
def __init__(
self, fname_base, bands, pre_data_dir=None, max_num_mjds=None
):
if pre_data_dir is None:
try:
self.pre_data_dir = os.environ["SIMS_SKYBRIGHTNESS_DATA"]
except KeyError:
self.pre_data_dir = "."
else:
self.pre_data_dir = pre_data_dir
self.fname_base = fname_base
self.max_num_mjds = max_num_mjds
self.times = None
self.sky = None
self.metadata = {}
self.load(fname_base, bands)
def load(self, fname_base, bands="ugrizy"):
"""Load pre-computed sky values.
Parameters
----------
base_fname : `str`
Base name for data files to load.
bands: `List` [`str`]
Name of bands to read.
"""
npz_fname = os.path.join(self.pre_data_dir, fname_base + "." + "npz")
npy_fname = os.path.join(self.pre_data_dir, fname_base + "." + "npy")
npz = np.load(npz_fname, allow_pickle=True)
npz_hdr = npz["header"][()]
npz_data = npz["dict_of_lists"][()]
pre_sky = np.load(npy_fname, allow_pickle=True)
alt = npz_hdr["alt"]
az = npz_hdr["az"]
alt_rad, az_rad = np.radians(alt), np.radians(az)
self.metadata = npz_hdr
self.times = pd.DataFrame(
{
k: npz_data[k]
for k in npz_data.keys()
if npz_data[k].shape == npz_data["mjds"].shape
}
)
read_mjds = len(self.times)
if self.max_num_mjds is not None:
read_mjd_idxs = pd.Series(np.arange(read_mjds))
mjd_idxs = read_mjd_idxs.sample(self.max_num_mjds)
else:
mjd_idxs = np.arange(read_mjds)
skies = []
for mjd_idx in mjd_idxs:
mjd = npz_data["mjds"][mjd_idx]
gmst_rad = palpy.gmst(mjd)
lst_rad = gmst_rad + TELESCOPE.longitude_rad
ha_rad, decl_rad = palpy.dh2eVector(
az_rad, alt_rad, TELESCOPE.latitude_rad
)
ra_rad = (lst_rad - ha_rad) % (2 * np.pi)
moon_ra_rad = npz_data["moonRAs"][mjd_idx]
moon_decl_rad = npz_data["moonDecs"][mjd_idx]
moon_ha_rad = lst_rad - moon_ra_rad
moon_az_rad, moon_el_rad = palpy.de2h(
moon_ha_rad, moon_decl_rad, TELESCOPE.latitude_rad
)
moon_sep = palpy.dsepVector(
np.full_like(az_rad, moon_az_rad),
np.full_like(alt_rad, moon_el_rad),
az_rad,
alt_rad,
)
for band in bands:
skies.append(
pd.DataFrame(
{
"band": band,
"mjd": npz_data["mjds"][mjd_idx],
"gmst": np.degrees(gmst_rad),
"lst": np.degrees(lst_rad),
"alt": alt,
"az": az,
"ra": np.degrees(ra_rad),
"decl": np.degrees(decl_rad),
"moon_ra": np.degrees(
npz_data["moonRAs"][mjd_idx]
),
"moon_decl": np.degrees(
npz_data["moonDecs"][mjd_idx]
),
"moon_alt": np.degrees(
npz_data["moonAlts"][mjd_idx]
),
"moon_az": np.degrees(moon_az_rad),
"moon_sep": np.degrees(moon_sep),
"sun_ra": np.degrees(npz_data["sunRAs"][mjd_idx]),
"sun_decl": np.degrees(
npz_data["sunDecs"][mjd_idx]
),
"sun_alt": np.degrees(
npz_data["sunAlts"][mjd_idx]
),
"sky": pre_sky[band][mjd_idx],
}
)
)
        self.sky = pd.concat(skies)
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
        rng = tm.box_expected(rng, box)
"""
postprocess_flow_forecasts.py
Author: <NAME>
Copyright March 2020
License: BSD 3 Clause
Updated: June 2020
Identifies flows forecasted to experience a return period level flow on streams from a preprocessed list of stream
COMID's in each region
"""
import datetime
import glob
import logging
import os
import sys
import numpy as np
import pandas as pd
import xarray
import netCDF4 as nc
# todo make process_region into process_date which gets called by process_region, then you can pick whether to aggregate all days or none
def merge_forecast_qout_files(rapidio_region_output):
# pick the most recent date, append to the file path
recent_date = sorted(os.listdir(rapidio_region_output))
while recent_date[-1].endswith('.csv'):
recent_date.remove(recent_date[-1])
recent_date = recent_date[-1]
qout_folder = os.path.join(rapidio_region_output, recent_date)
# list the forecast files
prediction_files = sorted(glob.glob(os.path.join(qout_folder, 'Qout*.nc')))
# merge them into a single file joined by ensemble number
ensemble_index_list = []
qout_datasets = []
for forecast_nc in prediction_files:
ensemble_index_list.append(int(os.path.basename(forecast_nc)[:-3].split("_")[-1]))
qout_datasets.append(xarray.open_dataset(forecast_nc).Qout)
return xarray.concat(qout_datasets, pd.Index(ensemble_index_list, name='ensemble')), qout_folder
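# The concatenated DataArray gains a new leading "ensemble" dimension, so it is
# typically indexed as (ensemble, time, rivid); the exact dimension names depend
# on the RAPID Qout files being merged.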
def check_for_return_period_flow(largeflows_df, forecasted_flows_df, stream_order, rp_data):
max_flow = max(forecasted_flows_df['means'])
# temporary dates
date_r5 = ''
date_r10 = ''
date_r25 = ''
date_r50 = ''
date_r100 = ''
# retrieve return period flow levels from dataframe
r2 = float(rp_data['return_period_2'].values[0])
r5 = float(rp_data['return_period_5'].values[0])
r10 = float(rp_data['return_period_10'].values[0])
r25 = float(rp_data['return_period_25'].values[0])
r50 = float(rp_data['return_period_50'].values[0])
r100 = float(rp_data['return_period_100'].values[0])
# then compare the timeseries to the return period thresholds
if max_flow >= r2:
date_r2 = get_time_of_first_exceedence(forecasted_flows_df, r2)
# if the flow is not larger than the smallest return period, return the dataframe without appending anything
else:
return largeflows_df
# check the rest of the return period flow levels
if max_flow >= r5:
date_r5 = get_time_of_first_exceedence(forecasted_flows_df, r5)
if max_flow >= r10:
date_r10 = get_time_of_first_exceedence(forecasted_flows_df, r10)
if max_flow >= r25:
date_r25 = get_time_of_first_exceedence(forecasted_flows_df, r25)
if max_flow >= r50:
date_r50 = get_time_of_first_exceedence(forecasted_flows_df, r50)
if max_flow >= r100:
date_r100 = get_time_of_first_exceedence(forecasted_flows_df, r100)
try:
lat = float(rp_data['lat'].values)
lon = float(rp_data['lon'].values)
except:
lat = ''
lon = ''
return largeflows_df.append({
'comid': rp_data.index[0],
'stream_order': stream_order,
'stream_lat': lat,
'stream_lon': lon,
'max_forecasted_flow': round(max_flow, 2),
'date_exceeds_return_period_2': date_r2,
'date_exceeds_return_period_5': date_r5,
'date_exceeds_return_period_10': date_r10,
'date_exceeds_return_period_25': date_r25,
'date_exceeds_return_period_50': date_r50,
'date_exceeds_return_period_100': date_r100,
}, ignore_index=True)
def get_time_of_first_exceedence(forecasted_flows_df, flow):
# replace the flows that are too small (don't exceed the return period)
forecasted_flows_df[forecasted_flows_df.means < flow] = np.nan
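    # note: the boolean assignment above mutates the caller's dataframe in place;
    # pass a copy if the original forecast values are still needed afterwards.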
daily_flows = forecasted_flows_df.dropna()
return daily_flows['times'].values[0]
def postprocess_region(region, rapidio, historical_sim, forecast_records):
    # build the proper directory paths
rapidio_region_input = os.path.join(rapidio, 'input', region)
rapidio_region_output = os.path.join(rapidio, 'output', region)
# make the pandas dataframe to store the summary info
largeflows = pd.DataFrame(columns=[
'comid', 'stream_order', 'stream_lat', 'stream_lon', 'max_forecasted_flow', 'date_exceeds_return_period_2',
'date_exceeds_return_period_5', 'date_exceeds_return_period_10', 'date_exceeds_return_period_25',
'date_exceeds_return_period_50', 'date_exceeds_return_period_100'])
# merge the most recent forecast files into a single xarray dataset
logging.info(' merging forecasts')
merged_forecasts, qout_folder = merge_forecast_qout_files(rapidio_region_output)
# collect the times and comids from the forecasts
logging.info(' reading info from forecasts')
times = pd.to_datetime(pd.Series(merged_forecasts.time))
comids = pd.Series(merged_forecasts.rivid)
    tomorrow = times[0] + pd.Timedelta(days=1)
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameSubclassing:
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
# In reference to PR 9632
class CustomSeries(Series):
@property
def _constructor(self):
return CustomSeries
def custom_series_function(self):
return "OK"
class CustomDataFrame(DataFrame):
"""
Subclasses pandas DF, fills DF with simulation results, adds some
custom plotting functions.
"""
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
@property
def _constructor(self):
return CustomDataFrame
_constructor_sliced = CustomSeries
def custom_frame_function(self):
return "OK"
data = {"col1": range(10), "col2": range(10)}
cdf = CustomDataFrame(data)
# Did we get back our own DF class?
assert isinstance(cdf, CustomDataFrame)
# Do we get back our own Series class after selecting a column?
cdf_series = cdf.col1
assert isinstance(cdf_series, CustomSeries)
assert cdf_series.custom_series_function() == "OK"
# Do we get back our own DF class after slicing row-wise?
cdf_rows = cdf[1:5]
assert isinstance(cdf_rows, CustomDataFrame)
assert cdf_rows.custom_frame_function() == "OK"
# Make sure sliced part of multi-index frame is custom class
mcol = pd.MultiIndex.from_tuples([("A", "A"), ("A", "B")])
cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi["A"], CustomDataFrame)
mcol = pd.MultiIndex.from_tuples([("A", ""), ("B", "")])
cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi2["A"], CustomSeries)
def test_dataframe_metadata(self):
df = tm.SubclassedDataFrame(
{"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"]
)
df.testattr = "XXX"
assert df.testattr == "XXX"
assert df[["X"]].testattr == "XXX"
assert df.loc[["a", "b"], :].testattr == "XXX"
assert df.iloc[[0, 1], :].testattr == "XXX"
# see gh-9776
assert df.iloc[0:1, :].testattr == "XXX"
# see gh-10553
unpickled = tm.round_trip_pickle(df)
tm.assert_frame_equal(df, unpickled)
assert df._metadata == unpickled._metadata
assert df.testattr == unpickled.testattr
def test_indexing_sliced(self):
# GH 11559
df = tm.SubclassedDataFrame(
{"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"]
)
res = df.loc[:, "X"]
exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[:, 1]
exp = tm.SubclassedSeries([4, 5, 6], index=list("abc"), name="Y")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc[:, "Z"]
exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc["a", :]
exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[1, :]
exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc["c", :]
exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
def test_subclass_attr_err_propagation(self):
# GH 11808
class A(DataFrame):
@property
def bar(self):
return self.i_dont_exist
with pytest.raises(AttributeError, match=".*i_dont_exist.*"):
A().bar
def test_subclass_align(self):
# GH 12983
df1 = tm.SubclassedDataFrame(
{"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")
)
df2 = tm.SubclassedDataFrame(
{"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD")
)
res1, res2 = df1.align(df2, axis=0)
exp1 = tm.SubclassedDataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
exp2 = tm.SubclassedDataFrame(
{"c": [1, 2, np.nan, 4, np.nan], "d": [1, 2, np.nan, 4, np.nan]},
index=list("ABCDE"),
)
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp2)
res1, res2 = df1.a.align(df2.c)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp1.a)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2.c)
def test_subclass_align_combinations(self):
# GH 12983
df = tm.SubclassedDataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = tm.SubclassedDataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
# name is lost when
exp2 = tm.SubclassedSeries(
[1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x"
)
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp2)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp1)
def test_subclass_iterrows(self):
# GH 13977
df = tm.SubclassedDataFrame({"a": [1]})
for i, row in df.iterrows():
assert isinstance(row, tm.SubclassedSeries)
tm.assert_series_equal(row, df.loc[i])
def test_subclass_stack(self):
# GH 15564
df = tm.SubclassedDataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["a", "b", "c"],
columns=["X", "Y", "Z"],
)
res = df.stack()
exp = tm.SubclassedSeries(
[1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")]
)
tm.assert_series_equal(res, exp)
def test_subclass_stack_multi(self):
# GH 15564
df = tm.SubclassedDataFrame(
[[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],
index=MultiIndex.from_tuples(
list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
),
columns=MultiIndex.from_tuples(
list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
),
)
exp = tm.SubclassedDataFrame(
[
[10, 12],
[11, 13],
[20, 22],
[21, 23],
[30, 32],
[31, 33],
[40, 42],
[41, 43],
],
index=MultiIndex.from_tuples(
list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))),
names=["aaa", "ccc", "yyy"],
),
columns=Index(["W", "X"], name="www"),
)
res = df.stack()
tm.assert_frame_equal(res, exp)
res = df.stack("yyy")
tm.assert_frame_equal(res, exp)
exp = tm.SubclassedDataFrame(
[
[10, 11],
[12, 13],
[20, 21],
[22, 23],
[30, 31],
[32, 33],
[40, 41],
[42, 43],
],
index=MultiIndex.from_tuples(
list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))),
names=["aaa", "ccc", "www"],
),
columns=Index(["y", "z"], name="yyy"),
)
res = df.stack("www")
tm.assert_frame_equal(res, exp)
def test_subclass_stack_multi_mixed(self):
# GH 15564
df = tm.SubclassedDataFrame(
[
[10, 11, 12.0, 13.0],
[20, 21, 22.0, 23.0],
[30, 31, 32.0, 33.0],
[40, 41, 42.0, 43.0],
],
index=MultiIndex.from_tuples(
list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
),
columns=MultiIndex.from_tuples(
list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
),
)
exp = tm.SubclassedDataFrame(
[
[10, 12.0],
[11, 13.0],
[20, 22.0],
[21, 23.0],
[30, 32.0],
[31, 33.0],
[40, 42.0],
[41, 43.0],
],
index=MultiIndex.from_tuples(
list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))),
names=["aaa", "ccc", "yyy"],
),
columns=Index(["W", "X"], name="www"),
)
res = df.stack()
tm.assert_frame_equal(res, exp)
res = df.stack("yyy")
tm.assert_frame_equal(res, exp)
exp = tm.SubclassedDataFrame(
[
[10.0, 11.0],
[12.0, 13.0],
[20.0, 21.0],
[22.0, 23.0],
[30.0, 31.0],
[32.0, 33.0],
[40.0, 41.0],
[42.0, 43.0],
],
index=MultiIndex.from_tuples(
list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))),
names=["aaa", "ccc", "www"],
),
columns=Index(["y", "z"], name="yyy"),
)
res = df.stack("www")
tm.assert_frame_equal(res, exp)
def test_subclass_unstack(self):
# GH 15564
df = tm.SubclassedDataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["a", "b", "c"],
columns=["X", "Y", "Z"],
)
res = df.unstack()
exp = tm.SubclassedSeries(
[1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list("XXXYYYZZZ"), list("abcabcabc")]
)
tm.assert_series_equal(res, exp)
def test_subclass_unstack_multi(self):
# GH 15564
df = tm.SubclassedDataFrame(
[[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],
index=MultiIndex.from_tuples(
list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
),
columns=MultiIndex.from_tuples(
list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
),
)
exp = tm.SubclassedDataFrame(
[[10, 20, 11, 21, 12, 22, 13, 23], [30, 40, 31, 41, 32, 42, 33, 43]],
index=Index(["A", "B"], name="aaa"),
columns=MultiIndex.from_tuples(
list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))),
names=["www", "yyy", "ccc"],
),
)
res = df.unstack()
        tm.assert_frame_equal(res, exp)
import pandas as pd
dates = pd.read_csv('data/movies_data/dates.csv', sep=',')
movies = pd.read_csv('data/movies_data/movies.csv', sep=',')
ratings1 = pd.read_csv('data/movies_data/ratings1.csv', sep=',')
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 10:59:14 2020
@author: <NAME>
"""
#reproducibility
from numpy.random import seed
seed(1+347823)
import tensorflow as tf
tf.random.set_seed(1+63493)
import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
# from bayes_opt.util import load_logs #needed if logs are already available
import os
import pandas as pd
import datetime
from scipy import stats
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
from uncertainties import unumpy
gpus = tf.config.experimental.list_physical_devices('GPU')
# =============================================================================
#### Functions
# =============================================================================
def load_GW_and_HYRAS_Data(i):
#define where to find the data
pathGW = "./GWData"
pathHYRAS = "./HYRAS"
pathconnect = "/"
#load a list of all sites
well_list = pd.read_csv("./list.txt")
Well_ID = well_list.ID[i]
#load and merge the data
GWData = pd.read_csv(pathGW+pathconnect+Well_ID+'_GW-Data.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
HYRASData = pd.read_csv(pathHYRAS+pathconnect+Well_ID+'_weeklyData_HYRAS.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
data = pd.merge(GWData, HYRASData, how='inner', left_index = True, right_index = True)
return data, Well_ID
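#The frame returned by load_GW_and_HYRAS_Data is indexed by Date and holds the
#groundwater-level column(s) merged with the weekly HYRAS meteorological columns
#(the exact column names depend on the input CSV files).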
def split_data(data, GLOBAL_SETTINGS):
#split the test data from the rest
    dataset = data[(data.index < GLOBAL_SETTINGS["test_start"])] #split off the test data
#split remaining time series into three parts 80%-10%-10%
TrainingData = dataset[0:round(0.8 * len(dataset))]
StopData = dataset[round(0.8 * len(dataset))+1:round(0.9 * len(dataset))]
    StopData_ext = dataset[round(0.8 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:round(0.9 * len(dataset))] #extend data according to delays/sequence length
OptData = dataset[round(0.9 * len(dataset))+1:]
    OptData_ext = dataset[round(0.9 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:] #extend data according to delays/sequence length
TestData = data[(data.index >= GLOBAL_SETTINGS["test_start"]) & (data.index <= GLOBAL_SETTINGS["test_end"])]
    TestData_ext = pd.concat([dataset.iloc[-GLOBAL_SETTINGS["seq_length"]:], TestData], axis=0)
#!/usr/bin/env python
'''
sentiment_time
--------------
Process the sentiment with respect to time.
'''
import datetime
import json
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# CONSTANTS
# ---------
HOME = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
JSON = os.path.join(HOME, "json")
IMAGES = os.path.join(HOME, "images")
TAGS = os.path.join(HOME, "tags")
MALE_IDOLS = set(json.load(open(os.path.join(TAGS, "men.json"))))
FEMALE_IDOLS = set(json.load(open(os.path.join(TAGS, "women.json"))))
# FUNCTIONS
# ---------
def process(path, sentiment):
'''Extract the sentiment with respect to time'''
with open(path) as f:
data = json.load(f)
if "overall_score" in data:
date = datetime.datetime.strptime(data["date"], "%Y/%m/%d")
sentiment["all"][date] = data["overall_score"]
if any([i in MALE_IDOLS for i in data["tags"]]):
sentiment["men"][date] = data["overall_score"]
if any([i in FEMALE_IDOLS for i in data["tags"]]):
sentiment["women"][date] = data["overall_score"]
def plot(sentiment):
'''Plot the data series'''
# the data is noisy, take the sample every month
all_ = pd.Series(sentiment["all"]).resample("1M", np.mean)
men = pd.Series(sentiment["men"]).resample("1M", np.mean)
    women = pd.Series(sentiment["women"])
#
# Copyright (c) 2017-18 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Retrieve NWS forecast data.
"""
from thetae import Forecast
from thetae.util import to_float, localized_date_to_utc, mph_to_kt
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_iso
import requests
from collections import defaultdict
from xml.etree import cElementTree as eTree
import pandas as pd
import numpy as np
import re
from builtins import str
default_model_name = 'NWS'
def etree_to_dict(t):
"""
Convert an XML tree to a dictionary, courtesy of @K3---rnc (StackOverflow)
"""
d = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(etree_to_dict, children):
for k, v in dc.items():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
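# Example (hand-checked): etree_to_dict(eTree.fromstring('<a x="1"><b>2</b></a>'))
# returns {'a': {'b': '2', '@x': '1'}}; attribute names are prefixed with '@'.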
def xml_to_values(l):
"""
Return a list of values from a list of XML data potentially including null values.
"""
new = []
for element in l:
if isinstance(element, dict):
new.append(None)
else:
new.append(to_float(element))
return new
def xml_to_condition(l):
"""
Returns a list of values from a list of 'weather-condition' XML data.
"""
new = []
for element in l:
if isinstance(element, dict):
key = list(element.keys())[0]
if key.endswith('nil'):
new.append(None)
elif key == 'value':
if isinstance(element[key], list):
new.append(','.join([t['@weather-type'] for t in element[key]]))
elif isinstance(element[key], dict):
new.append(element[key]['@weather-type'])
else:
new.append(str(element[key])[:20])
else:
new.append(None)
else:
try:
new.append(str(element)[:20])
except:
new.append(None)
return new
def wind_speed_interpreter(wind):
"""
Interprets NWS wind speed to return the maximum.
"""
pattern = re.compile(r'(\d{1,3})')
try:
new_wind = float(pattern.findall(wind)[-1])
except:
new_wind = np.nan
return new_wind
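# Illustrative sketch (assumed NWS-style phrasing, not from the original module):
#
#   wind_speed_interpreter('10 to 15 mph')   # -> 15.0 (the last number is taken as the maximum)
#   wind_speed_interpreter('around 5 mph')   # -> 5.0
#   wind_speed_interpreter(None)             # -> nan (regex lookup fails and is caught)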
def get_nws_forecast(config, stid, lat, lon, forecast_date):
"""
Retrieve current NWS forecast for a point location.
:param config:
:param stid: str: station ID
:param lat: float: latitude
:param lon: float: longitude
:param forecast_date: datetime:
:return:
"""
hourly_url = 'http://forecast.weather.gov/MapClick.php?lat=%f&lon=%f&FcstType=digitalDWML'
response = requests.get(hourly_url % (lat, lon))
# Raise error for invalid HTTP response
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
print('nws: got HTTP error when querying for XML file from %s' % (hourly_url % (lat, lon)))
raise
hourly_xml = eTree.fromstring(response.text)
hourly_dict = etree_to_dict(hourly_xml)
# Create a DataFrame for hourly data
    hourly = pd.DataFrame()
import numpy as np
import pandas as pd
from check import topsis
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from mcdm import executors as exe
#impact
impact = [0,1,1,1,1,1,1,0,1]
#Takes original data of data provider, result of MCDM Avg
def predict(data,y):
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(data)
X_train, X_test, Y_train, Y_test = train_test_split(scaled_data, y, test_size=0.2,shuffle=False)
clf = RandomForestRegressor()
clf.fit(X_train, Y_train)
prediction = (clf.predict(X_test))
for i in range(len(prediction)):
print(Y_test[i],prediction[i])
def lstm(data,y):
    # NOTE: `LSTM` is neither imported nor defined in this script; this function
    # is an unfinished placeholder and needs a Keras-style sequence model to run.
    scaler = MinMaxScaler(feature_range=(0,1))
    scaled_data = scaler.fit_transform(data)
    X_train, X_test, Y_train, Y_test = train_test_split(scaled_data, y, test_size=0.2,shuffle=False)
    clf = LSTM()
clf.fit(X_train, Y_train)
prediction = (clf.predict(X_test))
for i in range(len(prediction)):
print(Y_test[i],prediction[i])
#Takes in dataframe
def weights(df):
y=[]
#Iterate through 9 rows (As weight matrix has 9 parameters)
for j in range(10):
y.append(list(df.iloc[j,1:]))
criteria_matrix = np.array(y)
#print(criteria_matrix)
# Column Sum
col_sum = criteria_matrix.sum(axis=0)
# Normalised Criteria Matrix
normalised_criteria_matrix = criteria_matrix / col_sum
    # We calculate the eigenvector (row means of the normalised matrix)
    eigen_vector = normalised_criteria_matrix.mean(1)
    eigen_vector = np.reshape(eigen_vector, (1, 10))
    #print(eigen_vector)
    return eigen_vector[0][:9]
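# Illustrative sketch (hypothetical input, not part of the original script):
# `df` is expected to hold a label column followed by a 10x10 pairwise-comparison
# matrix in rows 0-9. With an all-ones matrix every criterion gets equal weight:
#
#   cmp = np.ones((10, 10))
#   demo_df = pd.DataFrame(np.column_stack([np.arange(10), cmp]))
#   weights(demo_df)   # -> array of nine 0.1 values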
#Takes in dataframe
def runMain(df):
#Read excel file as a dataframe
dataset = pd.read_csv('data.csv')
#Selective column inputs
colnames=[i for i in dataset][:-3]
#Generating new dataframe from Selective columns
new_df=dataset[colnames]
dataset = dataset.iloc[:,:-3].values
#Weights
w=weights(df)
#Impact Factor (Positive or Negative)
#Topsis
#top=topsis(dataset,w,impact)
#y=top.calc()
#Dictionary to store performance against each data nnode of provider.
#df={"Node":["Node "+str(i+1) for i in range(len(y))],"Results":y}
#new_df['Y']=y
#return [new_df,y]
#Input File (Data of Service Provider)
#dataset-> User entered weight matrix
#df=pd.read_excel("input.xlsx")
#out=runMain(df)
#print(predict(out[0],out[1]))
# Load the provider data at module level; the loop below needs `dataset` defined.
dataset = pd.read_csv('data.csv')
dataset = dataset.iloc[:,:-3].values
matrix=[]
for i in range(len(dataset)):
    matrix.append(list(dataset[i]))
inputs = pd.read_csv('data.csv')
import re
import numpy as np
import pandas as pd
import pytest
from woodwork import DataTable
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
ZIPCode
)
def test_datatable_physical_types(sample_df):
dt = DataTable(sample_df)
assert isinstance(dt.physical_types, dict)
assert set(dt.physical_types.keys()) == set(sample_df.columns)
for k, v in dt.physical_types.items():
assert isinstance(k, str)
assert v == sample_df[k].dtype
def test_sets_category_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
pd.Series(['a', pd.NaT, 'c'], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_category_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for logical_type in logical_types:
ltypes = {
column_name: NaturalLanguage,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_object_dtype_on_update(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: NaturalLanguage
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: LatLong})
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_string_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
]
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_string_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for logical_type in logical_types:
ltypes = {
column_name: Categorical,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_boolean_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
pd.Series([True, np.nan, True], name=column_name),
pd.Series([True, pd.NA, True], name=column_name),
]
logical_type = Boolean
for series in series_list:
series = series.astype('object')
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_boolean_dtype_on_update():
column_name = 'test_series'
series = pd.Series([0, 1, 0], name=column_name)
series = series.astype('object')
ltypes = {
column_name: Integer,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: Boolean})
assert dt.columns[column_name].logical_type == Boolean
assert dt.columns[column_name].dtype == Boolean.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == Boolean.pandas_dtype
def test_sets_int64_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([1, 2, 3], name=column_name),
pd.Series([1, None, 3], name=column_name),
pd.Series([1, np.nan, 3], name=column_name),
pd.Series([1, pd.NA, 3], name=column_name),
]
logical_types = [Integer]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_int64_dtype_on_update():
column_name = 'test_series'
series = pd.Series([1.0, 2.0, 1.0], name=column_name)
series = series.astype('object')
logical_types = [Integer]
for logical_type in logical_types:
ltypes = {
column_name: Double,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_float64_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([1.1, 2, 3], name=column_name),
pd.Series([1.1, None, 3], name=column_name),
pd.Series([1.1, np.nan, 3], name=column_name),
]
logical_type = Double
for series in series_list:
series = series.astype('object')
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_float64_dtype_on_update():
column_name = 'test_series'
series = pd.Series([0, 1, 0], name=column_name)
series = series.astype('object')
ltypes = {
column_name: Integer,
}
    dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
    dt = dt.set_types(logical_types={column_name: Double})
    assert dt.columns[column_name].logical_type == Double
    assert dt.columns[column_name].dtype == Double.pandas_dtype
    assert dt.to_dataframe()[column_name].dtype == Double.pandas_dtype
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractproperty
from collections import OrderedDict
import functools
import warnings
import numpy as np
import pandas as pd
import toolz
from numpy import searchsorted
from pandas import DataFrame, date_range
from pandas.tseries.holiday import AbstractHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from pytz import UTC
from exchange_calendars import errors
from .calendar_helpers import (
NP_NAT,
NANOSECONDS_PER_MINUTE,
compute_all_minutes,
one_minute_later,
one_minute_earlier,
next_divider_idx,
previous_divider_idx,
Session,
Date,
Minute,
TradingMinute,
parse_timestamp,
parse_trading_minute,
parse_session,
parse_date,
)
from .utils.memoize import lazyval
from .utils.pandas_utils import days_at_time
from .pandas_extensions.offsets import MultipleWeekmaskCustomBusinessDay
GLOBAL_DEFAULT_START = pd.Timestamp.now(tz=UTC).floor("D") - pd.DateOffset(years=20)
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
GLOBAL_DEFAULT_END = pd.Timestamp.now(tz=UTC).floor("D") + pd.DateOffset(years=1)
NANOS_IN_MINUTE = 60000000000
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY)
WEEKENDS = (SATURDAY, SUNDAY)
def selection(arr, start, end):
predicates = []
if start is not None:
predicates.append(start.tz_localize(UTC) <= arr)
if end is not None:
predicates.append(arr < end.tz_localize(UTC))
if not predicates:
return arr
return arr[np.all(predicates, axis=0)]
def _group_times(all_days, times, tz, offset=0):
if times is None:
return None
elements = [
days_at_time(selection(all_days, start, end), time, tz, offset)
for (start, time), (end, _) in toolz.sliding_window(
2, toolz.concatv(times, [(None, None)])
)
]
return elements[0].append(elements[1:])
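# Illustrative sketch (hypothetical values): `times` is a list of
# (start_date, time) pairs, e.g.
#
#   open_times = [(None, datetime.time(9, 30)),
#                 (pd.Timestamp('2005-01-01'), datetime.time(9, 0))]
#
# _group_times(all_days, open_times, tz) would then stamp sessions before
# 2005-01-01 with a 09:30 open and later sessions with a 09:00 open,
# localized to `tz`.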
class deprecate:
"""Decorator for deprecated/renamed ExchangeCalendar methods."""
def __init__(
self,
deprecated_release: str = "3.4",
removal_release: str = "4.0",
alt_method: str = "",
renamed: bool = True,
):
self.deprecated_release = "release " + deprecated_release
self.removal_release = "release " + removal_release
self.alt_method = alt_method
self.renamed = renamed
if renamed:
assert alt_method, "pass `alt_method` if renaming"
def __call__(self, f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
warnings.warn(self._message(f), FutureWarning)
return f(*args, **kwargs)
return wrapped_f
def _message(self, f):
msg = (
f"`{f.__name__}` was deprecated in {self.deprecated_release}"
f" and will be removed in {self.removal_release}."
)
if self.alt_method:
if self.renamed:
msg += (
f" The method has been renamed `{self.alt_method}`."
f" NB parameter names may also have changed (see "
f" documentation for `{self.alt_method}`)."
)
else:
msg += f" Use `{self.alt_method}`."
return msg
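# Illustrative sketch (hypothetical method names): how `deprecate` is meant to
# be applied to a renamed calendar method.
#
#   class SomeCalendar(ExchangeCalendar):
#       @deprecate(deprecated_release="3.4", removal_release="4.0",
#                  alt_method="new_name")
#       def old_name(self, *args, **kwargs):
#           return self.new_name(*args, **kwargs)
#
# Calling `old_name` then emits the FutureWarning built by `_message`.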
class ExchangeCalendar(ABC):
"""Representation of timing information of a single market exchange.
The timing information comprises sessions, open/close times and, for
exchanges that observe an intraday break, break_start/break_end times.
For exchanges that do not observe an intraday break a session
represents a contiguous set of minutes. Where an exchange observes
an intraday break a session represents two contiguous sets of minutes
separated by the intraday break.
Each session has a label that is midnight UTC. It is important to note
that a session label should not be considered a specific point in time,
and that midnight UTC is just being used for convenience.
For each session, we store the open and close time together with, for
those exchanges with breaks, the break start and break end. All times
are defined as UTC.
Parameters
----------
start : default: later of 20 years ago or first supported start date.
First calendar session will be `start`, if `start` is a session, or
first session after `start`.
end : default: earliest of 1 year from 'today' or last supported end date.
Last calendar session will be `end`, if `end` is a session, or last
session before `end`.
side : default: "both" ("left" for 24 hour calendars)
Define which of session open/close and break start/end should
be treated as a trading minute:
"left" - treat session open and break_start as trading minutes,
do not treat session close or break_end as trading minutes.
"right" - treat session close and break_end as trading minutes,
            do not treat session open or break_start as trading minutes.
"both" - treat all of session open, session close, break_start
and break_end as trading minutes.
"neither" - treat none of session open, session close,
break_start or break_end as trading minutes.
Raises
------
ValueError
If `start` is earlier than the earliest supported start date.
If `end` is later than the latest supported end date.
If `start` parses to a later date than `end`.
Notes
-----
Exchange calendars were originally defined for the Zipline package from
Quantopian under the package 'trading_calendars'. Since 2021 they have
been maintained under the 'exchange_calendars' package (a fork of
'trading_calendars') by an active community of contributing users.
Some calendars have defined start and end bounds within which
contributors have endeavoured to ensure the calendar's accuracy and
outside of which the calendar would not be accurate. These bounds
are enforced such that passing `start` or `end` as dates that are
out-of-bounds will raise a ValueError. The bounds of each calendar are
exposed via the `bound_start` and `bound_end` properties.
Many calendars do not have bounds defined (in these cases `bound_start`
and/or `bound_end` return None). These calendars can be created through
any date range although it should be noted that the earlier the start
date, the greater the potential for inaccuracies.
In all cases, no guarantees are offered as to the accuracy of any
calendar.
Internal method parameters:
_parse: bool
Determines if a `minute` or `session` parameter should be
parsed (default True). Passed as False:
- internally to prevent double parsing.
- by tests for efficiency.
"""
_LEFT_SIDES = ["left", "both"]
_RIGHT_SIDES = ["right", "both"]
def __init__(
self,
start: Date | None = None,
end: Date | None = None,
side: str | None = None,
):
side = side if side is not None else self.default_side
if side not in self.valid_sides:
raise ValueError(
f"`side` must be in {self.valid_sides} although received as {side}."
)
self._side = side
if start is None:
start = self.default_start
else:
start = parse_date(start, "start")
if self.bound_start is not None and start < self.bound_start:
raise ValueError(self._bound_start_error_msg(start))
if end is None:
end = self.default_end
else:
end = parse_date(end, "end")
if self.bound_end is not None and end > self.bound_end:
raise ValueError(self._bound_end_error_msg(end))
if start >= end:
raise ValueError(
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end}'."
)
# Midnight in UTC for each trading day.
_all_days = date_range(start, end, freq=self.day, tz=UTC)
if _all_days.empty:
raise errors.NoSessionsError(calendar_name=self.name, start=start, end=end)
# `DatetimeIndex`s of standard opens/closes for each day.
self._opens = _group_times(
_all_days,
self.open_times,
self.tz,
self.open_offset,
)
self._break_starts = _group_times(
_all_days,
self.break_start_times,
self.tz,
)
self._break_ends = _group_times(
_all_days,
self.break_end_times,
self.tz,
)
self._closes = _group_times(
_all_days,
self.close_times,
self.tz,
self.close_offset,
)
# Apply special offsets first
self._calculate_and_overwrite_special_offsets(_all_days, start, end)
# Series mapping sessions with nonstandard opens/closes.
_special_opens = self._calculate_special_opens(start, end)
_special_closes = self._calculate_special_closes(start, end)
# Overwrite the special opens and closes on top of the standard ones.
_overwrite_special_dates(_all_days, self._opens, _special_opens)
_overwrite_special_dates(_all_days, self._closes, _special_closes)
_remove_breaks_for_special_dates(
_all_days,
self._break_starts,
_special_closes,
)
_remove_breaks_for_special_dates(
_all_days,
self._break_ends,
_special_closes,
)
if self._break_starts is None:
break_starts = None
else:
break_starts = self._break_starts.tz_localize(None)
if self._break_ends is None:
break_ends = None
else:
break_ends = self._break_ends.tz_localize(None)
self.schedule = DataFrame(
index=_all_days,
data=OrderedDict(
[
("market_open", self._opens.tz_localize(None)),
("break_start", break_starts),
("break_end", break_ends),
("market_close", self._closes.tz_localize(None)),
]
),
dtype="datetime64[ns]",
)
self.market_opens_nanos = self.schedule.market_open.values.astype(np.int64)
self.market_break_starts_nanos = self.schedule.break_start.values.astype(
np.int64
)
self.market_break_ends_nanos = self.schedule.break_end.values.astype(np.int64)
self.market_closes_nanos = self.schedule.market_close.values.astype(np.int64)
_check_breaks_match(
self.market_break_starts_nanos, self.market_break_ends_nanos
)
self.first_trading_session = _all_days[0]
self.last_trading_session = _all_days[-1]
self._late_opens = _special_opens.index
self._early_closes = _special_closes.index
# Methods and properties that define calendar and which should be
# overriden or extended, if and as required, by subclass.
@abstractproperty
def name(self) -> str:
raise NotImplementedError()
@property
def bound_start(self) -> pd.Timestamp | None:
"""Earliest date from which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Earliest date from which calendar can be constructed. Must have
tz as "UTC". None if no limit.
Notes
-----
To impose a constraint on the earliest date from which a calendar
can be constructed subclass should override this method and
optionally override `_bound_start_error_msg`.
"""
return None
@property
def bound_end(self) -> pd.Timestamp | None:
"""Latest date to which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Latest date to which calendar can be constructed. Must have tz
as "UTC". None if no limit.
Notes
-----
To impose a constraint on the latest date to which a calendar can
be constructed subclass should override this method and optionally
override `_bound_end_error_msg`.
"""
return None
def _bound_start_error_msg(self, start: pd.Timestamp) -> str:
"""Return error message to handle `start` being out-of-bounds.
See Also
--------
bound_start
"""
return (
f"The earliest date from which calendar {self.name} can be"
f" evaluated is {self.bound_start}, although received `start` as"
f" {start}."
)
def _bound_end_error_msg(self, end: pd.Timestamp) -> str:
"""Return error message to handle `end` being out-of-bounds.
See Also
--------
bound_end
"""
return (
f"The latest date to which calendar {self.name} can be evaluated"
f" is {self.bound_end}, although received `end` as {end}."
)
@property
def default_start(self) -> pd.Timestamp:
if self.bound_start is None:
return GLOBAL_DEFAULT_START
else:
return max(GLOBAL_DEFAULT_START, self.bound_start)
@property
def default_end(self) -> pd.Timestamp:
if self.bound_end is None:
return GLOBAL_DEFAULT_END
else:
return min(GLOBAL_DEFAULT_END, self.bound_end)
@abstractproperty
def tz(self):
raise NotImplementedError()
@abstractproperty
def open_times(self):
"""
Returns a list of tuples of (start_date, open_time). If the open
time is constant throughout the calendar, use None for the start_date.
"""
raise NotImplementedError()
@property
def break_start_times(self):
"""
        Returns an optional list of tuples of (start_date, break_start_time).
If the break start time is constant throughout the calendar, use None
for the start_date. If there is no break, return `None`.
"""
return None
@property
def break_end_times(self):
"""
        Returns an optional list of tuples of (start_date, break_end_time). If
the break end time is constant throughout the calendar, use None for
the start_date. If there is no break, return `None`.
"""
return None
@abstractproperty
def close_times(self):
"""
Returns a list of tuples of (start_date, close_time). If the close
time is constant throughout the calendar, use None for the start_date.
"""
raise NotImplementedError()
@property
def weekmask(self):
"""
String indicating the days of the week on which the market is open.
Default is '1111100' (i.e., Monday-Friday).
See Also
--------
numpy.busdaycalendar
"""
return "1111100"
@property
def open_offset(self):
return 0
@property
def close_offset(self):
return 0
@property
def regular_holidays(self):
"""
Returns
-------
pd.AbstractHolidayCalendar: a calendar containing the regular holidays
for this calendar
"""
return None
@property
def adhoc_holidays(self):
"""
Returns
-------
list: A list of tz-naive timestamps representing unplanned closes.
"""
return []
@property
def special_opens(self):
"""
A list of special open times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_opens_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
@property
def special_closes(self):
"""
A list of special close times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_closes_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
@property
def special_weekmasks(self):
"""
Returns
-------
list: List of (date, date, str) tuples that represent special
weekmasks that applies between dates.
"""
return []
@property
def special_offsets(self):
"""
Returns
-------
list: List of (timedelta, timedelta, timedelta, timedelta, AbstractHolidayCalendar) tuples
that represent special open, break_start, break_end, close offsets
and corresponding HolidayCalendars.
"""
return []
@property
def special_offsets_adhoc(self):
"""
Returns
-------
list: List of (timedelta, timedelta, timedelta, timedelta, DatetimeIndex) tuples
that represent special open, break_start, break_end, close offsets
and corresponding DatetimeIndexes.
"""
return []
# ------------------------------------------------------------------
# -- NO method below this line should be overriden on a subclass! --
# ------------------------------------------------------------------
# Methods and properties that define calendar (continued...).
@lazyval
def day(self):
if self.special_weekmasks:
return MultipleWeekmaskCustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
weekmask=self.weekmask,
weekmasks=self.special_weekmasks,
)
else:
return CustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
weekmask=self.weekmask,
)
@property
def valid_sides(self) -> list[str]:
"""List of valid `side` options."""
if self.close_times == self.open_times:
return ["left", "right"]
else:
return ["both", "left", "right", "neither"]
@property
def default_side(self) -> str:
"""Default `side` option."""
if self.close_times == self.open_times:
return "right"
else:
return "both"
@property
def side(self) -> str:
"""Side on which sessions are closed.
Returns
-------
str
"left" - Session open and break_start are trading minutes.
Session close and break_end are not trading minutes.
"right" - Session close and break_end are trading minutes,
                Session open and break_start are not trading minutes.
"both" - Session open, session close, break_start and
break_end are all trading minutes.
"neither" - Session open, session close, break_start and
break_end are all not trading minutes.
Notes
-----
Subclasses should NOT override this method.
"""
return self._side
# Properties covering all sessions.
@property
def all_sessions(self) -> pd.DatetimeIndex:
"""All calendar sessions."""
return self.schedule.index
@property
def opens(self) -> pd.Series:
"""Open time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Open time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_open
@property
def closes(self) -> pd.Series:
"""Close time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Close time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-start time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-end time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_end
@functools.lru_cache(maxsize=1) # cache last request
def _first_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._LEFT_SIDES:
return self.market_opens_nanos
else:
return one_minute_later(self.market_opens_nanos)
@functools.lru_cache(maxsize=1) # cache last request
def _last_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._RIGHT_SIDES:
return self.market_closes_nanos
else:
return one_minute_earlier(self.market_closes_nanos)
@functools.lru_cache(maxsize=1) # cache last request
def _last_am_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._RIGHT_SIDES:
return self.market_break_starts_nanos
else:
return one_minute_earlier(self.market_break_starts_nanos)
@functools.lru_cache(maxsize=1) # cache last request
def _first_pm_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._LEFT_SIDES:
return self.market_break_ends_nanos
else:
return one_minute_later(self.market_break_ends_nanos)
def _minutes_as_series(self, nanos: np.ndarray, name: str) -> pd.Series:
"""Convert trading minute nanos to pd.Series."""
ser = pd.Series(pd.DatetimeIndex(nanos, tz="UTC"), index=self.all_sessions)
ser.name = name
return ser
@property
def all_first_minutes(self) -> pd.Series:
"""First trading minute of each session."""
return self._minutes_as_series(self._first_minute_nanos(), "first_minutes")
@property
def all_last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._minutes_as_series(self._last_minute_nanos(), "last_minutes")
@property
def all_last_am_minutes(self) -> pd.Series:
"""Last am trading minute of each session."""
return self._minutes_as_series(self._last_am_minute_nanos(), "last_am_minutes")
@property
def all_first_pm_minutes(self) -> pd.Series:
"""First pm trading minute of each session."""
return self._minutes_as_series(
self._first_pm_minute_nanos(), "first_pm_minutes"
)
# Properties covering all minutes.
def _all_minutes(self, side: str) -> pd.DatetimeIndex:
return pd.DatetimeIndex(
compute_all_minutes(
self.market_opens_nanos,
self.market_break_starts_nanos,
self.market_break_ends_nanos,
self.market_closes_nanos,
side,
),
tz="UTC",
)
@lazyval
def all_minutes(self) -> pd.DatetimeIndex:
"""All trading minutes."""
return self._all_minutes(self.side)
@lazyval
def all_minutes_nanos(self) -> np.ndarray:
"""All trading minutes as nanoseconds."""
return self.all_minutes.values.astype(np.int64)
# Calendar properties.
@property
def first_session(self) -> pd.Timestamp:
"""First calendar session."""
return self.all_sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last calendar session."""
return self.all_sessions[-1]
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of calendar's first session."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of calendar's last session."""
return self.closes[-1]
@property
def first_trading_minute(self) -> pd.Timestamp:
"""Calendar's first trading minute."""
return pd.Timestamp(self.all_minutes_nanos[0], tz="UTC")
@property
def last_trading_minute(self) -> pd.Timestamp:
"""Calendar's last trading minute."""
return pd.Timestamp(self.all_minutes_nanos[-1], tz="UTC")
def has_breaks(
self, start: Date | None = None, end: Date | None = None, _parse: bool = True
) -> bool:
"""Query if at least one session of a calendar has a break.
Parameters
----------
start : optional
Limit query to sessions from `start`.
end : optional
Limit query to sessions through `end`.
Returns
-------
bool
True if any calendar session, or session of any range defined
from `start` to `end`, has a break. False otherwise.
"""
if _parse and start is not None:
start = self._parse_session_range_start(start)
if _parse and end is not None:
end = self._parse_session_range_end(end)
return self.break_starts[start:end].notna().any()
@property
def late_opens(self) -> pd.DatetimeIndex:
"""Sessions that open later than the prevailing normal open.
NB. Prevailing normal open as defined by `open_times`.
"""
return self._late_opens
@property
def early_closes(self) -> pd.DatetimeIndex:
"""Sessions that close earlier than the prevailing normal close.
NB. Prevailing normal close as defined by `close_times`.
"""
return self._early_closes
# Methods that interrogate a given session.
def session_open(self, session_label: Session, _parse: bool = True) -> pd.Timestamp:
"""Return open time for a given session."""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return self.schedule.at[session_label, "market_open"].tz_localize(UTC)
def session_close(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return close time for a given session."""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return self.schedule.at[session_label, "market_close"].tz_localize(UTC)
def session_break_start(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT:
"""Return break-start time for a given session.
Returns pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
break_start = self.schedule.at[session_label, "break_start"]
if not pd.isnull(break_start):
break_start = break_start.tz_localize(UTC)
return break_start
def session_break_end(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT:
"""Return break-end time for a given session.
Returns pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
break_end = self.schedule.at[session_label, "break_end"]
if not pd.isnull(break_end):
break_end = break_end.tz_localize(UTC)
return break_end
def open_and_close_for_session(
self, session_label: Session, _parse: bool = True
) -> tuple[pd.Timestamp, pd.Timestamp]:
"""Return open and close times for a given session.
Parameters
----------
session_label
Session for which require open and close.
Returns
-------
tuple[pd.Timestamp, pd.Timestamp]
[0] Open time of `session_label`.
[1] Close time of `session_label`.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return (
self.session_open(session_label),
self.session_close(session_label),
)
def break_start_and_end_for_session(
self, session_label: Session, _parse: bool = True
) -> tuple[pd.Timestamp | pd.NaT, pd.Timestamp | pd.NaT]:
"""Return break-start and break-end times for a given session.
Parameters
----------
session_label
Session for which require break-start and break-end.
Returns
-------
tuple[pd.Timestamp | pd.NaT, pd.Timestamp | pd.NaT]
[0] Break-start time of `session_label`, or pd.NaT if no break.
            [1] Break-end time of `session_label`, or pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return (
self.session_break_start(session_label),
self.session_break_end(session_label),
)
def _get_session_minute_from_nanos(
self, session: Session, nanos: np.ndarray, _parse: bool
) -> pd.Timestamp:
if _parse:
session = parse_session(self, session, "session")
idx = self.all_sessions.get_loc(session)
return pd.Timestamp(nanos[idx], tz="UTC")
def session_first_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return first trading minute of a given session."""
nanos = self._first_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_last_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return last trading minute of a given session."""
nanos = self._last_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_last_am_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT: # Literal[pd.NaT] - when move to min 3.8
"""Return last trading minute of am subsession of a given session."""
nanos = self._last_am_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_first_pm_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT: # Literal[pd.NaT] - when move to min 3.8
"""Return first trading minute of pm subsession of a given session."""
nanos = self._first_pm_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_first_and_last_minute(
self,
session: Session,
_parse: bool = True,
    ) -> tuple[pd.Timestamp, pd.Timestamp]:
"""Return first and last trading minutes of a given session."""
if _parse:
session = parse_session(self, session, "session")
idx = self.all_sessions.get_loc(session)
first = pd.Timestamp(self._first_minute_nanos()[idx], tz="UTC")
last = pd.Timestamp(self._last_minute_nanos()[idx], tz="UTC")
return (first, last)
def session_has_break(self, session: Session, _parse: bool = True) -> bool:
"""Query if a given session has a break.
Parameters
----------
session
Session to query.
Returns
-------
bool
True if `session` has a break, false otherwise.
"""
if _parse:
session = parse_session(self, session, "session")
return pd.notna(self.session_break_start(session))
def next_session_label(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return session that immediately follows a given session.
Parameters
----------
session_label
Session whose next session is desired.
Raises
------
ValueError
If `session_label` is the last calendar session.
See Also
--------
date_to_session_label
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError as err:
if idx == len(self.schedule.index) - 1:
raise ValueError(
"There is no next session as this is the end"
" of the exchange calendar."
) from err
else:
raise
def previous_session_label(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return session that immediately preceeds a given session.
Parameters
----------
session_label
Session whose previous session is desired.
Raises
------
ValueError
If `session_label` is the first calendar session.
See Also
--------
date_to_session_label
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError(
"There is no previous session as this is the"
" beginning of the exchange calendar."
)
return self.schedule.index[idx - 1]
def minutes_for_session(
self, session_label: Session, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return trading minutes corresponding to a given session.
Parameters
----------
session_label
Session for which require trading minutes.
Returns
-------
        pd.DatetimeIndex
            Trading minutes for `session_label`.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
first, last = self.session_first_and_last_minute(session_label, _parse=False)
return self.minutes_in_range(start_minute=first, end_minute=last)
# Methods that interrogate a date.
def is_session(self, dt: Date, _parse: bool = True) -> bool:
"""Query if a date is a valid session.
Parameters
----------
dt
Date to be queried.
Return
------
bool
True if `dt` is a session, False otherwise.
Returns False if `dt` is earlier than the first calendar
session or later than the last calendar session.
"""
if _parse:
dt = parse_date(dt, "dt")
return dt in self.schedule.index
def date_to_session_label(
self,
date: Date,
direction: str = "none", # when min 3.8, Literal["none", "previous", "next"]
_parse: bool = True,
) -> pd.Timestamp:
"""Return a session label corresponding to a given date.
Parameters
----------
date
Date for which require session label. Can be a date that does not
represent an actual session (see `direction`).
direction : default: "none"
Defines behaviour if `date` does not represent a session:
"next" - return first session label following `date`.
"previous" - return first session label prior to `date`.
"none" - raise ValueError.
Returns
-------
pd.Timestamp (midnight UTC)
Label of the corresponding session.
See Also
--------
next_session_label
previous_session_label
"""
if _parse:
date = parse_date(date, "date")
if self.is_session(date):
return date
elif direction in ["next", "previous"]:
if direction == "previous" and date < self.first_session:
raise ValueError(
"Cannot get a session label prior to the first calendar"
f" session ('{self.first_session}'). Consider passing"
f" `direction` as 'next'."
)
if direction == "next" and date > self.last_session:
raise ValueError(
"Cannot get a session label later than the last calendar"
f" session ('{self.last_session}'). Consider passing"
f" `direction` as 'previous'."
)
idx = self.all_sessions.values.astype(np.int64).searchsorted(date.value)
if direction == "previous":
idx -= 1
return self.all_sessions[idx]
elif direction == "none":
raise ValueError(
f"`date` '{date}' does not represent a session. Consider passing"
" a `direction`."
)
else:
raise ValueError(
f"'{direction}' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
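    # Illustrative sketch (hypothetical dates):
    #
    #   cal.date_to_session_label("2021-01-01", direction="next")      # first session on/after that date
    #   cal.date_to_session_label("2021-01-01", direction="previous")  # last session on/before that date
    #   cal.date_to_session_label("2021-01-01")                        # ValueError if that date is not a session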
# Methods that interrogate a given minute (trading or non-trading).
def is_trading_minute(self, minute: Minute, _parse: bool = True) -> bool:
"""Query if a given minute is a trading minute.
Minutes during breaks are not considered trading minutes.
Note: `self.side` determines whether exchange will be considered
open or closed on session open, session close, break start and
break end.
Parameters
----------
minute
Minute being queried.
Returns
-------
bool
Boolean indicting if `minute` is a trading minute.
See Also
--------
is_open_on_minute
"""
if _parse:
minute = parse_timestamp(
minute, "minute", raise_oob=True, calendar=self
).value
else:
minute = minute.value
idx = self.all_minutes_nanos.searchsorted(minute)
numpy_bool = minute == self.all_minutes_nanos[idx]
return bool(numpy_bool)
def is_break_minute(self, minute: Minute, _parse: bool = True) -> bool:
"""Query if a given minute is within a break.
Note: `self.side` determines whether either, both or one of break
start and break end are treated as break minutes.
Parameters
----------
minute
Minute being queried.
Returns
-------
bool
Boolean indicting if `minute` is a break minute.
"""
if _parse:
minute = parse_timestamp(
minute, "minute", raise_oob=True, calendar=self
).value
else:
minute = minute.value
session_idx = np.searchsorted(self._first_minute_nanos(), minute) - 1
break_start = self._last_am_minute_nanos()[session_idx]
break_end = self._first_pm_minute_nanos()[session_idx]
        # NaT comparisons evaluate as False
numpy_bool = break_start < minute < break_end
return bool(numpy_bool)
def is_open_on_minute(
self, dt: Minute, ignore_breaks: bool = False, _parse: bool = True
) -> bool:
"""Query if exchange is open on a given minute.
Note: `self.side` determines whether exchange will be considered
open or closed on session open, session close, break start and
break end.
Parameters
----------
dt
Minute being queried.
ignore_breaks
Should exchange be considered open during any break?
True - treat exchange as open during any break.
False - treat exchange as closed during any break.
Returns
-------
bool
Boolean indicting if exchange is open on `dt`.
See Also
--------
is_trading_minute
"""
if _parse:
minute = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
else:
minute = dt
is_trading_minute = self.is_trading_minute(minute, _parse=_parse)
if is_trading_minute or not ignore_breaks:
return is_trading_minute
else:
# not a trading minute although should return True if in break
return self.is_break_minute(minute, _parse=_parse)
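    # Illustrative sketch (hypothetical timestamp; behaviour depends on the
    # concrete calendar's sessions and breaks):
    #
    #   ts = pd.Timestamp("2021-06-01 04:00", tz="UTC")
    #   cal.is_trading_minute(ts)                       # False if `ts` falls in a lunch break
    #   cal.is_open_on_minute(ts, ignore_breaks=True)   # True if `ts` is within the session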
def next_open(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return next open that follows a given minute.
If `dt` is a session open, the next session's open will be
returned.
Parameters
----------
dt
Minute for which to get the next open.
Returns
-------
pd.Timestamp
UTC timestamp of the next open.
"""
if _parse:
dt = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
try:
idx = next_divider_idx(self.market_opens_nanos, dt.value)
except IndexError:
if dt.tz_convert(None) >= self.opens[-1]:
raise ValueError(
"Minute cannot be the last open or later (received `dt`"
f" parsed as '{dt}'.)"
) from None
else:
raise
        return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import os
import pandas as pd
import xgboost as xgb
import gensim
from gensim.models import Doc2Vec
import xgb_model.config as config
import xgb_model.cut_utils as cut_utils
import xgb_model.string_distance as string_distance
import xgb_model.string_diff as string_diff
import xgb_model.n_grams as n_grams
import xgb_model.word2vec_utils as word2vec_utils
import xgb_model.doc2vec_infer as doc2vec_infer
# import logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def process(inpath, outpath="test_xgb.csv"):
# TODO : change inpath when upload to web
path_test_raw = inpath # config.path_test_raw
    # use the jieba tokenizer to cut the raw sentences
cut_utils.del_bom(path_test_raw)
test = pd.read_csv(path_test_raw, sep="\t", header=None, encoding="utf-8", names=["id", "s1", "s2"])
test = cut_utils.cut(test)
test.to_csv(config.path_test_cut, sep=str("\t"), index=False, header=False,
columns=["id", "cut_s1", "cut_s2"], encoding="utf-8")
# get string distance features
test = pd.read_csv(path_test_raw, sep="\t", header=None, encoding="utf-8", names=["id", "s1", "s2"])
test = string_distance.get_features(test)
col = [c for c in test.columns if c[:1] == "d"]
test.to_csv(config.path_test_string_distance, index=False, columns=col, encoding="utf-8")
# get string diff features
test = pd.read_csv(config.path_test_cut, sep="\t", header=None, encoding="utf-8", names=["id", "s1", "s2"])
test = string_diff.get_features(test)
col = [c for c in test.columns if c[:1] == "f"]
test.to_csv(config.path_test_string_diff, index=False, columns=col, encoding="utf-8")
# get n-grams features
test = pd.read_csv(config.path_test_cut, sep="\t", header=None, encoding="utf-8", names=["id", "s1", "s2"])
test["sentences"] = test["s1"] + "_split_tag_" + test["s2"]
test = n_grams.get_features(test)
col = [c for c in test.columns if c[:1] == "f"]
test.to_csv(config.path_test_gram_feature, index=False, columns=col, encoding="utf-8")
# get word2vec features
test = pd.read_csv(config.path_test_cut, sep="\t", header=None, encoding="utf-8", names=["id", "s1", "s2"])
test = word2vec_utils.get_features(test)
col = [c for c in test.columns if c[:1] == "z"]
test.to_csv(config.path_test_word2vec, index=False, columns=col, encoding="utf-8")
# get doc2vec features
model_saved_file = "xgb_model/model/doc2vec_model4"
doc2vec_model = Doc2Vec.load(model_saved_file)
test = pd.read_csv(config.path_test_cut, sep="\t", encoding="utf-8", header=None, names=["id", "s1", "s2"])
test = doc2vec_infer.make_feature(test, loaded_model=doc2vec_model)
col = [c for c in test.columns if c[:1] == "z"]
test.to_csv(config.path_test_doc2vec4, index=False, columns=col, encoding="utf-8")
# columns
origincol = ["id", "s1", "s2"]
copycol2 = ['f_1dis', 'f_2word_dis', 'f_2char_dis', 'f_3word_dis', 'f_3char_dis',
'f_1dis2', 'f_2word_dis2', 'f_2char_dis2', 'f_3word_dis2', 'f_3char_dis2',
'f_1dis3', 'f_2word_dis3', 'f_2char_dis3', 'f_3word_dis3', 'f_3char_dis3',
'f_1dis4', 'f_2word_dis4', 'f_2char_dis4', 'f_3word_dis4', 'f_3char_dis4']
copycol12 = ['z3_cosine', 'z3_manhatton', 'z3_euclidean', 'z3_pearson', 'z3_spearman', 'z3_kendall']
copycol13 = ['f_total_unique_words', 'f_wc_diff', 'f_wc_ratio', 'f_wc_diff_unique',
'f_wc_ratio_unique', 'f_char_diff', 'f_char_ratio']
copycol18 = ["d_nlevenshtein_1", "d_nlevenshtein_2", "d_jaro_winkler", "d_jaccard"]
copycol19 = ["z_tfidf_cos_sim",
"z_w2v_bow_dis_cosine", "z_w2v_bow_dis_euclidean", "z_w2v_bow_dis_minkowski",
"z_w2v_bow_dis_cityblock", "z_w2v_bow_dis_canberra",
"z_w2v_tfidf_dis_cosine", "z_w2v_tfidf_dis_euclidean", "z_w2v_tfidf_dis_minkowski",
"z_w2v_tfidf_dis_cityblock", "z_w2v_tfidf_dis_canberra",
"z_glove_bow_dis_cosine", "z_glove_bow_dis_euclidean", "z_glove_bow_dis_minkowski",
"z_glove_bow_dis_cityblock", "z_glove_bow_dis_canberra",
"z_glove_tfidf_dis_cosine", "z_glove_tfidf_dis_euclidean", "z_glove_tfidf_dis_minkowski",
"z_glove_tfidf_dis_cityblock", "z_glove_tfidf_dis_canberra"]
test_raw = pd.read_csv(path_test_raw, sep="\t", names=origincol, encoding="utf-8")
test_feature2 = pd.read_csv(config.path_test_gram_feature, usecols=copycol2, dtype=float, encoding="utf-8")
test_feature12 = pd.read_csv(config.path_test_doc2vec4, usecols=copycol12, dtype=float, encoding="utf-8")
test_feature13 = pd.read_csv(config.path_test_string_diff, usecols=copycol13, dtype=float, encoding="utf-8")
test_feature18 = pd.read_csv(config.path_test_string_distance, usecols=copycol18, dtype=float, encoding="utf-8")
    test_feature19 = pd.read_csv(config.path_test_word2vec, usecols=copycol19, dtype=float, encoding="utf-8")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 13:30:39 2020
@author: base
"""#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 09:58:51 2020
@author: base
"""
from keras_vggface_TF.vggfaceTF import VGGFace
from keras_vggface_TF.utils import preprocess_input
print('using tf.keras')
#import tensorflow as tf
import tflite_runtime.interpreter as tflite
import os
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from numpy import asarray
from numpy import expand_dims
from scipy import stats
import time
#get_ipython().run_line_magic('matplotlib', 'qt')
"""
#-------------------------------------------------------------------------
# Define Variables
"""
basepath=Path.cwd() / 'Free_com_Celeb_St'
faultpath=basepath / 'test_faulty'
model= 'quantized_modelh5-15'
modelpath= basepath / (model + '.tflite')
print(modelpath)
path=basepath / 'Free_com_Celeb_croped'
print(path)
#path=Path.cwd() / 'Free_com_Celeb_St/test_faulty'
CelebFolders = next(os.walk(path))[1]
print(CelebFolders)
EMBEDS = pd.read_csv('/home/base/Documents/Git/Projekte/CelebFaceMatcher/Embeddings/EMBEDDINGS_tf220_all_int8.csv')
"""
Crawl the list of posts in a Naver blog category.
"""
import requests
import json
import re
from dateutil.parser import parse as date_parse
import pandas as pd
from pandas import DataFrame
import time
from urllib.parse import unquote_plus
total_count = 0
class LazyDecoder(json.JSONDecoder):
"""
https://stackoverflow.com/questions/65910282/jsondecodeerror-invalid-escape-when-parsing-from-python
    Workaround for "JSONDecodeError: Invalid \escape" when parsing.
"""
def decode(self, s, **kwargs):
regex_replacements = [
(re.compile(r'([^\\])\\([^\\])'), r'\1\\\\\2'),
(re.compile(r',(\s*])'), r'\1'),
]
for regex, replacement in regex_replacements:
s = regex.sub(replacement, s)
return super().decode(s, **kwargs)
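# Illustrative sketch (made-up payload): a lone "\d" is not a valid JSON escape,
# so plain json.loads would raise; LazyDecoder doubles the backslash first.
#
#   json.loads(r'{"pattern": "a\d+"}', cls=LazyDecoder)   # -> {'pattern': 'a\\d+'}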
def read_list_in_category(blog_id: str, category_no, include_child=False) -> DataFrame:
"""
    Fetch the list of posts in a category.
    :param blog_id: blog ID
    :param category_no: category number
    :param include_child: whether to include child categories (default: False)
:return: DataFrame
"""
per_page = 30
df = read_list_in_category_per_page(blog_id, category_no,
current_page=1, count_per_page=per_page, include_child_category=include_child)
page_total_count = count_page(per_page)
if page_total_count >= 2:
for current_page in range(2, page_total_count+1):
current_df = read_list_in_category_per_page(blog_id, category_no,
current_page=current_page,
count_per_page=per_page, include_child_category=include_child)
            df = pd.concat([df, current_df], ignore_index=True)
    return df
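# Illustrative sketch (blog id and category number are placeholders):
#
#   posts = read_list_in_category("example_blog", 6, include_child=True)
#   posts.to_csv("posts.tsv", sep="\t", index=False)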
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zohaib
This script merges Pangolin report (assigned lineages) with the
metadata file which allows data extraction and filtering based on
lineage information in nf-ncov-voc workflow.
"""
import argparse
import pandas as pd
import csv
def parse_args():
parser = argparse.ArgumentParser(
description='Merges pangolin output report and metadata file '
'using isolate as key')
parser.add_argument('--metadata', type=str, default=None,
help='Metadata file (.tsv) format')
parser.add_argument('--pangolin', type=str, default=None,
help='Pangolin report (.csv) format')
parser.add_argument('--output', type=str, default=None,
help='Metadata file (.tsv) format')
return parser.parse_args()
def write_metadata(dataframe):
dataframe.to_csv(args.output,
sep="\t",
quoting=csv.QUOTE_NONE,
index=False, header=True)
if __name__ == '__main__':
args = parse_args()
    metadata_df = pd.read_csv(args.metadata, sep="\t")
"""
Recipe Linter
QC checks (linter) for recipes, returning a TSV of issues identified.
The strategy here is to use simple functions that do a single check on
a recipe. When run on a single recipe it can be used for linting new
contributions; when run on all recipes it helps highlight entire classes of
problems to be addressed.
See the `lint_functions` module for these.
After writing the function, register it in the global ``registry`` dict,
``lint_functions.registry``.
The output is a TSV where the "info" column contains the dicts returned by
each check function, and this column is expanded into multiple extra colums.
While this results in a lot of NaNs, it makes it easy to drop non-interesting
cases with pandas, e.g.,
.. code:: python
recipes_with_missing_tests = df.dropna(subset=['no_tests'])
or
.. code:: python
def not_in_bioconda(x):
if not isinstance(x, set):
return np.nan
res = set(x).difference(['bioconda'])
if len(res):
return(res)
return np.nan
df['other'] = df.exists_in_channel.apply(not_in_bioconda)
other_channels = df[['recipe', 'other']].dropna()
TODO:
~~~~~
- check version and build number against master branch. I think there's stuff
in bioconductor updating to handle this sort of thing. Also bioconda_utils
has utils for checking against master branch.
- if version changed, ensure build number is 0
- if version unchanged, ensure build number incremented
- currently we don't pay attention to py27/py3. It would be nice to handle
that.
- how to define valid licenses?
(conda_build.metadata.ensure_valid_license_family is for family)
- gcc/llvm have their respective preprocessing selectors
- excessive comments (from skeletons?)
"""
import os
import re
import itertools
from collections import defaultdict, namedtuple
from typing import List
import pandas as pd
import numpy as np
import ruamel_yaml as yaml
from . import utils
from . import lint_functions
from .recipe import Recipe, RecipeError
import logging
logger = logging.getLogger(__name__)
usage = """
Perform various checks on recipes.
"""
def select_recipes(packages, git_range, recipe_folder, config_filename, config, force):
if git_range:
modified = utils.modified_recipes(git_range, recipe_folder, config_filename)
if not modified:
logger.info('No recipe modified according to git, exiting.')
return []
# Recipes with changed `meta.yaml` or `build.sh` files
changed_recipes = [
os.path.dirname(f) for f in modified
if os.path.basename(f) in ['meta.yaml', 'build.sh'] and
os.path.exists(f)
]
logger.info(
'Recipes to consider according to git: \n{}'.format(
'\n '.join(changed_recipes)))
else:
changed_recipes = []
blacklisted_recipes = utils.get_blacklist(config['blacklists'], recipe_folder)
selected_recipes = list(utils.get_recipes(recipe_folder, packages))
_recipes = []
for recipe in selected_recipes:
stripped = os.path.relpath(recipe, recipe_folder)
if stripped in blacklisted_recipes and recipe in changed_recipes:
logger.warning('%s is blacklisted but also has changed. Consider '
'removing from blacklist if you want to build it', recipe)
if force:
_recipes.append(recipe)
logger.debug('forced: %s', recipe)
continue
if stripped in blacklisted_recipes:
logger.debug('blacklisted: %s', recipe)
continue
if git_range:
if recipe not in changed_recipes:
continue
_recipes.append(recipe)
logger.debug(recipe)
logger.info('Recipes to lint:\n{}'.format('\n '.join(_recipes)))
return _recipes
class LintArgs(namedtuple('LintArgs', (
'exclude', 'registry',
))):
"""
exclude : list
List of function names in ``registry`` to skip globally. When running on
CI, this will be merged with anything else detected from the commit
message or LINT_SKIP environment variable using the special string
"[skip lint <function name> for <recipe name>]". While those other
mechanisms define skipping on a recipe-specific basis, this argument
can be used to skip tests for all recipes. Use sparingly.
registry : list or tuple
List of functions to apply to each recipe. If None, defaults to
`bioconda_utils.lint_functions.registry`.
"""
def __new__(cls, exclude=None, registry=None):
return super().__new__(cls, exclude, registry)
def lint(recipes: List[str], lint_args, basedir="recipes"):
"""
Parameters
----------
recipes : list
List of recipes to lint
lint_args : LintArgs
"""
exclude = lint_args.exclude
registry = lint_args.registry
if registry is None:
registry = lint_functions.registry
skip_dict = defaultdict(list)
commit_message = ""
if 'LINT_SKIP' in os.environ:
# Allow overwriting of commit message
commit_message = os.environ['LINT_SKIP']
else:
# Obtain commit message from last commit.
commit_message = utils.run(
['git', 'log', '--format=%B', '-n', '1'], mask=False
).stdout
# For example the following text in the commit message will skip
# lint_functions.uses_setuptools for recipe argparse:
#
# [ lint skip uses_setuptools for argparse ]
skip_re = re.compile(
r'\[\s*lint skip (?P<func>\w+) for (?P<recipe>.*?)\s*\]')
to_skip = skip_re.findall(commit_message)
if exclude is not None:
# exclude arg is used to skip test for *all* packages
to_skip += list(itertools.product(exclude, recipes))
for func, recipe in to_skip:
skip_dict[recipe].append(func)
hits = []
for recipe in sorted(recipes):
logger.debug("Linting: %s", recipe)
try:
recipe_obj = Recipe.from_file(basedir, recipe)
except RecipeError as exc:
result = {'load_recipe': str(exc), 'fix': str(exc)}
line = getattr(exc, 'line', None)
if line is not None:
result['start_line'] = result['end_line'] = line
hits.append({
'recipe': recipe,
'check': 'load_recipe',
'severity': 'ERROR',
'info': result
})
continue
# Since lint functions need a parsed meta.yaml, checking for parsing
# errors can't be a lint function.
#
# TODO: do we need a way to skip this the same way we can skip lint
# functions? I can't think of a reason we'd want to keep an unparseable
# YAML.
metas = []
try:
for platform in ["linux", "osx"]:
config = utils.load_conda_build_config(platform=platform, trim_skip=False)
metas.extend(utils.load_all_meta(recipe, config=config, finalize=False))
except (
yaml.scanner.ScannerError, yaml.constructor.ConstructorError, SystemExit
) as exc:
hits.append({
'recipe': recipe,
'check': 'parse_error',
'severity': 'ERROR',
'info': {'parse_error': str(exc), 'fix': str(exc), '_exc': type(exc),
'test1': getattr(exc, 'code', None), 'test2': exc.args }
})
continue
# skips defined in commit message
skip_for_this_recipe = set(skip_dict[recipe])
# skips defined in meta.yaml
for meta in metas:
persistent = meta.get_value('extra/skip-lints', [])
skip_for_this_recipe.update(persistent)
for func in registry:
if func.__name__ in skip_for_this_recipe:
skip_sources = [
('Commit message', skip_dict[recipe]),
('skip-lints', persistent),
]
for source, skips in skip_sources:
if func.__name__ not in skips:
continue
logger.info('%s defines skip lint test %s for recipe %s',
source, func.__name__, recipe)
continue
result = func(recipe_obj, metas)
if result:
hits.append(
{'recipe': recipe,
'check': func.__name__,
'info': result})
if hits:
report = pd.DataFrame(hits)[['recipe', 'check', 'info']]
# expand out the info into more columns
info = pd.DataFrame(list(report['info'].values))
report = pd.concat((report, info), axis=1)  # api: pandas.concat
"""
Issue 01
===========================
"""
# Import
import pandas as pd
# DataBlend library
from datablend.core.blend.blender import Blender
from datablend.core.widgets.format import FullTemplateWidget
from datablend.core.widgets.format import RenameWidget
# ------------------------
# Constants
# ------------------------
# Template
template = [
{'from_name': 'StudyNo', 'to_name': 'study_number'},
{'from_name': 'DateEnrol', 'to_name': 'date_enrol'},
{'from_name': 'TimeEnrol', 'to_name': 'time_enrol'},
{'from_name': 'DateIllness', 'to_name': 'date_illness'},
{'from_name': 'TimeIllness', 'to_name': 'time_illness'},
{'from_name': 'MucosalBlHist',
'to_name': 'bleeding_mucosal',
'to_replace': {True:1, False: 2},
'timestamp': 'date_onset'},
{'from_name': 'SkinBlHist',
'to_name': 'bleeding_skin',
'to_replace': {True: 1, False: 2},
'timestamp': 'date_onset'},
{'from_name': 'MucosalBlExam',
'to_name': 'bleeding_mucosal',
'to_replace': {True: True, False: False},
'timestamp': 'date_enrolment'},
{'from_name': 'SkinBlExam',
'to_name': 'bleeding_skin',
'to_replace': {True: True, False: False},
'timestamp': 'date_enrolment'},
{'from_name': 'date_enrolment',
'to_name': 'date_enrolment',
'datetime_date': 'DateEnrol',
'datetime_time': 'TimeEnrol',
'event': 'event_enrolment'},
{'from_name': 'date_onset',
'to_name': 'date_onset',
'datetime_date': 'DateIllness',
'datetime_time': 'TimeIllness',
'event': 'event_onset'}
]
# Data
data = [
{'StudyNo': '1-0016',
'DateEnrol': '10/25/10 12:00 AM',
'TimeEnrol': '11:05',
'DateIllness': '10/24/10 12:00 AM',
'TimeIllness': '14:00',
'MucosalBlHist': 1,
'SkinBlHist': 1,
'MucosalBlExam': False,
'SkinBlExam': False},
{'StudyNo': '1-0099',
'DateEnrol': '12/3/10 12:00 AM',
'TimeEnrol': '15:10',
'DateIllness': '12/3/10 12:00 AM',
'TimeIllness': '14:00',
'MucosalBlHist': 2,
'SkinBlHist': 2,
'MucosalBlExam': False,
'SkinBlExam': True},
]
# ISSUE 01: TO FIX
# ----------------
# It is necessary here to convert the template
# and the data to dataframes before passing to
# the blender object. This should be done auto,
# just check whether it is a list, use pandas and
# verify that it is a valid BlenderTemplate
template = pd.DataFrame(template)  # api: pandas.DataFrame
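# A possible shape for that automatic conversion (sketch only; the helper
# name and where it would live inside Blender/BlenderTemplate validation
# are assumptions, not DataBlend API):
def _as_dataframe(obj):
    """Accept a list of records or a DataFrame and return a DataFrame."""
    return pd.DataFrame(obj) if isinstance(obj, list) else obj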
### HI_Waterbird_Repro_DataJoinMerge_v3.py
### Version: 5/7/2020
### Author: <NAME>, <EMAIL>, (503) 231-6839
### Abstract: This Python 3 script pulls data from the HI Waterbirds Reproductive Success ArcGIS Online feature service and performs joins and merges to result in a combined CSV dataset.
import arcpy
import pandas as pd
from arcgis import GIS
import time, os, fnmatch, shutil
### ArcGIS Online stores date-time information in UTC by default. This function converts time zones and can be used to convert from UTC ("UTC") to Hawaii standard time ("US/Hawaii"; UTC -10).
from datetime import datetime
from pytz import timezone
def change_timezone_of_field(df, source_date_time_field, new_date_time_field, source_timezone, new_timezone):
"""Returns the values in *source_date_time_field* with its timezone converted to a new timezone within a new field *new_date_time_field*
: param df: The name of the spatially enabled or pandas DataFrame containing datetime fields
: param source_date_time_field: The name of the datetime field whose timezone is to be changed
: param new_date_time_field: The name of the new datetime field
: param source_timezone: The name of the source timezone
: param new_timezone: The name of the converted timezone. For possible values, see https://gist.github.com/heyalexej/8bf688fd67d7199be4a1682b3eec7568
"""
# Define the source timezone in the source_date_time_field
df[source_date_time_field] = df[source_date_time_field].dt.tz_localize(source_timezone)
# Convert the datetime in the source_date_time_field to the new timezone in a new field called new_date_time_field
df[new_date_time_field] = df[source_date_time_field].dt.tz_convert(new_timezone)
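# Example usage (the field names below are illustrative, not actual columns
# of the feature service):
# change_timezone_of_field(sedfNestLocation, 'CreationDate', 'CreationDate_HST',
#                          'UTC', 'US/Hawaii')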
### Allow authentication via login to U.S. Fish & Wildlife Service ArcGIS Online account via ArcGIS Pro
gis = GIS("pro")
### Enter path for local file saving
# the next line reads out_workspace from the ArcGIS tool interface; to run stand-alone, comment it out and uncomment the hard-coded path below
out_workspace = arcpy.GetParameterAsText(0)
# out_workspace = "C:/Users/kso/Desktop/"
### Paths to ArcGIS Online data
# To populate Service ItemId, go to Feature Service webpage and in bottom right corner, click on the View link.
# Current Feature Service webpage: https://fws.maps.arcgis.com/home/item.html?id=55275a4a0dc54c1c8dcab604b65a88f0
ServiceItemID = gis.content.get("55275a4a0dc54c1c8dcab604b65a88f0")
### There are separate methods for pulling spatial versus non-spatial data into Python. Spatial layers will become Spatially Enabled DataFrame objects. Non-spatial data will become regular pandas DataFrame objects.
## Define variables pointing to spatial layers
NestLocationLyr = ServiceItemID.layers[0]
BroodLocationLyr = ServiceItemID.layers[1]
CountUnitsLyr = ServiceItemID.layers[2]
## Create Spatially Enabled DataFrame objects
sedfNestLocation = pd.DataFrame.spatial.from_layer(NestLocationLyr)
sedfBroodLocation = pd.DataFrame.spatial.from_layer(BroodLocationLyr)  # api: pandas.DataFrame.spatial.from_layer
import pandas as pd
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from bs4 import BeautifulSoup
import random
import time
import logging
def calculate_pagecount(total_items):
if int(total_items)%100 ==0:
pages = int(total_items)//100
else:
pages = (int(total_items)//100)+1
return pages
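# e.g. calculate_pagecount(250) -> 3 and calculate_pagecount(300) -> 3,
# since each API page holds up to 100 ratings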
retry_strategy = Retry(
total=10,
backoff_factor=1,
respect_retry_after_header=True,
status_forcelist=[429, 413, 502, 500],
)
adapter = HTTPAdapter(max_retries=retry_strategy)
session = requests.Session()
session.mount("https://", adapter)
session.mount("http://", adapter)
# use the session object to make requests
#response = session.get(url)
logging.basicConfig(filename='api_scraper_2.0.log',
level=logging.INFO)
boardgames_raw = pd.read_csv('../data/boardgames.csv')
boardgames = pd.read_csv('../data/boardgames_extend.csv')
boardgames['id'] = boardgames_raw['id']
boardgames = boardgames.set_index('id')
#boardgames['categories'] = None
#boardgames['mechanics'] = None
#boardgames['family'] = None
#boardgames['expansions'] = None
#boardgames['integrations'] = None
#boardgames['designers'] = None
#boardgames['publishers'] = None
restart = True
restart_page = 40
#restart_id = 463
step_width = 100
start_id = 245934
start_index = boardgames.index.tolist().index(start_id)
end_index = start_index + step_width
boardgame_list = []
while start_index < len(boardgames.index):
boardgame_list = boardgames.index[start_index:end_index].astype('str').tolist()
ID = ','.join(boardgame_list)
logging.info(f'boardgame ids: {ID}')
page = 1
# request page 1 of game overview with user ratings and comments
api_adress = f"https://www.boardgamegeek.com/xmlapi2/thing?id={ID}&ratingcomments=1&page={page}"
response = session.get(api_adress)
logging.info(f'status {response.status_code} for {api_adress}')
xml = response.text
soup = BeautifulSoup(xml, 'xml')
maxitems = 0
for item in soup.find_all('item'):
boardgame_id = int(item.get('id'))
ratings_page = int(item.find('comments').get('page'))
logging.info(f'boardgame id: {boardgame_id}, page: {ratings_page}')
total_items = int(item.find('comments').get('totalitems'))
if total_items > maxitems:
maxitems = total_items
if restart == False:
categories = [link.get('value') for link in item.find_all('link', attrs={'type':'boardgamecategory'})]
boardgames.loc[boardgame_id,'categories'] = ', '.join(categories)
mechanics = [link.get('value') for link in item.find_all('link', attrs={'type':'boardgamemechanic'})]
boardgames.loc[boardgame_id,'mechanics'] = ', '.join(mechanics)
family = [link.get('value') for link in item.find_all('link', attrs={'type':'boardgamefamily'})]
boardgames.loc[boardgame_id,'family'] = ', '.join(family)
expansions = [link.get('value') for link in item.find_all('link', attrs={'type':'boardgameexpansion'})]
boardgames.loc[boardgame_id,'expansions'] = ', '.join(expansions)
integrations = [link.get('value') for link in item.find_all('link', attrs={'type':'boardgameintegration'})]
boardgames.loc[boardgame_id,'integrations'] = ', '.join(integrations)
designers = [link.get('value') for link in item.find_all('link', attrs={'type':'boardgamedesigner'})]
boardgames.loc[boardgame_id,'designers'] = ', '.join(designers)
publishers = [link.get('value') for link in item.find_all('link', attrs={'type':'boardgamepublisher'})]
boardgames.loc[boardgame_id,'publishers'] = ', '.join(publishers)
# update number of voters
boardgames.loc[boardgame_id, 'num_voters'] = total_items
boardgames.to_csv('../data/boardgames_extend.csv', index=False)
#initialize lists and dataframe
boardgame_ids = []
user_ratings = []
users = []
user_comments = []
ratings = pd.DataFrame()  # api: pandas.DataFrame
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.spatial import distance
from matplotlib import rcParams
from numpy.random import seed
seed(123)
from scipy.stats.mstats import spearmanr
from scipy.stats.mstats import pearsonr
metric = 'euclidean'
method = 'ward'
test = False
compare = False
if not test:
plt.switch_backend('agg')
# Plot from scratch
def plot_RSA(output_dir, categories, layer= None, layer_name='lstm_3', amount_sent=None):
RSA = []
start = amount_sent
# corr_method = 'Pearson'
df0= layer[0:amount_sent] #sentences from first category
df0 = pd.DataFrame.transpose(df0) #result: vector_len x amount_sent, eg, 100x1000
# Create pair-wise correlation matrix between sentences from first category and second category
print('making RSA_arr...')
for cat in range(len(categories)):
row = []
for sent in range(start,layer.shape[0],amount_sent): #sentences from second category
df = layer[sent:sent+amount_sent]
df = pd.DataFrame.transpose(df)
df0.columns = df.columns
df_corr = df.corrwith(df0, axis=0, drop=False) #corr sentences from two categories TODO: Spearman, will make a difference
# df_mean1 = spearmanr(df.values.flatten(), df0.values.flatten())[0]
# df_mean2 = pearsonr(np.array(df).flatten('F'), np.array(df0).flatten('F'))[0]
df_mean = df_corr.mean().mean() #mean of correlations between sentences of two categories.
row.append(df_mean) # single value
df0 = layer[start:start+ amount_sent]
df0 = pd.DataFrame.transpose(df0)
start +=amount_sent
RSA.append(row)
# insert 0s in the begining
RSA_copy = RSA[:]
for cat in range(len(categories)):
zeros = [0] * (cat + 1)
RSA_copy[cat] = zeros + RSA_copy[cat]
# Create diagonal by correlating with itself
RSA_copy2 = RSA_copy[:]
start = 0
cat = 0
for sent in range(start, layer.shape[0], amount_sent):
sentences = layer[sent:sent + amount_sent]
df = pd.DataFrame(sentences)
df = pd.DataFrame.transpose(df)
df_corr = df.corrwith(df, axis=0) #TODO: Spearman, but won't make a difference
df_mean = df_corr.mean().mean()
RSA_copy2[cat][cat] = df_mean
cat += 1
print('Done making RSA_arr.')
RSA_arr = np.array(RSA_copy)
# copy upper triangle to bottom triangle
for i in range(len(categories)):
for j in range(i, len(categories)):
RSA_arr[j][i] = RSA_arr[i][j]
df = pd.DataFrame(RSA_arr, columns=categories, index=categories)
df.to_csv(output_dir +'RSA_arr_' + layer_name, index=False)
# np.save(output_dir +'RSA_arr_' + layer_name, RSA_arr)
correlations = pd.DataFrame(RSA_arr[:], columns=categories, index=categories)
correlations_array = np.asarray(RSA_arr)
row_linkage = linkage(distance.pdist(correlations_array, metric=metric), method=method,
optimal_ordering=True)
col_linkage = linkage(distance.pdist(correlations_array.T, metric=metric), method=method,
optimal_ordering=True)
sns.set(font_scale=0.5)
cg = sns.clustermap(correlations, row_linkage=row_linkage, col_linkage=col_linkage, cmap="RdBu_r", vmin=-1.,
vmax=1., cbar_kws={"ticks": [-1., -0.5, 0.0, 0.5, 1.]})
# cg = sns.clustermap(df, method='ward', cmap="RdBu_r",vmin = -0.8, vmax=0.8, cbar_kws={"ticks":[-0.8,-0.4,0.0, 0.4, 0.8]})
#
plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
cg.savefig(output_dir + 'RSA_ward_'+ layer_name + '.eps', format='eps', dpi=100)
rcParams['lines.linewidth'] = 0.7
plt.figure(figsize=(9, 8))
dendrogram(row_linkage, orientation='left', labels=np.array(categories),
leaf_font_size=2)
plt.savefig(output_dir + 'dendrogram_'+ layer_name + '.eps', format='eps', dpi=100)
return
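# Example call (illustrative names/shapes): `layer` is expected to stack the
# sentence representations category by category, i.e. len(categories) blocks
# of `amount_sent` rows each.
# plot_RSA('results/', categories, layer=layer_activations,
#          layer_name='lstm_3', amount_sent=1000)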
# Plot from saved RSM
def plot_rsm(path_to_dir, RSA_arr, categories, layer_name='lstm_1'):
# layer_name is just used to define the output layer name
RSA_arr1 = np.load(path_to_dir+RSA_arr)
df = pd.DataFrame(RSA_arr1, columns=categories, index=categories)  # api: pandas.DataFrame
# This script performs the time series analyses on the cleaned twitter data and generates wordclouds
# Importing required modules
import pandas as pd
import numpy as np
from pylab import rcParams
from statsmodels.tsa.api import VAR
from statsmodels.tsa.stattools import adfuller
from matplotlib import pyplot as plt
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from wordcloud import WordCloud
# Specifying your username -- you will need to update filepaths according to your setup
username = ''
# Reading in the data
doge = pd.read_csv('C:/Users/' + username + '/Documents/Data/dogefather/dogecoin.csv')
s1snl = pd.read_csv('C:/Users/' + username + '/Documents/Data/dogefather/sentiment_snl.csv')
s2snl = pd.read_csv('C:/Users/' + username + '/Documents/Data/dogefather/sentiment2_snl.csv')
s1doge = pd.read_csv('C:/Users/' + username + '/Documents/Data/dogefather/sentiment_doge.csv')  # api: pandas.read_csv
import numpy as np
import pandas as pd
from itertools import chain
from tools.tables import read_csv
def merge_grants(output):
print('Merging all grant data')
grant = read_csv(f'{output}/grant_grant.csv').set_index('patnum')
firm = read_csv(f'{output}/grant_firm.csv').set_index('patnum')
cite = read_csv(f'{output}/cite_stats.csv').set_index('patnum')
assign = read_csv(f'{output}/assign_stats.csv').set_index('patnum')
maint = read_csv(f'{output}/maint.csv').set_index('patnum')
grant = grant.join(firm)
grant = grant.join(cite)
grant = grant.join(assign)
grant = grant.join(maint)
fill_cols = ['n_cited', 'n_citing', 'n_self_cited', 'n_trans', 'claims']
grant[fill_cols] = grant[fill_cols].fillna(0).astype(int)
int_cols = ['firm_num', 'last_maint']
grant[int_cols] = grant[int_cols].astype('Int64')
grant.drop('abstract', axis=1).to_csv(f'{output}/grant_info.csv')
grant[['title', 'abstract']].to_csv(f'{output}/grant_text.csv')
def generate_firmyear(output, compustat=False):
print('Generating all firm-years')
total = []
# patent applications
apply = read_csv(f'{output}/apply_apply.csv', usecols=['appnum', 'appdate'])
apply_firm = read_csv(f'{output}/apply_firm.csv').set_index('appnum')
apply = apply.join(apply_firm, on='appnum', how='inner')
apply['appyear'] = apply['appdate'].str.slice(0, 4).astype(int)
apply_fy = apply.groupby(['firm_num', 'appyear']).size().rename('n_apply')
apply_fy = apply_fy.rename_axis(index={'appyear': 'year'})
total.append(apply_fy)
# patent grants
grant = read_csv(f'{output}/grant_info.csv', usecols=['patnum', 'pubdate', 'n_cited', 'n_citing', 'n_self_cited'])
grant_firm = read_csv(f'{output}/grant_firm.csv').set_index('patnum')
grant = grant.dropna(subset=['pubdate'], axis=0)
grant['pubyear'] = grant['pubdate'].str.slice(0, 4).astype(int)
grant = grant.join(grant_firm, on='patnum', how='inner')
grant_groups = grant.groupby(['firm_num', 'pubyear'])
grant_fy = grant_groups[['n_cited', 'n_citing', 'n_self_cited']].sum()
grant_fy['n_grant'] = grant_groups.size()
grant_fy = grant_fy.rename_axis(index={'pubyear': 'year'})
total.append(grant_fy)
# patent assignments
assign = read_csv(f'{output}/assign_use.csv', usecols=['assignid', 'execdate'])
assignor_firm = read_csv(f'{output}/assignor_firm.csv').set_index('assignid')
assignee_firm = read_csv(f'{output}/assignee_firm.csv').set_index('assignid')
assign = assign.join(assignor_firm.add_prefix('assignor_'), on='assignid', how='inner')
assign = assign.join(assignee_firm.add_prefix('assignee_'), on='assignid', how='inner')
assign['execyear'] = assign['execdate'].str.slice(0, 4).astype(int)
assignor_fy = assign.groupby(['assignor_firm_num', 'execyear']).size().rename('n_source')
assignor_fy = assignor_fy.rename_axis(index={'assignor_firm_num': 'firm_num', 'execyear': 'year'})
total.append(assignor_fy)
assignee_fy = assign.groupby(['assignee_firm_num', 'execyear']).size().rename('n_dest')
assignee_fy = assignee_fy.rename_axis(index={'assignee_firm_num': 'firm_num', 'execyear': 'year'})
total.append(assignee_fy)
# compustat firms
if compustat:
compu = read_csv(f'{output}/compustat.csv')
compu_firm = read_csv(f'{output}/compustat_firm.csv').set_index('compid')
compu = compu.join(compu_firm, on='compid', how='inner')
compu_fy = compu.groupby(['firm_num', 'year'])[['assets', 'capx', 'cash', 'cogs', 'deprec', 'income', 'employ', 'intan', 'debt', 'revenue', 'sales', 'rnd', 'fcost', 'mktval']].sum()
ind_info = compu.groupby(['firm_num', 'year'])[['naics', 'sic']].first()
compu_fy = compu_fy.join(ind_info)
total.append(compu_fy)
# comprehensive
total = pd.concat(total, axis=1)  # api: pandas.concat
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import requests
import pandas as pd
import datetime
import numpy as np
import os
from matplotlib import pyplot
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import calendar
import psycopg2
def get_extracted_data_from_restful(ms, year, parcel_id, api_user, api_pass, tstype,
ptype):
was_error = False
if ms == "be-wa":
ms = "bewa"
if ptype == "":
url = "http://cap.users.creodias.eu/query/parcelTimeSeries?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&tstype=" + tstype + "&scl=True&ref=True"
else:
url = "http://cap.users.creodias.eu/query/parcelTimeSeries?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&ptype=" + ptype + "&tstype=" + tstype + "&scl=True&ref=True"
print(url)
# response = requests.get(url, auth=(api_user, api_pass))
try:
response = requests.get(url, auth=(api_user, api_pass))
print(response)
if response.status_code == 404 or response.status_code == 500:
was_error = True
df = pd.DataFrame()
else:
df = pd.read_json(response.text)
if not df.empty:
if tstype == "c6":
df['date_part']=df['date_part'].map(lambda e: datetime.datetime.fromtimestamp(e))
df['orbit'] = df['date_part'].apply(lambda s: 'D' if s.hour < 12 else 'A')
df['date'] = df['date_part'].apply(lambda s: s.date())
if tstype == "bs":
df['date_part']=df['date_part'].map(lambda e: datetime.datetime.fromtimestamp(e))
df['orbit'] = df['date_part'].apply(lambda s: 'D' if s.hour < 12 else 'A')
df['date'] = df['date_part'].apply(lambda s: s.date())
# convert backscatters to decibels
# df['mean'] = df['mean'].map(lambda s: 10.0*np.log10(s))
else:
# create an Empty DataFrame object
df = pd.DataFrame()
except requests.exceptions.HTTPError as errh:
was_error = True
print ("Http Error:",errh)
except requests.exceptions.ConnectionError as errc:
was_error = True
print ("Error Connecting:",errc)
except requests.exceptions.Timeout as errt:
was_error = True
print ("Timeout Error:",errt)
except requests.exceptions.RequestException as err:
was_error = True
print ("OOps: Something Else",err)
if was_error:
df = pd.DataFrame()
return url, df, was_error
def get_extracted_data_from_db(host : str, port : str, dbname : str, user : str, password : str, sql_select : str) -> pd.DataFrame :
"""
Summary :
This function connects with the database and runs the SQL query that is passed as a parameter.
The query extracts time series of sentinel data formatted according to the marker detection tool requirements.
A single, a subset or the whole set of parcels is retrieved according to the SQL based on the parameters set by the user.
The result is stored in a dataframe and an index is set using the db id and the timestamp of the image.
Database credentials are needed (unlike the RESTful interface).
Arguments :
host - IP address of the database server
port - port of the database (usually, 5432)
dbname - name of the database where data is stored
user - database user (with access privilege to the parcel, hist, sigs and metadata tables)
password - <PASSWORD>
sql_select - SQL query that retrieves the desired data. It is passed as a parameter by the function that calls get_extracted_data_from_db.
Returns :
A data frame with all sentinel data ready to be used by the preprocessing and marker detection modules.
"""
# I connect with the db and check id the connection works fine
conn = None
try:
conn = psycopg2.connect(host=host, port=port, dbname= dbname, user= user, password= password)
print("Connection to DB established")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
# I execute the query and copy the data into a pandas dataframe that
# I create with the same columns returned by the SQL statement
cur = conn.cursor()
cur.execute(sql_select)
data_ts = cur.fetchall()
col = []
for x in cur.description:
col.append(x[0])
ts_db = pd.DataFrame(data=data_ts, columns = col)
# I close the connection
cur.close()
conn.close()
# I set the index (parcel id as in the db + datetime of the images)
ts_db.set_index(['db_id', 'obstime'], inplace=True, verify_integrity=True)
return ts_db
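# Example call (illustrative connection values; the query must return a
# 'db_id' and an 'obstime' column, which become the index -- the table and
# column names below are assumptions about the extraction schema):
# sql = "SELECT pid AS db_id, obstime, mean, count FROM sigs WHERE pid = 12345 AND band = 'B08';"
# ts = get_extracted_data_from_db("localhost", "5432", "cbm", "reader", "secret", sql)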
def get_parcel_data_from_restful(ms, year, parcel_id, api_user, api_pass, ptype):
was_error = False
if ms == "be-wa":
ms = "bewa"
if ptype == "":
url = "http://cap.users.creodias.eu/query/parcelById?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&withGeometry=True"
else:
url = "http://cap.users.creodias.eu/query/parcelById?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&ptype=" + ptype + "&withGeometry=True"
print(url)
try:
response = requests.get(url, auth=(api_user, api_pass))
print(response)
if response.status_code == 404 or response.status_code == 500:
was_error = True
df = pd.DataFrame()
else:
df = pd.read_json(response.text)
except requests.exceptions.HTTPError as errh:
was_error = True
print ("Http Error:",errh)
except requests.exceptions.ConnectionError as errc:
was_error = True
print ("Error Connecting:",errc)
except requests.exceptions.Timeout as errt:
was_error = True
print ("Timeout Error:",errt)
except requests.exceptions.RequestException as err:
was_error = True
print ("OOps: Something Else",err)
if was_error:
df = pd.DataFrame()  # api: pandas.DataFrame
# !/usr/bin/env python
# coding: utf-8
"""
Some utility functions aiming to analyse OSM data
"""
import datetime as dt
from datetime import timedelta
import re
import math
import numpy as np
import pandas as pd
import statsmodels.api as sm
from osmdq.extract_user_editor import editor_name
### OSM data exploration ######################
def updatedelem(data):
"""Return an updated version of OSM elements
Parameters
----------
data: df
OSM element timeline
"""
updata = data.groupby(['elem','id'])['version'].max().reset_index()
return pd.merge(updata, data, on=['id','version'])
def datedelems(history, date):
"""Return an updated version of history data at date
Parameters
----------
history: df
OSM history dataframe
date: datetime
date in datetime format
"""
datedelems = (history.query("ts <= @date")
.groupby(['elem','id'])['version']
.max()
.reset_index())
return pd.merge(datedelems, history, on=['elem','id','version'])
def osm_stats(osm_history, timestamp):
"""Compute some simple statistics about OSM elements (number of nodes,
ways, relations, number of active contributors, number of change sets
Parameters
----------
osm_history: df
OSM element up-to-date at timestamp
timestamp: datetime
date at which OSM elements are evaluated
"""
osmdata = datedelems(osm_history, timestamp)
nb_nodes = len(osmdata.query('elem=="node"'))
nb_ways = len(osmdata.query('elem=="way"'))
nb_relations = len(osmdata.query('elem=="relation"'))
nb_users = osmdata.uid.nunique()
nb_chgsets = osmdata.chgset.nunique()
return [nb_nodes, nb_ways, nb_relations, nb_users, nb_chgsets]
def osm_chronology(history, start_date, end_date=dt.datetime.now()):
"""Evaluate the chronological evolution of OSM element numbers
Parameters
----------
history: df
OSM element timeline
"""
timerange = pd.date_range(start_date, end_date, freq="1M").values
osmstats = [osm_stats(history, str(date)) for date in timerange]
osmstats = pd.DataFrame(osmstats, index=timerange,
columns=['n_nodes', 'n_ways', 'n_relations',
'n_users', 'n_chgsets'])
return osmstats
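# Example (illustrative): monthly counts of elements, contributors and change
# sets for a previously-loaded history dataframe.
# monthly_stats = osm_chronology(osm_history, "2014-01-01", "2016-01-01")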
### OSM metadata extraction ####################
def group_count(metadata, data, grp_feat, res_feat, namesuffix):
"""Group-by 'data' by 'grp_feat' and element type features, count element
corresponding to each grp_feat-elemtype tuples and merge them into metadata
table
Parameters
----------
metadata: df
Dataframe that will integrate the new features
data: df
Dataframe from where information is grouped
grp_feat: object
string that indicates which feature from 'data' must be used to group items
res_feat: object
string that indicates the measured feature (how many items correspond
to the criterion)
namesuffix: object
string that ends the new feature name
"""
md_ext = (data.groupby([grp_feat, 'elem'])[res_feat]
.count()
.unstack()
.reset_index()
.fillna(0))
md_ext['elem'] = md_ext[['node','relation','way']].apply(sum, axis=1)
md_ext = md_ext[[grp_feat, 'elem', 'node', 'way', 'relation']]
colnames = "n_" + md_ext.columns.values[-4:] + namesuffix
md_ext.columns = [grp_feat, *colnames]
return pd.merge(metadata, md_ext, on=grp_feat, how='outer').fillna(0)
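# Example: group_count(user_md, osm_elements, 'uid', 'id', '_modif') would add
# n_elem_modif, n_node_modif, n_way_modif and n_relation_modif counts per user.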
def group_nunique(metadata, data, grp_feat, res_feat, namesuffix):
"""Group-by 'data' by 'grp_feat' and element type features, count unique
element corresponding to each grp_feat-elemtype tuples and merge them into
metadata table
Parameters
----------
metadata: df
Dataframe that will integrate the new features
data: df
Dataframe from where information is grouped
grp_feat: object
string that indicates which feature from 'data' must be used to group items
res_feat: object
string that indicates the measured feature (how many items correspond
to the criterion)
namesuffix: object
string that ends the new feature name
"""
md_ext = (data.groupby([grp_feat, 'elem'])[res_feat]
.nunique()
.unstack()
.reset_index()
.fillna(0))
md_ext['elem'] = md_ext[['node','relation','way']].apply(sum, axis=1)
md_ext = md_ext[[grp_feat, 'elem', 'node', 'way', 'relation']]
colnames = "n_" + md_ext.columns.values[-4:] + namesuffix
md_ext.columns = [grp_feat, *colnames]
return pd.merge(metadata, md_ext, on=grp_feat, how='outer').fillna(0)
def group_stats(metadata, data, grp_feat, res_feat, nameprefix, namesuffix):
"""Group-by 'data' by 'grp_feat' and element type features, compute basic
statistic features (first and ninth deciles) corresponding to each
grp_feat-elemtype tuples and merge them into metadata table; need
Pandas >= 0.20
Parameters
----------
metadata: df
Dataframe that will integrate the new features
data: df
Dataframe from where information is grouped
grp_feat: object
string that indicates which feature from 'data' must be used to group items
res_feat: object
string that indicates the measured feature (how many items correspond
to the criterion)
nameprefix: object
string that begins the new feature name
namesuffix: object
string that ends the new feature name
"""
md_ext = (data.groupby(grp_feat)[res_feat]
.quantile(q=[0.1, 0.9])
.unstack()
.reset_index())
colnames = [nameprefix + str(int(100*op)) + namesuffix
for op in md_ext.columns.values[1:]]
md_ext.columns = [grp_feat, *colnames]
return pd.merge(metadata, md_ext, on=grp_feat, how='outer').fillna(0)
def init_metadata(osm_elements, init_feat, timeunit='1d'):
""" This function produces an init metadata table based on 'init_feature'
in table 'osm_elements'. The intialization consider timestamp measurements
(generated for each metadata tables, i.e. elements, change sets and users).
Parameters
----------
osm_elements: pd.DataFrame
OSM history data
duration_feat: object
metadata duration feature name in string format
timeunit: object
time unit in which 'duration_feature' will be expressed
"""
metadata = (osm_elements.groupby(init_feat)['ts']
.agg(["min", "max"])
.reset_index())
metadata.columns = [*init_feat, 'first_at', 'last_at']
metadata['lifespan'] = ((metadata.last_at - metadata.first_at)
/ pd.Timedelta(timeunit))
extraction_date = osm_elements.ts.max()
metadata['n_inscription_days'] = ((extraction_date - metadata.first_at)
/ pd.Timedelta('1d'))
metadata['n_activity_days'] = (osm_elements
.groupby(init_feat)['ts']
.nunique()
.reset_index())['ts']
return metadata.sort_values(by=['first_at'])
def enrich_osm_elements(osm_elements):
"""Enrich OSM history data by computing additional features
Parameters
----------
osm_elements: pd.DataFrame
OSM history data
"""
# Extract information from first and last versions
osmelem_first_version = (osm_elements
.groupby(['elem','id'])['version', 'uid']
.first()
.reset_index())
osm_elements = pd.merge(osm_elements, osmelem_first_version,
on=['elem','id'])
osm_elements.columns = ['elem', 'id', 'version', 'visible', 'ts',
'uid', 'chgset', 'vmin', 'first_uid']
osmelem_last_version = (osm_elements
.groupby(['elem','id'])['version', 'uid',
'visible']
.last()
.reset_index())
osm_elements = pd.merge(osm_elements, osmelem_last_version,
on=['elem','id'])
osm_elements.columns = ['elem', 'id', 'version', 'visible', 'ts', 'uid',
'chgset', 'vmin', 'first_uid', 'vmax', 'last_uid',
'available']
osmelem_last_bychgset = (osm_elements
.groupby(['elem','id','chgset'])['version',
'visible']
.last()
.reset_index())
osm_elements = pd.merge(osm_elements,
osmelem_last_bychgset[['elem', 'id',
'chgset', 'visible']],
on=['elem','id', 'chgset'])
osm_elements.columns = ['elem', 'id', 'version', 'visible', 'ts', 'uid',
'chgset', 'vmin', 'first_uid', 'vmax', 'last_uid',
'available', 'open']
# New version-related features
osm_elements['init'] = osm_elements.version == osm_elements.vmin
osm_elements['up_to_date'] = osm_elements.version == osm_elements.vmax
osm_elements = osm_elements.drop(['vmin'], axis=1)
osmelem_first_bychgset = (osm_elements
.groupby(['elem','id','chgset'])['version', 'init']
.first()
.reset_index())
osm_elements = pd.merge(osm_elements,
osmelem_first_bychgset[['elem', 'id',
'chgset', 'init']],
on=['elem','id','chgset'])
osm_elements.columns = ['elem', 'id', 'version', 'visible', 'ts', 'uid',
'chgset', 'first_uid', 'vmax', 'last_uid',
'available', 'open', 'init', 'up_to_date',
'created']
# Whether or not an element will be corrected in the last version
osm_elements['willbe_corr'] = np.logical_and(osm_elements.id.diff(-1)==0,
osm_elements.uid.diff(-1)!=0)
osm_elements['willbe_autocorr'] = np.logical_and(osm_elements.id.diff(-1)==0,
osm_elements.uid
.diff(-1)==0)
# Time before the next modification
osm_elements['nextmodif_in'] = - osm_elements.ts.diff(-1)
osm_elements.loc[osm_elements.up_to_date,['nextmodif_in']] = pd.NaT
osm_elements.nextmodif_in = (osm_elements.nextmodif_in
.astype('timedelta64[D]'))
# Time before the next modification, if it is done by another user
osm_elements['nextcorr_in'] = osm_elements.nextmodif_in
osm_elements['nextcorr_in'] = (osm_elements.nextcorr_in
.where(osm_elements.willbe_corr,
other=pd.NaT))
# Time before the next modification, if it is done by the same user
osm_elements['nextauto_in'] = osm_elements.nextmodif_in
osm_elements['nextauto_in'] = (osm_elements.nextauto_in
.where(osm_elements.willbe_autocorr,
other=pd.NaT))
return osm_elements
def extract_elem_metadata(osm_elements, user_groups, drop_ts=True):
""" Extract element metadata from OSM history data
Parameters
----------
osm_elements: pd.DataFrame
OSM history data
Return
------
elem_md: pd.DataFrame
Change set metadata with timestamp information, version-related features
and number of unique change sets (resp. users)
"""
elem_md = init_metadata(osm_elements, ['elem','id'])
elem_md['version'] = (osm_elements.groupby(['elem','id'])['version']
.max()
.reset_index())['version']
elem_md['n_chgset'] = (osm_elements.groupby(['elem', 'id'])['chgset']
.nunique()
.reset_index())['chgset']
elem_md['n_user'] = (osm_elements.groupby(['elem', 'id'])['uid']
.nunique()
.reset_index())['uid']
elem_md['n_autocorr'] = (osm_elements
.groupby(['elem','id'])['willbe_autocorr']
.sum()
.reset_index()['willbe_autocorr']
.astype('int'))
elem_md['n_corr'] = (osm_elements
.groupby(['elem','id'])['willbe_corr']
.sum()
.reset_index()['willbe_corr']
.astype('int'))
elem_md = pd.merge(elem_md, osm_elements[['elem', 'id',
'version', 'visible',
'first_uid', 'last_uid']],
on=['elem', 'id', 'version'])
elem_md = elem_md.set_index(['elem', 'id'])
elem_md = elem_md.join(user_groups.Xclust, on='first_uid')
elem_md = elem_md.rename(columns={'Xclust':'first_ug'})
elem_md = elem_md.join(user_groups.Xclust, on='last_uid')
elem_md = elem_md.rename(columns={'Xclust':'last_ug'})
elem_md = elem_md.reset_index()
if drop_ts:
return drop_features(elem_md, '_at')
else:
return elem_md
def extract_chgset_metadata(osm_elements, drop_ts=True):
""" Extract change set metadata from OSM history data
Parameters
----------
osm_elements: pd.DataFrame
OSM history data
Return
------
chgset_md: pd.DataFrame
Change set metadata with timestamp information, user-related features
and other features describing modification and OSM elements themselves
"""
chgset_md = init_metadata(osm_elements, ['chgset'], '1m')
# User-related features
chgset_md = pd.merge(chgset_md,
osm_elements[['chgset','uid']].drop_duplicates(),
on=['chgset'])
chgset_md['user_lastchgset_h'] = (chgset_md.groupby('uid')['first_at']
.diff())
chgset_md.user_lastchgset_h = (chgset_md.user_lastchgset_h /
timedelta(hours=1))
chgset_md['user_chgset_rank'] = chgset_md.groupby('uid')['first_at'].rank()
# Number of modifications per unique element
contrib_byelem = (osm_elements.groupby(['elem', 'id', 'chgset'])['version']
.count()
.reset_index())
chgset_md['nmean_modif_byelem'] = (contrib_byelem
.groupby('chgset')['version']
.mean()
.reset_index())['version']
# Element-related features
chgset_md = extract_element_features(chgset_md, osm_elements,
'node', 'chgset')
chgset_md = extract_element_features(chgset_md, osm_elements,
'way', 'chgset')
chgset_md = extract_element_features(chgset_md, osm_elements,
'relation', 'chgset')
chgset_md = chgset_md.set_index('chgset')
if drop_ts:
return drop_features(chgset_md, '_at')
else:
return chgset_md
def metadata_version(metadata, osmelem, grp_feat, res_feat, feature_suffix):
"""Compute the version-related features of metadata and append them into
the metadata table
Parameters
----------
metadata: pd.DataFrame
Metadata table to complete
osmelem: pd.DataFrame
original data used to compute versions; contains a 'elem' feature
grp_feat: object
string that indicates which feature from 'data' must be used to group items
res_feat: object
string that indicates the measured feature (how many items correspond to the criterion)
feature_suffix: str
string designing the end of the new feature names
"""
osmelem_nodes = osmelem.query('elem=="node"')
osmelem_ways = osmelem.query('elem=="way"')
osmelem_relations = osmelem.query('elem=="relation"')
metadata = group_stats(metadata, osmelem_nodes, grp_feat, res_feat,
'v', '_node'+feature_suffix)
metadata = group_stats(metadata, osmelem_ways, grp_feat, res_feat,
'v', '_way'+feature_suffix)
metadata = group_stats(metadata, osmelem_relations, grp_feat, res_feat,
'v', '_relation'+feature_suffix)
return metadata
def extract_user_metadata(osm_elements, chgset_md, drop_ts=True):
""" Extract user metadata from OSM history data
Parameters
----------
osm_elements: pd.DataFrame
OSM history data
chgset_md: pd.DataFrame
OSM change set metadata
Return
------
user_md: pd.DataFrame
User metadata with timestamp information, changeset-related features
and other features describing modification and OSM elements themselves
"""
user_md = init_metadata(osm_elements, ['uid'])
# Change set-related features
user_md['n_chgset'] = (chgset_md.groupby('uid')['chgset']
.count()
.reset_index())['chgset']
user_md['dmean_chgset'] = (chgset_md.groupby('uid')['lifespan']
.mean()
.reset_index())['lifespan']
# Number of modifications per unique element
contrib_byelem = (osm_elements.groupby(['elem', 'id', 'uid'])['version']
.count()
.reset_index())
user_md['nmean_modif_byelem'] = (contrib_byelem.groupby('uid')['version']
.mean()
.reset_index())['version']
# Modification-related features
user_md = extract_generic_modif_features(user_md, osm_elements, 'uid')
user_md = extract_modif_features(user_md, osm_elements, 'node', 'uid')
user_md = extract_modif_features(user_md, osm_elements, 'way', 'uid')
user_md = extract_modif_features(user_md, osm_elements, 'relation', 'uid')
user_md = user_md.set_index('uid')
if drop_ts:
return drop_features(user_md, '_at')
else:
return user_md
def add_chgset_metadata(metadata, total_change_sets):
"""Add total change set count to user metadata
Parameters
----------
metadata: pd.DataFrame
user metadata; must be indexed by a column 'uid'
total_change_sets: pd.DataFrame
total number of change sets by user; must contain columns 'uid' and 'num'
"""
metadata = (metadata.join(total_change_sets.set_index('uid'))
.rename_axis({'num': 'n_total_chgset'}, axis=1))
metadata['p_local_chgset'] = metadata.n_chgset / metadata.n_total_chgset
return metadata
def add_editor_metadata(metadata, top_editors):
"""Add editor information to each metadata recordings; use an outer join to
overcome the fact that some users do not indicate their editor, and may be
skipped by a natural join => the outer join allow to keep them with 0
values on known editors
Parameters
----------
metadata: pd.DataFrame
user metadata; must be indexed by a column 'uid'
top_editors: pd.DataFrame
raw editor information, editors used by each user, with a highlight on
N most popular editors; must contain a column 'uid'
"""
metadata = (metadata
.join(top_editors.set_index('uid'), how='left')
.fillna(0))
metadata['n_total_chgset_unknown'] = (metadata['n_total_chgset']
- metadata['n_total_chgset_known'])
return drop_features(metadata, 'n_total_chgset_known')
def transform_editor_features(metadata):
"""Transform editor-related features into metadata; editor uses are expressed
as proportions of n_total_chgset, a proportion of local change sets is
computed as a new feature and an ecdf transformation is applied on n_chgset
and n_total_chgset
Parameters
----------
metadata: pd.DataFrame
user metadata; must contain n_chgset and n_total_chgset columns, and
editor column names must begin with 'n_total_chgset_'
"""
normalize_features(metadata, 'n_total_chgset')
metadata = ecdf_transform(metadata, 'n_chgset')
metadata = ecdf_transform(metadata, 'n_total_chgset')
return metadata
def ecdf_transform(metadata, feature):
""" Apply an ECDF transform on feature within metadata; transform the column
data into ECDF values
Parameters
----------
metadata: pd.DataFrame
Metadata in which the transformation takes place
feature: object
string designing the transformed column
Return
------
New metadata, with a new renamed feature containing ecdf version of
original data
"""
ecdf = sm.distributions.ECDF(metadata[feature])
metadata[feature] = ecdf(metadata[feature])
new_feature_name = 'u_' + feature.split('_', 1)[1]
return metadata.rename(columns={feature: new_feature_name})
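# Example: ecdf_transform(metadata, 'n_chgset') replaces the raw change set
# count with its empirical quantile in [0, 1] and renames the column 'u_chgset'.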
def extract_generic_modif_features(metadata, data, grp_feat):
"""Extract generic features about modifications done by each individuals: number
of modifications, number of modifications per element type
Parameters
----------
metadata: pd.DataFrame
metadata table
data: pd.DataFrame
original data
grp_feat: object
string designing the grouping feature: it characterizes the metadata
("chgset", or "user")
"""
newfeature = (data.groupby([grp_feat])['id']
.count()
.reset_index()
.fillna(0))
newfeature.columns = [grp_feat, "n_total_modif"]
metadata = pd.merge(metadata, newfeature, on=grp_feat, how="outer").fillna(0)
newfeature = (data.query('elem=="node"').groupby([grp_feat])['id']
.count()
.reset_index()
.fillna(0))
newfeature.columns = [grp_feat, "n_total_modif_node"]
metadata = pd.merge(metadata, newfeature, on=grp_feat, how="outer")  # api: pandas.merge
import json
from Tkinter import *
import os
import pandas as pd
import numpy as np
import tkMessageBox
import csv
import shutil
import datetime
from ErrorWindow import ErrorWindow
from HPSpreadsheet import HPSpreadsheet, CustomTable
from PIL import Image, ImageTk
import data_files
import hp_data
RVERSION = hp_data.RVERSION
class KeywordsSheet(HPSpreadsheet):
"""
Class for managing keyword data entry. Simply overrides most of HPSpreadsheet, main difference being the lack of
tabs.
"""
def __init__(self, settings, dir=None, keyCSV=None, master=None, oldImageNames=[], newImageNames=[]):
self.keywords = self.load_keywords()
self.settings = settings
HPSpreadsheet.__init__(self, settings, dir=dir, master=master)
self.oldImageNames = oldImageNames
self.newImageNames = newImageNames
self.dir = dir
if self.dir:
self.imageDir = os.path.join(self.dir, 'image')
self.on_main_tab = True
self.master = master
self.keyCSV = keyCSV
self.saveState = True
self.protocol("WM_DELETE_WINDOW", self.check_save)
def create_widgets(self):
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.topFrame = Frame(self)
self.topFrame.pack(side=TOP, fill=X)
self.rightFrame = Frame(self, width=480)
self.rightFrame.pack(side=RIGHT, fill=Y)
self.leftFrame = Frame(self)
self.leftFrame.pack(side=LEFT, fill=BOTH, expand=1)
self.pt = CustomTable(self.leftFrame, scrollregion=None, width=1024, height=720)
self.leftFrame.pack(fill=BOTH, expand=1)
self.pt.show()
self.currentImageNameVar = StringVar()
self.currentImageNameVar.set('Current Image: ')
l = Label(self.topFrame, height=1, textvariable=self.currentImageNameVar)
l.pack(fill=BOTH, expand=1)
image = Image.open(data_files._REDX)
image.thumbnail((250,250))
self.photo = ImageTk.PhotoImage(image)
self.l2 = Button(self.rightFrame, image=self.photo, command=self.open_image)
self.l2.image = self.photo # keep a reference!
self.l2.pack(side=TOP)
self.validateFrame = Frame(self.rightFrame, width=480)
self.validateFrame.pack(side=BOTTOM)
self.currentColumnLabel = Label(self.validateFrame, text='Current column:')
self.currentColumnLabel.grid(row=0, column=0, columnspan=2)
lbl = Label(self.validateFrame, text='Valid values for cells in this column:').grid(row=1, column=0, columnspan=2)
self.vbVertScroll = Scrollbar(self.validateFrame)
self.vbVertScroll.grid(row=2, column=1, sticky='NS')
self.vbHorizScroll = Scrollbar(self.validateFrame, orient=HORIZONTAL)
self.vbHorizScroll.grid(row=3, sticky='WE')
self.validateBox = Listbox(self.validateFrame, xscrollcommand=self.vbHorizScroll.set, yscrollcommand=self.vbVertScroll.set, selectmode=SINGLE, width=50, height=14)
self.validateBox.grid(row=2, column=0)
self.vbVertScroll.config(command=self.validateBox.yview)
self.vbHorizScroll.config(command=self.validateBox.xview)
for v in self.keywords:
self.validateBox.insert(END, v)
self.validateBox.bind('<<ListboxSelect>>', self.insert_item)
self.menubar = Menu(self)
self.fileMenu = Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="File", menu=self.fileMenu)
self.fileMenu.add_command(label='Save', command=self.exportCSV, accelerator='ctrl-s')
self.fileMenu.add_command(label='Validate', command=self.validate)
self.editMenu = Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label='Edit', menu=self.editMenu)
self.editMenu.add_command(label='Fill Down', command=self.pt.fill_selection, accelerator='ctrl-d')
self.editMenu.add_command(label='Fill True', command=self.pt.enter_true, accelerator='ctrl-t')
self.editMenu.add_command(label='Fill False', command=self.pt.enter_false, accelerator='ctrl-f')
self.editMenu.add_command(label='Add Column', command=self.add_column)
self.config(menu=self.menubar)
def open_spreadsheet(self):
if self.dir and not self.keyCSV:
self.csvdir = os.path.join(self.dir, 'csv')
for f in os.listdir(self.csvdir):
if f.endswith('.csv') and 'keywords' in f:
self.keyCSV = os.path.join(self.csvdir, f)
self.title(self.keyCSV)
self.pt.importCSV(self.keyCSV)
def update_current_image(self, event):
row = self.pt.getSelectedRow()
self.imName = str(self.pt.model.getValueAt(row, 0))
self.currentImageNameVar.set('Current Image: ' + self.imName)
maxSize = 480
try:
im = Image.open(os.path.join(self.imageDir, self.imName))
except (IOError, AttributeError):
im = Image.open(data_files._REDX)
if im.size[0] > maxSize or im.size[1] > maxSize:
im.thumbnail((maxSize,maxSize), Image.ANTIALIAS)
newimg=ImageTk.PhotoImage(im)
self.l2.configure(image=newimg)
self.l2.image = newimg
self.update_valid_values()
def load_keywords(self):
try:
df = pd.read_csv(data_files._IMAGEKEYWORDS)  # api: pandas.read_csv
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 10:59:05 2021
@author: franc
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import json
from collections import Counter, OrderedDict
import math
import torchtext
from torchtext.data import get_tokenizer
from googletrans import Translator
# from deep_translator import GoogleTranslator
# pip install googletrans==4.0.0rc1
import pickle
# pip install pickle-mixin
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
# python -m spacy download es_core_news_sm
import spacy
import fasttext.util
import contractions
import re # libreria de expresiones regulares
import string # libreria de cadena de caracteres
import itertools
import sys
sys.path.append("/tmp/TEST")
from treetagger import TreeTagger
import pathlib
from scipy.spatial import distance
from scipy.stats import kurtosis
from scipy.stats import skew
class NLPClass:
def __init__(self):
self.numero = 1
nltk.download('wordnet')
def translations_dictionary(self, df_translate=None, path=""):
'''
It appends different animal names in Spanish and English to a
dictionary. The English names are chosen so that they appear as
WordNet synsets.
Parameters
----------
df_translate : pandas.dataframe, optional.
If it's not None, the rows are appended. Otherwise it's
initialized and then the rows are appended.
The default is None.
path : string, optional
The path where to save the pickle file with the dictionary. Unless
path is empty.
The default is "".
Returns
-------
df_translate : pandas.dataframe.
Pandas.dataframe with the new rows appended.
'''
df_auxiliar = pd.DataFrame(columns=['spanish','english'])
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["llama"], 'english': ["llama"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["picaflor"], 'english': ["hummingbird"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chita"], 'english': ["cheetah"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["torcaza"], 'english': ["dove"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yacaré"], 'english': ["alligator"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["corvina"], 'english': ["croaker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vizcacha"], 'english': ["viscacha"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["orca"], 'english': ["killer_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["barata"], 'english': ["german_cockroach"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["coipo"], 'english': ["coypu"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cuncuna"], 'english': ["caterpillar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carpincho"], 'english': ["capybara"]}), ignore_index = True)  # api: pandas.DataFrame
import argparse
from feather_helpers import FeatherInfo
from tardis_request import DownloadRequest, TardisGenerator
from feather_helpers import write_feather_frame
import pandas as pd
import numpy as np
import time
from dateutil import tz
# process tardis messages
import asyncio
# i/o
import os
import ujson
import json
import gzip
# normalization dictionary in separate file to keep code cleaner
from tardis_msg_normalization import *
# cythonized file
import tardis_msg_counter
from tardis_msg_counter import ret_all_subdir_file_paths
### Default Global Settings
from decouple import config
api_key = config('TARDIS_KEY')
default_cache_dir = config('CACHE_DIR')
msg_thrsh_process_file_by_file = 1000000
### Helper Functions
def format_houbi_ch_field(ch_field_str):
if ch_field_str.startswith('market.') & ch_field_str.endswith('.trade.detail'):
ch_field_str = ch_field_str.replace('market.','')
ch_field_str = ch_field_str.replace('.trade.detail','')
return ch_field_str
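# e.g. format_houbi_ch_field('market.btcusdt.trade.detail') -> 'btcusdt';
# any other channel string is returned unchanged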
def tardis_cache_trade_zip_file_into_arr(file_path, arr_save_into, start_idx, exch):
"""
For one Tardis gzip file:
-> Iterate through messages
-> Load each one into pre-created 2D numpy array starting at row start_idx
Returns dictionary:
{
'start_idx': updated value start_idx (latest numpy array row inserted into),
'col_headings': data column headings
}
"""
line_data = None
sub_data = None
# read the gzip file
with gzip.open(file_path, "r") as z:
json_bytes = z.read()
json_str = json_bytes.decode('utf-8')
lines = json_str.split('\n')
col_headings = []
for l in lines:
if len(l)==0:
continue
line_data = {}
try:
loaded_line = ujson.loads(l[l.find(' ')+1:])
except ValueError:
loaded_line = json.loads(l[l.find(' ')+1:])
# extra fields sometimes found in deribit messages
# when this happens, for now they are ignored
is_deribit = False
# Generic
if 'data' in loaded_line:
line_data = loaded_line['data']
# For FTX, look up symbol separately b/c it's not included in data dictionary
if exch == 'ftx':
if type(line_data) is list:
for sub_data in line_data:
if 'market' in loaded_line:
sub_data['symbol'] = loaded_line['market']
else:
sub_data['symbol'] = ''
# For Houbi Data
elif 'tick' in loaded_line:
if 'data' in loaded_line['tick']:
line_data = loaded_line['tick']['data']
# look up symbol separately b/c it's not included in data dictionary
if type(line_data) is dict:
if 'ch' in loaded_line:
line_data['ch'] = format_houbi_ch_field(loaded_line['ch'])
elif type(line_data) is list:
for sub_data in line_data:
if 'ch' in loaded_line:
sub_data['ch'] = format_houbi_ch_field(loaded_line['ch'])
else:
sub_data['ch'] = ''
# For Deribit Data
elif 'params' in loaded_line:
if 'data' in loaded_line['params']:
line_data = loaded_line['params']['data']
is_deribit = True
# For Coinbase & Bitstamp data
elif type(loaded_line) is dict:
if 'type' in loaded_line:
if loaded_line['type']=='match':
line_data = loaded_line
# For Kraken data
elif type(loaded_line) is list:
if len(loaded_line)>=3:
if loaded_line[2]=='trade':
line_data = [{'price': sub_line[0],
'volume': sub_line[1],
'time': sub_line[2],
'side': sub_line[3],
'orderType': sub_line[4],
'misc': sub_line[5],
'symbol': loaded_line[3]}
for sub_line in loaded_line[1]]
if len(line_data)==0:
continue
if type(line_data) is dict:
# Unfortunately need hack to drop extra fields which
# occur in deribit messages sometimes
if is_deribit:
if 'liquidation' in line_data:
line_data.pop('liquidation')
if 'block_trade_id' in line_data:
line_data.pop('block_trade_id')
arr_save_into[start_idx] = np.array(list(line_data.items()))[:,1]
start_idx += 1
if len(col_headings)==0:
col_headings = list(line_data.keys())
elif type(line_data) is list:
for sub_data in line_data:
if is_deribit:
if 'liquidation' in sub_data:
sub_data.pop('liquidation')
if 'block_trade_id' in sub_data:
sub_data.pop('block_trade_id')
arr_save_into[start_idx] = np.array(list(sub_data.items()))[:,1]
start_idx += 1
if len(col_headings)==0:
col_headings = list(sub_data.keys())
return {
'start_idx': start_idx,
'col_headings': col_headings
}
def tardis_parse_zip_dir_and_cache_into_arr(dir_path, exch, dl_date):
"""
For one Tardis (sub)directory:
Iterate through message files and cache into Feather File
"""
# Count number of messages in all tardis gz files in directory
num_messages = tardis_msg_counter.c_tardis_count_trade_msgs_cache_dir(dir_path)
num_fields = len(trades_norm_dict[exch].keys())
# Set up numpy array to hold them
np_cache_array = np.ndarray(shape=(num_messages, num_fields),dtype='O')
glbl_msg_id = 0
dir_files = ret_all_subdir_file_paths(dir_path)
for file_path in dir_files:
if not(str(file_path).endswith('.json.gz')):
continue
run_out = tardis_cache_trade_zip_file_into_arr(file_path, np_cache_array, glbl_msg_id, exch)
glbl_msg_id = run_out['start_idx']
# convert numpy array to DataFrame
col_headings = run_out['col_headings']
if exch == 'kraken':
col_headings = trades_norm_dict[exch].keys()
if len(col_headings)==0:
col_headings_norm = [trades_norm_dict[exch][k][0] for k in trades_norm_dict[exch].keys()]
else:
col_headings_norm = [trades_norm_dict[exch][c][0] for c in col_headings]
type_dict = {t[0]:t[1] for t in trades_norm_dict[exch].values()}
df_result = pd.DataFrame(np_cache_array,columns=col_headings_norm)
df_result.index.name = 'msg_num'
# Clean up DataFrame
for k in type_dict.keys():
type_to_convert_to = type_dict[k]
if type_to_convert_to == int:
type_to_convert_to = float
df_result[k] = df_result[k].astype(type_to_convert_to)
normalize_df_timestamps(df_result, exch)
# Save as (intermediate) Feather file
feather_save_path = dir_path + 'trd_tmp_'+pd.Timestamp(dl_date).strftime('%Y%m%d')
feather_info = FeatherInfo(feather_save_path, "lz4")
write_feather_frame(feather_info, df_result)
def tardis_parse_root_cache_dir(root_cache_dir, exch, dl_date):
"""
For all Tardis sub-directories (one level up from cached gzip files):
-> aggregate messages from sub-directory files
-> combine and save to intermediate Feather file
Then iterate through all intermediate cached Feather files:
-> combine data and save to final output file
"""
all_file_paths = ret_all_subdir_file_paths(root_cache_dir)
all_file_paths_onelvlup = [path_str[:(path_str[::-1].find('\\'))*-1] for path_str in all_file_paths]
all_file_paths_onelvlup = list(pd.value_counts(all_file_paths_onelvlup).sort_index().index)
for dir_path in all_file_paths_onelvlup:
tardis_parse_zip_dir_and_cache_into_arr(dir_path, exch, dl_date)
print(dir_path)
print('**** Recombining DataFrames ****\n')
feather_files_list = [f for f in ret_all_subdir_file_paths(root_cache_dir) if not(f.endswith('.json.gz'))]
df_list = []
for ff in feather_files_list:
temp_df = pd.read_feather(ff)
df_list.append(temp_df)
df_result = pd.concat(df_list)
    feather_save_path = root_cache_dir + 'trd_' + pd.Timestamp(dl_date).strftime('%Y%m%d')
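

# Usage sketch (added for illustration; the cache directory, exchange name and date are
# assumptions, not values from the original project): parse every cached Tardis gzip
# under one root directory, write the per-directory Feather files, then recombine them
# into the final trade file as described in the docstrings above.
def _example_parse_tardis_day():
    tardis_parse_root_cache_dir('tardis_cache/coinbase/', exch='coinbase', dl_date='2021-01-01')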
'''Check the datasets for simulation'''
import os
from basis.file import downloadDatasets
existing_datasets = os.path.exists("haikou-experiments/datasets")
if existing_datasets == False:
print("Downloading datasets...")
print("If failed, you can download them from https://drive.google.com/file/d/1yi3aNhB6xc1vjsWX5pq9eb5rSyDiyeRw/view?usp=sharing")
downloadDatasets()
'''Import necessary dependencies'''
import random
import simpy
import time
import pandas as pd
import datetime
import csv
import numpy as np
from basis.schedule import Schedule
from basis.setting import MAX_SEARCH_LAYERS,MAX_DETOUR_LENGTH
from basis.setting import WAITING_TIME,PERIODS_MINUTES,getSpeed,SPEED
from basis.time_periods import ALL_PERIODS,TIME_DICS,PERIODS
from basis.edges import ALL_EDGES,ALL_EDGES_DIC
from basis.vertexes import ALL_VERTEXES
from basis.neighbor import ALL_NEIGHBOR_EDGES
from basis.assistant import getID
import progressbar
ALL_PASSENGERS = {}
EDGES_TO_CUSTOMERS = [[] for _ in range(len(ALL_EDGES))] # The customers existing in each edge
RANDOM_SEED = 30
class CarpoolSimulation(object):
def __init__(self, env, max_time):
self.begin_time = time.strftime("%m-%d %H:%M:%S", time.localtime())
self.overall_success = 0
self.schedule_by_history = True
self.env = env
self.max_time = max_time
self.all_OD = False
self.COMBINED_OD = True
self.tendency = 1 # Proportion of passengers who choose carpooling
self.possibleOD()
self.env.process(self.generateByHistory())
def possibleOD(self):
'''Load all origin-destination'''
ODs_df = pd.read_csv("haikou-experiments/network/ODs_combined.csv")
self.possible_ODs = {}
for i in range(ODs_df.shape[0]):
self.possible_ODs[getID(ODs_df["start_ver"][i],ODs_df["end_ver"][i])] = i
def generateByHistory(self):
'''Run simulation experiments based on data provided by <NAME>'''
self.csv_path = "haikou-experiments/results/SIMULATION_RESULTS_ALL_DIDI_CHUXING_HAIKOU.csv"
with open(self.csv_path,"w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["passenger_id", "real_start_date", "real_start_time", "start_time", "end_time", "OD_id",
"start_ver", "end_ver","orginal_start_ver", "orginal_end_ver", "original_distance", "final_distance",
"matching_or", "shared_distance", "detour", "gap", "matching_id", "matching_type", "matching_ver",""])
history_df = pd.read_csv("haikou-experiments/datasets/DATASETS_DIDI_CHUXING_HAIKOU.csv")
history_df["depature_time"] = | pd.to_datetime(history_df['depature_time']) | pandas.to_datetime |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import dash_table
import json
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from dash.dependencies import Input, Output, State
from hackpion.EyHelper import (get_all_hashtags , get_all_mentions ,strip_all_entities ,sentiment_final , sentiment_final_int,
sentiment_analyzer_scores_text_blob)
import dash_bootstrap_components as dbc
import dash_html_components as html
def preprocessData(df):
df["mentions"] = df.tweet.apply(lambda x: get_all_mentions( str(x)))
df["hashtags"] = df.tweet.apply(lambda x: get_all_hashtags( str(x)))
df["cleaned_review"] = df.tweet.apply(lambda x: strip_all_entities(str(x).lower()))
df["sentiment_polarity"] = df.cleaned_review.apply(sentiment_analyzer_scores_text_blob)
df["sentiment"] = df.sentiment_polarity.apply(sentiment_final)
df["sentiment_int"] = df.sentiment_polarity.apply(sentiment_final_int)
return df
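

# Minimal usage sketch (added as an example; the tweets below are made up): run the
# preprocessing pipeline on a tiny in-memory frame with the 'tweet' column it expects
# and look at the derived text and sentiment columns.
def _example_preprocess():
    demo = pd.DataFrame({'tweet': ['Loving the new @EY_US report! #audit',
                                   'Terrible wait times today #fail']})
    demo = preprocessData(demo)
    return demo[['cleaned_review', 'sentiment', 'sentiment_int']]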
def update_sentiment_trend(selected_options , timeframe):
with open('database/compet_database.json', 'r') as openfile:
data = json.load(openfile)
comp = pd.DataFrame(data['username'])
competitors = comp.loc[comp.compet].screen_name.values
self_account = comp.loc[~comp.compet].screen_name.values
search_base = [{comp_:[comp_]} for comp_ in competitors]+[{"EY":list(self_account)}]
ey = pd.DataFrame()
for i in self_account:
ey = pd.concat([ey,pd.read_csv('database/userdata/{}_user_profile.csv'.format(i))])
compet = pd.DataFrame()
for i in competitors:
compet = pd.concat([compet,pd.read_csv('database/userdata/{}_user_profile.csv'.format(i))])
print(selected_options,timeframe)
ey['Week'] = pd.to_datetime(ey['date']).dt.strftime('%U')
ey['month'] =ey.date.apply(lambda x: x[:7])
like_ey_pos =ey.loc[ey.sentiment_int==1].groupby(by=timeframe).sentiment_int.count().reset_index()
like_ey_pos['Type'] = "Positive Sentiment"
like_ey_neg =ey.loc[ey.sentiment_int==-1].groupby(by=timeframe).sentiment_int.count().reset_index()
like_ey_neg['Type'] = "Negative Sentiment"
like_ =ey.groupby(by=timeframe).likes_count.mean().reset_index()
like_['Type'] = "Likes Count"
like_.columns= like_ey_neg.columns
like_ey =ey.groupby(by=timeframe).sentiment_int.count().reset_index()
like_ey['Type'] = "All Tweets"
avg = like_.copy()
avg.sentiment_int=like_.sentiment_int.mean()
avg.Type="Mean"
count_data = pd.concat([like_ey ,like_ey_neg , like_ey_pos,like_])
count_data = count_data.loc[count_data.Type.isin(selected_options)]
fig_line_all = px.line(count_data, x=timeframe, y="sentiment_int",color="Type",title="Trend for tweets")
fig_line_all.update_layout(
xaxis=dict(
showline=True,
showgrid=False,
showticklabels=True,
linecolor='rgb(204, 204, 204)',
linewidth=2,
ticks='outside',
tickfont=dict(
family='Arial',
size=12,
color='rgb(82, 82, 82)',
),
),
plot_bgcolor='white'
)
return fig_line_all
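

# Usage sketch (added for illustration; it relies on the local 'database/' files that
# update_sentiment_trend reads, so it is only meaningful in a configured workspace):
def _example_sentiment_trend():
    # Plot monthly counts of all tweets together with the average likes line.
    return update_sentiment_trend(["All Tweets", "Likes Count"], "month")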
def sentiment_distribution_chart():
with open('database/compet_database.json', 'r') as openfile:
data = json.load(openfile)
comp = pd.DataFrame(data['username'])
competitors = comp.loc[comp.compet].screen_name.values
self_account = comp.loc[~comp.compet].screen_name.values
search_base = [{comp_:[comp_]} for comp_ in competitors]+[{"EY":list(self_account)}]
ey = pd.DataFrame()
for i in self_account:
ey = pd.concat([ey,pd.read_csv('database/userdata/{}_user_profile.csv'.format(i))])
compet = pd.DataFrame()
for i in competitors:
compet = pd.concat([compet,pd.read_csv('database/userdata/{}_user_profile.csv'.format(i))])
fig = make_subplots(rows=1, cols=2, start_cell="bottom-left",subplot_titles = ["EY" ,"Competitor"])
fig.add_trace(go.Bar(x= ey.sentiment.value_counts().index,y= ey.sentiment.value_counts().values,name="EY"),
row=1, col=1)
fig.add_trace(go.Bar(x= compet.sentiment.value_counts().index,y= compet.sentiment.value_counts().values,name="Competitor"),
row=1, col=2)
fig.update_layout( plot_bgcolor='white')
return fig
def prepeare_Sentiment_content(dashboard):
with open('database/compet_database.json', 'r') as openfile:
data = json.load(openfile)
    comp = pd.DataFrame(data['username'])
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import DatetimeIndex, MultiIndex
from pandas._libs import hashtable
from pandas.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
def test_unique_datetimelike():
idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
# GH #17896 - with level= argument
result = idx.unique(level=level)
expected = idx.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
mi = idx[[0, 1, 0, 1, 1, 0, 0]]
expected = mi._shallow_copy(mi[[0, 1]])
result = mi._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_labels():
# GH 17464
# Make sure that a MultiIndex with duplicate levels throws a ValueError
with pytest.raises(ValueError):
mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
# And that using set_levels with duplicate levels fails
mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
[1, 2, 1, 2, 3]])
with pytest.raises(ValueError):
mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
[1, 'a', 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
assert mi.names == names
# With .rename()
mi = MultiIndex.from_product([[0, 1]] * 3)
mi = mi.rename(names)
assert mi.names == names
# With .rename(., level=)
mi.rename(names[1], level=1, inplace=True)
mi = mi.rename([names[0], names[2]], level=[0, 2])
assert mi.names == names
def test_duplicate_meta_data():
# GH 10115
mi = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [mi,
mi.set_names([None, None]),
mi.set_names([None, 'Num']),
mi.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
# see fixtures
assert idx.is_unique is True
assert idx.has_duplicates is False
assert idx_dup.is_unique is False
assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
assert mi.is_unique is False
assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
# GH 9075
    t = [(u('x'), u('out'), u('z'), 5, u('y')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
        data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
from itertools import product
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from woodwork.logical_types import URL, Categorical, Double, Integer, Unknown
from rayml.utils import (
_convert_numeric_dataset_pandas,
_schema_is_equal,
infer_feature_types,
)
def test_infer_feature_types_no_type_change():
X_dt = pd.DataFrame([[1, 2], [3, 4]])
X_dt.ww.init()
pd.testing.assert_frame_equal(X_dt, infer_feature_types(X_dt))
X_dc = ww.init_series(pd.Series([1, 2, 3, 4]))
pd.testing.assert_series_equal(X_dc, infer_feature_types(X_dc))
X_pd = pd.DataFrame(
{0: pd.Series([1, 2], dtype="int64"), 1: pd.Series([3, 4], dtype="int64")}
)
pd.testing.assert_frame_equal(X_pd, infer_feature_types(X_pd))
X_list = [1, 2, 3, 4]
X_expected = ww.init_series(pd.Series(X_list))
pd.testing.assert_series_equal(X_expected, infer_feature_types(X_list))
assert X_list == [1, 2, 3, 4]
X_np = np.array([1, 2, 3, 4])
X_expected = ww.init_series(pd.Series(X_np))
pd.testing.assert_series_equal(X_expected, infer_feature_types(X_np))
assert np.array_equal(X_np, np.array([1, 2, 3, 4]))
X_np = np.array([[1, 2], [3, 4]])
X_expected = pd.DataFrame(X_np)
X_expected.ww.init()
pd.testing.assert_frame_equal(X_expected, infer_feature_types(X_np))
assert np.array_equal(X_np, np.array([[1, 2], [3, 4]]))
def test_infer_feature_types_series_name():
name = "column with name"
    X_pd = pd.Series([1, 2, 3, 4], dtype="int64", name=name)
import pandas as pd
import os
"""
Prepare the input files
"""
textos = os.listdir('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/2. Textos Geneales')
bdd = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Latam3.csv')
bdd = bdd.loc[bdd['Tipo convocatoria'] == 'Investigación-Innovación']
"""
Spanish tokens
"""
gramas_esp = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Gramas_final.csv')
# Convert the ODS column into lists
class_ods_esp = []
for list_osd in gramas_esp['ODS']:
list_osd = list_osd.lower().replace(':', ';').split(';')
list_osd2 = []
for o in list_osd:
if o == 'rabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económic':
o = 'trabajo decente y crecimiento económico'
if o == 'igualdad de género' or o == 'gualdad de género' or o == 'igualdad de genero':
o = 'igualdad de género'
if o == 'industria, innovación e infraestructuras' or o == 'industria, innovación e infraestructura':
o = 'industria, innovación e infraestructuras'
if o == 'paz, justicia e instituciones solidas' or o == 'paz, justicia e instituciones sólidas' or o == 'paz, justicia e instituciones sólida':
o = 'paz, justicia e instituciones sólidas'
if 'producción y consumo' in o:
o = 'producción y consumo responsable'
if o == 'ciudades y comunidades sostenibles' or o == 'ciudades y comunidades sostenible' or o == 'ciudades y comunidades sostenible':
o = 'ciudades y comunidades sostenibles'
if o == 'alianzas para lograr los objetivos' or o == 'alianza para lograr los objetivos':
o = 'alianza para lograr los objetivos'
if o == 'reducción de desigualdade' or o == 'reducción de las desigualdades' or o == 'reducción de desigualdades':
o = 'reducción de desigualdades'
if o == 'vida de ecosistemas terrestres' or o == 'vida de ecosistemas terrestre':
o = 'vida de ecosistemas terrestres'
o = o.strip()
list_osd2.append(o)
class_ods_esp.append(list_osd2)
gramas_esp['ODS'] = class_ods_esp
"""
Portuguese tokens
"""
gramas_por = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Gramas_protugues.csv')
# Convert the ODS column into lists
class_ods_por = []
for list_osd in gramas_por['ODS']:
list_osd = list_osd.lower().split(';')
list_osd2 = []
for o in list_osd:
if o == 'rabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económic':
o = 'trabajo decente y crecimiento económico'
if o == 'igualdad de género' or o == 'gualdad de género' or o == 'igualdad de genero':
o = 'igualdad de género'
if o == 'industria, innovación e infraestructuras' or o == 'industria, innovación e infraestructura':
o = 'industria, innovación e infraestructuras'
if o == 'paz, justicia e instituciones solidas' or o == 'paz, justicia e instituciones sólidas' or o == 'paz, justicia e instituciones sólida':
o = 'paz, justicia e instituciones sólidas'
if 'producción y consumo' in o:
o = 'producción y consumo responsable'
if o == 'ciudades y comunidades sostenibles' or o == 'ciudades y comunidades sostenible' or o == 'ciudades y comunidades sostenible':
o = 'ciudades y comunidades sostenibles'
if o == 'alianzas para lograr los objetivos' or o == 'alianza para lograr los objetivos':
o = 'alianza para lograr los objetivos'
if o == 'reducción de desigualdade' or o == 'reducción de las desigualdades' or o == 'reducción de desigualdades':
o = 'reducción de desigualdades'
if o == 'vida de ecosistemas terrestres' or o == 'vida de ecosistemas terrestre':
o = 'vida de ecosistemas terrestres'
o = o.strip()
list_osd2.append(o.lower())
class_ods_por.append(list_osd2)
gramas_por['ODS'] = class_ods_por
"""
Remove accent marks from vowels
"""
def normalize(s):
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("Á", "A"),
("É", "E"),
("Í", "I"),
("Ó", "O"),
("Ú", "U")
)
for a, b in replacements:
s = s.replace(a, b)
return s
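
# Quick illustrative checks (added; not part of the original script): normalize() only
# strips the accented vowels listed in the replacements table and leaves every other
# character untouched.
assert normalize('Educación de calidad') == 'Educacion de calidad'
assert normalize('ENERGÍA') == 'ENERGIA'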
"""
Build the matrix of token matches in the texts
"""
txt_inv = bdd['ID Proyecto'].tolist()
entidad = bdd['País'].tolist()
entidad.index('Brasil')
gramas_esp = gramas_esp[gramas_esp['ODS'].isnull() == False]
path_base = '/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/2. Textos Geneales'
# matriz = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Matriz_Clasificación_ODS.csv')
matriz = pd.DataFrame()
n = 0
for i in txt_inv:
n+=1
print(str(n * 100 / len(txt_inv)))
print(n)
txt = open(path_base + '/' + i , 'r')
txt = str(normalize(txt.read())).replace('\n', ' ').split('.')
    ## Go word by word
"""
    Define the per-ODS (SDG) variables
"""
pobreza = ''
pobreza_num= 0
hambre = ''
hambre_num = 0
salud = ''
salud_num = 0
educacion = ''
educacion_num = 0
genero = ''
genero_num = 0
agua = ''
agua_num = 0
energia = ''
energia_num = 0
trabajo = ''
trabajo_num = 0
industria = ''
industria_num = 0
desigualdades = ''
desigualdades_num = 0
sostenibles = ''
sostenibles_num = 0
producción_consumo = ''
producción_consumo_num = 0
clima = ''
clima_num = 0
submarina = ''
submarina_num = 0
terrestres = ''
terrestres_num = 0
paz = ''
paz_num = 0
alianza = ''
alianza_num = 0
if entidad[txt_inv.index(i)] != 'Brasil':
for t in range(len(txt)):
i_split = txt[t].split()
for grama in i_split:
grama = str(grama).lower()
if grama in gramas_esp['Gramas'].tolist() and grama.isalpha() and grama.isdigit() == False:
for id_token in range(len(gramas_esp)):
if grama == gramas_esp['Gramas'][id_token]:
if 'educación de calidad' in gramas_esp['ODS'][id_token]:
educacion = educacion + txt[t]+ '\n'
educacion_num +=1
if 'fin de la pobreza' in gramas_esp['ODS'][id_token]:
pobreza = pobreza + txt[t]+'\n'
pobreza_num +=1
if 'salud y bienestar' in gramas_esp['ODS'][id_token]:
salud = salud + txt[t]+'\n'
salud_num +=1
if 'igualdad de género' in gramas_esp['ODS'][id_token]:
genero = genero + txt[t]+'\n'
genero_num +=1
if 'agua limpia y saneamiento' in gramas_esp['ODS'][id_token]:
agua = agua + txt[t]+'\n'
agua_num +=1
if 'energía asequible y no contaminante' in gramas_esp['ODS'][id_token]:
energia = energia + txt[t]+'\n'
energia_num +=1
if 'trabajo decente y crecimiento económico' in gramas_esp['ODS'][id_token]:
trabajo = trabajo + txt[t]+'\n'
trabajo_num +=1
if 'industria, innovación e infraestructuras' in gramas_esp['ODS'][id_token]:
industria = industria + txt[t]+'\n'
industria_num+=1
if 'reducción de desigualdades' in gramas_esp['ODS'][id_token]:
desigualdades = desigualdades + txt[t]+'\n'
desigualdades_num +=1
if 'ciudades y comunidades sostenibles' in gramas_esp['ODS'][id_token]:
sostenibles = sostenibles + txt[t]+'\n'
sostenibles_num +=1
if 'producción y consumo responsable' in gramas_esp['ODS'][id_token]:
producción_consumo = producción_consumo + txt[t]+'\n'
producción_consumo_num +=1
if 'acción por el clima' in gramas_esp['ODS'][id_token]:
clima = clima + txt[t]+'\n'
clima_num +=1
if 'vida submarina' in gramas_esp['ODS'][id_token]:
submarina = submarina + txt[t]+'\n'
submarina_num +=1
if 'vida de ecosistemas terrestres' in gramas_esp['ODS'][id_token]:
terrestres = terrestres + txt[t]+'\n'
terrestres_num +=1
if 'paz, justicia e instituciones sólidas' in gramas_esp['ODS'][id_token]:
paz = paz + txt[t]+'\n'
paz_num +=1
if 'alianza para lograr los objetivos' in gramas_esp['ODS'][id_token]:
alianza = alianza + txt[t]+'\n'
alianza_num+=1
if 'hambre cero' in gramas_esp['ODS'][id_token]:
hambre = hambre + txt[t]+'\n'
hambre_num+=1
else:
continue
registro = pd.DataFrame()
registro['ID Documento'] = [i]
registro['Fin de la pobreza'] = [pobreza_num]
registro['TXT Fin de la pobreza'] = [pobreza]
registro['Hambre cero'] = [hambre_num]
registro['TXT Hambre cero'] = [hambre]
registro['Salud y bienestar'] = [salud_num]
registro['TXT Salud y bienestar'] = [salud]
registro['Educación de calidad'] = [educacion_num]
registro['TXT Educación de calidad'] = [educacion]
registro['Igualdad de género'] = [genero_num]
registro['TXT Igualdad de género'] = [genero]
registro['Agua limpia y saneamiento'] = [agua_num]
registro['TXT Agua limpia y saneamiento'] = [agua]
registro['Energía asequible y no contaminante'] = [energia_num]
registro['TXT Energía asequible y no contaminante'] = [energia]
registro['Trabajo decente y crecimiento económico'] = [trabajo_num]
registro['TXT Trabajo decente y crecimiento económico'] = [trabajo]
registro['Industria, innovación e infraestructuras'] = [industria_num]
registro['TXT Industria, innovación e infraestructuras'] = [industria]
registro['Reducción de desigualdades'] = [desigualdades_num]
registro['TXT Reducción de desigualdades'] = [desigualdades]
registro['Ciudades y comunidades sostenibles'] = [sostenibles_num]
registro['TXT Ciudades y comunidades sostenibles'] = [sostenibles]
registro['Producción y consumo responsable'] = [producción_consumo_num]
registro['TXT Producción y consumo responsable'] = [producción_consumo]
registro['Acción por el clima'] = [clima_num]
registro['TXT Acción por el clima'] = [clima]
registro['Vida submarina'] = [submarina_num]
registro['TXT Vida submarina'] = [submarina]
registro['Vida de ecosistemas terrestres'] = [terrestres_num]
registro['TXT Vida de ecosistemas terrestres'] = [terrestres]
registro['Paz, justicia e instituciones sólidas'] = [paz_num]
registro['TXT Paz, justicia e instituciones sólidas'] = [paz]
registro['Alianza para lograr los objetivos'] = [alianza_num]
registro['TXT Alianza para lograr los objetivos'] = [alianza]
matriz = pd.concat([matriz, registro])
matriz = matriz.reset_index(drop=True)
matriz.to_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Matriz_Clasificación_ODS.csv')
else:
for t in range(len(txt)):
i_split = txt[t].split()
for grama in i_split:
grama = str(grama).lower()
if grama.lower() in gramas_por['Gramas'].tolist():
for id_token in range(len(gramas_por)):
if grama.lower() == gramas_por['Gramas'][id_token] and grama.isalpha() and grama.isdigit() == False:
if 'educación de calidad' in gramas_por['ODS'][id_token]:
educacion = educacion + txt[t]+ '\n'
educacion_num +=1
if 'fin de la pobreza' in gramas_por['ODS'][id_token]:
pobreza = pobreza + txt[t]+'\n'
pobreza_num +=1
if 'salud y bienestar' in gramas_por['ODS'][id_token]:
salud = salud + txt[t]+'\n'
salud_num +=1
if 'igualdad de género' in gramas_por['ODS'][id_token]:
genero = genero + txt[t]+'\n'
genero_num +=1
if 'agua limpia y saneamiento' in gramas_por['ODS'][id_token]:
agua = agua + txt[t]+'\n'
agua_num +=1
if 'energía asequible y no contaminante' in gramas_por['ODS'][id_token]:
energia = energia + txt[t]+'\n'
energia_num +=1
if 'trabajo decente y crecimiento económico' in gramas_por['ODS'][id_token]:
trabajo = trabajo + txt[t]+'\n'
trabajo_num +=1
if 'industria, innovación e infraestructuras' in gramas_por['ODS'][id_token]:
industria = industria + txt[t]+'\n'
industria_num+=1
if 'reducción de desigualdades' in gramas_por['ODS'][id_token]:
desigualdades = desigualdades + txt[t]+'\n'
desigualdades_num +=1
if 'ciudades y comunidades sostenibles' in gramas_por['ODS'][id_token]:
sostenibles = sostenibles + txt[t]+'\n'
sostenibles_num +=1
if 'producción y consumo responsable' in gramas_por['ODS'][id_token]:
producción_consumo = producción_consumo + txt[t]+'\n'
producción_consumo_num +=1
if 'acción por el clima' in gramas_por['ODS'][id_token]:
clima = clima + txt[t]+'\n'
clima_num +=1
if 'vida submarina' in gramas_por['ODS'][id_token]:
submarina = submarina + txt[t]+'\n'
submarina_num +=1
if 'vida de ecosistemas terrestres' in gramas_por['ODS'][id_token]:
terrestres = terrestres + txt[t]+'\n'
terrestres_num +=1
if 'paz, justicia e instituciones sólidas' in gramas_por['ODS'][id_token]:
paz = paz + txt[t]+'\n'
paz_num +=1
if 'alianza para lograr los objetivos' in gramas_por['ODS'][id_token]:
alianza = alianza + txt[t]+'\n'
alianza_num+=1
if 'hambre cero' in gramas_por['ODS'][id_token]:
hambre = hambre + txt[t]+'\n'
hambre_num+=1
else:
continue
#elif gramas_esp['ODS'][id_token].lower() == 'hambre cero':
registro = pd.DataFrame()
registro['ID Documento'] = [i]
registro['Fin de la pobreza'] = [pobreza_num]
registro['TXT Fin de la pobreza'] = [pobreza]
registro['Hambre cero'] = [hambre_num]
registro['TXT Hambre cero'] = [hambre]
registro['Salud y bienestar'] = [salud_num]
registro['TXT Salud y bienestar'] = [salud]
registro['Educación de calidad'] = [educacion_num]
registro['TXT Educación de calidad'] = [educacion]
registro['Igualdad de género'] = [genero_num]
registro['TXT Igualdad de género'] = [genero]
registro['Agua limpia y saneamiento'] = [agua_num]
registro['TXT Agua limpia y saneamiento'] = [agua]
registro['Energía asequible y no contaminante'] = [energia_num]
registro['TXT Energía asequible y no contaminante'] = [energia]
registro['Trabajo decente y crecimiento económico'] = [trabajo_num]
registro['TXT Trabajo decente y crecimiento económico'] = [trabajo]
registro['Industria, innovación e infraestructuras'] = [industria_num]
registro['TXT Industria, innovación e infraestructuras'] = [industria]
registro['Reducción de desigualdades'] = [desigualdades_num]
registro['TXT Reducción de desigualdades'] = [desigualdades]
registro['Ciudades y comunidades sostenibles'] = [sostenibles_num]
registro['TXT Ciudades y comunidades sostenibles'] = [sostenibles]
registro['Producción y consumo responsable'] = [producción_consumo_num]
registro['TXT Producción y consumo responsable'] = [producción_consumo]
registro['Acción por el clima'] = [clima_num]
registro['TXT Acción por el clima'] = [clima]
registro['Vida submarina'] = [submarina_num]
registro['TXT Vida submarina'] = [submarina]
registro['Vida de ecosistemas terrestres'] = [terrestres_num]
registro['TXT Vida de ecosistemas terrestres'] = [terrestres]
registro['Paz, justicia e instituciones sólidas'] = [paz_num]
registro['TXT Paz, justicia e instituciones sólidas'] = [paz]
registro['Alianza para lograr los objetivos'] = [alianza_num]
registro['TXT Alianza para lograr los objetivos'] = [alianza]
        matriz = pd.concat([matriz, registro])
#!/usr/bin/python3.7
# coding=utf-8
import argparse
import platform
import socket
import shutil
import random
import csv
import pathlib
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import torch.nn as nn
from tqdm import tqdm
from glob import glob
from torch.cuda.amp import autocast, GradScaler
from torch.utils.tensorboard import SummaryWriter
from data import *
from utils import *
from model import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2" # print less verbose log
SEED = 2020
if SEED >= 0:
# seed_everything
random.seed(SEED)
os.environ['PYTHONHASHSEED'] = str(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def _show_param_distribution():
gradss = []
for p in filter(lambda p: p.grad is not None, model.parameters()):
# p.shape: [x, y], [x]
gradss.append(p.grad.detach().cpu().numpy().reshape(-1))
grads = np.concatenate(gradss)
grads = np.log(np.abs(grads) + 1e-10)
plt.figure()
plt.hist(grads, bins=10)
plt.show()
print(' Grad < clip_grad % = {}, LOG Grad max = {:.4f}, Grad norm2 = {:.4f}'
.format(np.sum(grads < grad_norm_clip) / grads.size, np.max(grads), np.linalg.norm(grads)))
def _join_ljust(words, width=9):
"""join list of str to fixed width, left just"""
return ' '.join(map(lambda s: s.ljust(width), words)).strip()
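
# Small illustration (added for clarity, not in the original script): each word is
# left-justified to width 9 before joining, so log/CSV headers line up in columns.
def _example_log_header():
    return _join_ljust(['Epoch', 'TrainLoss', 'ValF1'])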
def plot_model_history(log_path, plot_loss=True):
train_loss, train_f1, val_loss, val_f1 = [], [], [], []
with open(log_path) as f:
f_csv = csv.reader(f)
headers = next(f_csv)
for row in f_csv:
if (not row) or row[0] == 'Epoch':
continue
if '#' in row[0]:
break
row = list(map(float, row))
train_loss.append(row[1])
train_f1.append(row[2])
val_loss.append(row[3])
val_f1.append(row[4])
plt.plot(train_f1, linewidth=0.5)
plt.plot(val_f1)
plt.title('Model F1w')
plt.ylabel('F1w')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
if plot_loss:
plt.figure()
plt.plot(train_loss, linewidth=0.5)
plt.plot(val_loss)
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
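
# Usage sketch (added; 'logs/train_log.csv' is an assumed path): the CSV must contain
# per-epoch rows of [epoch, train loss, train F1, val loss, val F1], which is the
# layout parsed by the reader above.
def _example_plot_history():
    plot_model_history('logs/train_log.csv', plot_loss=True)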
def plot_confusion_matrix():
model.eval()
f2 = lambda n: int((n + 1) / 2)
n2 = f2(n_label)
cm = np.zeros((n2, n2))
n_correct = 0
n_valid = 0
with torch.no_grad():
for inputs, att_mask, labels in val_data_loader:
# inputs: token_ids, labels: tag_ids
inputs, att_mask, labels = inputs.to(device), att_mask.to(device), labels.to(device).view(-1)
with autocast(enabled=args.fp16):
outputs = model(inputs, att_mask) # shape: [bs, sql, n_tags]
outputs = outputs.view(-1, outputs.shape[-1]) # [bs*sql, n_tags]
loss = criterion(outputs, labels)
_, predictions = torch.max(outputs, 1) # return (value, index)
n_correct += torch.sum(predictions == labels)
for i in range(labels.shape[0]):
if labels[i] >= 0:
cm[f2(labels[i])][f2(predictions[i])] += 1
n_valid += 1
log(f'Confusion matrix: val acc = {n_correct / n_valid:4f} ({n_correct}/{n_valid})')
    cm = cm / cm.sum(axis=1)[:, None]  # use [:, None] to reshape it into a column vector
cm = cm[list(range(1, n2)) + [0], :]
cm = cm[:, list(range(1, n2)) + [0]]
tags2 = [corpus.tags[i].replace('I-', '') for i in range(0, len(corpus.tags), 2)]
tags2 = tags2[1:] + [tags2[0]]
    df_cm = pd.DataFrame(cm, tags2, tags2)
# -*- coding: utf-8 -*-# -*- coding: utf-8 -*-
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning) # noqa
import logging
import AFQ.data as afd
from AFQ.api.participant import ParticipantAFQ
from AFQ.api.utils import wf_sections, add_method_descriptions
import AFQ.viz.utils as vut
from AFQ.utils.parallel import parfor
from dipy.io.stateful_tractogram import StatefulTractogram, Space
from dipy.io.streamline import load_tractogram
import dipy.tracking.streamlinespeed as dps
import dipy.tracking.streamline as dts
from AFQ.version import version as pyafq_version
import pandas as pd
import numpy as np
import os
import os.path as op
import json
import s3fs
from time import time
import nibabel as nib
from bids.layout import BIDSLayout
import bids.config as bids_config
try:
bids_config.set_option('extension_initial_dot', True)
except ValueError:
pass
try:
import afqbrowser as afqb
using_afqb = True
except (ImportError, ModuleNotFoundError):
using_afqb = False
__all__ = ["GroupAFQ"]
# get rid of unnecessary columns in df
def clean_pandas_df(df):
df = df.reset_index(drop=True)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
return df
# this is parallelized below
def _getter_helper(wf_dict, attr_name):
return wf_dict[attr_name]
@add_method_descriptions
class GroupAFQ(object):
"""
"""
def __init__(self,
bids_path,
bids_filters={"suffix": "dwi"},
preproc_pipeline="all",
participant_labels=None,
output_dir=None,
parallel_params={"engine": "serial"},
bids_layout_kwargs={},
**kwargs):
'''
Initialize a GroupAFQ object from a BIDS dataset.
Parameters
----------
bids_path : str
The path to preprocessed diffusion data organized in a BIDS
dataset. This should contain a BIDS derivative dataset with
preprocessed dwi/bvals/bvecs.
bids_filters : dict
Filter to pass to bids_layout.get when finding DWI files.
Default: {"suffix": "dwi"}
preproc_pipeline : str, optional.
The name of the pipeline used to preprocess the DWI data.
Default: "all".
participant_labels : list or None, optional
List of participant labels (subject IDs) to perform
processing on. If None, all subjects are used.
Default: None
output_dir : str or None, optional
Path to output directory. If None, outputs are put
in a AFQ pipeline folder in the derivatives folder of
the BIDS directory. pyAFQ will use existing derivatives
from the output directory if they exist, instead of recalculating
them (this means you need to clear the output folder if you want
to recalculate a derivative).
Default: None
parallel_params : dict, optional
Parameters to pass to parfor in AFQ.utils.parallel,
to parallelize computations across subjects and sessions.
Set "n_jobs" to -1 to automatically parallelize as
the number of cpus. Here is an example for how to do
multiprocessing with 4 cpus:
{"n_jobs": 4, "engine": "joblib", "backend": "loky"}
Default: {"engine": "serial"}
bids_layout_kwargs: dict, optional
Additional arguments to give to BIDSLayout from pybids.
For large datasets, try:
{"validate": False, "index_metadata": False}
Default: {}
kwargs : additional optional parameters
You can set additional parameters for any step
of the process. See :ref:`usage/kwargs` for more details.
Examples
--------
api.GroupAFQ(my_path, csd_sh_order=4)
api.GroupAFQ(
my_path,
reg_template_spec="mni_t2", reg_subject_spec="b0")
'''
if not isinstance(bids_path, str):
raise TypeError("bids_path must be a string")
if not op.exists(bids_path):
raise ValueError("bids_path not found")
if not op.exists(op.join(bids_path, "dataset_description.json")):
raise ValueError("There must be a dataset_description.json"
+ " in bids_path")
if not isinstance(bids_filters, dict):
raise TypeError("bids_filters must be a dict")
# preproc_pipeline typechecking handled by pyBIDS
if participant_labels is not None\
and not isinstance(participant_labels, list):
raise TypeError(
"participant_labels must be either a list or None")
if output_dir is not None\
and not isinstance(output_dir, str):
raise TypeError(
"output_dir must be either a str or None")
if not isinstance(parallel_params, dict):
raise TypeError("parallel_params must be a dict")
if not isinstance(bids_layout_kwargs, dict):
raise TypeError("bids_layout_kwargs must be a dict")
self.logger = logging.getLogger('AFQ.api')
self.parallel_params = parallel_params
self.wf_dict = {}
# validate input and fail early
if not op.exists(bids_path):
raise ValueError(f'Unable to locate BIDS dataset in: {bids_path}')
# This is where all the outputs will go:
if output_dir is None:
self.afq_path = op.join(bids_path, 'derivatives', 'afq')
self.afqb_path = op.join(bids_path, 'derivatives', 'afq_browser')
else:
self.afq_path = output_dir
self.afqb_path = op.join(output_dir, 'afq_browser')
# Create it as needed:
os.makedirs(self.afq_path, exist_ok=True)
bids_layout = BIDSLayout(
bids_path, derivatives=True, **bids_layout_kwargs)
bids_description = bids_layout.description
# check that any files exist in the derivatives folder,
# not including the dataset_description.json files
# the second check may be particularly useful in checking
# that the derivatives folder is well-defined
if len(bids_layout.get())\
- len(bids_layout.get(extension="json")) < 1:
raise ValueError(
f"No non-json files recognized by pyBIDS in {bids_path}")
if len(bids_layout.get(scope=preproc_pipeline))\
- len(bids_layout.get(
scope=preproc_pipeline,
extension="json")) < 1:
raise ValueError((
f"No non-json files recognized by "
f"pyBIDS in the pipeline: {preproc_pipeline}"))
# Add required metadata file at top level (inheriting as needed):
pipeline_description = {
"Name": bids_description["Name"],
"BIDSVersion": bids_description["BIDSVersion"],
"PipelineDescription": {"Name": "pyAFQ",
"Version": pyafq_version}}
pl_desc_file = op.join(self.afq_path, 'dataset_description.json')
with open(pl_desc_file, 'w') as outfile:
json.dump(pipeline_description, outfile)
self.subjects = bids_layout.get(return_type='id', target='subject')
if not len(self.subjects):
raise ValueError(
"`bids_path` contains no subjects in derivatives folders."
+ " This could be caused by derivatives folders not following"
+ " the BIDS format.")
if participant_labels is not None:
filtered_subjects = []
subjects_found_printed = False
for subjectID in participant_labels:
subjectID = str(subjectID)
if subjectID not in self.subjects:
self.logger.warning((
f"Subject {subjectID} specified in "
f"`participant_labels` but not found "
f"in BIDS derivatives folders"))
if not subjects_found_printed:
subjects_found_printed = True
self.logger.warning((
f"Only these subjects found in BIDS "
f"derivatives folders: {self.subjects}"))
else:
filtered_subjects.append(subjectID)
self.subjects = filtered_subjects
if not len(self.subjects):
raise ValueError(
"No subjects specified in `participant_labels` "
+ " found in BIDS derivatives folders."
+ " See above warnings.")
sessions = bids_layout.get(return_type='id', target='session')
if len(sessions):
self.sessions = sessions
else:
self.sessions = [None]
# do not bother to parallelize if less than 2 subject-sessions
if len(self.sessions) * len(self.subjects) < 2:
self.parallel_params["engine"] = "serial"
# do not parallelize segmentation if parallelizing across
# subject-sessions
if self.parallel_params["engine"] != "serial":
if "segmentation_params" not in kwargs:
kwargs["segmentation_params"] = {}
if "parallel_segmentation" not in kwargs["segmentation_params"]:
kwargs["segmentation_params"]["parallel_segmentation"] = {}
kwargs["segmentation_params"]["parallel_segmentation"]["engine"] =\
"serial"
self.valid_sub_list = []
self.valid_ses_list = []
for subject in self.subjects:
self.wf_dict[subject] = {}
for session in self.sessions:
this_kwargs = kwargs.copy()
results_dir = op.join(self.afq_path, 'sub-' + subject)
if session is not None:
results_dir = op.join(results_dir, 'ses-' + session)
dwi_bids_filters = {
"subject": subject,
"session": session,
"return_type": "filename",
"scope": preproc_pipeline,
"extension": "nii.gz",
"suffix": "dwi",
}
dwi_bids_filters.update(bids_filters)
dwi_files = bids_layout.get(**dwi_bids_filters)
if (not len(dwi_files)):
self.logger.warning(
f"No dwi found for subject {subject} and session "
f"{session}. Skipping.")
continue
os.makedirs(results_dir, exist_ok=True)
dwi_data_file = dwi_files[0]
# For bvals and bvecs, use ``get_bval()`` and ``get_bvec()`` to
# walk up the file tree and inherit the closest bval and bvec
# files. Maintain input ``bids_filters`` in case user wants to
# specify acquisition labels, but pop suffix since it is
# already specified inside ``get_bvec()`` and ``get_bval()``
suffix = bids_filters.pop("suffix", None)
bvec_file = bids_layout.get_bvec(
dwi_data_file,
**bids_filters)
bval_file = bids_layout.get_bval(
dwi_data_file,
**bids_filters)
if suffix is not None:
bids_filters["suffix"] = suffix
self.valid_sub_list.append(subject)
self.valid_ses_list.append(session)
this_pAFQ = ParticipantAFQ(
dwi_data_file,
bval_file, bvec_file,
results_dir,
bids_info={
"bids_layout": bids_layout,
"subject": subject,
"session": session},
**this_kwargs)
self.wf_dict[subject][str(session)] = this_pAFQ.wf_dict
def __getattribute__(self, attr):
# check if normal attr exists first
try:
return object.__getattribute__(self, attr)
except AttributeError:
pass
# find what name to use
first_dict =\
self.wf_dict[self.valid_sub_list[0]][str(self.valid_ses_list[0])]
attr_file = attr + "_file"
attr_name = None
if attr in first_dict:
attr_name = attr
section = None
elif attr_file in first_dict:
attr_name = attr_file
section = None
else:
for sub_attr in wf_sections:
if attr in first_dict[sub_attr]:
attr_name = attr
section = sub_attr
break
elif attr_file in first_dict[sub_attr]:
attr_name = attr_file
section = sub_attr
break
# attr not found, allow typical AttributeError
if attr_name is None:
return object.__getattribute__(self, attr)
# iterate over subjects / sessions,
# decide if they need to be calculated or not
in_list = []
to_calc_list = []
results = {}
for ii, subject in enumerate(self.valid_sub_list):
if subject not in results:
results[subject] = {}
session = self.valid_ses_list[ii]
wf_dict = self.wf_dict[subject][str(session)]
if section is not None:
wf_dict = wf_dict[section]
if ((self.parallel_params.get("engine", False) != "serial")
and (hasattr(wf_dict, "efferents"))
and (attr_name not in wf_dict.efferents)):
in_list.append((wf_dict))
to_calc_list.append((subject, session))
else:
results[subject][session] =\
_getter_helper(wf_dict, attr_name)
# if some need to be calculated, do those in parallel
if len(to_calc_list) > 0:
par_results = parfor(
_getter_helper, in_list,
func_args=[attr_name],
**self.parallel_params)
for i, subses in enumerate(to_calc_list):
subject, session = subses
results[subject][session] = par_results[i]
# If only one session, collapse session dimension
if len(self.sessions) == 1:
for subject in self.valid_sub_list:
results[subject] = results[subject][self.valid_ses_list[0]]
return results
def combine_profiles(self):
tract_profiles_dict = self.profiles
if len(self.sessions) > 1:
tract_profiles_list = []
for _, subject_dict in tract_profiles_dict.items():
tract_profiles_list.extend(subject_dict.values())
else:
tract_profiles_list = list(tract_profiles_dict.values())
_df = combine_list_of_profiles(tract_profiles_list)
out_file = op.abspath(op.join(
self.afq_path, "tract_profiles.csv"))
os.makedirs(op.dirname(out_file), exist_ok=True)
_df = clean_pandas_df(_df)
_df.to_csv(out_file, index=False)
return _df
def get_streamlines_json(self):
sls_json_fname = op.abspath(op.join(
self.afq_path, "afqb_streamlines.json"))
if not op.exists(sls_json_fname):
subses_info = []
def load_next_subject():
subses_idx = len(subses_info)
sub = self.valid_sub_list[subses_idx]
ses = self.valid_ses_list[subses_idx]
if len(self.sessions) > 1:
this_bundles_file = self.clean_bundles[sub][ses]
this_mapping = self.mapping[sub][ses]
this_img = nib.load(self.dwi[sub][ses])
else:
this_bundles_file = self.clean_bundles[sub]
this_mapping = self.mapping[sub]
this_img = nib.load(self.dwi[sub])
this_sft = load_tractogram(
this_bundles_file,
this_img,
Space.VOX)
subses_info.append((this_sft, this_img, this_mapping))
bundle_dict = self.bundle_dict[
self.valid_sub_list[0]]
if len(self.sessions) > 1:
bundle_dict = bundle_dict[self.valid_ses_list[0]]
sls_dict = {}
load_next_subject() # load first subject
for b in bundle_dict.keys():
if b != "whole_brain":
for i in range(len(self.valid_sub_list)):
sft, img, mapping = subses_info[i]
idx = np.where(
sft.data_per_streamline['bundle']
== bundle_dict[b]['uid'])[0]
# use the first subses that works
# otherwise try each successive subses
if len(idx) == 0:
# break if we run out of subses
if i + 1 >= len(self.valid_sub_list):
break
# load subses if not already loaded
if i + 1 >= len(subses_info):
load_next_subject()
continue
if len(idx) > 100:
idx = np.random.choice(
idx, size=100, replace=False)
these_sls = sft.streamlines[idx]
these_sls = dps.set_number_of_points(these_sls, 100)
tg = StatefulTractogram(
these_sls,
img,
Space.VOX)
tg.to_rasmm()
delta = dts.values_from_volume(
mapping.forward,
tg.streamlines, np.eye(4))
moved_sl = dts.Streamlines(
[d + s for d, s in zip(delta, tg.streamlines)])
moved_sl = np.asarray(moved_sl)
median_sl = np.median(moved_sl, axis=0)
sls_dict[b] = {"coreFiber": median_sl.tolist()}
for ii, sl_idx in enumerate(idx):
sls_dict[b][str(sl_idx)] = moved_sl[ii].tolist()
break
with open(sls_json_fname, 'w') as fp:
json.dump(sls_dict, fp)
return sls_json_fname
def export_all(self, viz=True, afqbrowser=True, xforms=True,
indiv=True):
""" Exports all the possible outputs
Parameters
----------
viz : bool
Whether to output visualizations. This includes tract profile
plots, a figure containing all bundles, and, if using the AFQ
segmentation algorithm, individual bundle figures.
Default: True
afqbrowser : bool
Whether to output an AFQ-Browser from this AFQ instance.
Default: True
xforms : bool
Whether to output the reg_template image in subject space and,
depending on if it is possible based on the mapping used, to
output the b0 in template space.
Default: True
indiv : bool
Whether to output individual bundles in their own files, in
addition to the one file containing all bundles. If using
the AFQ segmentation algorithm, individual ROIs are also
output.
Default: True
"""
start_time = time()
seg_params = self.segmentation_params[
self.valid_sub_list[0]]
if len(self.sessions) > 1:
seg_params = seg_params[self.valid_ses_list[0]]
seg_algo = seg_params.get("seg_algo", "AFQ")
if xforms:
try:
self.b0_warped
except Exception as e:
self.logger.warning((
"Failed to export warped b0. This could be because your "
"mapping type is only compatible with transformation "
f"from template to subject space. The error is: {e}"))
self.template_xform
if indiv:
self.indiv_bundles
if seg_algo == "AFQ":
self.rois
self.sl_counts
self.profiles
# We combine profiles even if there is only 1 subject / session,
# as the combined profiles format may still be useful
# i.e., for AFQ Browser
self.combine_profiles()
if viz:
try:
self.tract_profile_plots
except ImportError as e:
plotly_err_message = vut.viz_import_msg_error("plot")
if str(e) != plotly_err_message:
raise
else:
self.logger.warning(plotly_err_message)
self.all_bundles_figure
if seg_algo == "AFQ":
self.indiv_bundles_figures
if afqbrowser:
self.assemble_AFQ_browser()
self.logger.info(
"Time taken for export all: " + str(time() - start_time))
def upload_to_s3(self, s3fs, remote_path):
""" Upload entire AFQ derivatives folder to S3"""
s3fs.put(self.afq_path, remote_path, recursive=True)
if op.exists(self.afqb_path):
s3fs.put(self.afqb_path, remote_path, recursive=True)
def assemble_AFQ_browser(self, output_path=None, metadata=None,
page_title="AFQ Browser", page_subtitle="",
page_title_link="", page_subtitle_link=""):
"""
Assembles an instance of the AFQ-Browser from this AFQ instance.
First, we generate the combined tract profile if it is not already
generated. This includes running the full AFQ pipeline if it has not
already run. The combined tract profile is one of the outputs of
export_all.
Second, we generate a streamlines.json file from the bundle
recognized in the first subject's first session.
Third, we call AFQ-Browser's assemble to assemble an AFQ-Browser
instance in output_path.
Parameters
----------
output_path : str
Path to location to create this instance of the browser in.
Called "target" in AFQ Browser API. If None,
bids_path/derivatives/afq_browser is used.
Default: None
metadata : str
Path to subject metadata csv file. If None, an metadata file
containing only subject ID is created. This file requires a
"subjectID" column to work.
Default: None
page_title : str
Page title. If None, prompt is sent to command line.
Default: "AFQ Browser"
page_subtitle : str
Page subtitle. If None, prompt is sent to command line.
Default: ""
page_title_link : str
Title hyperlink (including http(s)://).
If None, prompt is sent to command line.
Default: ""
page_subtitle_link : str
Subtitle hyperlink (including http(s)://).
If None, prompt is sent to command line.
Default: ""
"""
if not using_afqb:
self.logger.warning((
"AFQ Browser is not installed, so AFQ Browswer instance "
"cannot be assembled. AFQ Browser can be installed with: "
"`pip install pyAFQ[afqbrowser]` or "
"`pip install AFQ-Browser>=0.3`"))
return
if output_path is None:
output_path = self.afqb_path
os.makedirs(self.afqb_path, exist_ok=True)
# generate combined profiles csv
self.combine_profiles()
# generate streamlines.json file
sls_json_fname = self.get_streamlines_json()
afqb.assemble(
op.abspath(op.join(self.afq_path, "tract_profiles.csv")),
target=output_path,
metadata=metadata,
streamlines=sls_json_fname,
title=page_title,
subtitle=page_subtitle,
link=page_title_link,
sublink=page_subtitle_link)
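

# Usage sketch (added for illustration; the BIDS path is an assumption, not a real
# dataset): build a GroupAFQ object from a preprocessed BIDS directory, run the full
# export described in export_all(), and return the combined tract-profile table.
def _example_group_afq():
    my_afq = GroupAFQ("/path/to/bids_dataset")
    my_afq.export_all(viz=True, afqbrowser=True)
    return my_afq.combine_profiles()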
def download_and_combine_afq_profiles(bucket,
study_s3_prefix="", deriv_name=None,
out_file=None,
upload=False, session=None,
**kwargs):
"""
Download and combine tract profiles from different subjects / sessions
on an s3 bucket into one CSV.
Parameters
----------
bucket : str
The S3 bucket that contains the study data.
study_s3_prefix : str
The S3 prefix common to all of the study objects on S3.
out_file : filename, optional
Filename for the combined output CSV.
deriv_name : str, optional
If deriv_name is not None, it should be a string that specifies
which derivatives folder to download and combine profiles from.
upload : bool or str, optional
If True, upload the combined CSV to Amazon S3 at
bucket/study_s3_prefix/derivatives/afq. If a string,
assume string is an Amazon S3 URI and upload there.
Defaut: False
session : str, optional
Session to get CSVs from. If None, all sessions are used.
Default: None
kwargs : optional
Optional arguments to pass to S3BIDSStudy.
Returns
-------
Ouput CSV's pandas dataframe.
"""
if "subjects" not in kwargs:
kwargs["subjects"] = "all"
if "anon" not in kwargs:
kwargs["anon"] = False
if deriv_name is None:
deriv_name = True
with nib.tmpdirs.InTemporaryDirectory() as t_dir:
remote_study = afd.S3BIDSStudy(
"get_profiles",
bucket,
study_s3_prefix,
**kwargs)
remote_study.download(
t_dir,
include_modality_agnostic=False,
include_derivs=deriv_name,
include_derivs_dataset_description=True,
suffix="profiles.csv")
temp_study = BIDSLayout(t_dir, validate=False, derivatives=True)
if session is None:
profiles = temp_study.get(
extension='csv',
suffix='profiles',
return_type='filename')
else:
profiles = temp_study.get(
session=session,
extension='csv',
suffix='profiles',
return_type='filename')
df = combine_list_of_profiles(profiles)
df.to_csv("tmp.csv", index=False)
if upload is True:
bids_prefix = "/".join([bucket, study_s3_prefix]).rstrip("/")
fs = s3fs.S3FileSystem()
fs.put(
"tmp.csv",
"/".join([
bids_prefix,
"derivatives",
"afq",
"combined_tract_profiles.csv"
]))
elif isinstance(upload, str):
fs = s3fs.S3FileSystem()
fs.put("tmp.csv", upload.replace("s3://", ""))
if out_file is not None:
out_file = op.abspath(out_file)
os.makedirs(op.dirname(out_file), exist_ok=True)
df = clean_pandas_df(df)
df.to_csv(out_file, index=False)
return df
def combine_list_of_profiles(profile_fnames):
"""
Combine tract profiles from different subjects / sessions
into one CSV.
Parameters
----------
profile_fnames : list of str
List of csv filenames.
Returns
-------
Ouput CSV's pandas dataframe.
"""
dfs = []
for fname in profile_fnames:
        profiles = pd.read_csv(fname)
from django.shortcuts import render_to_response
from django.utils.cache import patch_response_headers
from django.http import JsonResponse
from core.views import initRequest, login_customrequired
from core.utils import is_json_request
from core.iDDS.useconstants import SubstitleValue
from core.iDDS.rawsqlquery import getRequests, getTransforms, getWorkFlowProgressItemized
from core.iDDS.algorithms import generate_requests_summary, parse_request
from core.libs.exlib import lower_dicts_in_list
from core.libs.DateEncoder import DateEncoder
import pandas as pd
CACHE_TIMEOUT = 20
OI_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
subtitleValue = SubstitleValue()
def prepare_requests_summary(workflows):
summary = {'status': {}, 'username': {}}
"""
completion
age
"""
for workflow in workflows:
summary['status'][workflow['r_status']] = summary['status'].get(workflow['r_status'], 0) + 1
if workflow['username'] == '':
workflow['username'] = "Not set"
summary['username'][workflow['username']] = summary['username'].get(workflow['username'], 0) + 1
return summary
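
# Small illustration (added; the two rows are made up): the summary counts workflows
# per request status and per username, mapping empty usernames to "Not set".
def _example_requests_summary():
    rows = [{'r_status': 'Transforming', 'username': 'jdoe'},
            {'r_status': 'Finished', 'username': ''}]
    return prepare_requests_summary(rows)  # {'status': {...}, 'username': {'jdoe': 1, 'Not set': 1}}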
def get_workflow_progress_data(request_params, **kwargs):
workflows_items = getWorkFlowProgressItemized(request_params, **kwargs)
    workflows_items = pd.DataFrame(workflows_items)
import pandas as pd
import os
import requests as req
import sys
import re
import dask.dataframe as dd
from lxml import etree
import io
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',
datefmt='%m-%d %H:%M',
# filename='/temp/myapp.log',
filemode='w')
# console = logging.StreamHandler()
logger = logging.getLogger('EdgarAnalyzer')
logger.setLevel('INFO')
# logger.addHandler(console)
dir_curr = os.path.abspath(os.path.dirname(__file__))
def ticker2cik(symbol):
symbol = str(symbol).upper()
cik = ticker2cik_sec(symbol) if ticker2cik_file(symbol) is None else ticker2cik_file(symbol)
return cik
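
# Usage sketch (added for illustration): resolve a ticker to its zero-padded 10-digit
# CIK string, preferring the local cik_mapping.csv and falling back to the SEC lookup.
def _example_ticker_lookup():
    return ticker2cik('AAPL')  # e.g. '0000320193' if the mapping resolves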
def conv_list(i):
list_res = None
    if isinstance(i, str) or isinstance(i, int):
list_res = [i]
elif isinstance(i, list):
list_res = i
return list_res
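# e.g. conv_list('10-K') -> ['10-K'], conv_list(['10-K', '10-Q']) -> ['10-K', '10-Q'],
# and any other type (e.g. None) returns None.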
def ticker2cik_file(symbol):
symbol = str(symbol).upper()
path_cik_mapping = os.path.join(dir_curr, 'config', 'cik_mapping.csv')
df_mapping = pd.read_csv(path_cik_mapping).set_index('Ticker')['CIK']
if symbol in df_mapping.index:
if df_mapping[[symbol]].shape[0] == 1:
cik = str(df_mapping[symbol]).zfill(10)
else:
logger.warning('non-unique CIK for Symbol={s} in cik_mapping.csv'.format(s=symbol))
cik = ticker2cik_sec(symbol)
else:
logger.warning('Symbol not found in cik_mapping.csv.')
cik = None
return cik
def ticker2cik_sec(symbol, update_mapping=True):
symbol = str(symbol).upper()
try:
uri = "http://www.sec.gov/cgi-bin/browse-edgar"
resp = req.get(uri, {'CIK': symbol, 'action': 'getcompany'})
results = re.compile(r'.*CIK=(\d{10}).*').findall(str(resp.content))
cik = str(results[0])
except Exception as ex:
logger.error(ex)
logger.error('Symbol not found in SEC')
cik = None
if update_mapping and (cik is not None):
update_cik(symbol, cik)
return cik
def update_cik(symbol, cik):
logger.warning('update cik_mapping symbol={s}, cik={c}'.format(s=symbol, c=cik))
symbol = str(symbol).upper()
path_cik_mapping = os.path.join(dir_curr, 'config', 'cik_mapping.csv')
df_mapping = pd.read_csv(path_cik_mapping).set_index('Ticker')
if symbol in df_mapping.index:
df_mapping = df_mapping.drop(symbol)
df_mapping.loc[symbol] = int(cik)
df_mapping.to_csv(path_cik_mapping)
def download_list(list_path, dir_report, uri='https://www.sec.gov/Archives/', force_download=False, threads_number=8):
from multiprocessing.pool import ThreadPool
list_file = [os.path.join(dir_report, p) for p in list_path]
if not force_download:
list_path = [p for p in list_path if not os.path.exists(os.path.join(dir_report, p))]
# list_url = [uri+p for p in list_path]
# list_file = [os.path.join(dir, p) for p in list_path]
def download_url(p):
r = req.get(uri + p, stream=True)
path_save = os.path.join(dir_report, p)
logger.info('downloading {f}'.format(f=path_save))
if r.status_code == 200:
dir_name = os.path.dirname(path_save)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
with open(path_save, 'w') as f:
f.write(r.content.decode('latin-1'))
# with open(path_save, 'wb') as f:
# for chunk in r:
# f.write(chunk)
else:
logger.error('error downloading {f}'.format(f=uri + p))
return path_save
with ThreadPool(threads_number) as th:
results = list(th.imap_unordered(download_url, list_path))
#res = [p for p in list_path if os.path.exists(os.path.join(dir_report, p))]
list_res = []
for f in list_file:
if os.path.exists(f):
list_res.append(f)
else:
logger.error('cannot find filing file: '+f)
#results = list(ThreadPool(threads_number).imap_unordered(download_url, list_path))
# for l in results:
# logger.info('downloaded '+l)
return list_res
def re_string(keyword, data):
s = re.search(r'<{kw}>([\s\S]*?)\n'.format(kw=keyword), data)
res = s.group(1) if s else None
return res
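# e.g. re_string('TYPE', '<TYPE>10-K\n<SEQUENCE>1\n') -> '10-K'; returns None
# if the keyword is absent.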
def re_tag(tag, data, find_all=False):
s = re.search(r'<{tag}>\n([\s\S]*?)\n<\/{tag}>'.format(tag=tag), data)
res = s.group(1) if s else None
return res
def node2dict(node):
d = {}
for c in node.iterchildren():
key = c.tag.split('}')[1] if '}' in c.tag else c.tag
value = c.text if c.text else node2dict(c)
d[key] = value
return d
def parse_ins(txt_ins, has_dimension=False):
xbrl_ins = re_tag('XBRL', txt_ins)
if xbrl_ins is None:
xbrl_ins = re_tag('XML', txt_ins)
xbrl_ins = xbrl_ins.replace('>\n', '>')
r_ins = etree.fromstring(xbrl_ins.encode('utf-8'))
ns_ins = {k:v for k,v in r_ins.nsmap.items() if k is not None}
if 'xbrli' not in ns_ins.keys():
logger.info('fix missing namespace xbrli. {s}'.format(s=ns_ins))
ns_ins['xbrli'] = "http://www.xbrl.org/2003/instance"
list_context = r_ins.findall(r'xbrli:context', namespaces=ns_ins)
list_period = [dict(i.attrib, **node2dict(i.find('xbrli:period', namespaces=ns_ins))) for i in list_context]
df_period = pd.DataFrame(list_period)
# if 'id' not in df_period.columns:
# print(r_ins[:10])
# print(r_ins.findall('context')[:10])
# print(len(list_context))
# print(len(list_period))
# print(df_period.head())
df_period = df_period.set_index('id')
# df_period.head()
list_unit = r_ins.findall('xbrli:unit', namespaces=ns_ins)
df_unit = pd.DataFrame([dict(i.attrib, **{'unit': i[0].text.split(':')[-1]})
for i in list_unit]).set_index('id')
# df_unit
list_dim = r_ins.xpath('.//*[@dimension]')
df_dim = pd.DataFrame([dict(d.attrib, **{'member': d.text,
'id': d.getparent().getparent().getparent().attrib['id']})
for d in list_dim]).set_index('id')
# df_dim.head()
list_measure = r_ins.xpath('.//*[@contextRef]')
df_measure = pd.DataFrame([dict(i.attrib, **{'measure': i.tag, 'value': i.text}) for i in list_measure])
# df_measure.head()
df_merge = df_measure.join(df_period, on='contextRef').join(df_unit, on='unitRef').join(df_dim, on='contextRef')
ns_reverse = {v: k for k, v in ns_ins.items()}
df_merge['ns'] = df_merge.measure.apply(lambda ns: ns_reverse[re.search('{(.*)}', ns).group(1)])
df_merge['item'] = df_merge['ns'] +":" +df_merge.measure.apply(lambda x: x.split('}')[-1])
# df_merge['endDate'] = df_merge.endDate
df_merge.endDate.update(df_merge.instant)
df_merge.startDate.update(df_merge.instant)
#parse dtype
df_merge.endDate = pd.to_datetime(df_merge.endDate, infer_datetime_format=True)
df_merge.startDate = pd.to_datetime(df_merge.startDate, infer_datetime_format=True)
    df_merge.value = pd.to_numeric(df_merge.value, errors='ignore', downcast='integer')
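    # The source is truncated here (dimension handling for has_dimension=True
    # is not reconstructed); a minimal sketch simply returns the merged facts.
    return df_merge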
import os
from pandas.testing import assert_frame_equal
import pandas as pd
import numpy as np
from yaetos.pandas_utils import load_csvs, load_df, save_pandas_local
# TODO: check to remove .reset_index(drop=True), using assert_frame_equal(d1, d2, check_index_type=False) instead
def test_load_csvs():
# Test multiple file option
path = 'tests/fixtures/data_sample/wiki_example/input/'
actual = load_csvs(path, read_kwargs={}).sort_values('uuid').reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
{'uuid': 'u4', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u5', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u6', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
# Test single file option
path = 'tests/fixtures/data_sample/wiki_example/input/part1.csv'
actual = load_csvs(path, read_kwargs={}).reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
def test_load_df():
# Test multiple file option
path = 'tests/fixtures/data_sample/wiki_example/input/'
actual = load_df(path, file_type='csv', read_func='read_csv', read_kwargs={}).sort_values('uuid').reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
{'uuid': 'u4', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u5', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u6', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
# Test single file option, csv
path = 'tests/fixtures/data_sample/wiki_example/input/part1.csv'
actual = load_df(path, file_type='csv', read_func='read_csv', read_kwargs={}).reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
    assert_frame_equal(actual, expected)
from itertools import groupby, zip_longest
from fractions import Fraction
from random import sample
import json
import pandas as pd
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
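# Example (hypothetical): with beatspermeasure[ix] = 4, a note starting 3 beats
# after the song's first note and lasting 1 beat has endpos = 4, so
# compute_completesmeasure_song(seq, ix) is True.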
#extract IOI in units of beat
#IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note
#for last note: beatfraction is taken
#Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody)
#
#extract beats per measure
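# Example (hypothetical): beatinsong = ['0', '1', '5/2'] gives successive
# differences ['1', '3/2']; appending the final beatfraction '1/2' yields
# IOI_beatfraction = ['1', '3/2', '1/2'].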
def extractFeatures(seq_iter, vocalfeatures=True):
count = 0
for seq in seq_iter:
count += 1
if count % 100 == 0:
print(count, end=' ')
pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
seq['features']['IOI_beatfraction'] = IOI_beatfraction
beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
seq['features']['beatspermeasure'] = beatspermeasure
phrasepos = seq['features']['phrasepos']
phrasestart_ix=[0]*len(phrasepos)
for ix in range(1,len(phrasestart_ix)):
if phrasepos[ix] < phrasepos[ix-1]:
phrasestart_ix[ix] = ix
else:
phrasestart_ix[ix] = phrasestart_ix[ix-1]
seq['features']['phrasestart_ix'] = phrasestart_ix
endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
seq['features']['endOfPhrase'] = endOfPhrase
cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(len(phrasepos))]
cb_s = [compute_completesbeat_song(seq, ix) for ix in range(len(phrasepos))]
seq['features']['completesmeasure_phrase'] = cm_p
seq['features']['completesbeat_phrase'] = cb_p
seq['features']['completesmeasure_song'] = cm_s
seq['features']['completesbeat_song'] = cb_s
if vocalfeatures:
#move lyric features to end of melisma:
#rhymes, rhymescontentwords, wordstress, noncontentword, wordend
#and compute rhyme_noteoffset and rhyme_beatoffset
if 'melismastate' in seq['features'].keys(): #vocal?
lyrics = seq['features']['lyrics']
phoneme = seq['features']['phoneme']
melismastate = seq['features']['melismastate']
rhymes = seq['features']['rhymes']
rhymescontentwords = seq['features']['rhymescontentwords']
wordend = seq['features']['wordend']
noncontentword = seq['features']['noncontentword']
wordstress = seq['features']['wordstress']
rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
lyrics_endmelisma, phoneme_endmelisma = [], []
from_ix = 0
inmelisma = False
for ix in range(len(phrasepos)):
if melismastate[ix] == 'start':
from_ix = ix
inmelisma = True
if melismastate[ix] == 'end':
if not inmelisma:
from_ix = ix
inmelisma = False
rhymes_endmelisma.append(rhymes[from_ix])
rhymescontentwords_endmelisma.append(rhymescontentwords[from_ix])
wordend_endmelisma.append(wordend[from_ix])
noncontentword_endmelisma.append(noncontentword[from_ix])
wordstress_endmelisma.append(wordstress[from_ix])
lyrics_endmelisma.append(lyrics[from_ix])
phoneme_endmelisma.append(phoneme[from_ix])
else:
rhymes_endmelisma.append(False)
rhymescontentwords_endmelisma.append(False)
wordend_endmelisma.append(False)
noncontentword_endmelisma.append(False)
wordstress_endmelisma.append(False)
lyrics_endmelisma.append(None)
phoneme_endmelisma.append(None)
seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
seq['features']['wordend_endmelisma'] = wordend_endmelisma
seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
#compute rhyme_noteoffset and rhyme_beatoffset
rhyme_noteoffset = [0]
rhyme_beatoffset = [0.0]
previous = 0
previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
for ix in range(1,len(rhymescontentwords_endmelisma)):
if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
previous = ix
previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
rhyme_noteoffset.append(ix - previous)
rhyme_beatoffset.append(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
else:
#vocal features requested, but not present.
#skip melody
continue
#Or do this?
if False:
length = len(phrasepos)
seq['features']['rhymes_endmelisma'] = [None] * length
seq['features']['rhymescontentwords_endmelisma'] = [None] * length
seq['features']['wordend_endmelisma'] = [None] * length
seq['features']['noncontentword_endmelisma'] = [None] * length
seq['features']['wordstress_endmelisma'] = [None] * length
seq['features']['lyrics_endmelisma'] = [None] * length
seq['features']['phoneme_endmelisma'] = [None] * length
yield seq
class NoFeaturesError(Exception):
    def __init__(self, arg):
        self.value = arg
class NoTrigramsError(Exception):
    def __init__(self, arg):
        self.value = arg
    def __str__(self):
        return repr(self.value)
#endix is index of last note + 1
def computeSumFractions(fractions, startix, endix):
res = 0.0
for fr in fractions[startix:endix]:
res = res + float(Fraction(fr))
return res
#make groups of indices with the same successive pitch, but (optionally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be allowed (contourfourth)
#returns tuples (ix of first note in group, ix of last note in group + 1)
#crossPhraseBreak=False splits on phrase break. N.B. Is Using GroundTruth!
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
res = []
if crossPhraseBreak:
for _, g in groupby( enumerate(midipitch), key=lambda x:x[1]):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
else: #N.B. This uses the ground truth
for _, g in groupby( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
return res
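# Example (hypothetical): breakpitchlist([60, 60, 62], [0, 0, 0]) returns
# [(0, 2), (2, 3)]; repeated pitches are merged into a single span.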
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \
( True in endOfPhrase[tr[1][0]:tr[1][1]] ) )
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
pgrams = {}
arfftype = {}
for ix, seq in enumerate(corpus):
if endat is not None:
if ix >= endat:
continue
if ix < startat:
continue
if not ix%100:
print(ix, end=' ')
songid = seq['id']
try:
pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
if 'melismastate' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
if 'informationcontent' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informationcontent', typeconv=float)
except NoFeaturesError:
print(songid, ": No features extracted.")
except NoTrigramsError:
print(songid, ": No trigrams extracted")
#if ix > startat:
# if arfftype.keys() != arfftype_new.keys():
# print("Warning: Melodies have different feature sets.")
# print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
#Keep largest set of features possible. N.B. no guarantee that all features in arfftype are in each sequence.
arfftype.update(arfftype_new)
#concat melodies
pgrams = pd.concat([v for v in pgrams.values()])
return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
# some aliases
scaledegree = seq['features']['scaledegree']
endOfPhrase = seq['features']['endOfPhrase']
midipitch = seq['features']['midipitch']
phrase_ix = seq['features']['phrase_ix']
if pgram_type == "pitch":
event_spans = breakpitchlist(midipitch, phrase_ix) #allow pitches to cross phrase break
elif pgram_type == "note":
event_spans = list(zip(range(len(scaledegree)),range(1,len(scaledegree)+1)))
else:
raise UnknownPGramType(pgram_type)
# make trigram of spans
event_spans = event_spans + [(None, None), (None, None)]
pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
# If skipPhraseCrossing prune trigrams crossing phrase boundaries. WHY?
#Why actually? e.g. kindr154 prhases of 2 pitches
if skipPhraseCrossing:
pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
if len(pgram_span_ixs) == 0:
raise NoTrigramsError(seq['id'])
# create dataframe with pgram names as index
pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
pgrams = pd.DataFrame(index=pgram_ids)
pgrams['ix0_0'] = pd.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix0_1'] = pd.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_0'] = pd.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_1'] = pd.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_0'] = pd.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_1'] = pd.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_0'] = pd.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_1'] = pd.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_0'] = pd.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_1'] = pd.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
#add tune family ids and songids
pgrams['tunefamily'] = seq['tunefamily']
pgrams['songid'] = seq['id']
pgrams, arfftype = extractPgramFeatures(pgrams, seq)
return pgrams, arfftype
def getBeatDuration(timesig):
try:
dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength)
except TimeSignatureException:
dur = float(Fraction(timesig) / Fraction('1/4'))
return dur
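# e.g. getBeatDuration('4/4') -> 1.0 and getBeatDuration('6/8') -> 1.5 (dotted
# quarter); the fallback converts the raw time-signature fraction into
# quarter-note units when music21 cannot parse it.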
def oneCrossRelation(el1, el2, typeconv):
if pd.isna(el1) or pd.isna(el2):
return np.nan
return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+'
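# e.g. oneCrossRelation(3, 5, int) -> '+', oneCrossRelation(5, 5, int) -> '=',
# and NaN inputs propagate to NaN.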
def addCrossRelations(pgrams, arfftype, featurename, newname=None, typeconv=int):
postfixes = {
1 : 'first',
2 : 'second',
3 : 'third',
4 : 'fourth',
5 : 'fifth'
}
if newname is None:
newname = featurename
for ix1 in range(1,6):
for ix2 in range(ix1+1,6):
featname = newname + postfixes[ix1] + postfixes[ix2]
source = zip(pgrams[featurename + postfixes[ix1]], pgrams[featurename + postfixes[ix2]])
pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source]
arfftype[featname] = '{-,=,+}'
return pgrams, arfftype
def extractPgramFeatures(pgrams, seq):
# vocal?
vocal = False
if 'melismastate' in seq['features'].keys():
vocal = True
arfftype = {}
# some aliases
scaledegree = seq['features']['scaledegree']
beatstrength = seq['features']['beatstrength']
diatonicpitch = seq['features']['diatonicpitch']
midipitch = seq['features']['midipitch']
chromaticinterval = seq['features']['chromaticinterval']
timesig = seq['features']['timesignature']
metriccontour = seq['features']['metriccontour']
beatinsong = seq['features']['beatinsong']
beatinphrase = seq['features']['beatinphrase']
endOfPhrase = seq['features']['endOfPhrase']
phrasestart_ix = seq['features']['phrasestart_ix']
phrase_ix = seq['features']['phrase_ix']
completesmeasure_song = seq['features']['completesmeasure_song']
completesbeat_song = seq['features']['completesbeat_song']
completesmeasure_phrase = seq['features']['completesmeasure_phrase']
completesbeat_phrase = seq['features']['completesbeat_phrase']
IOIbeatfraction = seq['features']['IOI_beatfraction']
nextisrest = seq['features']['nextisrest']
gpr2a = seq['features']['gpr2a_Frankland']
gpr2b = seq['features']['gpr2b_Frankland']
gpr3a = seq['features']['gpr3a_Frankland']
gpr3d = seq['features']['gpr3d_Frankland']
gprsum = seq['features']['gpr_Frankland_sum']
pprox = seq['features']['pitchproximity']
prev = seq['features']['pitchreversal']
lbdmpitch = seq['features']['lbdm_spitch']
lbdmioi = seq['features']['lbdm_sioi']
lbdmrest = seq['features']['lbdm_srest']
lbdm = seq['features']['lbdm_boundarystrength']
if vocal:
wordstress = seq['features']['wordstress_endmelisma']
noncontentword = seq['features']['noncontentword_endmelisma']
wordend = seq['features']['wordend_endmelisma']
rhymescontentwords = seq['features']['rhymescontentwords_endmelisma']
rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset']
rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset']
melismastate = seq['features']['melismastate']
phrase_count = max(phrase_ix) + 1
pgrams['scaledegreefirst'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['scaledegreesecond'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['scaledegreethird'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['scaledegreefourth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['scaledegreefifth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['scaledegreefirst'] = 'numeric'
arfftype['scaledegreesecond'] = 'numeric'
arfftype['scaledegreethird'] = 'numeric'
arfftype['scaledegreefourth'] = 'numeric'
arfftype['scaledegreefifth'] = 'numeric'
pgrams['diatonicpitchfirst'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['diatonicpitchsecond'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['diatonicpitchthird'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['diatonicpitchfourth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['diatonicpitchfifth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['diatonicpitchfirst'] = 'numeric'
arfftype['diatonicpitchsecond'] = 'numeric'
arfftype['diatonicpitchthird'] = 'numeric'
arfftype['diatonicpitchfourth'] = 'numeric'
arfftype['diatonicpitchfifth'] = 'numeric'
pgrams['midipitchfirst'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['midipitchsecond'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['midipitchthird'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['midipitchfourth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['midipitchfifth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['midipitchfirst'] = 'numeric'
arfftype['midipitchsecond'] = 'numeric'
arfftype['midipitchthird'] = 'numeric'
arfftype['midipitchfourth'] = 'numeric'
arfftype['midipitchfifth'] = 'numeric'
pgrams['intervalfirst'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['intervalsecond'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['intervalthird'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['intervalfourth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['intervalfifth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['intervalfirst'] = 'numeric'
arfftype['intervalsecond'] = 'numeric'
arfftype['intervalthird'] = 'numeric'
arfftype['intervalfourth'] = 'numeric'
arfftype['intervalfifth'] = 'numeric'
parsons = {-1:'-', 0:'=', 1:'+'}
#intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations
#pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int1) else np.nan for int1, int2 in \
# zip(pgrams['intervalfirst'],pgrams['intervalsecond'])]
#pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \
# zip(pgrams['intervalsecond'],pgrams['intervalthird'])]
#pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalthird'],pgrams['intervalfourth'])]
#pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalfourth'],pgrams['intervalfifth'])]
#arfftype['intervalcontoursecond'] = '{-,=,+}'
#arfftype['intervalcontourthird'] = '{-,=,+}'
#arfftype['intervalcontourfourth'] = '{-,=,+}'
#arfftype['intervalcontourfifth'] = '{-,=,+}'
#intervals of which second tone has center of gravity according to Vos 2002 + octave equivalents
VosCenterGravityASC = np.array([1, 5, 8])
VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11])
VosCenterGravity = list(VosCenterGravityDESC-24) + \
list(VosCenterGravityDESC-12) + \
list(VosCenterGravityDESC) + \
list(VosCenterGravityASC) + \
list(VosCenterGravityASC+12) + \
list(VosCenterGravityASC+24)
pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfirst']]
pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']]
pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']]
pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfourth']]
pgrams['VosCenterGravityfifth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfifth']]
arfftype['VosCenterGravityfirst'] = '{True, False}'
arfftype['VosCenterGravitysecond'] = '{True, False}'
arfftype['VosCenterGravitythird'] = '{True, False}'
arfftype['VosCenterGravityfourth'] = '{True, False}'
arfftype['VosCenterGravityfifth'] = '{True, False}'
VosHarmony = {
0: 0,
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 1,
7: 6,
8: 5,
9: 4,
10: 3,
11: 2,
12: 7
}
#interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633)
def vosint(intervals):
return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not pd.isna(i) else np.nan for i in intervals]
pgrams['VosHarmonyfirst'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16")
pgrams['VosHarmonysecond'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16")
pgrams['VosHarmonythird'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16")
pgrams['VosHarmonyfourth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16")
pgrams['VosHarmonyfifth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16")
arfftype['VosHarmonyfirst'] = 'numeric'
arfftype['VosHarmonysecond'] = 'numeric'
arfftype['VosHarmonythird'] = 'numeric'
arfftype['VosHarmonyfourth'] = 'numeric'
arfftype['VosHarmonyfifth'] = 'numeric'
if 'informationcontent' in seq['features'].keys():
informationcontent = seq['features']['informationcontent']
pgrams['informationcontentfirst'] = [informationcontent[int(ix)] for ix in pgrams['ix0_0']]
pgrams['informationcontentsecond'] = [informationcontent[int(ix)] for ix in pgrams['ix1_0']]
pgrams['informationcontentthird'] = [informationcontent[int(ix)] for ix in pgrams['ix2_0']]
pgrams['informationcontentfourth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['informationcontentfifth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['informationcontentfirst'] = 'numeric'
arfftype['informationcontentsecond'] = 'numeric'
arfftype['informationcontentthird'] = 'numeric'
arfftype['informationcontentfourth'] = 'numeric'
arfftype['informationcontentfifth'] = 'numeric'
pgrams['contourfirst'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfirst']]
pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']]
pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']]
pgrams['contourfourth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfourth']]
pgrams['contourfifth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfifth']]
arfftype['contourfirst'] = '{-,=,+}'
arfftype['contoursecond'] = '{-,=,+}'
arfftype['contourthird'] = '{-,=,+}'
arfftype['contourfourth'] = '{-,=,+}'
arfftype['contourfifth'] = '{-,=,+}'
###########################################3
#derived features from Interval and Contour
pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \
zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['registraldirectionchange'] = '{True, False}'
pgrams['largetosmall'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \
zip(pgrams['intervalsecond'], pgrams['intervalthird'])]
arfftype['largetosmall'] = '{True, False}'
pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \
for i in zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['contourreversal'] = '{True, False}'
pgrams['isascending'] = \
(pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird'])
arfftype['isascending'] = '{True, False}'
pgrams['isdescending'] = \
(pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird'])
arfftype['isdescending'] = '{True, False}'
diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values
pgrams['ambitus'] = diat.max(1) - diat.min(1)
arfftype['ambitus'] = 'numeric'
pgrams['containsleap'] = \
(abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \
(abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1)
arfftype['containsleap'] = '{True, False}'
###########################################3
pgrams['numberofnotesfirst'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16")
pgrams['numberofnotessecond'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16")
pgrams['numberofnotesthird'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16")
pgrams['numberofnotesfourth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16")
pgrams['numberofnotesfifth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16")
arfftype['numberofnotesfirst'] = 'numeric'
arfftype['numberofnotessecond'] = 'numeric'
arfftype['numberofnotesthird'] = 'numeric'
arfftype['numberofnotesfourth'] = 'numeric'
arfftype['numberofnotesfifth'] = 'numeric'
if seq['freemeter']:
pgrams['meternumerator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
else:
pgrams['meternumerator'] = pd.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['meternumerator'] = 'numeric'
arfftype['meterdenominator'] = 'numeric'
pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['nextisrestfirst'] = '{True, False}'
arfftype['nextisrestsecond'] = '{True, False}'
arfftype['nextisrestthird'] = '{True, False}'
arfftype['nextisrestfourth'] = '{True, False}'
arfftype['nextisrestfifth'] = '{True, False}'
pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']]
pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']]
pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']]
pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['beatstrengthfirst'] = 'numeric'
arfftype['beatstrengthsecond'] = 'numeric'
arfftype['beatstrengththird'] = 'numeric'
arfftype['beatstrengthfourth'] = 'numeric'
arfftype['beatstrengthfifth'] = 'numeric'
#these will be in crossrelations: beatstrengthfirstsecond, etc.
#pgrams['metriccontourfirst'] = [metriccontour[int(ix)] for ix in pgrams['ix0_0']]
#pgrams['metriccontoursecond'] = [metriccontour[int(ix)] for ix in pgrams['ix1_0']]
#pgrams['metriccontourthird'] = [metriccontour[int(ix)] for ix in pgrams['ix2_0']]
#pgrams['metriccontourfourth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
#pgrams['metriccontourfifth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
#arfftype['metriccontourfirst'] = '{-,=,+}'
#arfftype['metriccontoursecond'] = '{-,=,+}'
#arfftype['metriccontourthird'] = '{-,=,+}'
#arfftype['metriccontourfourth'] = '{-,=,+}'
#arfftype['metriccontourfifth'] = '{-,=,+}'
pgrams['IOIbeatfractionfirst'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix0_0'],pgrams['ix0_1'])]
pgrams['IOIbeatfractionsecond'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix1_0'],pgrams['ix1_1'])]
pgrams['IOIbeatfractionthird'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix2_0'],pgrams['ix2_1'])]
pgrams['IOIbeatfractionfourth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix3_0'],pgrams['ix3_1'])]
pgrams['IOIbeatfractionfifth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix4_0'],pgrams['ix4_1'])]
arfftype['IOIbeatfractionfirst'] = 'numeric'
arfftype['IOIbeatfractionsecond'] = 'numeric'
arfftype['IOIbeatfractionthird'] = 'numeric'
arfftype['IOIbeatfractionfourth'] = 'numeric'
arfftype['IOIbeatfractionfifth'] = 'numeric'
pgrams['durationcummulation'] = [((d2 > d1) and (d3 > d2)) for d1, d2, d3 in \
zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
arfftype['durationcummulation'] = '{True, False}'
#these will be in crossrelation: IOIbeatfractionfirstsecond, etc.
#pgrams['durationcontoursecond'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'])]
#pgrams['durationcontourthird'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
#pgrams['durationcontourfourth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionthird'],pgrams['IOIbeatfractionfourth'])]
#pgrams['durationcontourfifth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfourth'],pgrams['IOIbeatfractionfifth'])]
#arfftype['durationcontoursecond'] = '{-,=,+}'
#arfftype['durationcontourthird'] = '{-,=,+}'
#arfftype['durationcontourfourth'] = '{-,=,+}'
#arfftype['durationcontourfifth'] = '{-,=,+}'
pgrams['onthebeatfirst'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix0_0']]
pgrams['onthebeatsecond'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix1_0']]
pgrams['onthebeatthird'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix2_0']]
pgrams['onthebeatfourth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['onthebeatfifth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['onthebeatfirst'] = '{True, False}'
arfftype['onthebeatsecond'] = '{True, False}'
arfftype['onthebeatthird'] = '{True, False}'
arfftype['onthebeatfourth'] = '{True, False}'
arfftype['onthebeatfifth'] = '{True, False}'
pgrams['completesmeasurephrase'] = [completesmeasure_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesmeasuresong'] = [completesmeasure_song[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatphrase'] = [completesbeat_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatsong'] = [completesbeat_song[ix-1] for ix in pgrams['ix2_1']]
arfftype['completesmeasurephrase'] = '{True, False}'
arfftype['completesmeasuresong'] = '{True, False}'
arfftype['completesbeatphrase'] = '{True, False}'
arfftype['completesbeatsong'] = '{True, False}'
if 'grouper' in seq['features'].keys():
grouper = seq['features']['grouper']
pgrams['grouperfirst'] = [grouper[int(ix)] for ix in pgrams['ix0_0']]
pgrams['groupersecond'] = [grouper[int(ix)] for ix in pgrams['ix1_0']]
pgrams['grouperthird'] = [grouper[int(ix)] for ix in pgrams['ix2_0']]
pgrams['grouperfourth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['grouperfifth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['grouperfirst'] = '{True, False}'
arfftype['groupersecond'] = '{True, False}'
arfftype['grouperthird'] = '{True, False}'
arfftype['grouperfourth'] = '{True, False}'
arfftype['grouperfifth'] = '{True, False}'
#values for final note of third group
pgrams['noteoffset'] = pd.array([(ix-1) - phrasestart_ix[(ix-1)] for ix in pgrams['ix2_1']], dtype="Int16")
pgrams['beatoffset'] = [float(Fraction(beatinphrase[ix-1])) - \
float(Fraction(beatinphrase[phrasestart_ix[(ix-1)]])) \
for ix in pgrams['ix2_1']]
arfftype['noteoffset'] = 'numeric'
arfftype['beatoffset'] = 'numeric'
pgrams['beatduration'] = [getBeatDuration(timesig[int(ix)]) for ix in pgrams['ix0_0']]
pgrams['beatcount'] = pd.array([m21.meter.TimeSignature(timesig[int(ix)]).beatCount for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['beatduration'] = 'numeric'
arfftype['beatcount'] = 'numeric'
#get values for the last note!
pgrams['gpr2afirst'] = [gpr2a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2asecond'] = [gpr2a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2athird'] = [gpr2a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2afourth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2afifth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2afirst'] = 'numeric'
arfftype['gpr2asecond'] = 'numeric'
arfftype['gpr2athird'] = 'numeric'
arfftype['gpr2afourth'] = 'numeric'
arfftype['gpr2afifth'] = 'numeric'
pgrams['gpr2bfirst'] = [gpr2b[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2bsecond'] = [gpr2b[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2bthird'] = [gpr2b[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2bfourth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2bfifth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2bfirst'] = 'numeric'
arfftype['gpr2bsecond'] = 'numeric'
arfftype['gpr2bthird'] = 'numeric'
arfftype['gpr2bfourth'] = 'numeric'
arfftype['gpr2bfifth'] = 'numeric'
pgrams['gpr3afirst'] = [gpr3a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3asecond'] = [gpr3a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3athird'] = [gpr3a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3afourth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3afifth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3afirst'] = 'numeric'
arfftype['gpr3asecond'] = 'numeric'
arfftype['gpr3athird'] = 'numeric'
arfftype['gpr3afourth'] = 'numeric'
arfftype['gpr3afifth'] = 'numeric'
pgrams['gpr3dfirst'] = [gpr3d[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3dsecond'] = [gpr3d[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3dthird'] = [gpr3d[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3dfourth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3dfifth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3dfirst'] = 'numeric'
arfftype['gpr3dsecond'] = 'numeric'
arfftype['gpr3dthird'] = 'numeric'
arfftype['gpr3dfourth'] = 'numeric'
arfftype['gpr3dfifth'] = 'numeric'
pgrams['gprsumfirst'] = [gprsum[ix-1] for ix in pgrams['ix0_1']]
pgrams['gprsumsecond'] = [gprsum[ix-1] for ix in pgrams['ix1_1']]
pgrams['gprsumthird'] = [gprsum[ix-1] for ix in pgrams['ix2_1']]
pgrams['gprsumfourth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gprsumfifth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gprsumfirst'] = 'numeric'
arfftype['gprsumsecond'] = 'numeric'
arfftype['gprsumthird'] = 'numeric'
arfftype['gprsumfourth'] = 'numeric'
arfftype['gprsumfifth'] = 'numeric'
pgrams['pitchproximityfirst'] = pd.array([pprox[ix] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['pitchproximitysecond'] = pd.array([pprox[ix] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['pitchproximitythird'] = pd.array([pprox[ix] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['pitchproximityfourth'] = pd.array([pprox[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['pitchproximityfifth'] = pd.array([pprox[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['pitchproximityfirst'] = 'numeric'
arfftype['pitchproximitysecond'] = 'numeric'
arfftype['pitchproximitythird'] = 'numeric'
arfftype['pitchproximityfourth'] = 'numeric'
arfftype['pitchproximityfifth'] = 'numeric'
pgrams['pitchreversalfirst'] = [prev[ix] for ix in pgrams['ix0_0']]
pgrams['pitchreversalsecond'] = [prev[ix] for ix in pgrams['ix1_0']]
pgrams['pitchreversalthird'] = [prev[ix] for ix in pgrams['ix2_0']]
pgrams['pitchreversalfourth'] = [prev[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['pitchreversalfifth'] = [prev[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['pitchreversalfirst'] = 'numeric'
arfftype['pitchreversalsecond'] = 'numeric'
arfftype['pitchreversalthird'] = 'numeric'
arfftype['pitchreversalfourth'] = 'numeric'
arfftype['pitchreversalfifth'] = 'numeric'
#get values for last note in pitchgroup
pgrams['lbdmpitchfirst'] = [lbdmpitch[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmpitchsecond'] = [lbdmpitch[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmpitchthird'] = [lbdmpitch[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmpitchfourth'] = [lbdmpitch[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['lbdmpitchfifth'] = [lbdmpitch[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['lbdmpitchfirst'] = 'numeric'
arfftype['lbdmpitchsecond'] = 'numeric'
arfftype['lbdmpitchthird'] = 'numeric'
arfftype['lbdmpitchfourth'] = 'numeric'
arfftype['lbdmpitchfifth'] = 'numeric'
pgrams['lbdmioifirst'] = [lbdmioi[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmioisecond'] = [lbdmioi[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmioithird'] = [lbdmioi[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmioifourth'] = [lbdmioi[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['lbdmioififth'] = [lbdmioi[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['lbdmioifirst'] = 'numeric'
arfftype['lbdmioisecond'] = 'numeric'
arfftype['lbdmioithird'] = 'numeric'
arfftype['lbdmioifourth'] = 'numeric'
arfftype['lbdmioififth'] = 'numeric'
pgrams['lbdmrestfirst'] = [lbdmrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmrestsecond'] = [lbdmrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmrestthird'] = [lbdmrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmrestfourth'] = [lbdmrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['lbdmrestfifth'] = [lbdmrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['lbdmrestfirst'] = 'numeric'
arfftype['lbdmrestsecond'] = 'numeric'
arfftype['lbdmrestthird'] = 'numeric'
arfftype['lbdmrestfourth'] = 'numeric'
arfftype['lbdmrestfifth'] = 'numeric'
pgrams['lbdmfirst'] = [lbdm[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmsecond'] = [lbdm[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmthird'] = [lbdm[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmfourth'] = [lbdm[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['lbdmfifth'] = [lbdm[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['lbdmfirst'] = 'numeric'
arfftype['lbdmsecond'] = 'numeric'
arfftype['lbdmthird'] = 'numeric'
arfftype['lbdmfourth'] = 'numeric'
arfftype['lbdmfifth'] = 'numeric'
if vocal:
pgrams['wordstressfirst'] = [wordstress[ix-1] for ix in pgrams['ix0_1']]
pgrams['wordstresssecond'] = [wordstress[ix-1] for ix in pgrams['ix1_1']]
pgrams['wordstressthird'] = [wordstress[ix-1] for ix in pgrams['ix2_1']]
pgrams['wordstressfourth'] = [wordstress[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['wordstressfifth'] = [wordstress[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['wordstressfirst'] = '{True, False}'
arfftype['wordstresssecond'] = '{True, False}'
arfftype['wordstressthird'] = '{True, False}'
arfftype['wordstressfourth'] = '{True, False}'
arfftype['wordstressfifth'] = '{True, False}'
#NB only take content words
pgrams['rhymesfirst'] = [rhymescontentwords[ix-1] for ix in pgrams['ix0_1']]
pgrams['rhymessecond'] = [rhymescontentwords[ix-1] for ix in pgrams['ix1_1']]
pgrams['rhymesthird'] = [rhymescontentwords[ix-1] for ix in pgrams['ix2_1']]
pgrams['rhymesfourth'] = [rhymescontentwords[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['rhymesfifth'] = [rhymescontentwords[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['rhymesfirst'] = '{True, False}'
arfftype['rhymessecond'] = '{True, False}'
arfftype['rhymesthird'] = '{True, False}'
arfftype['rhymesfourth'] = '{True, False}'
arfftype['rhymesfifth'] = '{True, False}'
#rhyme_noteoffset
#rhyme_beatoffset
pgrams['rhyme_noteoffset'] = [rhymescontentwords_noteoffset[ix-1] for ix in pgrams['ix2_1']]
pgrams['rhyme_beatoffset'] = [rhymescontentwords_beatoffset[ix-1] for ix in pgrams['ix2_1']]
arfftype['rhyme_noteoffset'] = 'numeric'
arfftype['rhyme_beatoffset'] = 'numeric'
pgrams['noncontentwordfirst'] = [noncontentword[ix-1] for ix in pgrams['ix0_1']]
pgrams['noncontentwordsecond'] = [noncontentword[ix-1] for ix in pgrams['ix1_1']]
pgrams['noncontentwordthird'] = [noncontentword[ix-1] for ix in pgrams['ix2_1']]
        pgrams['noncontentwordfourth'] = [noncontentword[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
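        # The source is truncated at this point; the lines below are a minimal
        # sketch that completes the noncontentword pattern and returns the
        # values expected by the call site in extractPgramsFromMelody
        # ((pgrams, arfftype)). Any remaining vocal features in the original
        # are not reconstructed.
        pgrams['noncontentwordfifth'] = [noncontentword[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
        arfftype['noncontentwordfirst'] = '{True, False}'
        arfftype['noncontentwordsecond'] = '{True, False}'
        arfftype['noncontentwordthird'] = '{True, False}'
        arfftype['noncontentwordfourth'] = '{True, False}'
        arfftype['noncontentwordfifth'] = '{True, False}'
    return pgrams, arfftype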
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 14:28:27 2020
@author: <NAME>
"""
import difflib # for computing similarity between two strings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import copy # used for deep copies
import matplotlib.ticker as mtick # used to change axis tick formatting
plt.rcParams['font.sans-serif'] = ['FangSong'] # set the default font
plt.rcParams['axes.unicode_minus'] = False # fix minus signs rendering as boxes in saved figures
# Function for the cross-tabulated summaries
def pivot1(listn, version):
# csv_data[csv_data['area'].isna()]
subset = csv_data[csv_data['area'].isin(listn)]
subset['list_date_short'] = subset['list_date'].apply(str).str[0:4]
global result
result = pd.crosstab(subset.list_date_short, subset.industry, margins = True)
result.to_excel(r'D:\桌面的文件夹\实习\睿丛\output_%s.xls' %version)
return
# The three levels of aggregation
list1 = ['南京', '苏州', '无锡', '常州', '镇江', '扬州', '泰州', '南通', '淮安', '连云港', '盐城', '徐州', '宿迁', '杭州', '宁波', '温州', '绍兴', '湖州', '嘉兴', '金华', '衢州', '台州', '丽水', '舟山', '合肥 ', '马鞍山', '淮北', '宿州', '阜阳', '蚌埠', '淮南', '滁州', '六安', '巢湖', '芜湖', '亳州', '安庆', '池州', '铜陵', '宣城', '黄山', '上海', '江苏', '安徽', '浙江']
list2 = ['南京', '苏州', '无锡', '常州', '镇江', '扬州', '泰州', '南通', '淮安', '连云港', '盐城', '徐州', '宿迁', '杭州', '宁波', '温州', '绍兴', '湖州', '嘉兴', '金华', '衢州', '台州', '丽水', '舟山', '上海', '江苏', '浙江']
list3 = ['上海']
# Load the data
csv_file = r'D:\桌面的文件夹\实习\睿丛\分年份、分行业统计长三角地区当年上市数量\df_stock.csv'
csv_data = pd.read_csv(csv_file, low_memory = False) # avoid the dtype warning
print(csv_data)
csv_data.info()
csv_data.head()
csv_data.describe()
csv_data.head(50)
# Run the summary at the three levels
pivot1(list1,'list1')
pivot1(list2,'list2')
pivot1(list3,'list3')
result # inspect the summary result
# Process the industry names
# Prepare the Shenwan (SWS) industry classification data
Tpye=pd.read_excel(r'D:\桌面的文件夹\实习\睿丛\分年份、分行业统计长三角地区当年上市数量\申银万国行业分类标准 .xlsx',sheet_name='处理', header=None) # load the industry classification
type1 = Tpye.sort_values(1, axis=0) # sort by industry code, ascending
type1=type1.drop_duplicates(subset=0, keep='first', inplace=False, ignore_index=False) # drop duplicate rows; some parent and child categories share a name, so keep only the parent
type1=type1.rename(columns={0:'industry'}) # name the industry-name column
type1=type1.rename(columns={1:'code'}) # name the industry-code column
type1 = type1.set_index("industry") # make industry names the row index, for the later merges
print(type1.index.is_unique) # confirm the index has no duplicates
type1
# Insert an empty column at the front to hold the matching results
test=result.T.iloc[0:79,:] # drop the 'All' entry from the industry types
col_name=test.columns.tolist() # pull all column names into a list
col_name.insert(0,'new') # insert a column named 'new' at position 0; it starts out all NaN
test=test.reindex(columns=col_name) # DataFrame.reindex() rebuilds the row/column index
test
# Match the SWS classification onto the original categories
test.iloc[:,0] = test.index.map(lambda x: difflib.get_close_matches(x, type1.index, cutoff=0.3,n=1)[0]) # map() applies a function to every element of an iterable
test.head(60) # inspect the matching results
test.iloc[61:81,:] # inspect the matching results
test.to_excel(r'D:\桌面的文件夹\实习\睿丛\行业分类匹配结果.xls') # export the matches and fix incorrect ones by hand in Excel; 11 items needed manual adjustment
# Convert the industry names to the SWS naming system.
# Import and tidy
data=pd.read_excel(r'D:\桌面的文件夹\实习\睿丛\行业分类匹配结果_修改后.xls', index_col = 'industry') # re-import the industry summary with the corrected classification
data = data.groupby(data.index).sum() # sum duplicate industries, since concat requires a unique index; note that sub- and parent industries are still mixed at this point
# Merge
outcome = pd.concat([data, type1], axis=1, join='inner', ignore_index=False) # merge on the index (works for object dtypes); 'inner' keeps the intersection, 'outer' the union; data's index is a subset of type1's, so inner is fine; axis=1 merges horizontally
# Fix the industry codes
outcome['code'] = outcome['code'].apply(str).str[0:2].map(lambda x: x+'0000') # convert codes to first-level industry codes, i.e. the last four digits become 0
outcome['code'] = outcome['code'].astype('int64')
# Build a new index
outcome1 = outcome.set_index('code')
outcome1 = outcome1.groupby(outcome1.index).sum()
type2 = type1.reset_index().set_index('code') # turn the old 'industry' index back into a regular column
outcome2 = pd.concat([outcome1, type2], axis=1, join='inner', ignore_index=False) # attach the SWS Chinese level-1 industry names to the data; the index dtypes must match exactly, or the merge comes back empty
result = outcome2.set_index('industry').T
row_name=result.index.tolist() # pull the row labels into a list
type(row_name[1]) # confirm the elements are strings
row_name.insert(1,'1991') # insert a row named '1991' at position 1; years with no listings were dropped by the earlier crosstab
row_name.insert(15,'2005')
row_name.insert(-8,'2013')
result=result.reindex(index=row_name) # DataFrame.reindex() rebuilds the row index
result.iloc[[1, 15, -9],:]=0.0 # fill the inserted NaN rows with zeros
result # result is the fully tidied dataset
# Data preparation is complete at this point.
# The analysis starts below.
nameDF = pd.DataFrame() # empty df to store the analysis title and industry names
# Extract total listings per industry, used for analyses 1 and 2
industry = result[31:32] # take the last row, the 'All' totals
# 1. The 10 industries with the most listings
# Extract
temp1 = industry.T.sort_values('All',ascending=False,inplace=False)[0:11] # extract industry names and listing counts
temp1
# Plot
title='过去30年上市数量最多的10个行业' # set the title separately so it can also be stored in nameDF
fig1 = temp1.plot(kind='bar', fontsize=16, figsize=(14,14*0.618), title=title, rot=0, legend='') # figure formatting
fig1.axes.title.set_size(20) # title font size
# Save
fig1.figure.savefig(r'D:\桌面的文件夹\实习\睿丛\过去30年上市数量最多的10个行业.png') # save the figure
type(temp1) # check temp1's type
stri=',' # separator
seq=temp1.index.tolist() # get the industry names
industryName = stri.join(seq) # join all list elements into one string
s = pd.Series([title,industryName]) # save the title and industry names
nameDF = nameDF.append(s, ignore_index=True) # append to the df
# 2. The 10 industries with the fewest listings. This code is more reusable than analysis 1.
# Extract
temp2 = industry.T.sort_values('All',ascending=True,inplace=False)[0:11].sort_values('All',ascending=False,inplace=False) # same rule as in 1: take the bottom 10 ascending, then re-sort the selection descending
# Plot
title='过去30年上市数量最少的10个行业' # set the title separately so it can also be stored in nameDF
fig2 = temp2.plot(kind='bar', fontsize=16, figsize=(14,14*0.618), title=title, rot=0, legend='') # figure formatting
fig2.axes.title.set_size(20) # title font size
fmt='%.0f'
yticks = mtick.FormatStrFormatter(fmt)
fig2.yaxis.set_major_formatter(yticks) # no decimal places; every value in the dataframe is a float
# Save
fig2.figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title) # save the figure
seq=temp2.index.tolist() # get the industry names
industryName = stri.join(seq) # join all list elements into one string
s = pd.Series([title,industryName]) # save the title and industry names
nameDF = nameDF.append(s, ignore_index=True) # append to the df
# 3. Extract the annual listing totals
# Extract
result['All'] = result.apply(lambda x: x.sum(),axis=1) # Add a row-wise total; the next step extracts exactly this value
# Plot
title='上海地区过去30年每年的上市数量变化'
temp3= result.iloc[:,-1].drop(['All'])
fig3 = temp3.plot(kind='line', fontsize=16, figsize=(14,14*0.618),use_index=True, title=title, rot=0)
fig3.axes.title.set_size(20)
# Save
fig3.figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title) # Save the figure
# Merge years to smooth out the fluctuations
result4 = result.iloc[:-1,:]
# 4. Aggregate into five-year bins, absolute numbers
i = 0
data_new = pd.DataFrame()
while i < (result.shape[0]-1):
try:
data_new = data_new.append(result4.iloc[i,:]+result4.iloc[i+1,:]+result4.iloc[i+2,:]+result4.iloc[i+3,:]+result4.iloc[i+4,:], ignore_index=True)
except:
i +=5
i +=5
s=data_new.sum(axis=0)
data_new = data_new.append(s, ignore_index=True)
data_new
# Extract
title='上市总数最多的12个行业的上市数量'
temp4 = data_new.T.sort_values(by=[6],ascending=False,inplace=False).iloc[0:12,:-1].T
# Plot
fig4 = temp4.plot(kind='line', subplots=True,sharex=True, sharey=True, fontsize=16, layout=(3,4),figsize=(18,18*0.618),use_index=True, title=title, legend=True, rot=90)
labels = ['1990-1994', '1995-1999', '2000-2004', '2005-2009', '2010-2014','2015-2019'] # Set the label names
x = np.arange(len(labels)) # the label locations
fig4[1,1].set_xticks(x) # Set the ticks
fig4[1,1].set_xticklabels(labels) # Set the tick labels
fmt='%.0f'
yticks = mtick.FormatStrFormatter(fmt)
fig4[1,1].yaxis.set_major_formatter(yticks) # No decimal places on the axis; every value in the dataframe is a float.
# Save
fig4[1,1].figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title) # Save the figure. fig4 is an array of AxesSubplot objects (an ndarray), so grabbing .figure from any one of them saves the whole grid; this call uses the subplot in row 2, column 2.
fig4[0,0].figure.show()
seq=temp4.T.index.tolist() # Get the industry names
industryName = stri.join(seq) # Join all list elements into a single string.
s = pd.Series([title,industryName]) # Save the title and the industry names
nameDF = nameDF.append(s, ignore_index=True) # Append to the df
# 5. Five-year bins, relative numbers
# Prepare the totals
data_reg = copy.deepcopy(data_new) # A deep copy is needed here so the original df stays unchanged; we are about to reshape it completely, and re-running several queries in a row would otherwise break.
data_reg['All']=data_reg.sum(axis=1) # Sum the listings of all industries per period and put it in the last column. The per-industry totals already exist, in row six.
# Compute the relative numbers
data_reg=data_reg.div(data_reg.iloc[:,-1],axis=0).iloc[:,:-1] # The dataset used for the regression, expressed as shares
# Extract
title='上市总数最多的12个行业的上市占比'
temp5 = data_reg.T.sort_values(by=[6],ascending=False,inplace=False).iloc[0:12,:-1].T
# Plot
fig5 = temp5.plot(kind='line', subplots=True,sharex=True, sharey=True, fontsize=16, layout=(3,4),figsize=(18,18*0.618),use_index=True, title=title, legend=True, rot=90)
labels = ['1990-1994', '1995-1999', '2000-2004', '2005-2009', '2010-2014','2015-2019'] # Set the label names
x = np.arange(len(labels)) # the label locations
fig5[1,1].set_xticks(x) # Set the x-axis ticks
fig5[1,1].set_xticklabels(labels) # Set the x-axis tick labels
fig5[1,1].yaxis.set_major_formatter(mtick.PercentFormatter(1,0)) # Format the y axis as percentages without decimals. The first argument is the value that maps to 100%, the second the number of decimal places.
# Save
fig5[1,1].figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title) # Save the figure. fig5 is an array of AxesSubplot objects (an ndarray), so grabbing .figure from any one of them saves the whole grid; this call uses the subplot in row 2, column 2.
fig5[0,0].figure.show()
seq=temp5.T.index.tolist() # Get the industry names
industryName = stri.join(seq) # Join all list elements into a single string.
s = | pd.Series([title,industryName]) | pandas.Series |
'''
SYNBIOCHEM-DB (c) University of Manchester 2017
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
import os
import xlrd
import pandas as pd
def convert(xl_filename):
'''Convert Excel file.'''
dir_name, _ = os.path.splitext(xl_filename)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
workbook = xlrd.open_workbook(xl_filename)
for sheet in workbook.sheets():
columns = None
data = []
for row_num in range(sheet.nrows):
row_data = sheet.row_values(row_num)
if not columns:
columns = row_data
else:
data.append(row_data)
df = | pd.DataFrame(data, columns=columns) | pandas.DataFrame |
### Model Training and Evaluation ###
# Author: <NAME>
from IPython import get_ipython
get_ipython().magic('reset -sf')
import os, shutil
import re
import csv
from utils import bigrams, trigram, replace_collocation
import timeit
import pandas as pd
import string
from nltk.stem import PorterStemmer
import numpy as np
import pickle
import random
from scipy import sparse
import itertools
from scipy.io import savemat, loadmat
import string
from sklearn.feature_extraction.text import CountVectorizer
from gensim.test.utils import datapath
from gensim.models import Word2Vec
from data_concatenate import *
import gensim.downloader
import pprint
from manetm import etm
pp = pprint.PrettyPrinter()
# =============================================================================
DATAPATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/etm/")
OVERLEAF = os.path.expanduser("~/Dropbox/Apps/Overleaf/FOMC_Summer2019/files")
if not os.path.exists(f"{DATAPATH}/full_results"):
os.makedirs(f"{DATAPATH}/full_results")
# =============================================================================
# #0 Set Parameters
# =============================================================================
# Dataset parameters
embphrase_itera = 2 # Number of phrase iterations
embthreshold = "inf" # Threshold value for collocations. If "inf": no collocations
emb_max_df = 1.0 # in a maximum of # % of documents if # is float.
emb_min_df = 1 # choose desired value for min_df // in a minimum of # documents
EMBDATASET = f"BBTSST_min{emb_min_df}_max{emb_max_df}_iter{embphrase_itera}_th{embthreshold}"
meetphrase_itera = 2
meetthreshold = "inf"
meetmax_df = 1.0
meetmin_df = 10
MEEETDATA = f"MEET_min{meetmin_df}_max{meetmax_df}_iter{meetphrase_itera}_th{meetthreshold}"
sta_phrase_itera = 2
sta_threshold = "inf"
sta_max_df = 1.0
sta_min_df = 5
STADATASET = f"STATEMENT_min{sta_min_df}_max{sta_max_df}_iter{sta_phrase_itera}_th{sta_threshold}"
# Skipgram parameters
mincount = 2
d_sg = 1
vectorsize = 300
iters = 100
cpus = 16
neg_samples = 10
windowsize = 4
# Activate code
d_construct = False
d_estemb = False
d_train = False
# =============================================================================
# #1 Data Preparation
# =============================================================================
if d_construct:
print("*" * 80)
print("Build datasets")
build_embdata(emb_max_df,emb_min_df,embphrase_itera,embthreshold,EMBDATASET)
build_meeting(meetmax_df,meetmin_df,meetphrase_itera,meetthreshold,MEEETDATA)
build_statement_data(sta_max_df,sta_min_df,sta_phrase_itera,sta_threshold,STADATASET)
print("*" * 80)
print("Datasets Construction Completed")
print("*" * 80)
print("\n")
# =============================================================================
# #2 Train Word Embeddings
# =============================================================================
if d_estemb:
# Run Skipgram
print(f"Run model: {EMBDATASET}\n")
sentences = pd.read_pickle(f"{DATAPATH}/data/{EMBDATASET}/corpus.pkl")
model = gensim.models.Word2Vec(sentences, min_count = mincount, sg = d_sg, vector_size = vectorsize, epochs = iters, workers = cpus, negative = neg_samples, window = windowsize)
model.save(f"{DATAPATH}/word2vecmodels/{EMBDATASET}")
# Write the embeddings to a file
with open(f"{DATAPATH}/embeddings/{EMBDATASET}_emb", 'w') as f:
for v in model.wv.index_to_key:
vec = list(model.wv[v])
f.write(v + ' ')
vec_str = ['%.9f' % val for val in vec]
vec_str = " ".join(vec_str)
f.write(vec_str + '\n')
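    # Each line of the embeddings file written above is "<token> v1 v2 ... v300" with 9-decimal floats;
    # this is the plain-text file later passed to etm() via emb_path.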
print("*" * 80)
print(f"Embedding Training Completed")
print("*" * 80)
print("\n\n")
# =============================================================================
## #4 TRAIN TOPIC MODELS
# =============================================================================
# =============================================================================
## SPEAKERDATA - Pre-Trained Emb.
# speaker_ckpt = etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# batch_size = 1000, epochs = 150, num_topics = 10, rho_size = 300,
# emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
# train_embeddings = 0, lr = 0.005, lr_factor=4.0,
# mode = 'train', optimizer = 'adam',
# seed = 2019, enc_drop = 0.0, clip = 0.0,
# nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
# num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
# load_from = "", tc = 1, td = 1)
#
# print(f"Evaluate model: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'eval', load_from = f"{speaker_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
#
# print(f"Output the topic distribution: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'retrieve',load_from = f"{speaker_ckpt}", train_embeddings = 0)
#
# =============================================================================
## MEETINGS - Pre-Trained Emb.
if d_train:
meeting_ckpt = etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
batch_size = 1000, epochs = 2000, num_topics = 10, rho_size = 300,
emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
train_embeddings = 0, lr = 0.005, lr_factor=4.0,
mode = 'train', optimizer = 'adam',
seed = 2019, enc_drop = 0.0, clip = 0.0,
nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
load_from = "", tc = 1, td = 1)
print(f"Evaluate model: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'eval', load_from = f"{meeting_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
print(f"Output the topic distribution: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'retrieve',load_from = f"{meeting_ckpt}", train_embeddings = 0)
# =============================================================================
## #5 OUTPUT DATA
# =============================================================================
# =============================================================================
# ## SPEAKERDATA
# raw_df = pd.read_pickle(f"raw_data/{SPEAKERDATA}.pkl")
#
# idx_df = pd.read_pickle(f'{OUTPATH}/{SPEAKERDATA}/original_indices.pkl')
# idx_df = idx_df.set_index(0)
# idx_df["d"] = 1
#
# data = pd.concat([idx_df,raw_df],axis=1)
# data_clean = data[data["d"]==1].reset_index()
# dist_df = pd.read_pickle(f'{speaker_ckpt}tpdist.pkl')
#
# full_data = pd.concat([data_clean,dist_df],axis=1)
# full_data.drop(columns=["content","d"],inplace=True)
# full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
# full_data["start_date"] = pd.to_datetime(full_data["start_date"])
# full_data.to_stata(f"{DATAPATH}/full_results/{SPEAKERDATA}.dta",convert_dates={"start_date":"td"})
#
# =============================================================================
### MEETING ###
# Retrieve raw data
raw_df = pd.read_pickle(f"raw_data/{MEEETDATA}.pkl")
idx_df = pd.read_pickle(f'{OUTPATH}/{MEEETDATA}/original_indices.pkl')
idx_df = idx_df.set_index(0)
idx_df["d"] = 1
data = pd.concat([idx_df,raw_df],axis=1)
data_clean = data[data["d"]==1].reset_index()
dist_df = pd.read_pickle(f'{meeting_ckpt}tpdist.pkl')
full_data = | pd.concat([data_clean,dist_df],axis=1) | pandas.concat |
import pandas as __pd
import datetime as __dt
from dateutil import relativedelta as __rd
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
import requests as __requests
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
__first_part_url = "production/"
def santraller(tarih=__dt.datetime.now().strftime("%Y-%m-%d")):
"""
    Returns the YEKDEM power plant information registered in the EPİAŞ system for the given date.
    Parameter
    ----------
    tarih : date in %YYYY-%MM-%DD format (default: today)
    Returns
    -----------------
    Power plant information (Id, Adı, EIC Kodu, Kısa Adı)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "renewable-sm-licensed-power-plant-list?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["powerPlantList"])
df.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu",
"shortName": "Kısa Adı"}, inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def kurulu_guc(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the source-based total installed capacity of the YEKDEM power plants registered in the EPİAŞ system
    for the months covered by the given date range.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    Returns
    -----------------
    Installed capacity information (Date, Installed Capacity)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son and ilk <= __dt.datetime.today():
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__yekdem_kurulu_guc, date_list)
return __pd.concat(df_list, sort=False)
def lisansli_uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly settlement-based injection quantity (UEVM) by source for the licensed power plants under
    YEKDEM for the given date range.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    Returns
    -----------------
    Hourly YEKDEM licensed UEVM (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-sm-licensed-injection-quantity" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableSMProductionList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "landfillGas": "Çöp Gazı", "sun": "Güneş",
"geothermal": "Jeotermal", "reservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi", "Nehir Tipi", "Çöp Gazı",
"Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return | __pd.DataFrame() | pandas.DataFrame |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, models
import pandas as pd
import numpy as np
from pandas import DataFrame
from sklearn.preprocessing import QuantileTransformer
import seaborn as sns
import matplotlib.pyplot as plt
import os, sys
# from google.colab import drive
# drive.mount('/content/drive')
def gen_dataset1(NUM_DATPOINTS, IMG_SHAPE, toLoad=False):
if toLoad:
return np.load("../data/2D_100000data.npy")
else:
mean = [0, 0]
cov = [[1, -0.8], [-0.8, 1]] # diagonal covariance
Z = np.random.multivariate_normal(mean, cov, NUM_DATPOINTS) #[:, :, np.newaxis]
print(Z.shape)
return Z.astype("float32")
def gen_dataset2(NUM_DATPOINTS, IMG_SHAPE, toLoad=False):
if toLoad:
return np.load("../data/2D_100000data.npy")
else:
np.random.seed(0)
sigma = 0.5
        # generate spherical data centered on (1, 1.5)
shifted_gaussian = sigma * np.random.randn(NUM_DATPOINTS//2, 2) + np.array([1, 1.5])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.2], [1.5, .7]])
stretched_gaussian = np.dot(np.random.randn(NUM_DATPOINTS//2, 2), C)
Z = np.vstack([shifted_gaussian, stretched_gaussian])#[:, :, np.newaxis]
return Z.astype("float32")
def get_slice(f, id1, id2):
a = np.vstack((f[:, id1], f[:, id2])).T
# neg1 = np.min(a[0, :])
# neg2 = np.min(a[1, :])
# if neg1 < 1:
# a[0, :] += np.abs(neg1) + 1
# if neg2 < 1:
# a[1, :] += np.abs(neg2) + 1
# log_f = np.log(a.T)
# print(len(log_f[np.where(log_f == - np.inf)]))
# print(len(log_f[np.where(log_f[:, 0] == np.inf)]))
df = | pd.DataFrame(a) | pandas.DataFrame |
# -*- coding: utf-8 -*-.
"""
Created on Tue Jan 21 13:04:58 2020
@author: xavier.mouy
"""
import pandas as pd
import xarray as xr
import os
import uuid
import warnings
import ecosound.core.tools
import ecosound.core.decorators
from ecosound.core.metadata import DeploymentInfo
import copy
class Annotation():
"""
A class used for manipulating annotation data.
    The Annotation object stores both manual-analysis annotations
    collected with software such as PAMlab and Raven, and outputs from
automated detectors and classifiers.
Attributes
----------
data : pandas DataFrame
        Annotation DataFrame.
Methods
-------
check_integrity(verbose=False, time_duplicates_only=False)
Check integrity of Annotation object.
from_raven(files, class_header='Sound type',subclass_header=None,
verbose=False)
Import annotation data from 1 or several Raven files.
to_raven(outdir, single_file=False)
Write annotation data to one or several Raven files.
from_pamlab(files, verbose=False)
Import annotation data from 1 or several PAMlab files.
to_pamlab(outdir, single_file=False)
Write annotation data to one or several Raven files.
from_parquet(file)
Import annotation data from a Parquet file.
to_parquet(file)
Write annotation data to a Parquet file.
from_netcdf(file)
Import annotation data from a netCDF4 file.
to_netcdf(file)
Write annotation data to a netCDF4 file.
insert_values(**kwargs)
Manually insert values for given Annotation fields.
insert_metadata(deployment_info_file)
Insert metadata information to the annotation from a
deployment_info_file.
filter_overlap_with(annot, freq_ovp=True, dur_factor_max=None,
dur_factor_min=None,ovlp_ratio_min=None,
remove_duplicates=False,inherit_metadata=False,
filter_deploymentID=True, inplace=False)
Filter annotations overalaping with another set of annotations.
get_labels_class()
Return all unique class labels.
get_labels_subclass()
Return all unique subclass labels.
get_fields()
Return list with all annotations fields.
summary(rows='deployment_ID',columns='label_class')
Produce a summary pivot table with the number of annotations for two
given annotation fields.
__add__()
        Concatenate data from annotation objects using the + sign.
__len__()
Return number of annotations.
"""
def __init__(self):
"""
Initialize Annotation object.
Sets all the annotation fields.:
-'uuid': UUID,
Unique identifier code
-'from_detector': bool,
True if data comes from an automatic process.
-'software_name': str,
Software name. Can be Raven or PAMlab for manual analysis.
-'software_version': str,
Version of the software used to create the annotations.
-'operator_name': str,
Name of the person responsible for the creation of the
annotations.
-'UTC_offset': float,
Offset hours to UTC.
-'entry_date': datetime,
Date when the annotation was created.
-'audio_channel': int,
Channel number.
-'audio_file_name': str,
Name of the audio file.
-'audio_file_dir': str,
Directory where the audio file is.
-'audio_file_extension': str,
            Extension of the audio file.
-'audio_file_start_date': datetime,
Date of the audio file start time.
-'audio_sampling_frequency': int,
            Sampling frequency of the audio data.
-'audio_bit_depth': int,
Bit depth of the audio data.
-'mooring_platform_name': str,
            Name of the mooring platform (e.g. 'glider', 'Base plate').
-'recorder_type': str,
Name of the recorder type (e.g., 'AMAR'), 'SoundTrap'.
-'recorder_SN': str,
Serial number of the recorder.
-'hydrophone_model': str,
Model of the hydrophone.
-'hydrophone_SN': str,
Serial number of the hydrophone.
-'hydrophone_depth': float,
Depth of the hydrophone in meters.
-'location_name': str,
            Name of the deployment location.
-'location_lat': float,
latitude of the deployment location in decimal degrees.
-'location_lon': float,
longitude of the deployment location in decimal degrees.
-'location_water_depth': float,
Water depth at the deployment location in meters.
-'deployment_ID': str,
Unique ID of the deployment.
        -'frequency_min': float,
            Minimum frequency of the annotation in Hz.
        -'frequency_max': float,
            Maximum frequency of the annotation in Hz.
        -'time_min_offset': float,
            Start time of the annotation, in seconds relative to the
            beginning of the audio file.
        -'time_max_offset': float,
            Stop time of the annotation, in seconds relative to the
            beginning of the audio file.
-'time_min_date': datetime,
Date of the annotation start time.
-'time_max_date': datetime,
Date of the annotation stop time.
-'duration': float,
Duration of the annotation in seconds.
-'label_class': str,
label of the annotation class (e.g. 'fish').
-'label_subclass': str,
label of the annotation subclass (e.g. 'grunt')
'confidence': float,
Confidence of the classification.
Returns
-------
Annotation object.
"""
self.data = pd.DataFrame({
'uuid': [],
'from_detector': [], # True, False
'software_name': [],
'software_version': [],
'operator_name': [],
'UTC_offset': [],
'entry_date': [],
'audio_channel': [],
'audio_file_name': [],
'audio_file_dir': [],
'audio_file_extension': [],
'audio_file_start_date': [],
'audio_sampling_frequency': [],
'audio_bit_depth': [],
'mooring_platform_name': [],
'recorder_type': [],
'recorder_SN': [],
'hydrophone_model': [],
'hydrophone_SN': [],
'hydrophone_depth': [],
'location_name': [],
'location_lat': [],
'location_lon': [],
'location_water_depth': [],
'deployment_ID': [],
'frequency_min': [],
'frequency_max': [],
'time_min_offset': [],
'time_max_offset': [],
'time_min_date': [],
'time_max_date': [],
'duration': [],
'label_class': [],
'label_subclass': [],
'confidence': []
})
self._enforce_dtypes()
def check_integrity(self, verbose=False, ignore_frequency_duplicates=False):
"""
Check integrity of Annotation object.
Tasks performed:
1- Check that start time < stop time
2- Check that min frequency < max frequency
        3- Remove duplicate entries based on time, frequency,
            labels and filenames
Parameters
----------
verbose : bool, optional
Print summary of the duplicate entries deleted.
The default is False.
ignore_frequency_duplicates : bool, optional
If set to True, doesn't consider frequency values when deleting
duplicates. It is useful when data are imported from Raven.
The default is False.
Raises
------
ValueError
If annotations have a start time > stop time
If annotations have a min frequency > max frequency
Returns
-------
None.
"""
# Drop all duplicates
count_start = len(self.data)
if ignore_frequency_duplicates: # doesn't use frequency boundaries
self.data = self.data.drop_duplicates(
subset=['time_min_offset',
'time_max_offset',
'label_class',
'label_subclass',
'audio_file_name',
], keep="first",).reset_index(drop=True)
else: # remove annot with exact same time AND frequency boundaries
self.data = self.data.drop_duplicates(
subset=['time_min_offset',
'time_max_offset',
'frequency_min',
'frequency_max',
'label_class',
'label_subclass',
'audio_file_name',
], keep="first",).reset_index(drop=True)
count_stop = len(self.data)
if verbose:
print('Duplicate entries removed:', str(count_start-count_stop))
# Check that start and stop times are coherent (i.e. t2 > t1)
time_check = self.data.index[
self.data['time_max_offset'] <
self.data['time_min_offset']].tolist()
if len(time_check) > 0:
raise ValueError(
'Incoherent annotation times (time_min > time_max). \
Problematic annotations:' + str(time_check))
# Check that min and max frequencies are coherent (i.e. fmin < fmax)
freq_check = self.data.index[
self.data['frequency_max'] < self.data['frequency_min']].tolist()
if len(freq_check) > 0:
raise ValueError(
'Incoherent annotation frequencies (frequency_min > \
frequency_max). Problematic annotations:' + str(freq_check))
if verbose:
            print('Integrity test successful')
def from_raven(self, files, class_header='Sound type', subclass_header=None, verbose=False):
"""
Import data from 1 or several Raven files.
Load annotation tables from .txt files generated by the software Raven.
Parameters
----------
files : str, list
Path of the txt file(s) to import. Can be a str if importing a single
file. Needs to be a list if importing multiple files. If 'files' is
a folder, all files in that folder ending with '.selections.txt'
will be imported.
class_header : str, optional
Name of the header in the Raven file corresponding to the class
name. The default is 'Sound type'.
subclass_header : str, optional
Name of the header in the Raven file corresponding to the subclass
name. The default is None.
verbose : bool, optional
            If set to True, print the summary of the annotation integrity test.
The default is False.
Returns
-------
None.
"""
if os.path.isdir(files):
files = ecosound.core.tools.list_files(files,
'.selections.txt',
recursive=False,
case_sensitive=True,
)
if verbose:
print(len(files), 'annotation files found.')
data = Annotation._import_csv_files(files)
files_timestamp = ecosound.core.tools.filename_to_datetime(
data['Begin Path'].tolist())
self.data['audio_file_start_date'] = files_timestamp
self.data['audio_channel'] = data['Channel']
self.data['audio_file_name'] = data['Begin Path'].apply(
lambda x: os.path.splitext(os.path.basename(x))[0])
self.data['audio_file_dir'] = data['Begin Path'].apply(
lambda x: os.path.dirname(x))
self.data['audio_file_extension'] = data['Begin Path'].apply(
lambda x: os.path.splitext(x)[1])
self.data['time_min_offset'] = data['Begin Time (s)']
self.data['time_max_offset'] = data['End Time (s)']
self.data['time_min_date'] = pd.to_datetime(
self.data['audio_file_start_date'] + pd.to_timedelta(
self.data['time_min_offset'], unit='s'))
self.data['time_max_date'] = pd.to_datetime(
self.data['audio_file_start_date'] +
pd.to_timedelta(self.data['time_max_offset'], unit='s'))
self.data['frequency_min'] = data['Low Freq (Hz)']
self.data['frequency_max'] = data['High Freq (Hz)']
if class_header is not None:
self.data['label_class'] = data[class_header]
if subclass_header is not None:
self.data['label_subclass'] = data[subclass_header]
self.data['from_detector'] = False
self.data['software_name'] = 'raven'
self.data['uuid'] = self.data.apply(lambda _: str(uuid.uuid4()), axis=1)
self.data['duration'] = self.data['time_max_offset'] - self.data['time_min_offset']
self.check_integrity(verbose=verbose, ignore_frequency_duplicates=True)
if verbose:
print(len(self), 'annotations imported.')
def to_raven(self, outdir, outfile='Raven.Table.1.selections.txt', single_file=False):
"""
Write data to 1 or several Raven files.
        Write annotations as .txt files readable by the software Raven. The
        output can be written to a single txt file or to several txt files
        (one per audio recording). In the latter case, output file names are
automatically generated based on the audio file's name.
Parameters
----------
outdir : str
Path of the output directory where the Raven files are written.
outfile : str
Name of the output file. Only used is single_file is True. The
default is 'Raven.Table.1.selections.txt'.
single_file : bool, optional
If set to True, writes a single output file with all annotations.
The default is False.
Returns
-------
None.
"""
if single_file:
annots = [self.data]
else:
annots = [ | pd.DataFrame(y) | pandas.DataFrame |
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from _imports import *
os.system('cls')
remove_duplicates = ask_for_user_preference('Czy usunąć duplikaty projektów wygenerowanych przez algorytmy?')
verify_designs = ask_for_user_preference('Czy symulacyjnie zweryfikować własności najlepszych projektów?')
# helper procedures
def show_geometry_preview(settings_sim, pattern, scale_geometries = 3):
courant_number = settings_sim['basic']['courant_number']
basic_element_dimensions = settings_sim['diffuser_geometry']['basic_element_dimensions']
fs = settings_sim['basic']['fs']
T_air_C = settings_sim['propagation_medium']['T_air_C']
p_air_hPa = settings_sim['propagation_medium']['p_air_hPa']
RH = settings_sim['propagation_medium']['RH']
c, Z_air = get_air_properties(T_air_C, p_air_hPa, RH)
T = 1/fs # [s]
X = c*T/courant_number # [m]
num_element_height_levels = settings_sim['diffuser_geometry']['num_element_height_levels']
diffuser_depth = settings_sim['diffuser_geometry']['diffuser_depth']
shape_skyline = generate_2D_Skyline_diffuser(
pattern,
element_seg_depth=cont2disc(diffuser_depth*scale_geometries/num_element_height_levels,X),
element_size=cont2disc(basic_element_dimensions*scale_geometries,X))
show_shape(shape_skyline)
def verify_scattering_properties(settings_sim, pattern, reference_data):
mean_coeff = evaluate_design(settings_sim, pattern, reference_data)
print('średnia dyfuzja: ', mean_coeff)
# print (mean_coeff)
# draw_subband_polar_response(settings_sim, imp_res_object[0])
# plt.title('xy')
# draw_subband_polar_response(settings_sim, imp_res_object[1])
# plt.title('yz')
def remove_duplicate_designs(patterns, diffusions):
filtered_patterns = []
filtered_diffusions = []
def pattern_in_list(pattern, list):
list_of_comparisons = []
for element in list:
list_of_comparisons.append(np.array_equal(pattern,element))
return np.any(list_of_comparisons)
already_existing_patterns = []
for pattern, diffusion in zip(patterns, diffusions):
if not pattern_in_list(pattern, already_existing_patterns):
filtered_patterns.append(pattern)
already_existing_patterns.append(pattern)
filtered_diffusions.append(diffusion)
return filtered_patterns, filtered_diffusions
# configuration of the AI-based procedures
CONFIG_PATH_AI = '_settings/ai_default.ini'
CONFIG_PATH_SIM = '_settings/sim_default.ini'
settings_ai = read_config(CONFIG_PATH_AI)
settings_sim = read_config(CONFIG_PATH_SIM)
algenet_outcomes_dir = '../_joint_algenet_results'
file_save_dir = settings_sim['basic']['file_save_dir']
reference_file_path = os.path.join(file_save_dir,'reference.npy')
# odczyt danych referencyjnych do pomiaru dyfuzora
try:
print('obliczanie danych referencyjnych:')
reference_data = np.load(reference_file_path, allow_pickle=True).item()
except:
print(f'odczyt plik z danymi referencyjnymi ({reference_file_path}) nie powiódł się, rreferencja zostanie obliczona automatycznie')
imp_res_set_empty, imp_res_set_plate, _ = run_simulation_for_pattern(None,settings_sim, mode='reference_only')
reference_data = {
'plate':imp_res_set_plate,
'room':imp_res_set_empty,
'num_element_height_levels':settings_sim['diffuser_geometry']['num_element_height_levels'],
'diffuser_depth':settings_sim['diffuser_geometry']['diffuser_depth'],
'basic_element_dimensions':settings_sim['diffuser_geometry']['basic_element_dimensions'],
'fs':settings_sim['basic']['fs']}
    # Save the computed results to disk.
np.save(reference_file_path,reference_data)
# load the genetic-algorithm progress
algenet_diffusions = []
algenet_patterns = []
algenet_gen_nums = []
if os.path.isdir(algenet_outcomes_dir):
for fname in os.listdir(algenet_outcomes_dir):
_, ext = os.path.splitext(fname)
if ext != '.npy': continue
fdata = np.load(os.path.join(algenet_outcomes_dir,fname), allow_pickle=True)
for item in fdata:
algenet_diffusions.append(item['diffusion'])
algenet_patterns.append(item['pattern'])
algenet_gen_nums.append(item['generation_number'])
best_dif_argmax = np.argmax(algenet_diffusions)
pattern = algenet_patterns[best_dif_argmax]
dif = algenet_diffusions[best_dif_argmax]
if remove_duplicates:
algenet_patterns, algenet_diffusions = remove_duplicate_designs(algenet_patterns, algenet_diffusions)
algenet_best_pattern_idx = np.argmax(algenet_diffusions)
# load the data for the random search
_, consolidated_data = obtain_replay_folder_contents(settings_ai)
random_diffusions = []
random_patterns = []
for entry in consolidated_data:
if 'input_pattern_generation' in list(entry.keys()):
if entry['input_pattern_generation'] != 'random':
continue
random_pattern = entry['replay_transitions'][0]['current_pattern']
random_diffusion = entry['episode_diffusions'][0] - entry['episode_rewards'][0]
random_diffusions.append(random_diffusion)
random_patterns.append(random_pattern)
if remove_duplicates:
random_patterns, random_diffusions = remove_duplicate_designs(random_patterns, random_diffusions)
random_diffusions = np.array(random_diffusions)
random_best_pattern_idx = np.argmax(random_diffusions)
# load the data for the deep policy gradient
agent_diffusions_rnd = []
agent_diffusions_bst = []
agent_patterns_rnd = []
agent_patterns_bst = []
for entry in consolidated_data:
episode_diffusions_argmax = np.argmax(entry['episode_diffusions'])
best_pattern = entry['replay_transitions'][episode_diffusions_argmax]['new_pattern']
if 'input_pattern_generation' in list(entry.keys()):
if entry['input_pattern_generation'] != 'random':
agent_diffusions_bst.append(np.max(entry['episode_diffusions']))
agent_patterns_bst.append(best_pattern)
continue
agent_diffusions_rnd.append(np.max(entry['episode_diffusions']))
agent_patterns_rnd.append(best_pattern)
if remove_duplicates:
agent_patterns_rnd, agent_diffusions_rnd = remove_duplicate_designs(agent_patterns_rnd, agent_diffusions_rnd)
agent_patterns_bst, agent_diffusions_bst = remove_duplicate_designs(agent_patterns_bst, agent_diffusions_bst)
dpg_best_pattern_bst_idx = np.argmax(agent_diffusions_bst)
dpg_best_pattern_rnd_idx = np.argmax(agent_diffusions_rnd)
print()
print(f'random - num designs: {len(random_diffusions)}')
print(f'genetic alg. - num designs: {len(algenet_diffusions)}')
print(f'deep policy gradient (random input) - num designs: {len(agent_diffusions_rnd)}')
print(f'deep policy gradient (best 10 input) - num designs: {len(agent_diffusions_bst)}')
print()
print()
print(f'best pattern random choice')
print(random_patterns[random_best_pattern_idx])
print(f'provided diffusion: {random_diffusions[random_best_pattern_idx]}')
if os.path.isdir(algenet_outcomes_dir):
print()
print(f'best pattern by genetic algorithm (generation no {algenet_gen_nums[algenet_best_pattern_idx]})')
print(algenet_patterns[algenet_best_pattern_idx])
print(f'provided diffusion: {algenet_diffusions[algenet_best_pattern_idx]}')
print()
print(f'best pattern by deep policy gradient (random input)')
print(agent_patterns_rnd[dpg_best_pattern_rnd_idx])
print(f'provided diffusion: {agent_diffusions_rnd[dpg_best_pattern_rnd_idx]}')
print()
print(f'best pattern by deep policy gradient (best 10 input)')
print(agent_patterns_bst[dpg_best_pattern_bst_idx])
print(f'provided diffusion: {agent_diffusions_bst[dpg_best_pattern_bst_idx]}')
print()
# Plot the probability density estimates
random_diffusions_df = pd.DataFrame()
random_diffusions_df = random_diffusions_df.assign(**{'mean diffusion coefficient':random_diffusions})
random_diffusions_df = random_diffusions_df.assign(**{'algorithm type':'random'})
agent_diffusions_rnd_df = pd.DataFrame()
agent_diffusions_rnd_df = agent_diffusions_rnd_df.assign(**{'mean diffusion coefficient':agent_diffusions_rnd})
agent_diffusions_rnd_df = agent_diffusions_rnd_df.assign(**{'algorithm type':'deep policy gradient (random input)'})
agent_diffusions_bst_df = pd.DataFrame()
agent_diffusions_bst_df = agent_diffusions_bst_df.assign(**{'mean diffusion coefficient':agent_diffusions_bst})
agent_diffusions_bst_df = agent_diffusions_bst_df.assign(**{'algorithm type':'deep policy gradient (best input)'})
algenet_diffusions_df = | pd.DataFrame() | pandas.DataFrame |
from collections import defaultdict
from multiprocessing import Pool
import os.path
import random
import igraph
from numpy import *
import numpy.random as nprandom
import pandas as pd
from sklearn.metrics import adjusted_rand_score
from sklearn import svm
"""
The names of the datasets used for training.
"""
TRAIN_SETS = ['afrasian', 'bai', 'chinese_1964', 'chinese_2004', 'huon',
'japanese', 'kadai', 'kamasau', 'lolo_burmese', 'mayan', 'miao_yao',
'mixe_zoque', 'mon_khmer', 'ob_ugrian', 'tujia']
"""
The names of the datasets used for testing. Note that central_asian is manually
split in two files because of file size limits.
"""
TEST_SETS = ['abvd', 'central_asian', 'central_asian_2', 'ielex']
"""
The relevant subset of features; for feature selection, simply alter this list.
"""
FEATURES = ['feature1', 'feature4', 'feature6', 'feature7', 'feature8']
"""
Module-level variables, used within the workhorse functions.
"""
training = None
trainingVectors = None
test = None
def infer(vectors_dir, output_dir):
"""
Inits and orchestrates the cognate class inferring algorithm.
"""
global training
global trainingVectors
global test
dDict = {'gloss':unicode,
'l1':unicode, 'w1':unicode, 'cc1':unicode,
'l2':unicode, 'w2':unicode, 'cc2':unicode,
'feature1':double, 'feature2':double, 'feature3':double,
'feature4':double, 'feature5':double,
'lexstat_simAA':double, 'lexstat_simBB':double, 'lexstat_simAB':double,
'feature7':double, 'target':int, 'db':unicode }
# load the training data
training = pd.DataFrame()
for dataset_name in TRAIN_SETS:
file_path = os.path.join(vectors_dir, '{}.csv'.format(dataset_name))
training = training.append(pd.read_csv(file_path, encoding='utf-8', dtype=dDict))
training['feature8'] = 1-((2*training.lexstat_simAB)/(training.lexstat_simAA+training.lexstat_simBB))
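    # Added note (interpretation, not in the original): feature8 is one minus the pairwise LexStat
    # similarity normalised by the mean of the two self-similarities, i.e. a distance-like score.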
nprandom.seed(1234)
random.seed(1234)
trainingVectors = training.ix[nprandom.permutation(training.index)].drop_duplicates(['db','gloss'])
# cross-validation over training data
pool = Pool()
totalCC = pool.map(f,training.db.unique())
pool.close()
pool.terminate()
for db,wl in zip(training.db.unique(),totalCC):
file_path = os.path.join(output_dir, '{}.svmCC.csv'.format(db))
wl['fullCC'] = [':'.join(x) for x in wl[['db','concept','cc']].values]
wl[['db','concept','doculect','counterpart',
'fullCC','inferredCC']].to_csv(file_path, encoding='utf-8', index=False)
# load the test data
test = pd.DataFrame()
for dataset_name in TEST_SETS:
file_path = os.path.join(vectors_dir, '{}.csv'.format(dataset_name))
test = test.append(pd.read_csv(file_path, encoding='utf-8', dtype=dDict))
test['feature8'] = 1-((2*test.lexstat_simAB)/(test.lexstat_simAA+test.lexstat_simBB))
for db in test.db.unique():
file_path = os.path.join(output_dir, '{}.svmCC.csv'.format(db))
wl = testCluster(db)
wl.to_csv(file_path, encoding='utf-8', index=False)
def f(x):
return svmInfomapCluster(x)
def infomap_clustering(threshold, matrix, taxa=False, revert=False):
"""
Compute the Infomap clustering analysis of the data. Taken from LingPy's
implementation of the algorithm.
"""
if not igraph:
raise ValueError("The package igraph is needed to run this analysis.")
if not taxa:
taxa = list(range(1, len(matrix) + 1))
G = igraph.Graph()
vertex_weights = []
for i in range(len(matrix)):
G.add_vertex(i)
vertex_weights += [0]
# variable stores edge weights, if they are not there, the network is
# already separated by the threshold
for i,row in enumerate(matrix):
for j,cell in enumerate(row):
if i < j:
if cell <= threshold:
G.add_edge(i, j)
comps = G.community_infomap(edge_weights=None,
vertex_weights=None)
D = {}
for i,comp in enumerate(comps.subgraphs()):
vertices = [v['name'] for v in comp.vs]
for vertex in vertices:
D[vertex] = i+1
if revert:
return D
clr = defaultdict(list)
for i,t in enumerate(taxa):
clr[D[i]] += [t]
return clr
def svmInfomapCluster(vdb,featureSubset=FEATURES,th=.34,C=.82,kernel='linear',gamma=1E-3):
"""
The first argument is the validation data base, the rest of the training
databases are used for training.
"""
newWordList = pd.DataFrame()
fitting = trainingVectors[trainingVectors.db!=vdb]
validation = training[training.db==vdb].copy()
X = fitting[featureSubset].values
y = fitting.target.values
svClf = svm.SVC(kernel=kernel,C=C,gamma=gamma,
probability=True)
svClf.fit(X,y)
nprandom.seed(1234)
random.seed(1234)
svScores = svClf.predict_proba(validation[featureSubset].values)[:,1]
validation['svScores'] = svScores
scores = pd.DataFrame()
wordlist = pd.DataFrame()
concepts = validation.gloss.unique()
taxa = unique(validation[['l1','l2']].values.flatten())
dataWordlist = vstack([validation[['gloss','l1','w1','cc1']].values,
validation[['gloss','l2','w2','cc2']].values])
dataWordlist = pd.DataFrame(dataWordlist,columns=['concept','doculect',
'counterpart','cc'])
dataWordlist = dataWordlist.drop_duplicates()
dataWordlist.index = ['_'.join(map(unicode,x))
for x in
dataWordlist[['concept','doculect','counterpart']].values]
validation['id_1'] = [c+'_'+l+'_'+unicode(w)
for (c,l,w) in validation[['gloss','l1','w1']].values]
validation['id_2'] = [c+'_'+l+'_'+unicode(w)
for (c,l,w) in validation[['gloss','l2','w2']].values]
for c in concepts:
dataC= validation[validation.gloss==c].copy()
dataC['id_1'] = [x.replace(' ','').replace(',','') for x in dataC.id_1]
dataC['id_2'] = [x.replace(' ','').replace(',','') for x in dataC.id_2]
wlC = dataWordlist[dataWordlist.concept==c].copy()
if len(wlC)>1:
wlC.index = [x.replace(' ','').replace(',','') for x in wlC.index]
svMtx = zeros((len(wlC.index),len(wlC.index)))
svMtx[ | pd.match(dataC.id_1,wlC.index) | pandas.match |
# plotting_static
# TO BE EDITED
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
import matplotlib.dates as mdates
from matplotlib import rcParams, rc
import sys
import pickle
| register_matplotlib_converters() | pandas.plotting.register_matplotlib_converters |
# -*- coding: utf-8 -*-
# school-bot-demo
# All doxxing information has been removed.
#Image-------------------------------------------------------------------------
import re
#try:
# from PIL import Image
#except ImportError:
# import Image
#import pytesseract
#
#pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
#
#def readimage(imagepath):
# return(pytesseract.image_to_string(Image.open(imagepath)))
#
#
#def findclasses(theschedule):
# person = []
# for i in range(len(classdata)):
# try:
# m = re.search(classdata['Key'][i], theschedule.lower())
# if m:
# person.append(i)
# except AttributeError:
# continue
# if 7 in person and 18 in person:
# person.remove(7)
# return person
#Data--------------------------------------------------------------------------
import pandas as pd
botpath = ''
#botpath = './'
#botpath = ''
#botpath = ''
classdata = pd.read_csv(botpath + 'classes.csv')
classdata = classdata.set_index('ID')
usrdata = pd.read_csv(botpath + 'users.csv')
graderole = {'6': '6th Grade', '7': '7th Grade', '8': '8th Grade', '9': 'Freshman', '10': 'Sophomore', '11': 'Junior', '12': 'Senior', '13': 'Graduate', '14': 'Teacher'}
guestStatus = {0 : "Not in SCHOOL", 1 : "SCHOOL 1", 2 : "SCHOOL 2", 3 : "Other SCHOOL", '0' : "Not in SCHOOL", '1' : "SCHOOL 1", '2' : "SCHOOL 2", '3' : "Other SCHOOL"}
#Register----------------------------------------------------------------------
async def Register(user):
global usrdata
issues = 0
print(datetime.datetime.now(), "Registering", user.name)
await user.send("Welcome to the SCHOOL 1 discord (unofficial)! You may say 'cancel' at any point to exit and '" + prefix + "register' to retry.")
embed = discord.Embed(title = "Are you currently in SCHOOL? (Graduates included)", description = "0: Not in SCHOOL\n1: In SCHOOL 1\n2: SCHOOL 2\n3: Other SCHOOL School", color = discord.Color.dark_purple())
chooseGuest = await user.send(embed = embed)
emojilist = [str(i) + "\N{combining enclosing keycap}" for i in range(0,4)]
for i in emojilist:
await chooseGuest.add_reaction(i)
def check2(reaction, person):
nonlocal emojilist
return person == user and str(reaction) in emojilist
try:
reaction, _ = await client.wait_for('reaction_add', timeout = 600.0, check = check2)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at choose from list")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
guest = str(reaction)[0]
await user.send("What is your real name? (First and last, if you would not like to give your name say 'Anonymous')")
print(datetime.datetime.now(), user.name, "on step name")
while True:
def check(m):
return m.guild == None and m.author == user
try:
msg = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at name")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at name")
return None
elif ''.join(re.split(' |-|,', msg.content)).isalpha():
irlname = msg.content.lower()
break
else:
await user.send("Please only use letters a-z in your name. Enter your name again and contact an admin if you continue having issues.")
issues += 1
print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at name")
continue
await user.send("Now, please say your grade (number 6-12, graduate = 13, teacher = 14)")
print(datetime.datetime.now(), user.name, "on step grade")
while True:
try:
msg2 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at grade")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg2.content in graderole:
grade = msg2.content
break
elif msg2.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at grade")
return None
else:
await user.send("Please only use numbers 6-14 in your grade. Enter your grade again and contact an admin if you continue having issues.")
issues += 1
print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at grade")
continue
if guest == "1":
await user.send("Great, now begin to list your classes one by one (most abbreviations are allowed) or send a picture of your schedule (Coming soon!) and say 'done' when you are done. (Say done now to skip) (For precalc use 'pre-calc')")
print(datetime.datetime.now(), user.name, "on step classes")
listofclasses = []
while True:
if listofclasses:
embed = discord.Embed(title = "Classes for " + user.name + ":", description = ''.join([classdata.loc[i]['Name'] + "\n" for i in listofclasses]), color = discord.Color.dark_purple())
embed.set_footer(text = "Continue listing your classes and say 'done' when all of your classes are on this list")
embed.set_thumbnail(url = user.avatar_url)
await user.send(embed = embed)
try:
msg3 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at classes")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg3.attachments:
await user.send("Feature not implemented yet, please list your classes through text.")
continue
# await user.send("Reading schedule...")
# await msg3.attachments[0].save(botpath + 'Saved/sched_' + user.name + '.png')
# print(datetime.datetime.now(), "Saved schedule from", user.name, "as sched_" + user.name + ".png")
# classes = pytesseract.image_to_string(Image.open(botpath + 'Saved/sched_' + user.name + '.png'))
# listofclasses.append(findclasses(classes))
# if len(listofclasses) >= 7:
# embed = discord.Embed(title = "Classes for " + user.name + ":", description = ''.join([classdata.loc[i]['Name'] + "\n" for i in listofclasses]), color = discord.Color.dark_purple())
# embed.set_thumbnail(url = user.avatar_url)
# await user.send(embed = embed)
# await user.send("Is this correct?")
# try:
# msg4 = await client.wait_for('message', timeout = 60.0, check = check)
# except asyncio.TimeoutError:
# print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at check classes")
# await user.send("Registration failed. You may do " + prefix + "register to retry.")
# return None
# if msg4.content.lower().startswith("y"):
# listofclasses.sort()
# usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':[str(listofclasses)], 'IRL' : [irlname], 'Grade' : [grade]}), sort = False, ignore_index = True)
# usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
# usrdata = pd.read_csv(botpath + 'users.csv')
# print(datetime.datetime.now(), "Registered", user.name, "with classes in users.csv and", issues, "issues")
# break
# elif msg4.content.lower() == "cancel":
# await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
# print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at image (Check classes)")
# return None
# else:
# await user.send("Please send a better image or say no to skip adding classes. You may contact an admin if you continue having issues.")
# issues += 1
# print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at image (incorrect classes)")
# continue
# else:
# await user.send("Only found " + str(len(listofclasses)) + " classes, please send a better image or say no to skip adding classes. You may contact an admin if you continue having issues.")
# issues += 1
# print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at image (too few classes - " + str(len(listofclasses)) + ")")
# continue
elif msg3.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at classes (send)")
return None
elif msg3.content.lower() == "done":
if len(listofclasses) >= 7:
listofclasses.sort()
usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':[str(listofclasses)], 'IRL' : [irlname], 'Grade' : [grade], 'Guest' : [guest]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
print(datetime.datetime.now(), "Registered", user.name, "with classes in users.csv and", issues, "issues")
break
elif listofclasses:
await user.send("You have only added " + str(len(listofclasses)) + " classes, are you sure?")
try:
msg4 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at check classes")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg4.content.lower().startswith("y"):
                        listofclasses.sort()
usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':[str(listofclasses)], 'IRL' : [irlname], 'Grade' : [grade], 'Guest' : [guest]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = | pd.read_csv(botpath + 'users.csv') | pandas.read_csv |
import logging
import os
import pandas as pd
from jade.utils.utils import load_data
from disco.analysis import Analysis, Input
from disco.exceptions import AnalysisRunException
from disco.utils.custom_type import CustomType
from disco.utils.dss_utils import extract_upgrade_results
logger = logging.getLogger(__name__)
class UpgradeCostAnalysis(Analysis):
INPUTS = [
Input("unit_cost_data_file", CustomType(str), "DISCO_cost_database.xlsx")
]
def run(self, output, *args, **kwargs):
# unit_cost_data_file
unit_cost_data_file = self.get_input("unit_cost_data_file").current_value
# relative job paths
job_output = os.path.join(output, self._job_name)
# output_path
post_process_output = os.path.join(job_output, "post_process")
os.makedirs(post_process_output, exist_ok=True)
# upgrade files
project_path = os.path.join(job_output, "pydss_project")
upgrade_files = extract_upgrade_results(project_path, file_ext=".json")
thermal_upgrade_file = upgrade_files["thermal"]
voltage_upgrade_file = upgrade_files["voltage"]
try:
# Cost calculation
thermal_df = self.get_thermal_costs(
thermal_upgrade_file, unit_cost_data_file, post_process_output
)
metadata = load_data(voltage_upgrade_file)
vreg_df = self.get_vreg_costs(
voltage_upgrade_file,
unit_cost_data_file,
metadata["feederhead_basekV"],
)
cap_df = self.get_cap_costs(voltage_upgrade_file, unit_cost_data_file)
# Cost summary
total_costs_df = self.get_total_costs(thermal_df, vreg_df, cap_df)
# Output CSV file
summary_of_upgrade_costs_file = os.path.join(
post_process_output, "summary_of_upgrade_costs.csv"
)
total_costs_df.to_csv(summary_of_upgrade_costs_file, index=False)
# total_costs_df.to_feather(output_path + 'summary_of_upgrade_costs.feather')
self._add_to_results(
"summary_of_upgrade_costs", summary_of_upgrade_costs_file
)
except AnalysisRunException:
logger.exception("Unexcepted UpgradeCostAnalysis Error.")
raise
finally:
if os.path.exists(thermal_upgrade_file):
os.remove(thermal_upgrade_file)
if os.path.exists(voltage_upgrade_file):
os.remove(voltage_upgrade_file)
def indiv_line_cost(self, upgrade_df, unit_cost_lines):
"""Function to calculate costs of upgrading each individual line that is overloaded.
Returns a dataframe with columns containing the line ID's and cost to upgrade.
"""
# Dictionary used to convert between different length units and meters, which are used for all the calculations.
# OpenDSS can output results in any of these lengths.
len_unit_mult = {
"mi": 1609.34,
"kft": 0.00328084,
"km": 0.001,
"ft": 3.28084,
"in": 39.3701,
"cm": 100,
}
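        # Worked example (hypothetical values): a 1.5 kft segment gives
        # new_line_len_m = 1.5 / len_unit_mult['kft'] = 1.5 / 0.00328084, roughly 457.2 m.
        # The factors are "target unit per metre"; note the 'mi' entry (1609.34) looks like
        # metres per mile instead, which would not match the division used below.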
line_costs_df = pd.DataFrame()
for k in upgrade_df.keys():
# print(k)
if "Line." in k:
new_line_len = upgrade_df[k]["new"][1][
"length"
                ] # upgraded lines and new lines run along existing circuit, so length is the same for both
if upgrade_df[k]["new"][0] > 0:
# print(k)
new_line_len_unit = upgrade_df[k]["new"][1]["length_unit"]
if new_line_len_unit == "m":
new_line_len_m = new_line_len
else:
new_line_len_m = new_line_len / len_unit_mult[new_line_len_unit]
# print('line length is ',new_line_len, new_line_len_unit, 'or', new_line_len_m, 'm')
line_count = upgrade_df[k]["new"][
0
] # count of new lines added to address overload. Often 1, but could be > 1 with severe overloads
new_line_cost_per_line = new_line_len_m * float(
unit_cost_lines[
unit_cost_lines["description"] == "new_line"
].cost_per_m
)
new_line_cost = line_count * new_line_cost_per_line
elif upgrade_df[k]["new"][0] == 0:
new_line_cost = 0
new_line_cost_per_line = 0
elif upgrade_df[k]["new"][0] < 0:
logger.error(
"Error: number of new lines is negative: %s",
upgrade_df[k]["new"][0],
)
raise AnalysisRunException(
"Error: number of new lines is negative: {}".format(
upgrade_df[k]["new"][0]
)
)
upgraded_line_count = upgrade_df[k]["upgrade"][0]
upgraded_line_cost = (
new_line_cost_per_line * upgraded_line_count
) # TODO: update to take ampacities as an option. X data currently does not have sufficient resolution
dict_k = {
"id": [k],
"new_equip_cost": [new_line_cost],
"upgraded_equip_cost": [upgraded_line_cost],
}
df_k = | pd.DataFrame.from_dict(dict_k) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 14 10:52:25 2019
@author: xkadj
"""
import pandas as pd
import k_plot
can_file = r"C:\Users\xkadj\Desktop\ROBOTIKA\osgar_191206\osgar\tmp.csv"
desired_speed_file = r"C:\Users\xkadj\Desktop\ROBOTIKA\osgar_191206\osgar\tmp_desired_speed.csv"
downdrops_front_file = r"C:\Users\xkadj\Desktop\ROBOTIKA\osgar_191206\osgar\downdrops_front.csv"
downdrops_rear_file = r"C:\Users\xkadj\Desktop\ROBOTIKA\osgar_191206\osgar\downdrops_rear.csv"
class MessageParser:
def __init__(self, can_messages):
self.values_0x91 = self.get_vesc_status(can_messages, 0x91)
self.values_0x92 = self.get_vesc_status(can_messages, 0x92)
self.values_0x93 = self.get_vesc_status(can_messages, 0x93)
self.values_0x94 = self.get_vesc_status(can_messages, 0x94)
self.values_0x81 = self.get_voltage(can_messages, 0x81)
self.values_0x82 = self.get_voltage(can_messages, 0x82)
self.desired_speed = self.get_desired_speed(desired_speed_messages)
self.downdrops_front = self.get_downdrops(downdrops_front_messages)
self.downdrops_rear = self.get_downdrops(downdrops_rear_messages)
def get_vesc_status(self,messages, message_ID):
# =============================================================================
        # VESC 90x status message layout:
        # bytes 0-3 are the current RPM as a whole number, from most significant byte to least significant.
        # bytes 4-5 are the measured current, most to least significant byte (units not confirmed yet).
        # bytes 6-7 are the current duty cycle in 10ths of a percent, from most to least significant byte.
# =============================================================================
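        # Decoding sketch for a hypothetical payload '000003E8006400C8':
        #   chars [0:8]   -> 0x000003E8 = 1000 -> erpm       = 1000 / 10 = 100.0
        #   chars [8:12]  -> 0x0064     = 100  -> current    = 100 / 10  = 10.0
        #   chars [12:16] -> 0x00C8     = 200  -> duty_cycle = 200 / 10  = 20.0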
values = []
for row in range(len(messages)):
if messages[row][2] == message_ID:
erpm = int(messages[row][3][0:8],16)
                if erpm > 2147483647: erpm = erpm - 4294967296  # 32-bit two's-complement conversion
current = int(messages[row][3][8:12],16)
                if current > 32767: current = current - 65536  # 16-bit two's-complement conversion
duty_cycle = int(messages[row][3][12:16],16)
                if duty_cycle > 32767: duty_cycle = duty_cycle - 65536  # 16-bit two's-complement conversion
values.append([messages[row][1], erpm/10, current/10, duty_cycle/10])
values = pd.DataFrame(values,columns=['time','erpm','current','duty_cycle'])
return values
def get_voltage(self,messages, message_ID):
values = []
for row in range(len(messages)):
if messages[row][2] == message_ID:
voltage = int(messages[row][3],16)
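                # The raw payload is assumed to be in millivolts; dividing by 1000 below yields volts.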
values.append([messages[row][1], voltage/1000])
values = pd.DataFrame(values,columns=['time','voltage'])
return values
def get_desired_speed(self,messages):
values = pd.DataFrame(messages,columns=['0','time','left','right']).drop('0',axis=1)
return values
def get_downdrops(self,messages):
values = pd.DataFrame(messages,columns=['0','time','left','right']).drop('0',axis=1)
return values
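# Usage sketch (hypothetical; relies on the message lists parsed in MAIN below):
#   parser = MessageParser(can_messages)
#   parser.values_0x91[['time', 'erpm']]  # e.g. data to plot motor ERPM over time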
# =============================================================================
# MAIN
# =============================================================================
can_messages = pd.read_csv(can_file, sep=';', engine='python').values.tolist()
desired_speed_messages = pd.read_csv(desired_speed_file, sep=';', engine='python').values.tolist()
downdrops_front_messages = | pd.read_csv(downdrops_front_file, sep=';', engine='python') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 09:15:01 2019
@author: zahid
"""
# Load libraries
import pandas as pd
import numpy as np
from sklearn import preprocessing # Import the preprocessing module
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn import metrics # Import scikit-learn metrics module for accuracy calculation
from sklearn.metrics import confusion_matrix # confusion_matrix for model evaluation
data = | pd.read_csv('data/CleanDataROSE.csv') | pandas.read_csv |
import glob
import warnings
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn import tree
warnings.filterwarnings("ignore")
PATH = "../vince_with_dates.csv"
data_sets_files = glob.glob(PATH)
data_set = (pd.concat(( | pd.read_csv(f, sep=';', header=0) | pandas.read_csv |
import argparse
import datetime
import os
import pandas as pd
import tasking_manager_stats.data_management as dm
def get_args():
parser = argparse.ArgumentParser(description='Agregate users data from tasking manager API')
parser.add_argument('merged_stats', type=str, help='Path of the merged stats CSV file')
parser.add_argument('stats_one_author', type=str, help='Path of the merged stats 1 author by task type CSV file')
parser.add_argument('mapathon', type=str, help='Path of the mapathon CSV file')
    parser.add_argument('-max_date', type=str, help='Date (%Y_%m_%d) at which to stop the data and compute whether contributors come back')
return parser.parse_args()
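# Hypothetical command line (script name and file paths are assumptions):
#   python agregate_users.py merged_stats.csv stats_one_author.csv mapathons.csv -max_date 2020_01_31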
def compute_mapathon_number(mapathon_file, stats_file, max_date=None):
mapathons = pd.read_csv(mapathon_file)
mapathons['Date'] = pd.to_datetime(mapathons['Date'], dayfirst=True)
# Extract projects of the mapathon
mapathons2 = pd.DataFrame()
for _, row in mapathons.iterrows():
tasks = row['Tasks']
if pd.isnull(tasks):
continue
projects = set()
for s in tasks.split('/'):
try:
projects.add(int(s))
except:
pass
for s in tasks.split(', '):
try:
projects.add(int(s))
except:
pass
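        # Parsing sketch (hypothetical 'Tasks' strings): "1234/5678" is split on '/',
        # "1234, 5678" on ', '; both yield the project set {1234, 5678}, and any
        # non-numeric fragments are silently skipped by the except/pass blocks.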
# Create new mapathon line for each project
for project in projects:
mapathons2 = pd.concat([mapathons2, pd.DataFrame(data=[(row['Date'], row['City'], project)],
columns=['date', 'City', 'Project'])], axis=0,
ignore_index=True)
# Compute number of mapathons by contributor
df = pd.read_csv(stats_file, encoding='ISO-8859-1')
df['date'] = df['Year'].astype(str) + '-' + df['Month'].astype(str) + '-' + df['Day'].astype(str)
df['date'] = pd.to_datetime(df['date'], yearfirst=True)
if max_date is not None:
df = df[df['date'] <= max_date]
df2 = df[(df['Hour'] > 17) & (df['Hour'] < 22)]
df3 = pd.merge(df2.loc[df2['Type'] == 'MAPPING'], mapathons2, on=['date', 'Project'])
df4 = df3[['date', 'Author', 'City', 'Project']].drop_duplicates()
res = df4[['Author', 'date']].drop_duplicates().groupby('Author').count().date.reset_index()
res.columns = ['Author', 'MapathonNb']
return res
def compute_validation_time_by_task(stats_dir, csv_file, max_date=None):
df_project = pd.read_csv(os.path.join(stats_dir, csv_file), encoding='ISO-8859-1')
df_project = df_project[df_project['Type'] == 'VALIDATION']
if len(df_project) == 0:
return pd.DataFrame()
if max_date is not None:
df_project['date'] = df_project['Year'].astype(str) + '-' + df_project['Month'].astype(str) + '-' + df_project['Day'].astype(str)
df_project['date'] = | pd.to_datetime(df_project['date'], yearfirst=True) | pandas.to_datetime |
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
from IMLearn.utils import split_train_test
import pickle
import numpy as np
import pandas as pd
import datetime
from sklearn.metrics import mean_absolute_error, mean_squared_error
from tqdm import tqdm  # needed by the progress loop in __add_did_cancel
from sklearn.model_selection import train_test_split
BEGIN_CHECK_DATE = datetime.date(2018, 12, 7)
END_CHECK_DATE = datetime.date(2018, 12, 13)
def load_data1(filename: str):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
        Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# TODO - replace below code with any desired preprocessing
full_data = pd.read_csv(filename).dropna().drop_duplicates()
__add_time_between_booking_to_cancel(full_data)
features = full_data[[
"hotel_id",
"accommadation_type_name",
"hotel_star_rating",
]]
features = pd.get_dummies(features, columns=["accommadation_type_name"])
labels = full_data["diff_booking_to_cancel"]
return features, labels
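# Usage sketch for the loader above (file name is an assumption):
#   X, y = load_data1("agoda_cancellation_train.csv")
#   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)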
def load_data_to_predict(filename: str):
full_data = pd.read_csv(filename).drop_duplicates()
full_data = __add_did_cancel(full_data, True)
features = full_data[[
"original_selling_amount",
"day_of_year", "how_far_in_advance",
"length_of_stay", "no_of_adults", "hotel_brand_code",
"customer_nationality", "charge_option",
'hotel_chain_code', 'original_payment_method'
]]
features = features.fillna(0)
features["hotel_brand_code"] = features["hotel_brand_code"].rank(method='dense').astype(int)
features["customer_nationality"] = features["customer_nationality"].rank(method='dense').astype(int)
features["charge_option"] = features["charge_option"].rank(method='dense').astype(int)
features["hotel_chain_code"] = features["hotel_chain_code"].rank(method='dense').astype(int)
features["original_payment_method"] = features["original_payment_method"].rank(method='dense').astype(int)
return features
def load_classsifier(filename: str):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
        Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# length of stay
# TODO - replace below code with any desired preprocessing
full_data = pd.read_csv(filename).drop_duplicates()
full_data = __add_did_cancel(full_data)
features = full_data[[
"diff_booking_to_cancel",
"original_selling_amount",
"day_of_year", "how_far_in_advance",
"length_of_stay", "no_of_adults", "hotel_brand_code",
"customer_nationality", "charge_option",
'hotel_chain_code', 'original_payment_method', "canceled"
]]
features = features.dropna()
features["hotel_brand_code"] = features["hotel_brand_code"].rank(method='dense').astype(int)
features["customer_nationality"] = features["customer_nationality"].rank(method='dense').astype(int)
features["charge_option"] = features["charge_option"].rank(method='dense').astype(int)
features["hotel_chain_code"] = features["hotel_chain_code"].rank(method='dense').astype(int)
features["original_payment_method"] = features["original_payment_method"].rank(method='dense').astype(int)
labels = features["canceled"]
return features.drop(["canceled", "diff_booking_to_cancel"], axis=1), labels
def load_regression(filename: str):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
        Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# length of stay
# TODO - replace below code with any desired preprocessing
full_data = pd.read_csv(filename).drop_duplicates()
full_data = __add_did_cancel(full_data)
features = full_data[[
"diff_booking_to_cancel",
"original_selling_amount",
"day_of_year", "how_far_in_advance",
"length_of_stay", "no_of_adults", "hotel_brand_code",
"customer_nationality", "charge_option",
'hotel_chain_code', 'original_payment_method'
]]
features = features.dropna()
features["hotel_brand_code"] = features["hotel_brand_code"].rank(method='dense').astype(int)
features["customer_nationality"] = features["customer_nationality"].rank(method='dense').astype(int)
features["charge_option"] = features["charge_option"].rank(method='dense').astype(int)
features["hotel_chain_code"] = features["hotel_chain_code"].rank(method='dense').astype(int)
features["original_payment_method"] = features["original_payment_method"].rank(method='dense').astype(int)
labels = features["diff_booking_to_cancel"]
return features.drop(["diff_booking_to_cancel"], axis=1), labels
def __get_day_of_year(full_data):
date_list = full_data.booking_datetime.split(" ")[0].split("-")
year = date_list[0]
month = date_list[1]
day = date_list[2]
day_of_year = datetime.date(int(year), int(month), int(day)).timetuple().tm_yday
return day_of_year
def __how_far_in_advance(full_data):
date_list = full_data.booking_datetime.split(" ")[0].split("-")
year = date_list[0]
month = date_list[1]
day = date_list[2]
booking = datetime.date(int(year), int(month), int(day))
checking = full_data.checkout_date.split(" ")[0].split("-")
year = checking[0]
month = checking[1]
day = checking[2]
checkin = datetime.date(int(year), int(month), int(day))
return abs((booking - checkin).days)
def __length_of_stay(full_data):
date_list = full_data.checkin_date.split(" ")[0].split("-")
year = date_list[0]
month = date_list[1]
day = date_list[2]
booking = datetime.date(int(year), int(month), int(day))
checking = full_data.checkout_date.split(" ")[0].split("-")
year = checking[0]
month = checking[1]
day = checking[2]
checkin = datetime.date(int(year), int(month), int(day))
return abs((booking - checkin).days)
def __add_did_cancel(full_data, pred=False):
pd.options.mode.chained_assignment = None
canceled = list()
diff_booking_to_cancel = list()
day_of_year = list()
how_far_in_advance = list()
length_of_stay = list()
pay_now = list()
for i in tqdm(range(len(full_data))):
if not pred:
if str(full_data.cancellation_datetime.iloc[i]) == "nan":
diff_booking_to_cancel.append(-1)
canceled.append(0)
else:
diff_booking_to_cancel.append(
__diff_of_date_start(full_data.iloc[i][["checkout_date", "cancellation_datetime"]]))
canceled.append(1)
if full_data.charge_option.iloc[i] == "Pay Now":
pay_now.append(1)
else:
pay_now.append(0)
day_of_year.append(__get_day_of_year(full_data.iloc[i]))
how_far_in_advance.append(__how_far_in_advance(full_data.iloc[i]))
length_of_stay.append(__length_of_stay(full_data.iloc[i]))
if not pred:
full_data["canceled"] = canceled
full_data["diff_booking_to_cancel"] = diff_booking_to_cancel
full_data["day_of_year"] = day_of_year
full_data["how_far_in_advance"] = how_far_in_advance
full_data["length_of_stay"] = length_of_stay
full_data["pay_now"] = pay_now
return full_data
def __diff_of_date_start(date_string):
date_list = date_string.checkout_date.split(" ")[0].split("-")
year = date_list[0]
month = date_list[1]
day = date_list[2]
date_booking = datetime.date(int(year), int(month), int(day))
date_cancel_list = date_string.cancellation_datetime.split(" ")[0].split("-")
year = date_cancel_list[0]
month = date_cancel_list[1]
day = date_cancel_list[2]
date_cancel = datetime.date(int(year), int(month), int(day))
return int(abs((date_booking - date_cancel).days))
def __add_time_between_booking_to_cancel(full_data):
booking_time_df = pd.DataFrame(full_data[["checkout_date", "cancellation_datetime"]])
full_data["diff_booking_to_cancel"] = booking_time_df.apply(__diff_of_date_start, axis=1)
return full_data
def evaluate_and_export(estimator, X: np.ndarray, filename: str):
"""
Export to specified file the prediction results of given estimator on given testset.
File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing
predicted values.
Parameters
----------
estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn)
Fitted estimator to use for prediction
X: ndarray of shape (n_samples, n_features)
Test design matrix to predict its responses
filename:
path to store file at
"""
pred_y = estimator.predict(X)
X = X.reset_index()
for i, y in enumerate(pred_y):
if BEGIN_CHECK_DATE.timetuple().tm_yday-1 <= X.iloc[i]["day_of_year"] + y <= END_CHECK_DATE.timetuple().tm_yday+1:
pred_y[i] = 1
else:
pred_y[i] = 0
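    # Window check above: Dec 7 2018 is day-of-year 341 and Dec 13 2018 is 347, so a
    # booking whose day_of_year plus predicted offset y lands in [340, 348] is flagged 1.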
| pd.DataFrame(pred_y, columns=["predicted_values"]) | pandas.DataFrame |