| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
|---|---|---|
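Each row pairs a truncated code prompt with the completion that finishes its final call and the fully qualified api name of that call; in the samples below, each completion and api is shown joined onto the sample's final line and tagged with an "# api:" comment. A minimal sketch of how the columns relate (the join_row helper is hypothetical, not part of the dataset):

def join_row(prompt: str, completion: str) -> str:
    # Rebuild the runnable source line from a (prompt, completion) pair.
    return prompt + completion

# e.g. join_row("train = ", "pd.read_excel('trainfile.xlsx')")
# returns "train = pd.read_excel('trainfile.xlsx')"  (api: pandas.read_excel)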
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
# with missing values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, codes=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_sequence_of_arrays', [
1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'],
(1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'),
[(1,), 2], [1, (2,)], [('a',), 'b'],
((1,), 2), (1, (2,)), (('a',), 'b')
])
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
msg = "Input must be a list / sequence of array-likes"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize('idx1, idx2', [
([1, 2, 3], ['a', 'b']),
([], ['a', 'b']),
([1, 2, 3], [])
])
def test_from_arrays_different_lengths(idx1, idx2):
# see gh-13599
msg = '^all arrays must be same length$'
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays([idx1, idx2])
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
msg = 'Cannot infer number of levels from empty list'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples([])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
msg = 'Input must be a list / sequence of tuple-likes.'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples(0)
def test_from_tuples_empty():
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_index_values(idx):
result = MultiIndex.from_tuples(idx)
assert (result.values == idx.values).all()
def test_tuples_with_name_string():
# GH 15110 and GH 14848
li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='abc')
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='a')
def test_from_tuples_with_tuple_label():
# GH 15457
expected = pd.DataFrame([[2, 1, 2], [4, (1, 2), 3]],
columns=['a', 'b', 'c']).set_index(['a', 'b'])
idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=('a', 'b'))
result = pd.DataFrame([2, 3], columns=['c'], index=idx)
tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_product([])
def test_from_product_empty_one_level():
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
@pytest.mark.parametrize('first, second', [
([], []),
(['foo', 'bar', 'baz'], []),
([], ['a', 'b', 'c']),
])
def test_from_product_empty_two_levels(first, second):
names = ['A', 'B']
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
codes=[[], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('N', list(range(4)))
def test_from_product_empty_three_levels(N):
# GH12258
names = ['A', 'B', 'C']
lvl2 = list(range(N))
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
codes=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_input', [
1,
[1],
[1, 2],
[[1], 2],
'a',
['a'],
['a', 'b'],
[['a'], 'b'],
])
def test_from_product_invalid_input(invalid_input):
msg = (r"Input must be a list / sequence of iterables|"
"Input must be list-like")
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(iterables=invalid_input)
def test_from_product_datetimeindex():
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([
(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02')),
])
tm.assert_numpy_array_equal(mi.values, etalon)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('f', [
lambda x: x,
lambda x: pd.Series(x),
lambda x: x.values
])
def test_from_product_index_series_categorical(ordered, f):
# GH13743
first = ['foo', 'bar']
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
result = pd.MultiIndex.from_product([first, f(idx)])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_from_product():
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator():
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
msg = "Input must be a list / sequence of iterables."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(0)
def test_create_index_existing_name(idx):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
index = idx
index.names = ['foo', 'bar']
result = pd.Index(index)
expected = Index(
Index([
('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'
),
names=['foo', 'bar']
)
tm.assert_index_equal(result, expected)
result = pd.Index(index, names=['A', 'B'])
expected = Index(
Index([
('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'
),
names=['A', 'B']
)
tm.assert_index_equal(result, expected)
# ----------------------------------------------------------------------------
# from_frame
# ----------------------------------------------------------------------------
def test_from_frame():
# GH 22420
df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']],
columns=['L1', 'L2'])
expected = pd.MultiIndex.from_tuples([('a', 'a'), ('a', 'b'),
('b', 'a'), ('b', 'b')],
names=['L1', 'L2'])
result = pd.MultiIndex.from_frame(df)
tm.assert_index_equal(expected, result)
@pytest.mark.parametrize('non_frame', [
pd.Series([1, 2, 3, 4]),
[1, 2, 3, 4],
[[1, 2], [3, 4], [5, 6]],
pd.Index([1, 2, 3, 4]),
np.array([[1, 2], [3, 4], [5, 6]]),
27
])
def test_from_frame_error(non_frame):
# GH 22420
with pytest.raises(TypeError, match='Input must be a DataFrame'):
pd.MultiIndex.from_frame(non_frame)
def test_from_frame_dtype_fidelity():
# GH 22420
df = pd.DataFrame(OrderedDict([
('dates', pd.date_range('19910905', periods=6, tz='US/Eastern')),
('a', [1, 1, 1, 2, 2, 2]),
('b', pd.Categorical(['a', 'a', 'b', 'b', 'c', 'c'], ordered=True)),  # api: pandas.Categorical
"""
Create DASS features for the ground truth datasets.
"""
# region PREPARE WORKSPACE
# Import dependencies
import os
import pandas as pd
from joblib import load
import sklearn
# Get current working directory
my_path = os.getcwd()
# Load the ground truth datasets
truth_depression = pd.read_csv(my_path + '/data/cleaned/dass/truth/truth_depression.csv')
truth_anxiety = pd.read_csv(my_path + '/data/cleaned/dass/truth/truth_anxiety.csv')
truth_stress = pd.read_csv(my_path + '/data/cleaned/dass/truth/truth_stress.csv')
truth_suicide = pd.read_csv(my_path + '/data/cleaned/dass/truth/truth_suicide.csv')
# Load the SVM models
svm_depression = load(my_path + '/models/dass_depression.joblib')
svm_anxiety = load(my_path + '/models/dass_anxiety.joblib')
svm_stress = load(my_path + '/models/dass_stress.joblib')
svm_suicide = load(my_path + '/models/dass_suicide.joblib')
# endregion
# region PREPARE DATA
# Get features
x_depression = truth_depression.drop(['text', 'dysphoria'], axis=1)
x_anxiety = truth_anxiety.drop(['text', 'dysphoria'], axis=1)
x_stress = truth_stress.drop(['text', 'dysphoria'], axis=1)
x_suicide = truth_suicide.drop(['text', 'dysphoria'], axis=1)
# endregion
# region ENGINEER DASS FEATURES FROM SVM-GENERATED LABELS
# Start file output
with open(my_path + '/doc/dass_output.txt', 'a') as f:
print('##############################################################', file=f)
print('SVM CLASSIFICATION OF DASS - GROUND TRUTH ####################', file=f)
print('\n', file=f)
# Predict depression
y_depression = svm_depression.predict(x_depression)
pd.DataFrame(y_depression)  # api: pandas.DataFrame
#Importing the required packages
from flask import Flask, render_template, request
import os
import pandas as pd
from pandas import ExcelFile
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import *
from sklearn.metrics import *
from sklearn.model_selection import cross_val_score
import itertools
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
app = Flask(__name__)
#Routing to initial home page
@app.route('/')
def home():
return render_template('home.html')
@app.route('/admin_login')
def admin_login():
return render_template('admin_login.html')
@app.route('/admin', methods=['GET','POST'])
def admin():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('admin_cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('admin.html')
else:
k=1
return render_template('admin_login.html',k=k)
else:
k=1
return render_template('admin_login.html',k=k)
@app.route('/admin_printed', methods=['GET','POST'])
def admin_printed():
trainfile=request.files['admin_doc']
t=pd.read_excel(trainfile)
t.to_excel('trainfile.xlsx')
return render_template('admin_printed.html')
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/index', methods=['GET','POST'])
def index():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('index.html')
else:
k=1
return render_template('login.html',k=k)
else:
k=1
return render_template('login.html',k=k)
#Routing to page when File Upload is selected
@app.route('/file_upload')
def file_upload():
return render_template("file_upload.html")
@app.route('/upload_printed', methods=['GET','POST'])
def upload_printed():
abc=request.files['printed_doc']
test1=pd.read_excel(abc)
test=test1
train=pd.read_excel('trainfile.xlsx')
train['TenurePerJob']=0
for i in range(0,len(train)):
if train.loc[i,'NumCompaniesWorked']>0:
train.loc[i,'TenurePerJob']=train.loc[i,'TotalWorkingYears']/train.loc[i,'NumCompaniesWorked']
a=np.median(train['MonthlyIncome'])
train['CompRatioOverall']=train['MonthlyIncome']/a
full_col_names=train.columns.tolist()
num_col_names=train.select_dtypes(include=[np.int64,np.float64]).columns.tolist()
num_cat_col_names=['Education','JobInvolvement','JobLevel','StockOptionLevel']
target=['Attrition']
num_col_names=list(set(num_col_names)-set(num_cat_col_names))
cat_col_names=list(set(full_col_names)-set(num_col_names)-set(target))
#print("total no of numerical features:",len(num_col_names))
#print("total no of categorical & ordered features:",len(cat_col_names))
cat_train=train[cat_col_names]
num_train=train[num_col_names]
for col in num_col_names:
if num_train[col].skew()>0.80:
num_train[col]=np.log1p(num_train[col])
for col in cat_col_names:
col_dummies=pd.get_dummies(cat_train[col],prefix=col)
cat_train=pd.concat([cat_train,col_dummies],axis=1)
Attrition={'Yes':1,'No':0}
train.Attrition=[Attrition[item] for item in train.Attrition]
cat_train.drop(cat_col_names,axis=1,inplace=True)
final_train=pd.concat([num_train,cat_train],axis=1)
final_train['pr_mean_psh'] = final_train['PerformanceRating'].add(final_train['PercentSalaryHike'])
final_train['pr_mean_psh']=final_train['pr_mean_psh']/2
final_train.drop(labels=['PerformanceRating','PercentSalaryHike'],axis=1,inplace=True)
df1=final_train
for col in list(df1):
df1[col]=df1[col]/df1[col].max()
empnum=test['EmployeeNumber']
test['TenurePerJob']=0
for i in range(0,len(test)):
if test.loc[i,'NumCompaniesWorked']>0:
test.loc[i,'TenurePerJob']=test.loc[i,'TotalWorkingYears']/test.loc[i,'NumCompaniesWorked']
a=np.median(test['MonthlyIncome'])
test['CompRatioOverall']=test['MonthlyIncome']/a
test.drop(labels=['EmployeeNumber'],axis=1,inplace=True)
#test.drop(labels=['EmployeeCount','EmployeeNumber','Over18','StandardHours'],axis=1,inplace=True)
full_col_names=test.columns.tolist()
num_col_names=test.select_dtypes(include=[np.int64,np.float64]).columns.tolist()
num_cat_col_names=['Education','JobInvolvement','JobLevel','StockOptionLevel']
target=['Attrition']
num_col_names=list(set(num_col_names)-set(num_cat_col_names))
cat_col_names=list(set(full_col_names)-set(num_col_names)-set(target))
#print("total no of numerical features:",len(num_col_names))
#print("total no of categorical & ordered features:",len(cat_col_names))
cat_test=test[cat_col_names]
num_test=test[num_col_names]
for col in num_col_names:
if num_test[col].skew()>0.80:
num_test[col]=np.log1p(num_test[col])
for col in cat_col_names:
col_dummies=pd.get_dummies(cat_test[col],prefix=col)
cat_test=pd.concat([cat_test,col_dummies],axis=1)
cat_test.drop(cat_col_names,axis=1,inplace=True)
final_test=pd.concat([num_test,cat_test],axis=1)
final_test['pr_mean_psh'] = final_test['PerformanceRating'].add(final_test['PercentSalaryHike'])
final_test['pr_mean_psh']=final_test['pr_mean_psh']/2
final_test.drop(labels=['PerformanceRating','PercentSalaryHike'],axis=1,inplace=True)
#final_test.drop(labels=['HourlyRate','MonthlyRate','DailyRate'],axis=1,inplace=True)
#final_test.drop(labels=['Gender_Male','Gender_Female'],axis=1,inplace=True)
#final_test.drop(labels=['Department_Human Resources','Department_Research & Development','Department_Sales',],axis=1,inplace=True)
#final_test.drop(labels=['WorkLifeBalance_1','WorkLifeBalance_2','WorkLifeBalance_3','WorkLifeBalance_4','RelationshipSatisfaction_1','RelationshipSatisfaction_2','RelationshipSatisfaction_3','RelationshipSatisfaction_4','JobSatisfaction_1','JobSatisfaction_2','JobSatisfaction_3','JobSatisfaction_4','EnvironmentSatisfaction_1','EnvironmentSatisfaction_2','EnvironmentSatisfaction_3','EnvironmentSatisfaction_4'],axis=1,inplace=True)
df2=final_test
for col in list(df2):
df2[col]=df2[col]/df2[col].max()
#list(df2)
df3=df1[list(df2)]
#if(list(df3)==list(df2)):
#print('y')
#print(list(df2))
X_train=np.asarray(df3)
Y_train=np.asarray(train['Attrition'])
X_test=np.asarray(df2)
test1['EmployeeNumber']=np.asarray(empnum).tolist()
lr=LogisticRegression(solver='liblinear').fit(X_train,Y_train)
yhat=lr.predict(X_test)
yhat.tolist()
test1['Attrition'] = yhat
Attrition={1:'Yes',0:'No'}
test1.Attrition=[Attrition[item] for item in test1.Attrition]
conf=[]
for i in (lr.predict_proba(X_test).tolist()):
i= max(i)
conf.append(i)
#print(len(conf))
for j in range(len(conf)):
conf[j]=conf[j]*100
conf[j] = round(conf[j], 2)
test1['Reliability Percentage'] = conf
#compute the parameters affecting each prediction: per-row weight = |coefficient| * feature value
l=np.abs(lr.coef_).tolist()
coefs = [item for sublist in l for item in sublist]
data=np.asarray(df2).tolist()
weights=[]
for row in data:
c=np.multiply(row,coefs).tolist()
weights.append(c)
cols=list(df2)
L=[]
for val in weights:
dic = dict(enumerate(val))
L.append(dic)
ColWeights=[]
for dic in L:
i=0
tempDic={}
for key,value in dic.items():
key=cols[i]
tempDic[key]=value
i=i+1
ColWeights.append(tempDic)
df_yes=test1[test1.Attrition =='Yes']
df_no=test1[test1.Attrition =='No']
for index, row in df_yes.iterrows():
if(row['Attrition']=='Yes'):
yes_changable_cols=['YearsWithCurrManager',
'MonthlyIncome',
'YearsInCurrentRole',
'DistanceFromHome',
'YearsSinceLastPromotion',
'JobLevel_1',
'JobLevel_2',
'JobLevel_3',
'JobLevel_4',
'BusinessTravel_Non-Travel',
'BusinessTravel_Travel_Frequently',
'BusinessTravel_Travel_Rarely',
'OverTime_Yes']
Col_Weights_Yes=[]
for dic in ColWeights:
a={}
for k,v in dic.items():
if k in yes_changable_cols :
a[k]=v
Col_Weights_Yes.append(a)
AscendingCols=[]
for dic in Col_Weights_Yes:
AscendingCols.append((sorted(dic, key=dic.get)))
AllParams=[]
for h in AscendingCols:
params=[ h[12], h[11], h[10], h[9], h[8] ]
AllParams.append(params)
frame=pd.DataFrame(AllParams)
frame.columns =['YesParam_1','YesParam_2','YesParam_3','YesParam_4','YesParam_5']
df_yes=pd.concat([df_yes, frame], axis=1)
df_yes = df_yes[np.isfinite(df_yes['Age'])]
#df_yes=df_yes[df_yes.Age != float('nan')]
#disp=df_yes[df_yes.Attrition=='Yes']
disp=df_yes[['EmployeeNumber','Reliability Percentage','YesParam_1','YesParam_2']]
disp.drop(labels=[],axis=1,inplace=True)
#print(disp.shape)
for index, row in df_no.iterrows():
if(row['Attrition']=='No'):
aff_params_no=['YearsWithCurrManager',
'YearsInCurrentRole',
'MonthlyIncome',
'YearsAtCompany',
'TotalWorkingYears']
#MAIN PARAMS FOR NO
Col_Weights_No=[]
for dic in ColWeights:
b={}
for k,v in dic.items():
if k in aff_params_no :
b[k]=v
Col_Weights_No.append(b)
AscendingCols1=[]
for dic in Col_Weights_No:
AscendingCols1.append((sorted(dic, key=dic.get)))
AllParams1=[]
for h in AscendingCols1:
params1=[ h[4], h[3], h[2], h[1], h[0] ]
AllParams1.append(params1)
frame1=pd.DataFrame(AllParams1)
frame1.columns =['NoParam_1','NoParam_2','NoParam_3','NoParam_4','NoParam_5']
df_no=pd.concat([df_no, frame1], axis=1)
df_no = df_no[np.isfinite(df_no['Age'])]
#df_no=df_no[df_no.Age !=float('nan')]
#disp=test1[test1.Attrition=='Yes']
#disp=disp[['EmployeeNumber','Reliability Percentage','AffectingParam_1','AffectingParam_2']]
#disp.drop(labels=[],axis=1,inplace=True)
#print(disp.shape)
#for index, row in test1.iterrows():
#if(row['Attrition']=='Yes'):
#test1['NoParam_1']=' '
#test1['NoParam_2']=' '
#test1['NoParam_3']=' '
#test1['NoParam_4']=' '
#test1['NoParam_5']=' '
#elif(row['Attrition']=='No'):
#test1['YesParam_1']=' '
#test1['YesParam_2']=' '
#test1['YesParam_3']=' '
#test1['YesParam_4']=' '
#test1['YesParam_5']=' '
writer = pd.ExcelWriter('Result.xlsx', engine='xlsxwriter')
#store your dataframes in a dict, where the key is the sheet name you want
frames = {'Yes_Predictions': df_yes, 'No_predictions': df_no}
#now loop through and put each on a specific sheet
for sheet, frame in frames.items(): # use .items() for Python 3.x
frame.to_excel(writer, sheet_name = sheet)
#critical last step
writer.save()
#test1.to_excel('result.xlsx')
return render_template("upload_printed.html",tables=[disp.to_html(classes='data')], titles=disp.columns.values[-1:])
#Routing to page when Attribute Entry is selected
@app.route('/attribute_entry')
def attribute_entry():
return render_template('attribute_entry.html')
#Obtaining values from attribute entry and processing them
@app.route('/yes', methods=['GET', 'POST'])
def yes():
#Obtaining the values from HTML form
age=int(request.form['age'])
dfh=int(request.form['dfh'])
ncw=int(request.form['ncw'])
twy=int(request.form['twy'])
ylp=int(request.form['ylp'])
yac=int(request.form['yac'])
ycr=int(request.form['ycr'])
ycm=int(request.form['ycm'])
tly=int(request.form['tly'])
shp=int(request.form['shp'])
mi=int(request.form['mi'])
ji=request.form['ji']
jl=request.form['jl']
ot=request.form['ot']
bt=request.form['bt']
jr=request.form['jr']
el=request.form['el']
ms=request.form['ms']
ef=request.form['ef']
sol=request.form['sol']
pr=int(request.form['pr'])
#print(age,'\n',dfh,'\n',ncw,'\n',twy,'\n',ylp,'\n',yac,'\n',ycr,'\n',ycm,'\n',tly,'\n',
#shp,'\n',mi,'\n',ji,'\n',jl,'\n',ot,'\n',bt,'\n',jr,'\n',el,'\n',ms,'\n',ef,'\n',sol,'\n',pr)
#Initializing the one hot encoded columns to 0
ms_S=0
ms_M=0
ms_D=0
ef_HR=0
ef_TD=0
ef_LS=0
ef_Ma=0
ef_Me=0
ef_O=0
jr_HCR=0
jr_HR=0
jr_LT=0
jr_M=0
jr_MD=0
jr_RD=0
jr_RS=0
jr_SE=0
jr_SR=0
bt_NT=0
bt_TF=0
bt_TR=0
ji_1=0
ji_2=0
ji_3=0
ji_4=0
ot_N=0
ot_Y=0
sol_0=0
sol_1=0
sol_2=0
sol_3=0
jl_1=0
jl_2=0
jl_3=0
jl_4=0
jl_5=0
el_1=0
el_2=0
el_3=0
el_4=0
el_5=0
#Setting the value obtained from form to 1
if(ms=="1"):
ms_S=1
elif(ms=="2"):
ms_M=1
else:
ms_D=1
if(ef=="1"):
ef_HR=1
elif(ef=="2"):
ef_TD=1
elif(ef=="3"):
ef_LS=1
elif(ef=="4"):
ef_Ma=1
elif(ef=="5"):
ef_Me=1
else:
ef_O=1
if(jr=="1"):
jr_HCR=1
elif(jr=="2"):
jr_HR=1
elif(jr=="3"):
jr_LT=1
elif(jr=="4"):
jr_M=1
elif(jr=="5"):
jr_MD=1
elif(jr=="6"):
jr_RD=1
elif(jr=="7"):
jr_RS=1
elif(jr=="8"):
jr_SE=1
else:
jr_SR=1
if(bt=="0"):
bt_NT=1
elif(bt=="1"):
bt_TR=1
else:
bt_TF=1
if(ji=="1"):
ji_1=1
elif(ji=="2"):
ji_2=1
elif(ji=="3"):
ji_3=1
else:
ji_4=1
if(ot=="1"):
ot_Y=1
else:
ot_N=1
if(sol=="0"):
sol_0=1
elif(sol=="1"):
sol_1=1
elif(sol=="2"):
sol_2=1
else:
sol_3=1
if(jl=="1"):
jl_1=1
elif(jl=="2"):
jl_2=1
elif(jl=="3"):
jl_3=1
elif(jl=="4"):
jl_4=1
else:
jl_5=1
if(el=="1"):
el_1=1
elif(el=="2"):
el_2=1
elif(el=="3"):
el_3=1
elif(el=="4"):
el_4=1
else:
el_5=1
#Train the model on the stored training data
train = pd.read_excel('trainfile.xlsx')  # api: pandas.read_excel
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 17:40:53 2021
@author: ali_d
"""
#Pandas
import pandas as pd
import numpy as np
#data
numbers = [20,30,40,50]
print("----")
leters = ["a","b","c","d",40]
pandas_pd = pd.Series(numbers)
pandas_pd1 = pd.Series(leters)
print(pandas_pd)
print(type(pandas_pd))
print(pandas_pd1)
scalers = 5
print(pd.Series(scalers))
print("---")
pandas_series1 = pd.Series(numbers,["a","b","c","d"])
print(pandas_series1)
print("---")
dict = {"a":15,"b":25,"c":35,"d":45}
pandas_series2 = pd.Series(dict)
print(pandas_series2)
print("---")
a = np.random.randint(10,100,5)
pandas_series3 = pd.Series(a)
print(pandas_series3)
print("---")
pandas_series3 = pd.Series(a,["a","b","c","d","e"])
print(pandas_series3)
print("---")
pandas_series4 = pd.Series([20,30,40,50],["a","b","c","d"])
print(pandas_series4[0])
print(pandas_series4["a"])
print(pandas_series4[:2])
print(pandas_series4[-2])
print(pandas_series4["a"])
print(pandas_series4["d"])
print(pandas_series4["a"])
#
print(pandas_series4.ndim) # reports that the series is 1-dimensional
print(pandas_series4.dtype)#type
print(pandas_series4.shape)
print(pandas_series4.sum())
print(pandas_series4.max())#max
print(pandas_series4.min())#min
print(pandas_series4+pandas_series4)
print(pandas_series4+1000)
print("---")
print(pandas_series4>35)
print("---")
result = pandas_series4 % 2 ==0
print(result)
print("---")
print(pandas_series4[pandas_series4 %2 ==0])
print(pandas_series4[pandas_series4 %2 ==1])
print("---")
opel2018 = pd.Series([20,30,40,10],["astra","corsa","mokka","insignia"])
opel2019 = pd.Series([20,80,40,20,None],["astra","corsa","mokka","insignia","Grandland"])
total = opel2018+opel2019
print(total)
#%% Pandas dataFrame
import pandas as pd
s1 = pd.Series([3,2,0,1])
s2 = pd.Series([0,3,7,2])
data = dict(apples=s1,oranges = s2)
print(data)
print("---")
df=pd.DataFrame(data)
print(df)
print("-------")
# "
# df1= pd.DateFrame()
# print(df1)
# "
df1 = pd.DataFrame([1,2,3,4,5])
print(df1)
print("---")
df2 = pd.DataFrame([["Ahmet",50],["Ali",60],["Yağmur",70],["Çınar",80]],columns = ["Name","Grade"],index=[1,2,3,4])
print(df2)
#columns = the column labels
print("---")
dict1 = {"Name":["Ahmet","Ali","Yağmur","Çınar"],
"Grade":[50,60,70,80]
}
#Grade = class grade
pd4= pd.DataFrame(dict1)
print(pd4)
liste = [["Ahmet",50],["Ali",60],["Yağmur",70],["Çınar",80]]
##
dict1 = {"Name":["Ahmet","Ali","Yağmur","Çınar"],
"Grade":[50,60,70,80]}
a1 = (pd.DataFrame(dict1,index=["212","232","236","456"]))
dict_list=[
{"Name":"Ahmet","Grade":50},
{"Name":"Alis","Grade":60},
{"Name":"Uğurcan","Grade":70},
{"Name":"Hasan","Grade":80},
{"Name":"Miray","Grade":90}
]
a2 = pd.DataFrame(dict_list)
#%% Working with DataFrames in Pandas
import pandas as pd
import numpy as np
a=np.random.randn(3,3)
df = pd.DataFrame(a,index=["A","B","C"],columns=["Column1","Column2","Column3"])
print(df)
result = df
print("---")
print(result["Column1"])
print("---")
print(type(result["Column1"]))
print("---")
result = df[["Column1","Column2"]]
print(result)
print("---")
result1 = df.loc["A"]
print(result1)
print("---")
result2 = type(df.loc["A"])
print(result2)
print("---")
result3 = df.loc[:]
print(result3)
print("---")
result4 = df.loc[:,["Column1","Column2"]]
print(result4)
print("---")
result5 = df.loc[:,["Column1","Column3"]]
print(result5)
print("---")
result6 = df.loc["A":"B","Column2"]
print(result6)
print("---")
a=df.iloc[1]
print(a)
print("---1")
b =df.iloc[2]
print(b)
print("---2")
c=df.iloc[0]
print(c)
print("---3")
#%% Filtering
data = np.random.randint(10,100,75).reshape(15,5)
dfx = pd.DataFrame(data,columns=["Columns1","Columns2","Columns3","Columns4","Columns5"])
print(dfx)
print("---")
df = dfx.columns
print(df)
print("---")
df = dfx.head()
print(df)
print("---")
df =dfx.head(10)
print(df)
print("---")
df =dfx.tail()
print(df)
print("---")
df = dfx.tail(10)
print(df)
print("---")
df =dfx["Columns1"].head()
print(df)
print("---")
df=dfx.Columns1.head()
print(df)
print("---")
df = dfx[["Columns1","Columns2"]].head()
print(df)
print("----")
df = dfx[["Columns1","Columns2"]].tail()
print(df)
print("---")
#df = dfx[5:15] takes the rows between 5 and 15
df = dfx[5:15][["Columns1","Columns2"]].head()
print(df)
print("---")
df = dfx[5:15][["Columns1","Columns2"]].tail()
print(df)
print("---"*10)
df = dfx > 50
print(df)
print("----")
df = dfx[dfx > 50]
print(df)
print("---")
df = dfx[dfx % 2 == 0]
print(df)
print("---")
df = dfx[df["Columns1"] > 50]
print(df)
print("---")
df = dfx[df["Columns1"] > 50][["Columns1","Columns2"]]
print(df)
print("---")
#df = dfx.query("Columns1 >= 10 & Columns1 % 2 == 1")
#df = dfx.query("Columns1 >= 10 & Columns1 % 2 == 1")[["Columns1","Columns2"]]
#(df)
#Query = filter rows with a boolean expression
#%% DataFrame GroupBy
import pandas as pd
import numpy as np
peronel = {"Çalışan":["<NAME>","<NAME>","<NAME>","<NAME>","<NAME>","<NAME>","<NAME>"],
"Departman":["İnsan kaynakları","Bilgi İşlem","Muhasebe","İnsan Kaynakları","Bilgi İşlem","Muhasebe","Bilgi İşlem"],
"Yaş":[30,25,45,50,23,34,42],
"Semt":["KadıKöy","Tuzla","Maltepe","Tuzla","Maltepe","Tuzla","KadıKöy"],
"Maaş":[5000,3000,4000,3500,2750,6500,4500]}
df = pd.DataFrame(peronel)
print(df)
print("---")
result = df["Maaş"].sum()
print(result)
print("---")
result1 = df.groupby("Departman")
print(result1)
print("---")
result2 = df.groupby("Departman").groups
print(result2)
print("---")
result3 = df.groupby(["Departman","Semt"]).groups
print(result3)
print()
print("---")
semtler = df.groupby("Semt")
for name,group in semtler:
print(name)
print(group)
print()
print("---")
print()
for name,group in df.groupby("Departman"):
print(name)
print(group)
print()
print("--------------")
print()
xv = df.groupby("Semt").get_group("KadıKöy")
print(xv)
print("--------------")
xv1 = df.groupby("Departman").get_group("Muhasebe")
print(xv1)
print("---------------")
xv2 = df.groupby("Departman").sum()
print(xv2)
print("---------------")
xv3 = df.groupby("Departman").mean()
print(xv3)
print("--------------")
xv4 = df.groupby("Departman")["Maaş"].mean()
print(xv4)
print("--------------")
xv5 = df.groupby("Semt")["Çalışan"].count()
print(xv5)
print("-------------")
xv6 = df.groupby("Departman")["Yaş"].max()
print(xv6)
print("-------------")
xv7 = df.groupby("Departman")["Maaş"].max()["Muhasebe"]
print(xv7)
print("-------------")
xv8 = df.groupby("Departman").agg([np.sum,np.mean,np.max,np.min]).loc["Muhasebe"]
print(xv8)
#%% Missing and Corrupt Data Analysis with Pandas
import pandas as pd
import numpy as np
data = np.random.randint(20,200,15).reshape(5,3)
print(data)
df = pd.DataFrame(data,index = ["a","c","e","f","h"], columns = ["Column1","Column2","Column3"])
print(df)
print("---")
df = df.reindex(["a","b","c","d","e","f","g","h"])
print(df)
print("---")
newColumn =[np.nan,30,np.nan,51,np.nan,30,np.nan,10]
df["Column4"]=newColumn
result =df
result=df.drop("Column1",axis =1)
print("---")
result=df.drop(["Column1","Column2"],axis =1)
print("---")
result = df.drop("a",axis=0)
print("---")
result = df.drop(["a","b","c"],axis=0)
print("---")
result = df.isnull()
print(result)
print("---")
result = df.notnull()
print(result)
print("---")
result = df.isnull().sum()
print(result)
print("---")
result = df["Column1"].isnull().sum()
print(result)
print()
result =df["Column2"].isnull().sum()
print(result)
print("---")
result = df[df["Column1"].isnull()]
print(result)
print("---")
result = df[df["Column1"].isnull()]["Column1"]
print(result)
print("---")
result = df[df["Column1"].notnull()]["Column1"]
print(result)
print("---")
print()
result = df.dropna()
print(result)
print("---")
print(df)
print("---")
result = df.dropna(axis = 1)
print(result)
print("---")
result = df.dropna(how="any")
print(result)
print("---")
result = df.dropna(how="all")
print(result)
print("---")
result = df.dropna(subset=["Column1","Column2"],how="all")
print(result)
print("----")
result = df.dropna(subset=["Column1","Column2"],how="all")
print(result)
print("---")
result = df.dropna(thresh=2)
print(result)
print("---")
result = df.dropna(thresh=4)
print(result)
print("----")
result = df.fillna(value = "no input")
print(result)
print("---")
result = df.fillna(value = 1)
print(result)
print("---")
result = df.sum().sum()
print(result)
print("---")
result = df.size
print(result)
print("---")
result = df.isnull().sum()
print(result)
print("---")
result = df.isnull().sum().sum()
print(result)
print("----")
##############
def ortalama(df):
toplam = df.sum().sum()
adet = df.size - df.isnull().sum().sum()
return toplam / adet
result = df.fillna(value = ortalama(df))
print(result)
##############
#%% String Functions with Pandas
import pandas as pd
customers = {
"CostomerId":[1,2,3,4],
"firstName":["Ahmet","Ali","Hasan","Can"],
"lastName":["Yılmaz","Korkmaz","Çelik","Toprak"],
}
orders = {
"OrderId":[10,11,12,13],
"CustomerId":[1,2,5,7],
"OrderDate":["2010-07-04","2010-08-04","2010-07-07","2012-07-04"],
}
df_customers = pd.DataFrame(customers,columns=["CostomerId","firstName","lastName"])
df_orders = pd.DataFrame(orders,columns=["OrderId","CustomerId","OrderDate"])
result = pd.merge(df_customers, df_orders, left_on="CostomerId", right_on="CustomerId", how="inner")
#Merge = combine two DataFrames on their key columns
#%%
customersA = {
"CostomerId":[1,2,3,4],
"firstName":["Ahmet","Ali","Hasan","Can"],
"lastName":["Yılmaz","Korkmaz","Çelik","Toprak"]
}
ordersB = {
"OrderId":[10,11,12,13],
"FirstName":["Yağmur","Çınar","Cengiz","Can"],
"LastName":["Bilge","Turan","Yılmaz","Turan"]
}
df_customersA = pd.DataFrame(customersA,columns=["CostomerId","firstName","lastName"])
df_ordersB = pd.DataFrame(ordersB,columns=["OrderId","FirstName","LastName"])
result = pd.concat([df_customersA,df_ordersB])  # api: pandas.concat
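# A brief illustrative aside (not from the original tutorial): pd.concat stacks
# the two frames row-wise by default, so columns present in only one frame are
# filled with NaN; passing axis=1 would instead align them side by side:
# pd.concat([df_customersA, df_ordersB], axis=1)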
import pandas as pd
# setting display options for df
pd.set_option('display.max_rows', 500)  # api: pandas.set_option
from datetime import datetime
import pandas as pd
from iexfinance.base import _IEXBase
class APIReader(_IEXBase):
@property
def url(self):
return "status"
def fetch(self):
return super(APIReader, self).fetch()
def _convert_output(self, out):
converted_date = datetime.fromtimestamp(out["time"] / 1000).strftime("%c")
return pd.DataFrame(out, index=[converted_date])  # api: pandas.DataFrame
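# A standalone sketch of what _convert_output returns; the payload below is a
# hypothetical example of the IEX status response, not real data.
sample = {"status": "up", "version": "1.32", "time": 1565287845000}
stamp = datetime.fromtimestamp(sample["time"] / 1000).strftime("%c")
frame = pd.DataFrame(sample, index=[stamp])  # one row, indexed by request time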
import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.compat import lrange
from pandas.util.testing import assert_frame_equal, assert_series_equal
from .common import MixIn
class TestNth(MixIn):
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)  # api: pandas.util.testing.assert_frame_equal
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
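        # get_indexer(..., method='pad') requires the index to be monotonic;
        # a reversed, strided range must raise ValueError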
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
    def test_to_datetime_unit(self):
        epoch = 1370745748
        s = Series([epoch + t for t in range(20)])
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)]).astype(float)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)] + [iNaT])
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
        s = concat([Series([epoch + t for t in range(20)]).astype(float),
                    Series([np.nan])], ignore_index=True)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
        td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3])
        expected = pd.to_datetime(td, format='%b %y')
        result = td.apply(pd.to_datetime, format='%b %y')
        assert_series_equal(result, expected)
        td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3])
        self.assertRaises(ValueError,
                          lambda: pd.to_datetime(td, format='%b %y'))
        self.assertRaises(ValueError,
                          lambda: td.apply(pd.to_datetime, format='%b %y'))
        expected = pd.to_datetime(td, format='%b %y', coerce=True)
        result = td.apply(lambda x: pd.to_datetime(x, format='%b %y',
                                                   coerce=True))
        assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
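        # datetime field accessors should yield -1 for NaT entries in the index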
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
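        # mixed int/str input cannot be parsed and is returned as an object
        # array unless errors='raise' is passed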
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
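        # the KeyError for a missing date should mention the requested date
        # in its message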
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
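        # 00:00 through 00:18 at 5min steps yields exactly four timestamps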
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
        rng_ns = pd.DatetimeIndex(np.array([1380585623454345752,
                                            1380585612343234312])
                                  .astype("datetime64[ns]"))
        rng_ns_normalized = rng_ns.normalize()
        expected = pd.DatetimeIndex(np.array([1380585600000000000,
                                              1380585600000000000])
                                    .astype("datetime64[ns]"))
        self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
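        # helper shared by the to_period millisecond/microsecond tests below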
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
        fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
                  'is_month_start', 'is_month_end', 'is_quarter_start',
                  'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
        result = np.array([Timestamp(datetime(*args)).week
                           for args in [(2000, 1, 1), (2000, 1, 2),
                                        (2005, 1, 1), (2005, 1, 2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
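        # Timestamp (ns precision) only spans roughly 1677-09-21 to
        # 2262-04-11; dates outside those bounds must raise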
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
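        # by default the DatetimeIndex is a view on the int64 data, so
        # mutating the array is visible; copy=True must prevent that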
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertTrue(result['index'].dtype == 'M8[ns]')
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
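        # comparisons against datetimes outside the Timestamp range should
        # not raise and should order correctly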
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
        s = Series(range(100),
                   index=date_range('20130101', freq='s', periods=100),
                   dtype='float')
        s[10:30] = np.nan
        expected = Series([34.5, 79.5],
                          index=[Period('2013-01-01 00:00', 'T'),
                                 Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
        start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
                         tzinfo=pytz.utc)
        # 1 day later
        end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
                       tzinfo=pytz.utc)
        index = pd.date_range(start, end, freq='H')
        series = pd.Series(1, index=index)
        series = series.tz_convert(local_timezone)
        result = series.resample('D', kind='period')
        # Create the expected series
        # Index is moved back a day with the timezone conversion from UTC
        # to Pacific
        expected_index = pd.period_range(start=start, end=end, freq='D') - 1
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
        # GH 2658
        import datetime
        start = datetime.datetime.now()
        idx = DatetimeIndex(start=start, freq="1d", periods=10)
        df = DataFrame(lrange(10), index=idx)
        df["2013-01-14 23:44:34.437768-05:00":]  # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
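        # the nanosecond component must survive construction from an
        # epoch-nanosecond integer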
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
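        # joining a datetime column index with an integer row index should
        # produce an object-dtype Index rather than coercing to datetimes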
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
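        # a ~24.414 kHz sample clock gives 40960ns spacing; the frequency
        # should be inferred and must round-trip through the constructor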
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
        # Ensure is_start/end accessors throw ValueError for CustomBusinessDay
        # (CBD requires numpy >= 1.7)
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code( | offsets.Hour() | pandas.tseries.offsets.Hour |
#!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
from nlppln.utils import create_dirs, get_files
@click.command()
@click.argument('in_dir', type=click.Path(exists=True))
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
@click.option('--name', '-n', default='ner_stats.csv')
def nerstats(in_dir, out_dir, name):
create_dirs(out_dir)
frames = []
in_files = get_files(in_dir)
for fi in in_files:
with codecs.open(fi, encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [os.path.basename(fi)
for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = | pd.concat(frames, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import random
from sklearn.model_selection import ShuffleSplit
from datetime import datetime
from sklearn.preprocessing import FunctionTransformer
import scipy.io as sio
datasets = ['bugzilla', 'columba', 'jdt', 'mozilla', 'platform', 'postgres']
key = ['ns', 'nm', 'nf', 'entropy', 'la', 'ld', 'lt', 'fix', 'ndev', 'pd',\
'npt', 'exp', 'rexp', 'sexp', 'bug']
class preprocessing():
def __init__(self, dataset=None, key=None, rate=0.1, label='bug'):
"""
        :param dataset: the name of the file
        :param key: the names of the change measures (columns)
        :param label: the name of the label column, default 'bug'
"""
self.rate = rate
self.dataset = dataset
self.key = key
self.label_name = label
self.data = None
self.data_array = None
def _data_init(self):
self.data = pd.read_csv('jit_datasets/'+self.dataset+'.csv', index_col='commitdate')
self.data.index = | pd.to_datetime(self.data.index, format='%Y/%m/%d %H:%M') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 3 13:46:06 2021
@author: Sebastian
"""
import sys
sys.path.append('..\\src')
import unittest
import common.globalcontainer as glob
from dataobjects.stock import Stock
import engines.scaffold
import engines.analysis
import pandas as pd
import datetime
import logging
import random
class TestGC(unittest.TestCase):
gc=None
def step001(self):
"""
Test general GlobalContainer-Functions
"""
self.gc.resetMySQLDatabases()
self.gc.resetInfluxDatabases()
self.assertEqual(self.gc.jobName, "TestRun")
res = self.gc.ses.query(Stock).all()
self.assertIsNotNone(self.gc.influxClient)
df = self.gc.influxClient.query("select * from StockValues")
self.assertEqual(len(df),0)
def step002(self):
"""Create Test Tata"""
engines.grabstocks.createTestStocks(self.gc)
def step003(self):
"""Create Test XIRR of Cashflows"""
# date
# isin
# numstocks
r = engines.resolver.Resolver(self.gc)
idx = [datetime.date(2010, 1, 1), datetime.date(2017, 1, 1)]
d11 = {'isin': ['TEST0011', 'TEST0011'], 'numstocks': [10, -10]}
df_trans_11 = pd.DataFrame(d11, index=idx)
z = r.xirrCashflow(self.gc, df_trans_11)
self.assertEqual(0.05, round(z,3), "xirr of TEST0011 not correct")
d15 = {'isin': ['TEST0015', 'TEST0015'], 'numstocks': [10, -10]}
df_trans_15 = pd.DataFrame(d15, index=idx)
z = r.xirrCashflow(self.gc, df_trans_15)
self.assertEqual(-0.1, round(z,3), "xirr of TEST0015 not correct")
idx = [datetime.date(2010, 1, 1), datetime.date(2010, 1, 1), datetime.date(2017, 1, 1), datetime.date(2017, 1, 1)]
dmix = {'isin': ['TEST0015', 'TEST0011', 'TEST0015', 'TEST0011'], 'numstocks': [10, 10, -10, -10]}
df_trans_mix = pd.DataFrame(dmix, index=idx)
z = r.xirrCashflow(self.gc, df_trans_mix)
self.assertEqual(0.032, round(z,3), "xirr of Mix not correct")
def step004(self):
"""Monte-Carlo of MSCI"""
logger = logging.getLogger(__name__)
r = engines.resolver.Resolver(self.gc)
isin = "DAX"
s = self.gc.ses.query(Stock).filter(Stock.ISIN == isin)[0]
engines.grabstocks.grabStock(self.gc, s)
engines.scaffold.addStock(self.gc, isin)
start_date = engines.grabstocks.getFirstDate(self.gc, isin)
end_date = engines.grabstocks.getLastDate(self.gc, isin)
logger.info(f"Range for {isin}: {start_date} to {end_date}")
l = []
for i in range(2000):
logger.info(f"Loop {i}")
tf = random.randint(5 * 365, 10 * 365) # interval
#end_date_rng = end_date - datetime.timedelta(days=tf) # latest end date
off = random.randint(0, ((end_date-start_date).days - tf))
random_start_date = start_date + datetime.timedelta(days = off)
random_end_date = random_start_date + datetime.timedelta(days=tf)
idx = [random_start_date, random_end_date]
d = {'isin': [isin, isin], 'numstocks': [10, -10]}
df_trans = | pd.DataFrame(d, index=idx) | pandas.DataFrame |
import pandas as pd
import numpy as np
#from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import mutual_info_classif,chi2
from sklearn.feature_selection import SelectKBest, SelectPercentile
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.metrics import roc_auc_score, mean_squared_error
# 2018.11.17 Created by Eamon.Zhang
def constant_feature_detect(data,threshold=0.98):
""" detect features that show the same value for the
majority/all of the observations (constant/quasi-constant features)
Parameters
----------
data : pd.Dataframe
threshold : threshold to identify the variable as constant
Returns
-------
list of variables names
"""
data_copy = data.copy(deep=True)
quasi_constant_feature = []
for feature in data_copy.columns:
        predominant = (data_copy[feature].value_counts() / float(
            len(data_copy))).sort_values(ascending=False).values[0]
if predominant >= threshold:
quasi_constant_feature.append(feature)
print(len(quasi_constant_feature),' variables are found to be almost constant')
return quasi_constant_feature
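# Illustrative usage sketch (not part of the original module): the toy frame and
# the helper name below are hypothetical and only show the expected call pattern.
def _demo_constant_feature_detect():
    toy = pd.DataFrame({'flag': [1] * 99 + [0],        # 99% one value -> flagged
                        'value': np.arange(100)})      # varies -> kept
    return constant_feature_detect(toy, threshold=0.98)  # expected: ['flag']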
def corr_feature_detect(data,threshold=0.8):
""" detect highly-correlated features of a Dataframe
Parameters
----------
data : pd.Dataframe
threshold : threshold to identify the variable correlated
Returns
-------
pairs of correlated variables
"""
corrmat = data.corr()
corrmat = corrmat.abs().unstack() # absolute value of corr coef
corrmat = corrmat.sort_values(ascending=False)
corrmat = corrmat[corrmat >= threshold]
    corrmat = corrmat[corrmat < 1] # remove the diagonal
corrmat = pd.DataFrame(corrmat).reset_index()
corrmat.columns = ['feature1', 'feature2', 'corr']
grouped_feature_ls = []
correlated_groups = []
for feature in corrmat.feature1.unique():
if feature not in grouped_feature_ls:
# find all features correlated to a single feature
correlated_block = corrmat[corrmat.feature1 == feature]
grouped_feature_ls = grouped_feature_ls + list(
correlated_block.feature2.unique()) + [feature]
# append the block of features to the list
correlated_groups.append(correlated_block)
return correlated_groups
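# Illustrative usage sketch (hypothetical toy data): 'x_noisy' is strongly but not
# perfectly correlated with 'x', so the pair survives the `corrmat < 1` filter above.
def _demo_corr_feature_detect():
    rng = np.random.RandomState(0)
    toy = pd.DataFrame({'x': np.arange(100.0)})
    toy['x_noisy'] = toy['x'] + rng.randn(100)      # correlation close to, but below, 1
    toy['noise'] = rng.randn(100)                   # roughly uncorrelated
    return corr_feature_detect(toy, threshold=0.8)  # one group: x <-> x_noisy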
def mutual_info(X,y,select_k=10):
# mi = mutual_info_classif(X,y)
# mi = pd.Series(mi)
# mi.index = X.columns
# mi.sort_values(ascending=False)
if select_k >= 1:
sel_ = SelectKBest(mutual_info_classif, k=select_k).fit(X,y)
col = X.columns[sel_.get_support()]
elif 0 < select_k < 1:
sel_ = SelectPercentile(mutual_info_classif, percentile=select_k*100).fit(X,y)
col = X.columns[sel_.get_support()]
else:
raise ValueError("select_k must be a positive number")
return col
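# Illustrative usage sketch (hypothetical X/y, not part of the original module):
# select_k >= 1 keeps that many columns, while 0 < select_k < 1 is treated as a
# fraction of columns via SelectPercentile, as implemented above.
def _demo_mutual_info():
    X = pd.DataFrame({'informative': [0, 0, 1, 1] * 25,
                      'random': np.random.randn(100)})
    y = pd.Series([0, 0, 1, 1] * 25)
    return mutual_info(X, y, select_k=1)  # expected to keep 'informative'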
# 2018.11.27 edit Chi-square test
def chi_square_test(X,y,select_k=10):
"""
Compute chi-squared stats between each non-negative feature and class.
This score should be used to evaluate categorical variables in a classification task
"""
if select_k >= 1:
sel_ = SelectKBest(chi2, k=select_k).fit(X,y)
col = X.columns[sel_.get_support()]
elif 0 < select_k < 1:
sel_ = SelectPercentile(chi2, percentile=select_k*100).fit(X,y)
col = X.columns[sel_.get_support()]
else:
raise ValueError("select_k must be a positive number")
return col
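# Illustrative usage sketch: chi2 requires non-negative features (e.g. counts or
# one-hot flags), so this hypothetical X only contains 0/1 indicator columns.
def _demo_chi_square_test():
    rng = np.random.RandomState(0)
    X = pd.DataFrame({'is_red': [1, 0] * 50,               # tracks the target
                      'is_tall': rng.randint(0, 2, 100)})  # unrelated indicator
    y = pd.Series([1, 0] * 50)
    return chi_square_test(X, y, select_k=1)  # expected to keep 'is_red'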
def univariate_roc_auc(X_train,y_train,X_test,y_test,threshold):
"""
First, it builds one decision tree per feature, to predict the target
Second, it makes predictions using the decision tree and the mentioned feature
Third, it ranks the features according to the machine learning metric (roc-auc or mse)
It selects the highest ranked features
"""
roc_values = []
for feature in X_train.columns:
clf = DecisionTreeClassifier()
clf.fit(X_train[feature].to_frame(), y_train)
y_scored = clf.predict_proba(X_test[feature].to_frame())
roc_values.append(roc_auc_score(y_test, y_scored[:, 1]))
roc_values = pd.Series(roc_values)
roc_values.index = X_train.columns
print(roc_values.sort_values(ascending=False))
    print(len(roc_values[roc_values > threshold]),'out of the %s features are kept'% len(X_train.columns))
keep_col = roc_values[roc_values > threshold]
return keep_col
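# Illustrative usage sketch of the per-feature tree ranking described above
# (hypothetical data and split; threshold 0.5 keeps features whose single-feature
# tree beats random guessing on roc-auc in the hold-out set).
def _demo_univariate_roc_auc():
    rng = np.random.RandomState(0)
    y = pd.Series([0, 1] * 50)
    X = pd.DataFrame({'signal': y * 1.0,            # perfectly informative
                      'noise': rng.randn(100)})     # uninformative
    return univariate_roc_auc(X.iloc[:80], y.iloc[:80],
                              X.iloc[80:], y.iloc[80:], threshold=0.5)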
def univariate_mse(X_train,y_train,X_test,y_test,threshold):
"""
First, it builds one decision tree per feature, to predict the target
Second, it makes predictions using the decision tree and the mentioned feature
Third, it ranks the features according to the machine learning metric (roc-auc or mse)
It selects the highest ranked features
"""
mse_values = []
for feature in X_train.columns:
clf = DecisionTreeRegressor()
clf.fit(X_train[feature].to_frame(), y_train)
y_scored = clf.predict(X_test[feature].to_frame())
mse_values.append(mean_squared_error(y_test, y_scored))
mse_values = | pd.Series(mse_values) | pandas.Series |
"""
lib/vector.py
FIT3162 - Team 10 - Final Year Computer Science Project
Copyright <NAME>, <NAME>, <NAME> 2019
Script containing the class to process vector files to get environment data
"""
from osgeo import ogr, osr, gdal
from pathlib import Path
import pandas as pd
def conv_to_point(row):
"""
Method to create a point series from vic coordinates
:param row: dataset's row to be updated
:return: the point series
"""
geo = ogr.Geometry(ogr.wkbPoint)
geo.AddPoint(int(row["vic_x"]), int(row["vic_y"]))
geo.InShape = False
return geo
def ProcessPoints(dataframe, fname):
"""
Method to process vector dataset's points and update the dataframe
    :param dataframe: dataframe to be updated with the vector file's env data
:param fname: vector file containing the env data
:return: updated dataframe with vector file's env data
"""
shapefile = ogr.Open(fname)
layer = shapefile.GetLayer()
column = Path(fname).stem
points_series = dataframe.apply(conv_to_point, axis=1)
points = points_series.values
for feature in layer:
geo = feature.GetGeometryRef()
for point in points:
if point.Within(geo):
point.InShape = True
series = pd.Series(map(lambda x: x.InShape, points), index=points_series.index)
retdf = | pd.DataFrame() | pandas.DataFrame |
# Tests for evaluating each forecast and for comparisons between forecasts
import pandas as pd
import numpy as np
from numpy.random import rand
from numpy import ix_
from itertools import product
import chart_studio.plotly as py
import chart_studio
import plotly.graph_objs as go
import statsmodels.api as sm
chart_studio.tools.set_credentials_file(username='Emborg', api_key='<KEY>')
np.random.seed(1337)
# Predictions from each forecast
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
data.fillna(0, inplace=True)
data = data.set_index('date')
data = data.loc[~data.index.duplicated(keep='first')]
data = data.drop('2018-10-29')
# Forecasts
LSTM = pd.read_csv('Data/LSTM_Pred.csv', index_col=0)
LSTM = LSTM.loc[~LSTM.index.duplicated(keep='first')]
LSTM = LSTM.iloc[:-11, :]
LSTM = LSTM.drop('2018-10-29')
LSTM_NS = pd.read_csv('Data/LSTM_Pred_NoSent.csv', index_col=0)
LSTM_NS = LSTM_NS.loc[~LSTM_NS.index.duplicated(keep='first')]
LSTM_NS = LSTM_NS.iloc[:-11, :]
LSTM_NS = LSTM_NS.drop('2018-10-29')
ARIMA = pd.read_csv('Data/ARIMA_Pred.csv', index_col=0)
ARIMA = ARIMA.iloc[:-11, :]
ARIMA_NS = pd.read_csv('Data/ARIMA_Pred_NoSent.csv', index_col=0)
ARIMA_NS = ARIMA_NS.iloc[:-11, :]
XGB = pd.read_csv('Data/XGB_Pred.csv', index_col=0)
XGB = XGB.loc[~XGB.index.duplicated(keep='first')]
XGB = XGB.iloc[1:, :]
XGB = XGB.drop('2018-10-29')
XGB_NS = pd.read_csv('Data/XGB_Pred_nosenti.csv', index_col=0)
XGB_NS = XGB_NS.loc[~XGB_NS.index.duplicated(keep='first')]
XGB_NS = XGB_NS.iloc[1:, :]
XGB_NS = XGB_NS.drop('2018-10-29')
AR1 = pd.read_csv('Data/AR1.csv', index_col=0)
AR1 = AR1.iloc[:-11, :]
VAR = pd.read_csv('Data/VAR_pred.csv', index_col=0)
VAR = VAR.loc[~VAR.index.duplicated(keep='first')]
VAR = VAR[VAR.index.isin(LSTM.index)]['price']
VAR_NS = pd.read_csv('Data/VAR_pred_nosenti.csv', index_col=0)
VAR_NS = VAR_NS.loc[~VAR_NS.index.duplicated(keep='first')]
VAR_NS = VAR_NS[VAR_NS.index.isin(LSTM.index)]['price']
# Price for the forecasting period
price = data[data.index.isin(LSTM.index)]
price = price[['price']]
ARIMA.index = price.index
ARIMA_NS.index = price.index
XGB.index = price.index
XGB_NS.index = price.index
colors = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf' # blue-teal
]
# Combined Forecast DataFrame
fc = pd.DataFrame()
fc = price
fc = fc.merge(AR1[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(ARIMA[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(ARIMA_NS[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(VAR, how='left', left_index=True, right_index=True)
fc = fc.merge(VAR_NS, how='left', left_index=True, right_index=True)
fc = fc.merge(XGB, how='left', left_index=True, right_index=True)
fc = fc.merge(XGB_NS, how='left', left_index=True, right_index=True)
fc = fc.merge(LSTM[['LSTM']], how='left', left_index=True, right_index=True)
fc = fc.merge(LSTM_NS[['LSTM']], how='left', left_index=True, right_index=True)
# fc = fc.merge(XGB_NS, how='left', left_index=True, right_index=True)
fc.columns = ['Price', 'AR1', 'ARIMAX', 'ARIMAX_NS', 'VAR', 'VAR_NS', 'XGB', 'XGB_NS', 'LSTM', 'LSTM_NS']
# fc.to_csv(r'Data\All_Forecasts.csv')
fig = go.Figure()
n = 0
for key in fc.columns:
fig.add_trace(go.Scatter(x=fc.index,
y=fc[key],
mode='lines',
name=key,
line=dict(color=colors[n % len(colors)])))
n = n + 1
fig.update_layout(yaxis=dict(title='USD'),
xaxis=dict(title='date'))
py.plot(fig, filename='price_all_fc')
# Actual price
actual = fc[['Price']]
fc = fc.iloc[:, 1:]
# Error metrics
def RMSE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
RMSE = np.sqrt(np.mean(losses ** 2, axis=0))
return (RMSE)
def MAE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
MAE = np.mean(np.abs(losses), axis=0)
return (MAE)
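# Small worked example of the two error metrics (illustrative sketch, hypothetical
# numbers): losses of [1, -2] give RMSE = sqrt((1 + 4) / 2) ~ 1.58 and MAE = 1.5.
def _demo_error_metrics():
    toy_fc = pd.DataFrame({'model': [2.0, 4.0]})
    toy_actual = pd.DataFrame({'Price': [1.0, 6.0]})
    return RMSE(toy_fc, toy_actual), MAE(toy_fc, toy_actual)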
def residual_bar_plot(fc_1, fc_2, actuals, name1, name2):
df = pd.DataFrame(fc_1.values - actuals.values)
df[name2] = fc_2.values - actuals.values
df.columns = [name1,name2]
df.hist()
print(name1)
print(round(sm.tsa.stattools.adfuller(df[name1])[1],4))
print(round(sm.stats.stattools.jarque_bera(df[name1])[1],4))
print(name2)
print(round(sm.tsa.stattools.adfuller(df[name2])[1],4))
print(round(sm.stats.stattools.jarque_bera(df[name2])[1],4))
residual_bar_plot(fc[['ARIMAX']], fc[['ARIMAX_NS']], actual, 'ARIMA', 'ARIMA_NS')
residual_bar_plot(fc[['LSTM']], fc[['LSTM_NS']], actual, 'LSTM', 'LSTM_NS')
residual_bar_plot(fc[['VAR']], fc[['VAR_NS']], actual, 'VAR', 'VAR_NS')
residual_bar_plot(fc[['XGB']], fc[['XGB_NS']], actual, 'XGB', 'XGB_NS')
name1 = 'ARIMAX'
fc_1 = fc[['ARIMAX']]
# split_date = '2019-05-01'
# fc = fc.loc[fc.index >= split_date]
# actual = actual.loc[actual.index >= split_date]
rmse = RMSE(fc, actual)
mae = MAE(fc, actual)
print(pd.DataFrame(rmse).to_latex())
# <NAME> testing
dm_result = list()
done_models = list()
models_list = fc.columns
for model1 in models_list:
for model2 in models_list:
if model1 != model2:
dm_result.append(dm_test(fc[[model1]], fc[[model2]], actual))
dm_result = pd.DataFrame(dm_result)
# dm_result['t-stat'] = np.abs(dm_result['t-stat'])
dm_result = dm_result.loc[~np.abs(dm_result['t-stat']).duplicated(keep='first')]
dm_result['t-stat'] = round(dm_result['t-stat'],2)
dm_result['p-value'] = round(dm_result['p-value'],4)
print(dm_result.to_latex())
# <NAME>
cw1 = cw_test(ARIMA, ARIMA_NS, actual)
print(cw1)
cw2 = cw_test(LSTM[['LSTM']], LSTM_NS[['LSTM']], actual)
print(cw2)
cw3 = cw_test(XGB[['est']], XGB_NS[['est']], actual)
print(cw3)
cspe_plot(fc[['XGB_NS']], fc[['XGB']], actual)
# Model Confidence Set
# https://michael-gong.com/blogs/model-confidence-set/?fbclid=IwAR38oo302TSJ4BFqTpluh5aeivkyM6A1cc0tnZ_JUX08PNwRzQkIi4WPlps
# Wrap data and compute the Mean Absolute Error
MCS_data = pd.DataFrame(np.c_[fc.AR1, fc.ARIMAX, fc.ARIMAX_NS, fc.LSTM, fc.LSTM_NS, fc.VAR, fc.VAR_NS, fc.XGB, fc.XGB_NS, actual.Price],
columns=['AR1','ARIMAX', 'ARIMAX_NS', 'LSTM', 'LSTM_NS','VAR','VAR_NS','XGB','XGB_NS', 'Actual'])
losses = pd.DataFrame()
for model in MCS_data.columns: #['ARIMA', 'ARIMA_NS', 'LSTM', 'LSTM_NS']:
losses[model] = np.abs(MCS_data[model] - MCS_data['Actual'])
losses=losses.iloc[:,:-1]
mcs = ModelConfidenceSet(losses, 0.1, 3, 1000).run()
mcs.included
mcs.pvalues
# Forecast combinations
fc.columns[1:]
l1 = fc.columns[1:].values
l2 = ['ARIMAX', 'VAR', 'XGB','LSTM']
l3 = ['ARIMAX_NS', 'VAR_NS', 'XGB_NS','LSTM_NS']
comb_results = pd.DataFrame([[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]])
comb_results.index = ['All','S','NS']
comb_results.columns = ['Equal', 'MSE', 'Rank', 'Time(1)','Time(7)']
l_list = [l1,l2,l3]
i = 0
for l in l_list:
print(l)
pred = fc[l]
# Combinations
eq = fc_comb(actual=actual, fc=pred, weights="equal")
#bgw = fc_comb(actual=actual, fc=fc[fc.columns[1:]], weights="BGW")
mse = fc_comb(actual=actual, fc=pred, weights="MSE")
rank = fc_comb(actual=actual, fc=pred, weights="rank")
time = fc_comb(actual=actual, fc=pred, weights="time")
time7 = fc_comb(actual=actual, fc=pred, weights="time", window=7)
time14 = fc_comb(actual=actual, fc=pred, weights="time", window=14)
time30 = fc_comb(actual=actual, fc=pred, weights="time", window=30)
time60 = fc_comb(actual=actual, fc=pred, weights="time", window=60)
comb_results.iloc[i,0] = MAE(eq, actual)
comb_results.iloc[i,1] = MAE(mse, actual)
comb_results.iloc[i,2] = MAE(rank, actual)
comb_results.iloc[i,3] = MAE(time, actual)
comb_results.iloc[i,4] = MAE(time7, actual)
i = i + 1
print(round(comb_results,2).to_latex())
rank = pd.DataFrame(rank)
rank.columns = ['Rank']
eq = pd.DataFrame(eq)
eq.columns = ['Eq']
dm_test(rank[['Rank']], eq[['Eq']], actual)
# Functions
# <NAME> test function
def dm_test(fc, fc_nested, actual):
fc_name = fc.columns[0]
fc_nested_name = fc_nested.columns[0]
import statsmodels.formula.api as smf
from sklearn.metrics import mean_squared_error
fc = fc.values
fc_nested = fc_nested.values
    actual = actual.values
e_fc = actual - fc
e_nested = actual - fc_nested
f_dm = e_nested ** 2 - e_fc ** 2
f_dm = | pd.DataFrame(f_dm, columns=['f_dm']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# file_name : process_csv.py
# time : 3/08/2019 14:10
# author : ruiyang
# email : <EMAIL>
import sys
import numpy as np
import pandas as pd
from Bio.PDB.PDBParser import PDBParser
def split_csv(path):
"""
    :function: split the raw csv file into columns, return a standard csv made up of DataFrames, and add a primary-key column `key`.
    :param path: absolute path of the raw csv file describing the mutation information.
    :return: the DataFrame after splitting into columns
"""
file = open(path, 'r')
data = | pd.read_csv(file) | pandas.read_csv |
import pandas as pd
import numpy as np
#***********************From dict of Series or dicts********************
#dictionary takes key:value
dict = {"Name":pd.Series(["Nahid", "Rafi", "Meem"]),
"Age":pd.Series([21,22,21]),
"Weight":pd.Series([48,75,76]),
"Height":pd.Series([5.3, 5.8, 5.6])}
df = pd.DataFrame(dict)
print(df,"\n")
'''
Output:
    Name  Age  Weight  Height
0  Nahid   21      48     5.3
1   Rafi   22      75     5.8
2   Meem   21      76     5.6
'''
df = pd.DataFrame(dict, index=[1]) #1 Rafi 22 75 5.8
print(df,"\n") #1 Rafi 22 75 5.8
'''
output:
Name Age Weight Height
1 Rafi 22 75 5.8
'''
#using a custom index
dict1 = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(dict1)
print(df,"\n")
'''
output:
one two
a 1.0 1.0
b 2.0 2.0
c 3.0 3.0
d NaN 4.0
'''
print(pd.DataFrame(dict1, index=['b', 'a', 'd']),"\n")
'''
output:
one two
b 2.0 2.0
a 1.0 1.0
d NaN 4.0
'''
#The row and column labels can be accessed respectively
#by accessing the index and columns attributes:
#Note When a particular set of columns is passed along with a dict of data,
#the passed columns override the keys in the dict.
print(pd.DataFrame(dict1, index=['d','b','a'], columns=['two','three']),"\n")
'''
output:
two three
d 4.0 NaN
b 2.0 NaN
a 1.0 NaN
'''
#***********getting index name and column name of a dataFrame obj**********
print(df.columns) #Index(['one', 'two'], dtype='object')
print(df.index,"\n") #Index(['a', 'b', 'c', 'd'], dtype='object')
#we can pass a list
dict = {"Name":["Nahid", "Rafi", "Meem"],
"Age":[21,22,21],
"Weight":[48,75,76],
"Height":[5.3, 5.8, 5.6]}
df = | pd.DataFrame(dict) | pandas.DataFrame |
# Global Summary
# Infections / Deaths
# Administered / Fully Vaccinated (%)
# Daily Changes / Daily Changes Per 100K (%)
# Infections, Deaths, Administered, Fully Vaccinated
# Country + State
# Line Graphs / Heatmap
# pylint: disable=unused-variable
# pylint: disable=anomalous-backslash-in-string
import generic
import pandas as pd
import numpy as np
import math
import altair as alt
import pydeck as pdk
import streamlit as st
read_columns = {'infections':{7:['confirmed','Cumulative (Up-To-Date)'],9:['i_confirmed','Changes (Daily)'],11:['Tot_confirmed','Cumulative (Up-To-Date)'],12:['iTot_confirmed','Changes (Daily)'],15:['deaths','Cumulative (Up-To-Date)'],17:['i_deaths','Changes (Daily)'],19:['Tot_deaths','Cumulative (Up-To-Date)'],20:['iTot_deaths','Changes (Daily)']},'vaccines':{7:['r_admin','Cumulative (Up-To-Date)'],9:['ri_admin','Changes (Daily)'],11:['rTot_admin','Cumulative (Up-To-Date)'],12:['riTot_admin','Changes (Daily)'],15:['r_full','Cumulative (Up-To-Date)'],17:['ri_full','Changes (Daily)'],19:['rTot_full','Cumulative (Up-To-Date)'],20:['riTot_full','Changes (Daily)']}}
# Module to display sidebar
def display_sidebar(data):
# adm0_a3, Country/Region
sel_region,sel_country = None, None
# Sidebar sections to provide choices (Region, State)
# if not check:
# # Choose a startdate to display
# st.sidebar.header('Choose a startdate below')
# st.sidebar.markdown('Choose a startdate (e.g., 2020-08-15)')
# startdate = st.sidebar.slider('Startdate',data['Date'].unique()[0],data['Date'].unique()[-1])
#
# else:
st.sidebar.header('Choose options below')
# 0) Need to reset data
st.sidebar.markdown('Reset dataset?')
if st.sidebar.button(label='Clear cache'):
st.caching.clear_cache()
st.experimental_rerun()
# 1) Choose a Region/Country to display
# st.sidebar.subheader('Choose Region/Country below')
# st.sidebar.subheader('*Note*: Only multi-states countries are currently supported!')
    # Set candidates of region (Country/Region)
st.sidebar.markdown('Choose a Country/Region (e.g., Canada)')
country = sorted(set(data['infections'].loc[data['infections']['len_states']>1,'Country/Region']) & set(data['vaccines']['Country/Region']))
# country = sorted(data.loc[data['len_states']>1,'Country/Region'].unique())
country = ['Worldwide'] + list(country[:])
sel_country = st.sidebar.selectbox('Country/Region',country)
    # Candidates of countries (adm0_a3) are automatically set
if sel_country and sel_country != 'Worldwide':
sel_region = data['infections'].loc[(data['infections']['len_states']>1) & (data['infections']['Country/Region'].str.contains(sel_country)),'adm0_a3'].unique()[0]
# 2) Choose a statistics
st.sidebar.markdown('Choose a Statistics (e.g., Changes (Daily))')
stat_text = sorted(set(val[1] for val in read_columns['infections'].values()))
stat_text = [None] + stat_text[:]
chosen_stat = {'infections':[],'vaccines':[]}
chosen_stat_text = st.sidebar.selectbox('Statistics',stat_text)
if chosen_stat_text:
for key in chosen_stat:
chosen_stat[key] = sorted([val[0] for val in read_columns[key].values() if val[1] in chosen_stat_text])
# iTot
if not sel_region:
chosen_stat[key] = [val for val in chosen_stat[key] if 'Tot' in val]
elif key == 'infections':
chosen_stat[key] = [val for val in chosen_stat[key] if 'Tot' not in val]
# chosen_stat = {}
# if chosen_stat_text:
# for key in chosen_stat_key:
# chosen_stat[key] = chosen_stat_text
# 3) Draw map
sel_map = None
if chosen_stat:
st.sidebar.markdown('Draw a map?')
sel_map = st.sidebar.checkbox('Definitely')
return sel_region, sel_country, chosen_stat, sel_map
# Print latest global status
def show_stats(data,sel_region,sel_country,chosen_stat,candidates,map=None):
date = max([max(data[key]['Date']) for key in data.keys()])
st.header('Summary statistics')
if not sel_region:
st.subheader('Global status as of ' + date.strftime('%m/%d/%y'))
infections = f"\n* Cumulative infections: `{data['infections'][data['infections']['Date']==date].groupby(['adm0_a3','Country/Region'])['Tot_confirmed'].max().sum():,}`"
infections += f"\n* Cumulative casualties: `{data['infections'][data['infections']['Date']==date].groupby(['adm0_a3','Country/Region'])['Tot_deaths'].max().sum():,}`"
infections += f"\n* Daily infections changes: `{data['infections'][data['infections']['Date']==date].groupby(['adm0_a3','Country/Region'])['iTot_confirmed'].max().sum():,}`"
infections += f"\n* Daily casualties changes: `{data['infections'][data['infections']['Date']==date].groupby(['adm0_a3','Country/Region'])['iTot_deaths'].max().sum():,}`"
st.write('***Infections*** '+infections)
vaccinations = f"\n* Cumulative administed doses: `{data['vaccines'][data['vaccines']['Date']==date].groupby(['adm0_a3','Country/Region'])['Tot_admin'].max().sum():,}`"
vaccinations += f"\n* Cumulative fully vaccinations: `{data['vaccines'][data['vaccines']['Date']==date].groupby(['adm0_a3','Country/Region'])['Tot_full'].max().sum():,}`"
vaccinations += f"\n* Daily administed doses changes: `{data['vaccines'][data['vaccines']['Date']==date].groupby(['adm0_a3','Country/Region'])['iTot_admin'].max().sum():,}`"
vaccinations += f"\n* Daily fully vaccinations changes: `{data['vaccines'][data['vaccines']['Date']==date].groupby(['adm0_a3','Country/Region'])['iTot_full'].max().sum():,}`"
st.write('***Vaccinations*** '+vaccinations)
else:
st.subheader(sel_country + ' status as of ' + date.strftime('%m/%d/%y'))
infections = f"\n* Cumulative infections: `{data['infections'][(data['infections']['Date']==date) & (data['infections']['adm0_a3']==sel_region) & (data['infections']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['Tot_confirmed'].max().sum():,}`"
infections += f"\n* Cumulative casualties: `{data['infections'][(data['infections']['Date']==date) & (data['infections']['adm0_a3']==sel_region) & (data['infections']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['Tot_deaths'].max().sum():,}`"
infections += f"\n* Daily infections changes: `{data['infections'][(data['infections']['Date']==date) & (data['infections']['adm0_a3']==sel_region) & (data['infections']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['iTot_confirmed'].max().sum():,}`"
infections += f"\n* Daily casualties changes: `{data['infections'][(data['infections']['Date']==date) & (data['infections']['adm0_a3']==sel_region) & (data['infections']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['iTot_deaths'].max().sum():,}`"
st.write('***Infections*** '+infections)
vaccinations = f"\n* Cumulative administed doses: `{data['vaccines'][(data['vaccines']['Date']==date) & (data['vaccines']['adm0_a3']==sel_region) & (data['vaccines']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['Tot_admin'].max().sum():,}`"
vaccinations += f"\n* Cumulative fully vaccinations: `{data['vaccines'][(data['vaccines']['Date']==date) & (data['vaccines']['adm0_a3']==sel_region) & (data['vaccines']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['Tot_full'].max().sum():,}`"
vaccinations += f"\n* Daily administed doses changes: `{data['vaccines'][(data['vaccines']['Date']==date) & (data['vaccines']['adm0_a3']==sel_region) & (data['vaccines']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['iTot_admin'].max().sum():,}`"
vaccinations += f"\n* Daily fully vaccinations changes: `{data['vaccines'][(data['vaccines']['Date']==date) & (data['vaccines']['adm0_a3']==sel_region) & (data['vaccines']['Country/Region']==sel_country)].groupby(['adm0_a3','Country/Region'])['iTot_full'].max().sum():,}`"
st.write('***Vaccinations*** '+vaccinations)
show_chart(data,chosen_stat,candidates,sel_region)
if map and chosen_stat:
show_map(data,chosen_stat,sel_region)
# Load mapdata for selected region
def show_map(data,stat,region=None,date=None):
st.header('Color maps')
if not date:
date = max([max(data[key]['Date']) for key in data.keys()])
# Custom color scale (colorbrewer2.org -> Sequential Single-Hue)
breaks = [.0, .2, .4, .6, .8, 1]
color_range = [
# 6-class Blues
[255,255,255],
[198,219,239],
[158,202,225],
[107,174,214],
[49,130,189],
[8,81,156],
# # 6-class Purples (For reference)
# [242,240,247],
# [218,218,235],
# [188,189,220],
# [158,154,200],
# [117,107,177],
# [84,39,143],
]
def color_scale(val):
for i, b in enumerate(breaks):
if val <= b:
return color_range[i]
return color_range[i]
def elevation_scale(val,scale):
for i, b in enumerate(breaks):
if val <= b:
return i*scale
def set_nan(val):
if np.isnan(val):
return -1
else:
return val
stat_text = {'infections':['Infections','Casualties'],'vaccines':['Administered','Fully Vaccinated']}
for key, stat_key in stat.items():
if key == 'infections':
st.subheader('Infections')
elif key == 'vaccines':
st.subheader('Vaccines')
st.write(f'* Color depths: {stat_text[key][0]} \n* Elevation: {stat_text[key][1]}')
stat_tot = sum(['Tot' in stat_keys for stat_keys in stat_key])
# Load in the JSON data
if region and region != 'Worldwide' and stat_tot == 0:
src_geo = 'data/geojson/'+region+'.json'
else:
src_geo = 'data/geojson/countries.json'
json_geo = pd.read_json(src_geo)
df = pd.DataFrame()
# Parse the geometry out in Pandas
df["coordinates"] = json_geo["features"].apply(lambda row: row["geometry"]["coordinates"])
df["name"] = json_geo["features"].apply(lambda row: row["properties"]["name"])
df["adm0_a3"] = json_geo["features"].apply(lambda row: row["properties"]["adm0_a3"])
df["admin"] = json_geo["features"].apply(lambda row: row["properties"]["admin"])
df['param'] = f"{str.title(key)} {sorted(set(val[1] for val in read_columns[key].values() if val[0] in stat_key))[0]}"
df['stat_text0'] = stat_text[key][0]
df['stat_text1'] = stat_text[key][1]
stat_key = [stat_keys[1:] if stat_keys[0]=='r' else stat_keys for stat_keys in stat_key]
filtered_data = data[key].loc[data[key]['Date']==date,['adm0_a3','Province/State','lat','lon']+stat_key]
if not region or region == 'Worldwide' or stat_tot>0:
filtered_data = filtered_data.groupby(['adm0_a3'])[['lat','lon']+stat_key].mean()
df = pd.merge(df,filtered_data,how='inner',left_on=['adm0_a3'],right_on=['adm0_a3'])
df['name'] = 'N/A'
if region and region != 'Worldwide':
zoom = 3
else:
zoom = 1
else:
filtered_data = filtered_data.loc[filtered_data['adm0_a3']==region,['adm0_a3','Province/State','lat','lon']+stat_key]
df = pd.merge(df,filtered_data,how='inner',left_on=['name','adm0_a3'],right_on=['Province/State','adm0_a3'])
zoom = 3
# Moved to generic.py
# df.loc[df[stat_keys[0]]<0,stat_keys[0]] = 0
# df.loc[df[stat_keys[1]]<0,stat_keys[1]] = 0
# df[stat_keys[0]] = df[stat_keys[0]].apply(set_nan)
# df[stat_keys[1]] = df[stat_keys[1]].apply(set_nan)
df['fill_color'] = (df[stat_key[0]]/df[stat_key[0]].max()).replace(np.nan,0).apply(color_scale)
df['elevation'] = (df[stat_key[1]]/df[stat_key[1]].max()).replace(np.nan,0).apply(lambda x:elevation_scale(x,1e4))
df.rename(columns={stat_key[0]:'stat_0',stat_key[1]:'stat_1'},inplace=True)
if not region or region == 'Worldwide':
lat = df.loc[(df['lat'] != 0) & (df['lon'] != 0),'lat'].mean(skipna=True)
lon = df.loc[(df['lat'] != 0) & (df['lon'] != 0),'lon'].mean(skipna=True)
else:
lat = df.loc[(df['adm0_a3']==region) & (df['lat'] != 0) & (df['lon'] != 0),'lat'].mean(skipna=True)
lon = df.loc[(df['adm0_a3']==region) & (df['lat'] != 0) & (df['lon'] != 0),'lon'].mean(skipna=True)
view_state = pdk.ViewState(
latitude = lat, #df.loc[(df['lat'] != 0) & (df['lon'] != 0),'lat'].mean(skipna=True),
longitude = lon, #df.loc[(df['lat'] != 0) & (df['lon'] != 0),'lon'].mean(skipna=True),
# bearings=15,
# pitch=45,
zoom=zoom)
polygon_layer = pdk.Layer(
"PolygonLayer",
df,
id="geojson",
opacity=0.2,
stroked=False,
get_polygon="coordinates",
filled=True,
get_elevation='elevation',
# elevation_scale=1e5,
# elevation_range=[0,100],
extruded=True,
# wireframe=True,
get_fill_color= 'fill_color',
get_line_color=[255, 255, 255],
auto_highlight=True,
pickable=True,
)
tooltip = {"html": "<b>Country/Region:</b> {admin} <br /><b>Province/State:</b> {name} <br /><b>Type:</b> {param}<br /><b>{stat_text0}:</b> {stat_0} <br /><b>{stat_text1}:</b> {stat_1}"}
r = pdk.Deck(
layers=[polygon_layer],
initial_view_state=view_state,
map_style='light',
tooltip=tooltip,
)
# return r
st.pydeck_chart(r, use_container_width=True)
def show_chart(data,stat,candidates,region,date=None):
if not date:
date = min([min(data[key]['Date']) for key in data.keys()])
dataset = []
# Set quantiles for x-axis ('Date')
# dates = data['Date'].map(lambda x:x.strftime('%m/%d/%y')).unique().tolist()
dates = data['infections']['Date'].map(lambda x:x.strftime('%m/%d/%y')).unique().tolist()
presets = [0,.25,.5,.75,1]
quantiles = np.quantile(np.arange(0,len(dates)),presets).tolist()
quantiles = [int(np.floor(q)) for q in quantiles]
date_visible = [dates[idx] for idx in quantiles]
stat_len = min([len(val) for val in stat.values()])
if stat_len > 0:
st.header('Regional analyses')
for key, stat_key in stat.items():
dataset.append(data[key].loc[(data[key]['Date']>=date),['Date','adm0_a3','Country/Region','Province/State']+stat_key])
for idx, key in enumerate(stat):
if idx == 0:
st.subheader('Infections developments')
else:
st.subheader('Vaccines developments *(Per 100K population)*')
stat_text = {'infections':['Infections','Casualties'],'vaccines':['Administered','Fully Vaccinated']}
for stat_idx, stat_key in enumerate(stat[key]):
if region and 'Tot' not in stat_key:
filtered_data = pd.merge(dataset[idx][['Date','Province/State',stat_key]],candidates[['index',stat_key]],how='inner',left_on='Province/State',right_on=stat_key)
filtered_data.drop([stat_key+'_y'],axis=1,inplace=True)
filtered_data.rename(columns={stat_key+'_x':stat_key,'index':'order'},inplace=True)
filtered_data['Date'] = filtered_data['Date'].map(lambda x:x.strftime('%m/%d/%y'))
target_cat = 'Province/State'
else:
if stat_key in candidates.columns.tolist():
filtered_data = | pd.merge(dataset[idx][['Date','adm0_a3','Country/Region',stat_key]],candidates[['index',stat_key]],how='inner',left_on='adm0_a3',right_on=stat_key) | pandas.merge |
import os
import numpy as np
import pandas as pd
import pytest
from featuretools import list_primitives
from featuretools.primitives import (
Age,
Count,
Day,
GreaterThan,
Haversine,
Last,
Max,
Mean,
Min,
Mode,
Month,
NumCharacters,
NumUnique,
NumWords,
PercentTrue,
Skew,
Std,
Sum,
Weekday,
Year,
get_aggregation_primitives,
get_default_aggregation_primitives,
get_default_transform_primitives,
get_transform_primitives
)
from featuretools.primitives.base import PrimitiveBase
from featuretools.primitives.utils import (
_apply_roll_with_offset_gap,
_get_descriptions,
_get_rolled_series_without_gap,
_get_unique_input_types,
_roll_series_with_gap,
list_primitive_files,
load_primitive_from_file
)
from featuretools.tests.primitive_tests.utils import get_number_from_offset
from featuretools.utils.gen_utils import Library
def test_list_primitives_order():
df = list_primitives()
all_primitives = get_transform_primitives()
all_primitives.update(get_aggregation_primitives())
for name, primitive in all_primitives.items():
assert name in df['name'].values
row = df.loc[df['name'] == name].iloc[0]
actual_desc = _get_descriptions([primitive])[0]
if actual_desc:
assert actual_desc == row['description']
assert row['dask_compatible'] == (Library.DASK in primitive.compatibility)
assert row['valid_inputs'] == ', '.join(_get_unique_input_types(primitive.input_types))
assert row['return_type'] == getattr(primitive.return_type, '__name__', None)
types = df['type'].values
assert 'aggregation' in types
assert 'transform' in types
def test_valid_input_types():
actual = _get_unique_input_types(Haversine.input_types)
assert actual == {'<ColumnSchema (Logical Type = LatLong)>'}
actual = _get_unique_input_types(GreaterThan.input_types)
assert actual == {'<ColumnSchema (Logical Type = Datetime)>',
"<ColumnSchema (Semantic Tags = ['numeric'])>",
'<ColumnSchema (Logical Type = Ordinal)>'}
actual = _get_unique_input_types(Sum.input_types)
assert actual == {"<ColumnSchema (Semantic Tags = ['numeric'])>"}
def test_descriptions():
primitives = {NumCharacters: 'Calculates the number of characters in a string.',
Day: 'Determines the day of the month from a datetime.',
Last: 'Determines the last value in a list.',
GreaterThan: 'Determines if values in one list are greater than another list.'}
assert _get_descriptions(list(primitives.keys())) == list(primitives.values())
def test_get_default_aggregation_primitives():
primitives = get_default_aggregation_primitives()
expected_primitives = [Sum, Std, Max, Skew, Min, Mean, Count, PercentTrue,
NumUnique, Mode]
assert set(primitives) == set(expected_primitives)
def test_get_default_transform_primitives():
primitives = get_default_transform_primitives()
expected_primitives = [Age, Day, Year, Month, Weekday, Haversine, NumWords,
NumCharacters]
assert set(primitives) == set(expected_primitives)
@pytest.fixture
def this_dir():
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def primitives_to_install_dir(this_dir):
return os.path.join(this_dir, "primitives_to_install")
@pytest.fixture
def bad_primitives_files_dir(this_dir):
return os.path.join(this_dir, "bad_primitive_files")
def test_list_primitive_files(primitives_to_install_dir):
files = list_primitive_files(primitives_to_install_dir)
custom_max_file = os.path.join(primitives_to_install_dir, "custom_max.py")
custom_mean_file = os.path.join(primitives_to_install_dir, "custom_mean.py")
custom_sum_file = os.path.join(primitives_to_install_dir, "custom_sum.py")
assert {custom_max_file, custom_mean_file, custom_sum_file}.issubset(set(files))
def test_load_primitive_from_file(primitives_to_install_dir):
primitve_file = os.path.join(primitives_to_install_dir, "custom_max.py")
primitive_name, primitive_obj = load_primitive_from_file(primitve_file)
assert issubclass(primitive_obj, PrimitiveBase)
def test_errors_more_than_one_primitive_in_file(bad_primitives_files_dir):
primitive_file = os.path.join(bad_primitives_files_dir, "multiple_primitives.py")
error_text = "More than one primitive defined in file {}".format(primitive_file)
with pytest.raises(RuntimeError) as excinfo:
load_primitive_from_file(primitive_file)
assert str(excinfo.value) == error_text
def test_errors_no_primitive_in_file(bad_primitives_files_dir):
primitive_file = os.path.join(bad_primitives_files_dir, "no_primitives.py")
error_text = "No primitive defined in file {}".format(primitive_file)
with pytest.raises(RuntimeError) as excinfo:
load_primitive_from_file(primitive_file)
assert str(excinfo.value) == error_text
def test_get_rolled_series_without_gap(rolling_series_pd):
# Data is daily, so number of rows should be number of days not included in the gap
assert len(_get_rolled_series_without_gap(rolling_series_pd, "11D")) == 9
assert len(_get_rolled_series_without_gap(rolling_series_pd, "0D")) == 20
assert len(_get_rolled_series_without_gap(rolling_series_pd, "48H")) == 18
assert len(_get_rolled_series_without_gap(rolling_series_pd, "4H")) == 19
def test_get_rolled_series_without_gap_not_uniform(rolling_series_pd):
non_uniform_series = rolling_series_pd.iloc[[0, 2, 5, 6, 8, 9]]
assert len(_get_rolled_series_without_gap(non_uniform_series, "10D")) == 0
assert len(_get_rolled_series_without_gap(non_uniform_series, "0D")) == 6
assert len(_get_rolled_series_without_gap(non_uniform_series, "48H")) == 4
assert len(_get_rolled_series_without_gap(non_uniform_series, "4H")) == 5
assert len(_get_rolled_series_without_gap(non_uniform_series, "4D")) == 3
assert len(_get_rolled_series_without_gap(non_uniform_series, "4D2H")) == 2
def test_get_rolled_series_without_gap_empty_series(rolling_series_pd):
empty_series = pd.Series()
assert len(_get_rolled_series_without_gap(empty_series, "1D")) == 0
assert len(_get_rolled_series_without_gap(empty_series, "0D")) == 0
def test_get_rolled_series_without_gap_large_bound(rolling_series_pd):
assert len(_get_rolled_series_without_gap(rolling_series_pd, "100D")) == 0
assert len(_get_rolled_series_without_gap(rolling_series_pd.iloc[[0, 2, 5, 6, 8, 9]], "20D")) == 0
@pytest.mark.parametrize(
"window_length, gap",
[
(3, 2),
(3, 4), # gap larger than window
(2, 0), # gap explicitly set to 0
('3d', '2d'), # using offset aliases
('3d', '4d'),
('4d', '0d'),
],
)
def test_roll_series_with_gap(window_length, gap, rolling_series_pd):
rolling_max = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap).max()
rolling_min = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap).min()
assert len(rolling_max) == len(rolling_series_pd)
assert len(rolling_min) == len(rolling_series_pd)
gap_num = get_number_from_offset(gap)
window_length_num = get_number_from_offset(window_length)
for i in range(len(rolling_series_pd)):
start_idx = i - gap_num - window_length_num + 1
if isinstance(gap, str):
            # No gap functionality is happening, so gap isn't taken into account in the end index
# it's like the gap is 0; it includes the row itself
end_idx = i
else:
end_idx = i - gap_num
# If start and end are negative, they're entirely before
if start_idx < 0 and end_idx < 0:
assert pd.isnull(rolling_max.iloc[i])
assert pd.isnull(rolling_min.iloc[i])
continue
if start_idx < 0:
start_idx = 0
# Because the row values are a range from 0 to 20, the rolling min will be the start index
# and the rolling max will be the end idx
assert rolling_min.iloc[i] == start_idx
assert rolling_max.iloc[i] == end_idx
@pytest.mark.parametrize("window_length", [3, "3d"])
def test_roll_series_with_no_gap(window_length, rolling_series_pd):
actual_rolling = _roll_series_with_gap(rolling_series_pd, window_length).mean()
expected_rolling = rolling_series_pd.rolling(window_length, min_periods=1).mean()
pd.testing.assert_series_equal(actual_rolling, expected_rolling)
@pytest.mark.parametrize(
"window_length, gap",
[
(6, 2),
(6, 0), # No gap - changes early values
('6d', '0d'), # Uses offset aliases
('6d', '2d')
]
)
def test_roll_series_with_gap_early_values(window_length, gap, rolling_series_pd):
gap_num = get_number_from_offset(gap)
window_length_num = get_number_from_offset(window_length)
# Default min periods is 1 - will include all
default_partial_values = _roll_series_with_gap(rolling_series_pd,
window_length,
gap=gap).count()
num_empty_aggregates = len(default_partial_values.loc[default_partial_values == 0])
num_partial_aggregates = len((default_partial_values
.loc[default_partial_values != 0])
.loc[default_partial_values < window_length_num])
assert num_partial_aggregates == window_length_num - 1
if isinstance(gap, str):
# gap isn't handled, so we'll always at least include the row itself
assert num_empty_aggregates == 0
else:
assert num_empty_aggregates == gap_num
# Make min periods the size of the window
no_partial_values = _roll_series_with_gap(rolling_series_pd,
window_length,
gap=gap,
min_periods=window_length_num).count()
num_null_aggregates = len(no_partial_values.loc[pd.isna(no_partial_values)])
num_partial_aggregates = len(no_partial_values.loc[no_partial_values < window_length_num])
# because we shift, gap is included as nan values in the series.
# Count treats nans in a window as values that don't get counted,
# so the gap rows get included in the count for whether a window has "min periods".
# This is different than max, for example, which does not count nans in a window as values towards "min periods"
assert num_null_aggregates == window_length_num - 1
if isinstance(gap, str):
# gap isn't handled, so we'll never have any partial aggregates
assert num_partial_aggregates == 0
else:
assert num_partial_aggregates == gap_num
def test_roll_series_with_gap_nullable_types(rolling_series_pd):
window_length = 3
gap = 2
# Because we're inserting nans, confirm that nullability of the dtype doesn't have an impact on the results
nullable_series = rolling_series_pd.astype('Int64')
non_nullable_series = rolling_series_pd.astype('int64')
nullable_rolling_max = _roll_series_with_gap(nullable_series, window_length, gap=gap).max()
non_nullable_rolling_max = _roll_series_with_gap(non_nullable_series, window_length, gap=gap).max()
pd.testing.assert_series_equal(nullable_rolling_max, non_nullable_rolling_max)
def test_roll_series_with_gap_nullable_types_with_nans(rolling_series_pd):
window_length = 3
gap = 2
nullable_floats = rolling_series_pd.astype('float64').replace({1: np.nan, 3: np.nan})
nullable_ints = nullable_floats.astype('Int64')
nullable_ints_rolling_max = _roll_series_with_gap(nullable_ints, window_length, gap=gap).max()
nullable_floats_rolling_max = _roll_series_with_gap(nullable_floats, window_length, gap=gap).max()
pd.testing.assert_series_equal(nullable_ints_rolling_max, nullable_floats_rolling_max)
expected_early_values = ([np.nan, np.nan, 0, 0, 2, 2, 4] +
list(range(7 - gap, len(rolling_series_pd) - gap)))
for i in range(len(rolling_series_pd)):
actual = nullable_floats_rolling_max.iloc[i]
expected = expected_early_values[i]
if pd.isnull(actual):
assert pd.isnull(expected)
else:
assert actual == expected
@pytest.mark.parametrize(
"window_length, gap",
[
('3d', '2d'),
('3d', '4d'),
('4d', '0d'),
],
)
def test_apply_roll_with_offset_gap(window_length, gap, rolling_series_pd):
def max_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, max, min_periods=1)
rolling_max_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_max_series = rolling_max_obj.apply(max_wrapper)
def min_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, min, min_periods=1)
rolling_min_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_min_series = rolling_min_obj.apply(min_wrapper)
assert len(rolling_max_series) == len(rolling_series_pd)
assert len(rolling_min_series) == len(rolling_series_pd)
gap_num = get_number_from_offset(gap)
window_length_num = get_number_from_offset(window_length)
for i in range(len(rolling_series_pd)):
start_idx = i - gap_num - window_length_num + 1
# Now that we have the _apply call, this acts as expected
end_idx = i - gap_num
# If start and end are negative, they're entirely before
if start_idx < 0 and end_idx < 0:
assert pd.isnull(rolling_max_series.iloc[i])
assert pd.isnull(rolling_min_series.iloc[i])
continue
if start_idx < 0:
start_idx = 0
# Because the row values are a range from 0 to 20, the rolling min will be the start index
# and the rolling max will be the end idx
assert rolling_min_series.iloc[i] == start_idx
assert rolling_max_series.iloc[i] == end_idx
@pytest.mark.parametrize(
"min_periods",
[1, 0, None],
)
def test_apply_roll_with_offset_gap_default_min_periods(min_periods, rolling_series_pd):
window_length = '5d'
window_length_num = 5
gap = '3d'
gap_num = 3
def count_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, len, min_periods=min_periods)
rolling_count_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_count_series = rolling_count_obj.apply(count_wrapper)
# gap essentially creates a rolling series that has no elements; which should be nan
# to differentiate from when a window only has null values
num_empty_aggregates = rolling_count_series.isna().sum()
num_partial_aggregates = len((rolling_count_series
.loc[rolling_count_series != 0])
.loc[rolling_count_series < window_length_num])
assert num_empty_aggregates == gap_num
assert num_partial_aggregates == window_length_num - 1
@pytest.mark.parametrize(
"min_periods",
[2, 3, 4, 5],
)
def test_apply_roll_with_offset_gap_min_periods(min_periods, rolling_series_pd):
window_length = '5d'
window_length_num = 5
gap = '3d'
gap_num = 3
def count_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, len, min_periods=min_periods)
rolling_count_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_count_series = rolling_count_obj.apply(count_wrapper)
# gap essentially creates rolling series that have no elements; which should be nan
# to differentiate from when a window only has null values
num_empty_aggregates = rolling_count_series.isna().sum()
num_partial_aggregates = len((rolling_count_series
.loc[rolling_count_series != 0])
.loc[rolling_count_series < window_length_num])
assert num_empty_aggregates == min_periods - 1 + gap_num
assert num_partial_aggregates == window_length_num - min_periods
def test_apply_roll_with_offset_gap_non_uniform():
window_length = '3d'
gap = '3d'
# When the data isn't uniform, this impacts the number of values in each rolling window
datetimes = (list(pd.date_range(start='2017-01-01', freq='1d', periods=7)) +
list(pd.date_range(start='2017-02-01', freq='2d', periods=7)) +
list(pd.date_range(start='2017-03-01', freq='1d', periods=7)))
no_freq_series = pd.Series(range(len(datetimes)), index=datetimes)
assert pd.infer_freq(no_freq_series.index) is None
expected_series = pd.Series([None, None, None, 1, 2, 3, 3] +
[None, None, 1, 1, 1, 1, 1] +
[None, None, None, 1, 2, 3, 3], index=datetimes)
def count_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, len, min_periods=1)
rolling_count_obj = _roll_series_with_gap(no_freq_series, window_length, gap=gap)
rolling_count_series = rolling_count_obj.apply(count_wrapper)
pd.testing.assert_series_equal(rolling_count_series, expected_series)
def test_apply_roll_with_offset_data_frequency_higher_than_parameters_frequency():
window_length = '5D' # 120 hours
window_length_num = 5
# In order for min periods to be the length of the window, we multiply 24 hours * 5
min_periods = window_length_num * 24
datetimes = list(pd.date_range(start='2017-01-01', freq='1H', periods=200))
high_frequency_series = pd.Series(range(200), index=datetimes)
# Check without gap
gap = "0d"
gap_num = 0
def max_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)
rolling_max_obj = _roll_series_with_gap(high_frequency_series, window_length, min_periods=min_periods, gap=gap)
rolling_max_series = rolling_max_obj.apply(max_wrapper)
assert rolling_max_series.isna().sum() == (min_periods - 1) + gap_num
# Check with small gap
gap = '3H'
gap_num = 3
def max_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)
rolling_max_obj = _roll_series_with_gap(high_frequency_series, window_length, min_periods=min_periods, gap=gap)
rolling_max_series = rolling_max_obj.apply(max_wrapper)
assert rolling_max_series.isna().sum() == (min_periods - 1) + gap_num
# Check with large gap - in terms of days, so we'll multiply by 24 hours for the number of nans
gap = '2D'
gap_num = 2
def max_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)
rolling_max_obj = _roll_series_with_gap(high_frequency_series, window_length, min_periods=min_periods, gap=gap)
rolling_max_series = rolling_max_obj.apply(max_wrapper)
assert rolling_max_series.isna().sum() == (min_periods - 1) + (gap_num * 24)
def test_apply_roll_with_offset_data_min_periods_too_big(rolling_series_pd):
window_length = '5D'
gap = "2d"
# Since the data has a daily frequency, there will only be, at most, 5 rows in the window
min_periods = 6
def max_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)
rolling_max_obj = _roll_series_with_gap(rolling_series_pd, window_length, min_periods=min_periods, gap=gap)
rolling_max_series = rolling_max_obj.apply(max_wrapper)
# The resulting series is comprised entirely of nans
assert rolling_max_series.isna().sum() == len(rolling_series_pd)
def test_roll_series_with_gap_different_input_types_same_result_uniform(rolling_series_pd):
# Offset inputs will only produce the same results as numeric inputs
# when the data has a uniform frequency
offset_gap = '2d'
offset_window_length = '5d'
int_gap = 2
int_window_length = 5
# Rolling series' with matching input types
expected_rolling_numeric = _roll_series_with_gap(rolling_series_pd,
window_size=int_window_length,
gap=int_gap).max()
def max_offset_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, offset_gap, max, min_periods=1)
rolling_offset_obj = _roll_series_with_gap(rolling_series_pd,
window_size=offset_window_length,
gap=offset_gap)
expected_rolling_offset = rolling_offset_obj.apply(max_offset_wrapper)
# confirm that the numeric-input and offset-input results are equal to one another
| pd.testing.assert_series_equal(expected_rolling_numeric, expected_rolling_offset) | pandas.testing.assert_series_equal |
"""
k-NN module.
**Available routines:**
- class ``KNN``: Builds K-Nearest Neighbours model using cross validation.
Credits
-------
::
Authors:
- Diptesh
- Madhu
Date: Sep 25, 2021
"""
# pylint: disable=invalid-name
# pylint: disable=R0902,R0903,R0913,C0413
from typing import List, Dict, Any
import re
import sys
from inspect import getsourcefile
from os.path import abspath
import pandas as pd
import numpy as np
from sklearn import neighbors as sn
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
path = abspath(getsourcefile(lambda: 0))
path = re.sub(r"(.+\/)(.+.py)", "\\1", path)
sys.path.insert(0, path)
import metrics # noqa: F841
class KNN():
"""K-Nearest Neighbour (KNN) module.
Objective:
- Build
`KNN <https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm>`_
model and determine optimal k
Parameters
----------
df : pandas.DataFrame
Pandas dataframe containing the `y_var` and `x_var`
y_var : str
Dependent variable
x_var : List[str]
Independent variables
method : str, optional
Can be either `classify` or `regression` (the default is regression)
k_fold : int, optional
Number of cross-validation folds (the default is 5)
param : dict, optional
KNN parameters (the default is None).
In case of None, the parameters will default to::
n_neighbors: max(int(len(df)/(k_fold * 2)), 1)
weights: ["uniform", "distance"]
metric: ["euclidean", "manhattan"]
Returns
-------
model : object
Final optimal model.
best_params_ : Dict
Best parameters amongst the given parameters.
model_summary : Dict
Model summary containing key metrics like R-squared, RMSE, MSE, MAE,
MAPE for regression and Accuracy, Precision, Recall, F1 score for
classification.
Methods
-------
predict
Example
-------
>>> mod = KNN(df=df_ip, y_var="y", x_var=["x1", "x2", "x3"])
>>> df_op = mod.predict(x_predict)
"""
def __init__(self,
df: pd.DataFrame,
y_var: str,
x_var: List[str],
method: str = "regression",
k_fold: int = 5,
param: Dict = None):
"""Initialize variables for module ``KNN``."""
self.y_var = y_var
self.x_var = x_var
self.df = df.reset_index(drop=True)
self.method = method
self.model = None
self.k_fold = k_fold
if param is None:
max_k = max(int(len(self.df) / (self.k_fold * 2)), 1)
param = {"n_neighbors": list(range(1, max_k, 2)),
"weights": ["uniform", "distance"],
"metric": ["euclidean", "manhattan"]}
self.param = param
self._pre_process()
self.best_params_ = self._fit()
self.model_summary = None
self._compute_metrics()
def _pre_process(self):
"""Pre-process the data, one hot encoding and normalizing."""
df_ip_x = pd.get_dummies(self.df[self.x_var])
self.x_var = list(df_ip_x.columns)
self.norm = MinMaxScaler()
self.norm.fit(df_ip_x)
df_ip_x = pd.DataFrame(self.norm.transform(df_ip_x[self.x_var]))
df_ip_x.columns = self.x_var
self.df = self.df[[self.y_var]].join(df_ip_x)
def _fit(self) -> Dict[str, Any]:
"""Fit KNN model."""
if self.method == "classify":
gs = GridSearchCV(estimator=sn.KNeighborsClassifier(),
param_grid=self.param,
scoring='f1_weighted',
verbose=0,
refit=True,
return_train_score=True,
cv=self.k_fold,
n_jobs=-1)
elif self.method == "regression":
gs = GridSearchCV(estimator=sn.KNeighborsRegressor(),
param_grid=self.param,
scoring='neg_root_mean_squared_error',
verbose=0,
refit=True,
return_train_score=True,
cv=self.k_fold,
n_jobs=-1)
gs_op = gs.fit(self.df[self.x_var], self.df[self.y_var])
self.model = gs_op
return gs_op.best_params_
def _compute_metrics(self):
"""Compute commonly used metrics to evaluate the model."""
y = self.df.loc[:, self.y_var].values.tolist()
y_hat = list(self.model.predict(self.df[self.x_var]))
if self.method == "regression":
model_summary = {"rsq": np.round(metrics.rsq(y, y_hat), 3),
"mae": np.round(metrics.mae(y, y_hat), 3),
"mape": np.round(metrics.mape(y, y_hat), 3),
"rmse": np.round(metrics.rmse(y, y_hat), 3)}
model_summary["mse"] = np.round(model_summary["rmse"] ** 2, 3)
if self.method == "classify":
class_report = classification_report(y,
y_hat,
output_dict=True,
zero_division=0)
model_summary = class_report["weighted avg"]
model_summary["accuracy"] = class_report["accuracy"]
model_summary = {key: round(model_summary[key], 3)
for key in model_summary}
self.model_summary = model_summary
def predict(self, x_predict: pd.DataFrame) -> pd.DataFrame:
"""Predict y_var/target variable.
Parameters
----------
x_predict : pd.DataFrame
Pandas dataframe containing `x_var`.
Returns
-------
pd.DataFrame
Pandas dataframe containing predicted `y_var` and `x_var`.
"""
df_op = x_predict.copy(deep=True)
df_predict = pd.get_dummies(x_predict)
df_predict_tmp = | pd.DataFrame(columns=self.x_var) | pandas.DataFrame |
# Source
# Portfolio optimization in finance is the technique of creating a portfolio of assets, for which your investment has the maximum return and minimum risk.
# https://pythoninvest.com/long-read/practical-portfolio-optimisation
# https://github.com/realmistic/PythonInvest-basic-fin-analysis
##############################################################################################################
# ░█████╗░░██████╗░██████╗███████╗████████╗
# ██╔══██╗██╔════╝██╔════╝██╔════╝╚══██╔══╝
# ███████║╚█████╗░╚█████╗░█████╗░░░░░██║░░░
# ██╔══██║░╚═══██╗░╚═══██╗██╔══╝░░░░░██║░░░
# ██║░░██║██████╔╝██████╔╝███████╗░░░██║░░░
# ╚═╝░░╚═╝╚═════╝░╚═════╝░╚══════╝░░░╚═╝░░░
# ███╗░░░███╗░█████╗░███╗░░██╗░█████╗░░██████╗░███████╗███╗░░░███╗███████╗███╗░░██╗████████╗
# ████╗░████║██╔══██╗████╗░██║██╔══██╗██╔════╝░██╔════╝████╗░████║██╔════╝████╗░██║╚══██╔══╝
# ██╔████╔██║███████║██╔██╗██║███████║██║░░██╗░█████╗░░██╔████╔██║█████╗░░██╔██╗██║░░░██║░░░
# ██║╚██╔╝██║██╔══██║██║╚████║██╔══██║██║░░╚██╗██╔══╝░░██║╚██╔╝██║██╔══╝░░██║╚████║░░░██║░░░
# ██║░╚═╝░██║██║░░██║██║░╚███║██║░░██║╚██████╔╝███████╗██║░╚═╝░██║███████╗██║░╚███║░░░██║░░░
# ╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚══╝╚═╝░░╚═╝░╚═════╝░╚══════╝╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚══╝░░░╚═╝░░░
##############################################################################################################
##############################################################################################################
# Portfolio Optimization with Python using Efficient Frontier with Practical Examples
##############################################################################################################
# Portfolio optimization is the process of creating a portfolio of assets, for which your investment has the maximum return and minimum risk.
# Modern Portfolio Theory (MPT), also known as mean-variance analysis, is a mathematical process that allows the user to maximize returns for a given risk level.
# It was formulated by <NAME>; while it is not the only optimization technique known, it is the most widely used.
# The efficient frontier is a graph with ‘returns’ on the Y-axis and ‘volatility’ on the X-axis.
# It shows the set of optimal portfolios that offer the highest expected return for a given risk level or the lowest risk for a given level of expected return.
##############################################################################################################
# Practical Portfolio Optimisation
##############################################################################################################
# What? Identify an optimal split for a known set of stocks and a given investment size.
# Why? Smart portfolio management will add a lot to the risk management of your trades: it can reduce portfolio volatility, increase returns per unit of risk, and reduce worst-case losses
# How? Use the library PyPortfolioOpt (a minimal illustrative sketch follows the plan below)
# User guide: https://pyportfolioopt.readthedocs.io/en/latest/UserGuide.html
# Detailed Colab example (Mean-Variance-Optimisation): https://github.com/robertmartin8/PyPortfolioOpt/blob/master/cookbook/2-Mean-Variance-Optimisation.ipynb
# Plan
# 1. Prep work : imports, getting financial data, and pivot table of daily prices
# 2. Correlation matrix
# 3. PyPortfolioOpt : min volatility, max Sharpe, and min cVAR portfolios
# 4. PyPortfolioOpt : Efficient Frontier
# 5. PyPortfolioOpt : Discrete Allocation
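# Minimal illustrative sketch of steps 3 and 5, not part of the original script: it builds a
# max-Sharpe portfolio with PyPortfolioOpt and converts it into a whole-unit allocation.
# The two tickers, the 1-year lookback, and the 10,000 budget are assumptions for demonstration only.
def _pyportfolioopt_preview(tickers=('BTC-USD', 'ETH-USD'), budget=10000):
    import yfinance as yf
    from pypfopt import EfficientFrontier, expected_returns, risk_models
    from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

    prices = yf.download(list(tickers), period='1y')['Close']   # daily close prices, one column per ticker
    mu = expected_returns.mean_historical_return(prices)        # annualised mean returns
    S = risk_models.sample_cov(prices)                          # annualised sample covariance matrix
    ef = EfficientFrontier(mu, S)
    weights = ef.max_sharpe()                                   # step 3: maximise the Sharpe ratio
    weights = ef.clean_weights()                                # drop negligible weights
    ef.portfolio_performance(verbose=True)                      # expected return, volatility, Sharpe ratio
    da = DiscreteAllocation(weights, get_latest_prices(prices), total_portfolio_value=budget)
    allocation, leftover = da.lp_portfolio()                    # step 5: whole-unit allocation of the budget
    return allocation, leftover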
##############################################################################################################
# 0. Prep work : imports, getting financial data, and pivot table of daily prices
##############################################################################################################
# pip install yfinance
import pandas as pd
import yfinance as yf
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
INVESTMENT = 10000
# yFinance ticker list https://finance.yahoo.com/cryptocurrencies
# BTC-USD
# ETH-USD
# BNB-USD
# XRP-USD
# LTC-USD
# ZAM-USD
# ADA-USD
# TRX-USD
TICKERS =['BTC-USD','ETH-USD', 'BNB-USD', 'XRP-USD','LTC-USD', 'ZAM-USD', 'ADA-USD', 'TRX-USD']
| pd.set_option('display.max_colwidth', None) | pandas.set_option |
import os
import pandas as pd
import numpy as np
from scipy import stats
from scipy.stats import norm, skewnorm
from datetime import datetime, date, timedelta, timezone
from dateutil import parser
import pytz
from sklearn.model_selection import ParameterGrid
import matplotlib.pyplot as plt
import seaborn as sns
from concha import Product
from concha import ProfitMaximizer, QuantileRegressor, Mean, MeanWeekPart
from concha.environment import FileHandler
from concha.weather import NOAA
from concha.importers import Square
rgen = np.random.default_rng()
###########- PLANNER CLASS -###########
class Planner:
"""Top level object for creating optimization models"""
def __init__(
self,
planner_name="example",
estimate_missed_demand=True,
model="ProfitMaximizer",
model_layers=4,
model_width=20,
dropout=0.0,
l2_penalty=0.001,
epochs=200,
model_batch_size=15,
round_to_batch=True,
demand_quantile=0.9,
verbose=0,
categorical_feature_cols=None,
demand_estimation_quantiles=10,
**product_settings,
):
"""Creates a planner object to learn from past sales of products, then predict optimal production numbers.
Args:
planner_name (str): The name of the folder (.../concha_planners/[planner_name]) where all sales transactions
csv files should be placed for import, and where the settings file is written.
estimate_missed_demand (bool): If true, estimates of actual demand constructed from transactions are
used in training the prediction models. Defaults to True.
model (str): The model to construct production predictions. Options:
"ProfitMaximizer" (default): Maximizes profit by product batch_size, batch_cost, and unit_sale_price.
"QuantileRegressor": Predicts production value to center on the demand_quantile level of demand.
Setting demand_quantile=0.9 means meeting or exceeding demand 90% of the time given the
training conditions (day of week, weather).
"MeanWeekPart": Finds a mean of sales for weekdays and weekends separately, and uses them for predictions of future production.
"Mean": Finds a mean of all past measured sales by day and uses it as prediction for production.
model_layers (int): The number of dense layers in the multi-layer-perceptron models use by deep learning models. Higher
makes the model able to understand complex relationships, lower is faster. Defaults to 4.
model_width (int): The number of units in each densely connected neural network layer. Higher makes model able to
"understand" more, but slows down training. Defaults to 20.
dropout (float): Value between 0.0 and 1.0 for use in dropout layers. Giving non-zero values will slow down training,
but may improve quality of patterns learned by the models. Defaults to 0.0 (which, at 0.0 means this isn't used by default).
l2_penalty (float): l2_regularization paramater applied to each dense layer. Higher slows down training, and increases loss function
values, and may improve what model can achieve. Default is 0.001.
epochs (int): The maximum number of epochs (training steps) used in training deep learning models. The models use early stopping,
so this is just an upper limit. Defaults to 200.
model_batch_size (int): Size of the batches used in training models. Not to be confused with product batch_size, which is
the size of product batches (i.e. six muffins per tray). Defaults to 15.
round_to_batch (bool): Only applies to ProfitMaximizer model. True just rounds the optimal production regression output to
the nearest batch size (so if batch_size=5, 12.2321 would be rounded to 10 units). When False, another deep learning model
is trained to decide whether or not to round the regression output up or down to the nearest batch size. Defaults to True.
demand_quantile (float): Value between 0.0 and 1.0. Only used by QuantileRegressor model. This is how much of demand to predict
for production. Defaults to 0.9.
verbose (0, 1, or 2): Verbosity of training process. 1 and 2 print a line for each epoch. Defaults to 0.
categorical_feature_cols (List[str]): Specifies which columns of the features dataframe should be considered categorical
and one hot encoded. If set to None, all features with 12 or fewer unique values are treated as categoricals.
Defaults to None.
demand_estimation_quantiles (int): Used when estimating demand from sales transactions.
The number of quantiles in which to divide up transaction timestamps in order to project total demand
for days when it seems supply ran out early (stockout days). Using fewer quantiles is possibly more biased, but more
stable. Defaults to 10.
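Example:
A minimal usage sketch, not from the original docs: the planner name "my_cafe" is
hypothetical, and it assumes transaction csvs have already been placed in
.../concha_planners/my_cafe/history/.
>>> planner = Planner(planner_name="my_cafe", model="ProfitMaximizer")
>>> planner.train()
>>> production = planner.predict()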
"""
self.planner_name = planner_name
self.products = {}
self.estimate_missed_demand = estimate_missed_demand
self.model = model
self.model_layers = model_layers
self.model_width = model_width
self.dropout = dropout
self.l2_penalty = l2_penalty
self.epochs = epochs
self.model_batch_size = model_batch_size
self.round_to_batch = round_to_batch
self.demand_quantile = demand_quantile
self.verbose = verbose
self.categorical_feature_cols = categorical_feature_cols
self.demand_estimation_quantiles = demand_estimation_quantiles
self.product_settings = product_settings
# These attributes track the columns in the transaction csv(s).
self.time_column, self.product_column, self.quantity_column = None, None, None
self.filehandler = FileHandler()
self.planner_dir, self.settings_path = self.filehandler.check_planner_path(
self.planner_name
)
# Creates a planner_settings.json file, or updates, if it already exists.
self.update_settings()
# Set up the weather if the right info is available
if "weather" in self.settings:
if self.settings["weather"]["type"] == "noaa":
self.weather = NOAA(name=self.settings["weather"]["name"])
if "importer" in self.settings:
if self.settings["importer"]["type"] == "square":
self.importer = Square(name=self.settings["importer"]["name"])
###########- TOP LEVEL METHODS -###########
def train(self):
"""Wrapper for setup and train steps. This assumes each product already has the correct settings
specified in ...concha_planners/[planner_name]/planner_settings.json."""
if not hasattr(self, "transactions"):
self.update_settings()
self.import_transactions()
self.update_settings()
self.generate_daily_history_metadata()
self.setup_products()
self.train_models()
def predict(self):
"""Wrapper for prediction steps
Returns:
production (pd.DataFrame): The forecast metadata used for the prediction, and the predicted values
in the 'production' column.
"""
self.generate_daily_forecast_metadata()
production = self.predict_production()
return production
def update_history(self, products=None):
"""Wrapper that pulls in new transactions from the importer, then imports them to the planner.
Args:
products (list[str], or str): If a list of strings, the transactions are filtered to only
include the listed products. If a string, only that product's transactions
are imported. If None (the default), no filter is applied and all products
available are imported.
Returns:
new_transactions (DataFrame): The new batch of transactions written to
[planner_name]/history
"""
# Check if an importer is attached first
if not hasattr(self, "importer"):
print("There isn't an importer attached to this planner")
return
# Get new transactions
location = self.settings["location"]
new_transactions = self.importer.get_orders(
location=location["name"], last_timestamp=location["last_timestamp"]
)
most_recent_order = parser.parse(new_transactions.iloc[-1]["timestamp"])
most_recent_order = most_recent_order.isoformat()
# Write the history to file.
current_timestamp = datetime.now().strftime("%Y-%m-%dT%H_%M_%S")
history_path = os.path.join(
self.planner_dir, "history", current_timestamp + ".csv"
)
new_transactions.to_csv(history_path, index=False)
self.settings["location"]["last_timestamp"] = most_recent_order
tz = self.settings["location"]["timezone"]
self.filehandler.dict_to_file(self.settings, self.settings_path)
self.import_transactions(products=products, tz=tz)
self.update_settings()
return new_transactions
def import_transactions(
self,
products=None,
time_column=None,
product_column=None,
quantity_column=None,
tz=None,
):
"""Imports any csv files of transactions for use.
All csv files in the history directory of the planner are assumed to be transaction files.
All files are assumed to be in the same format (data dump from point of sale provider). They can possibly overlap
(duplicate rows are removed).
Args:
products (str or list): Optional filter for products imported. If string, only imports that
product, if list, imports only those products. If none, no filter is applied
and all products are imported.
time_column (str): Name of the column with the timestamp for sales transactions used in csv(s).
product_column (str): Name of the product identifier column.
quantity_column (str): Name of column listing number of each product sold per timestamp.
tz (str): Format like "US/Eastern" or "US/Pacific". If set, transactions will be imported at tz.
If not set, the "square_location" location timezone will be used. Otherwise,
"US/Eastern" is used.
Attributes Set:
transactions (pd.DataFrame): Dataframe of all transactions.
Returns:
transactions (pd.DataFrame): Dataframe of all transactions.
"""
# Set a tz if none provided or in settings.
if tz is None:
if "location" in self.settings:
tz = self.settings["location"]["timezone"]
else:
tz = "US/Eastern"
# get the csv files in the history directory of the planner
history_path = os.path.join(self.planner_dir, "history")
self.transaction_csv_paths = [
os.sep.join([history_path, path])
for path in os.listdir(history_path)
if path.endswith(".csv")
]
if len(self.transaction_csv_paths) == 0:
# Just exit if there are not files found to import
print("No transaction csv files to import.")
return
print("Importing from: " + history_path)
csv_data = pd.concat([pd.read_csv(path) for path in self.transaction_csv_paths])
print("Imported csv columns: " + ", ".join(list(csv_data.columns)))
use_columns = []
# Check if the column names were specified as arguments, if so use those names.
# If not, use what was specified in the planner_settings.json file.
# If both are None, try the first three columns of the csvs.
strans = self.settings["transactions"]
if time_column is None and strans["time_column"] is not None:
time_column = strans["time_column"]
use_columns.append(time_column)
if product_column is None and strans["product_column"] is not None:
product_column = strans["product_column"]
use_columns.append(product_column)
if quantity_column is None and strans["quantity_column"] is not None:
quantity_column = strans["quantity_column"]
use_columns.append(quantity_column)
if len(use_columns) < 3:
use_columns = csv_data.columns[:3]
# Write the updated transaction columns names back to settings.
self.settings["transactions"] = {
"time_column": use_columns[0],
"product_column": use_columns[1],
"quantity_column": use_columns[2],
}
self.filehandler.dict_to_file(self.settings, self.settings_path)
csv_data[["timestamp", "product", "quantity"]] = csv_data[use_columns]
csv_data["timestamp"] = csv_data["timestamp"].astype(str)
# Get the date and minute in local time for estimating demand
localtz = pytz.timezone(tz)
csv_data["local_ts"] = csv_data["timestamp"].apply(
lambda x: parser.parse(x).astimezone(localtz)
)
csv_data["date"] = csv_data["local_ts"].apply(lambda x: x.date())
csv_data["date"] = pd.to_datetime(csv_data["date"])
csv_data["minute"] = csv_data.apply(
lambda row: row["local_ts"].hour * 60 + row["local_ts"].minute, axis=1
)
csv_data["product"] = csv_data["product"].astype(str)
csv_data["quantity"] = pd.to_numeric(csv_data["quantity"])
# If only one product was passed in as a string - convert it to a list
if isinstance(products, str):
products = [products]
# Filter the transactions to only include the products specified.
if products is not None:
csv_data = csv_data[csv_data["product"].isin(products)]
# Drop duplicate rows. Transaction data dumps are probably specific time periods, so this prevents the user having
# to figure out which files overlap.
self.transactions = csv_data.drop_duplicates(ignore_index=True)
return self.transactions
def update_settings(self):
"""Syncronizes the values in the settings file with planner object values
If a ...concha_planners/[planner_name]/planner_settings.json file is not present, this creates one.
If it is present, this syncs values in the planner with the file.
Attributes Set:
settings (Dict): Dict (synced) version of what's present in the json file.
"""
# If the file exists, get the current values.
if os.path.exists(self.settings_path):
self.settings = self.filehandler.dict_from_file(self.settings_path)
else:
# If file isn't present, make a fresh set of values
self.settings = {
"transactions": {
"time_column": self.time_column,
"product_column": self.product_column,
"quantity_column": self.quantity_column,
},
"product": {},
}
# if transactions have been imported, add any new product ids to settings['product']
if hasattr(self, "transactions"):
history_products = list(self.transactions["product"].unique())
for product in history_products:
# If settings don't exist write default values for a product
if product not in self.settings["product"]:
dummy_prod = Product(product)
self.settings["product"][product] = dummy_prod.get_settings()
self.filehandler.dict_to_file(self.settings, self.settings_path)
###########- METADATA METHODS -###########
def generate_daily_history_metadata(self, load_from_file=False, write_csv=True):
"""Creates a dataframe of features for each date in the transaction history.
Columns are ['date', 'day_of_week'] if no weather API info is included, and
['date', 'day_of_week', 'tmax', 'tmin', 'prcp', 'snow'] if it is included.
Args:
load_from_file (bool): True pulls the daily history from ...[planner_name]/metadata/daily_history_metadata.csv.
False generates the metadata from the transactions and pulls the weather data from NOAA.
Attributes Set:
daily_history_metadata (pd.DataFrame): Metadata for each date present in the transactions.
Returns:
daily_history_metadata (pd.DataFrame)
"""
metadata_file_path = os.sep.join(
[self.planner_dir, "metadata", "daily_history_metadata.csv"]
)
if load_from_file:
# load from daily_history_metadata.csv
if os.path.exists(metadata_file_path):
print("Loading history metadata from: " + metadata_file_path)
self.daily_history_metadata = pd.read_csv(
metadata_file_path, parse_dates=["date"]
)
else:
print(
f"file path: daily_history_metadata.csv isn't in {os.path.join(self.planner_dir, 'metadata')}."
)
print(
"Setting load_from_file=False will get historical weather data for dates (if noaa api key provided.)"
)
# load from simulated_demand_history.csv
simulated_demand_path = os.sep.join(
[self.planner_dir, "metadata", "simulated_demand_history.csv"]
)
if os.path.exists(simulated_demand_path):
print("Loading simulated demand from: " + simulated_demand_path)
self.simulated_demand_history = pd.read_csv(
simulated_demand_path, parse_dates=["date"], dtype={"product": str}
)
# generate the daily summaries directly from the transactions and the weather history API
else:
# Get the dates listed transactions and create day of week metadata from them.
dates = pd.to_datetime(self.transactions["date"])
dates = pd.Series(dates.unique(), name="date")
dates = dates.to_frame()
dates = dates.sort_values(by="date")
dates["day_of_week"] = dates["date"].dt.strftime("%a")
# Add the weather metadata if added to the planner
if hasattr(self, "weather"):
start_date = dates.head(1)["date"].dt.strftime("%Y-%m-%d").values[0]
end_date = dates.tail(1)["date"].dt.strftime("%Y-%m-%d").values[0]
station_id = self.settings["weather"]["station"]["id"]
weather = self.weather.get_weather_history(
start_date, end_date, station_id
)
dates = dates.merge(weather, on="date")
self.daily_history_metadata = dates
if write_csv:
self.daily_history_metadata.to_csv(metadata_file_path, index=False)
# self.daily_history_metadata['date'] = self.daily_history_metadata.dt.date
return self.daily_history_metadata
def generate_daily_forecast_metadata(self):
"""Creates a dataframe of features for each date in the coming week.
Attributes Set:
daily_forecast_metadata (pd.DataFrame): Dataframe used for making predictions
of future optimal production. Takes same form as the daily_history_metadata:
['date', 'day_of_week'] and optionally ['tmax', 'tmin', 'prcp', 'snow'].
Returns:
daily_forecast_metadata (pd.DataFrame)
"""
# Get next ten days
today = date.today()
dates = pd.Series(
[today + timedelta(days=x) for x in range(10)], name="date"
).to_frame()
dates["date"] = | pd.to_datetime(dates["date"]) | pandas.to_datetime |
import numpy as np
from scipy.io import loadmat
import os
from pathlib import Path
# from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
# plotting parameters
sns.set(font_scale=1.1)
sns.set_context("talk")
sns.set_palette(['#701f57', '#ad1759', '#e13342', '#f37651'])
transparent = False
markers = ['o','^','s']
plot_types = ['box','point']
estimator = np.median
est ='median'
# Number of microphones fixed, vary number of loudspeakers
path = Path(__file__).parent / os.path.join('..','matlab','data') # path to the saved results from matlab
outpath = os.path.join(Path(__file__).parent,'figures')
if not os.path.exists(outpath):
os.makedirs(outpath)
Ks = range(6,12) # number of loudspeakers
M = 12 # number of microphones
runs = 200
thresh = 10**-3 # lower bound for error
algs = ['SDR + LM', 'Wang']
suffs = ['real_data_clean_sdr_lm', 'wang_real_data_clean']
res_data = pd.DataFrame() # will load matlab results into DataFrame
all_err = np.zeros((runs,len(Ks),len(suffs)))
for mi,K in enumerate(Ks):
N = M + K # number of points
print(mi,M,K)
for suffi,suff in enumerate(suffs):
filename_mat = os.path.join(suff,'matlab_%s_M%s_K%s.mat'%(suff,M,K))
mat = loadmat(os.path.join(path,filename_mat))
err_aft = np.real(mat['err_lm'])
all_err[:,mi,suffi] = err_aft[:,0]
print('minimum',algs[suffi],M,K, np.min(all_err[:,mi,suffi]))
res = pd.DataFrame(err_aft[:,0])
res['M'] = M
res['K'] = K
res.rename(columns={0: 'err'}, inplace=True)
res = res.assign(Algorithm=algs[suffi])
res_data = | pd.concat([res_data,res],axis=0) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def synthetic_example(mu=0, sigma = 1,N=400, c_boundary=False,y_ints=[5,6,7], SEED=4):
np.random.seed(SEED)
plt.figure(figsize=(9,9))
plt.title("Synthetic Data Example", fontsize=20)
c1 = np.ones( (2,N)) + np.random.normal(0,sigma,(2,N))
c2 = 5 + np.zeros( (2,N)) + np.random.normal(0,sigma,(2,N))
plt.scatter(c1[0], c1[1], edgecolors='b', label='Malignant Tumor')
plt.scatter(c2[0], c2[1], c='r', edgecolors='b', label='Benign Tumor')
if c_boundary:
xb = [i for i in range(-2,9)]
dc=1
for j in y_ints:
yb = [-1 * i + j for i in xb]
plt.plot(xb, yb, label='Decision Boundary ' + str(dc), linewidth=2)
dc += 1
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. , fontsize=18)
plt.grid(True)
plt.xlabel("Feature 1", fontsize=18)
plt.ylabel("Feature 2", fontsize=18)
labels1 = np.zeros(N)
labels2 = np.ones(N)
y = np.concatenate((labels1,labels2),axis=0)
x0 = np.concatenate((c1[0],c2[0]),axis=0)
x1 = np.concatenate((c1[1],c2[1]),axis=0)
X=np.array([x0,x1,y]).T
df = | pd.DataFrame(X, columns=['Feature1', 'Feature2', 'Target']) | pandas.DataFrame |
import warnings
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',
FutureWarning)
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',
FutureWarning)
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima_model import ARIMA
data = pd.read_pickle('./train1.pkl')
data.index = pd.to_datetime(data.index)
data = data.sort_index()
# data = data[:5]
test = pd.read_pickle("./test1.pkl")
test.index = | pd.to_datetime(test.index) | pandas.to_datetime |
import zlib
import base64
import json
import re
import fnmatch
import pendulum
import requests
from redis import Redis
import pandas as pd
from pymongo import MongoClient
import pymongo.errors as merr
from ..constants import YEAR
from .orm import Competition
def _val(v, s=None):
if s is None:
s = {"raw", "proc", "df"}
if v not in s:
raise ValueError(v)
def _dec(s):
return zlib.decompress(base64.b64decode(s)).decode("utf-8")
def odate(ts):
dt = pendulum.parse(ts).in_timezone("Europe/London")
dt = pendulum.timezone("Europe/Rome").convert(dt)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def mc(coll=None):
if coll:
return MongoClient().get_database("opta").get_collection(coll)
else:
return MongoClient().get_database("opta")
def bproc(j, which="teams"):
l = []
if which in {"teams", "comps"}:
a = {"id": "id", "1": "full", "2": "short", "3": "abbr"}
elif which == "team":
a = {"id": "id", "1": "first", "2": "last", "3": "known"}
else:
raise ValueError(which)
for d in j:
l.append({a[k]: d[k] for k in d})
return l
def bget(url, mongo=None, proc=None):
r = Redis()
coll = None
if url in r:
j = json.loads(r[url].decode("utf-8"))
if proc:
j = proc(j)
return j
if mongo:
coll = mc(mongo["coll"])
d = coll.find_one(mongo["key"])
if d:
return d["data"]
j = requests.get("http://127.0.0.1:9080" + url).text
if j:
j = _dec(j)
r[url] = j
jl = json.loads(j)
if proc:
jl = proc(jl)
exp = 3600 if "err" not in jl else 5
r.expire(url, exp)
if mongo and "err" not in jl:
doc = dict(data=jl, **mongo["key"])
coll.insert(doc)
return jl
else:
raise RuntimeError(j)
def _add_team(r, teams):
s = r["Standing"]
s["Team"] = teams[r["@attributes"]["TeamRef"]]
return s
def rget(feed, season=YEAR, cid="null", team="null", gid="null",
player="null"):
url = "/f/{}/{}/{}/{}/{}".format(feed, season, cid, team, gid, player)
return bget(url)
def comps(match=None, season=YEAR, raw=False):
url = "/comps/{}".format(season)
coll = mc("md_comps")
if coll.find_one({"season": season}) is None:
j = bget(url, proc=lambda x: bproc(x, "comps"))
try:
coll.insert_many(j, ordered=False)
except merr.BulkWriteError:
pass
else:
j = list(coll.find({"season": season}, {"_id": False}))
if match is not None:
if type(match) is int:
j = [c for c in j if c.get("id") == match]
else:
rxp = re.compile(fnmatch.translate(match),
flags=re.IGNORECASE)
j = [c for c in j if rxp.match(c.get("full")) or
("short" in c and rxp.match(c.get("short")))]
if raw:
return j
else:
if len(j) == 1:
j = j[0]
if "_id" in j:
del j["_id"]
return Competition(**j, season=season)
else:
return (
pd.DataFrame(j, columns=["id", "full", "short", "abbr"])
.set_index("id").sort_index()
)
def teams(cid, season=YEAR):
url = "/teams/{}/{}".format(cid, season)
mongo = {"key": {"cid": cid, "season": season}, "coll": "md_clubs"}
j = bget(url, mongo=mongo, proc=lambda x: bproc(x, "teams"))
return {int(d["id"]): {k: d[k] for k in d if k != "id"} for d in j}
def team(cid, team, season=YEAR):
"""This needs a custom Mongo importer"""
url = "/team/{}/{}/{}".format(cid, team, season)
j = bget(url, proc=lambda x: bproc(x, "team"))
coll = mc("md_players")
try:
coll.insert_many(j, ordered=False)
except merr.BulkWriteError:
pass
return {int(d["id"]): {k: d[k] for k in d if k not in {"id",
"_id"}} for d in j}
def player(pid, mconn=None):
mconn = (mconn or mc()).get_collection("md_players")
if type(pid) in {int, str}:
pid = int(pid)
d = mconn.find_one({"id": pid})
if d is None:
return None
del d["_id"]
return d
elif hasattr(pid, "__iter__"):
l = []
pid = [int(k) for k in pid]
for d in mconn.find({"id": {"$in": pid}}):
if d is not None:
del d["_id"]
l.append(d)
if len(l) == 1:
l = l[0]
return l
def stats(cid, team, season=YEAR):
url = "/stats/{}/{}/{}".format(cid, team, season)
return bget(url)
def parse_game(g, teams=None):
mi = g["MatchInfo"]
gid = int(g["@attributes"]["uID"][1:])
dt = odate(mi["dateObj"]["locale"])
d = {"gid": gid, "dt": dt, "day": int(mi["@attributes"]["MatchDay"])}
for sc in g["TeamData"]:
sc = sc["@attributes"]
s = sc["Side"].lower()
team = int(sc["TeamRef"][1:])
d[s + "_id"] = team
if teams:
d[s] = teams[team].get("short") or teams[team]["full"]
if sc["Score"] is not None:
d[s + "_score"] = int(sc["Score"])
return d
def games(cid=21, season=YEAR, ft=True, how="df"):
_val(how)
gms = bget(f"/games/{cid}/{season}")["OptaFeed"]["OptaDocument"]
if how == "raw":
return gms
gms = gms["MatchData"]
gms = [k for k in gms if
(ft and k["MatchInfo"]["@attributes"]["Period"] == "FullTime")
or (not ft)]
if how == "proc":
return gms
ts = teams(cid, season)
columns = ["gid", "day", "dt", "home", "home_score",
"away_score", "away", "home_id", "away_id"]
return pd.DataFrame([parse_game(k, ts) for k in gms],
columns=columns).set_index("gid")
def scorers(cid=21, season=YEAR, how="df"):
gs = games(cid, season=season, how="raw")
ts = {}
for g in gs["MatchData"]:
for t in g["TeamData"]:
a = t["@attributes"]
team = int(a["TeamRef"][1:])
side = a["Side"].lower()
goals = [{"pl": int(gl["@attributes"]["PlayerRef"][1:]),
"type": gl["@attributes"]["Type"].lower(),
"side": side} for gl in
t["Goal"]]
if team not in ts:
ts[team] = {}
for gl in goals:
if gl["type"] == "own":
continue
if gl["type"] not in {"penalty", "goal"}:
print(gl["type"])
if gl["pl"] not in ts[team]:
ts[team][gl["pl"]] = {"p": 0, "g": 0}
ts[team][gl["pl"]][
"p" if gl["type"] == "penalty" else "g"] += 1
if how == "raw":
return ts
l = []
for t in ts:
tn = teams(cid, season=season)[t]["full"]
for p in ts[t]:
pl = player(p)
if pl:
pl = pl.get("known") or pl.get("last")
else:
pl = p
l.append({"team": tn, "player": pl,
"g": ts[t][p]["g"] + ts[t][p]["p"],
"p": ts[t][p]["p"]})
return (
| pd.DataFrame(l, columns=["player", "team", "g", "p"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H(pd.DataFrame([1, 2, 3]))
assert H(pd.DataFrame([1, 2, 3])) != H(pd.DataFrame([1, 2, 4]))
assert H(pd.Index([1, 2, 3])) == H(pd.Index([1, 2, 3]))
assert H(pd.Index([1, 2, 3])) != H(pd.Index([1, 2, 4]))
assert H(np.array([1, 2, 3])) == H(np.array([1, 2, 3]))
assert H(np.array([1, 2, 3])) != H(np.array([1, 2, 4]))
assert H(None) == H(None)
assert H(None) != H(10.)
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
np.random.seed(seed)
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_property(hello="world", hello2="world2")
def cache_me(self): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# clear_cache method
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
G.cache_me.clear_cache(g)
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# test blacklist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# test whitelist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
def test_cached_method(self):
np.random.seed(seed)
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_method(hello="world", hello2="world2")
def cache_me(self, b=10): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# clear_cache method
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
G.cache_me.clear_cache(g)
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# test blacklist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g.cache_me)
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# test whitelist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g.cache_me)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# disabled by non-hashable args
G.cache_me.clear_cache(g)
cached_number = g.cache_me(b=np.zeros(1))
assert g.cache_me(b=np.zeros(1)) != cached_number
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0]),
strict=False
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_is_namedtuple(self):
assert checks.is_namedtuple(namedtuple('Hello', ['world'])(*range(1)))
assert not checks.is_namedtuple((0,))
def test_method_accepts_argument(self):
def test(a, *args, b=2, **kwargs):
pass
assert checks.method_accepts_argument(test, 'a')
assert not checks.method_accepts_argument(test, 'args')
assert checks.method_accepts_argument(test, '*args')
assert checks.method_accepts_argument(test, 'b')
assert not checks.method_accepts_argument(test, 'kwargs')
assert checks.method_accepts_argument(test, '**kwargs')
assert not checks.method_accepts_argument(test, 'c')
def test_assert_in(self):
checks.assert_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type( | pd.Series([1, 2, 3]) | pandas.Series |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/loading.ipynb (unless otherwise specified).
__all__ = ['DATA_PATH', 'N_TRAIN', 'N_TEST', 'get_csvs', 'CSV_NAMES_MAP', 'get_meter_data', 'get_nan_stats',
'show_nans', 'test_meter_train_and_test_set', 'get_building_data', 'test_building', 'get_weather_data',
'test_weather', 'load_all']
# Cell
import pandas as pd
import os
import numpy as np
import typing
from loguru import logger
from fastcore.all import *
from fastai.tabular.all import *
# Cell
DATA_PATH = Path("../data")
N_TRAIN = 10_000 # number of samples to load for the train set
N_TEST = 10_000 # number of samples to load for the test set
# Cell
CSV_NAMES_MAP = {'building_metadata.csv':'building',
'test.csv':'test',
'train.csv':'train',
'weather_test.csv':'weather_test',
'weather_train.csv':'weather_train',
'ashrae-energy-prediction-publicleaderboard.csv': 'public-leaderboard'}
@typed
def get_csvs(data_path:Path=DATA_PATH, csv_names_map:dict={}) -> dict:
csv_names = CSV_NAMES_MAP if len(csv_names_map) == 0 else csv_names_map
csvs = (data_path.ls()
.filter(lambda x: x.name.endswith('.csv'))
.map_dict(lambda x: csv_names.get(x.name, None)))
logger.info(f'Collected csv paths: {csvs}')
return {v: k for k,v in csvs.items() if v is not None}
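# A minimal usage sketch (the paths shown are hypothetical, only to illustrate the
# name -> path mapping produced above; not actual output):
#
#     csvs = get_csvs(Path("../data"))
#     csvs["train"]    # -> Path('../data/train.csv')
#     csvs["building"] # -> Path('../data/building_metadata.csv')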
# Cell
@typed
def get_meter_data(path:Path, nrows:int=-1) -> pd.DataFrame:
df = pd.read_csv(path, parse_dates=['timestamp'])
if nrows > 0: df = df.sample(nrows)
logger.info(f'Loading meter data: {path}')
return df_shrink(df, int2uint=True)
# Cell
@typed
def get_nan_stats(df:pd.DataFrame, col:str) -> pd.Series:
n = df[col].isna().sum()
return pd.Series({'# NaNs': n,
'col': col,
'NaNs (%)': 100 * n / len(df)})
# Cell
@typed
def show_nans(df:pd.DataFrame) -> pd.DataFrame:
nans = []
for col in df.columns:
nans.append(get_nan_stats(df, col))
return (pd.concat(nans, axis=1).T
.assign(**{
'# NaNs': lambda x: x['# NaNs'].astype(int),
'NaNs (%)': lambda x: x['NaNs (%)'].astype(float)})
.sort_values('# NaNs', ascending=False)
.set_index('col'))
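# Illustrative sketch of the expected output shape (values are hypothetical):
#
#     df = pd.DataFrame({"a": [1.0, np.nan], "b": [1.0, 2.0]})
#     show_nans(df)
#     #      # NaNs  NaNs (%)
#     # col
#     # a         1      50.0
#     # b         0       0.0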
# Cell
@typed
def test_meter_train_and_test_set(df_train:pd.DataFrame, df_test:pd.DataFrame):
assert len(df_train) == (20216100 if N_TRAIN == -1 else N_TRAIN)
assert len(df_test) == (41697600 if N_TEST == -1 else N_TEST)
assert set(df_train['meter'].unique()) == set(df_test['meter'].unique())
if N_TRAIN > 20216100 and N_TEST > 41697600:
assert set(df_train['building_id'].unique()) == set(df_test['building_id'].unique())
train_nans = show_nans(df_train)
assert np.allclose(train_nans['# NaNs'].values, 0)
test_nans = show_nans(df_test)
assert np.allclose(test_nans['# NaNs'].values, 0)
logger.info('Passed basic meter info tests')
# Cell
@typed
def get_building_data(path:Path=DATA_PATH/'building_metadata.csv') -> pd.DataFrame:
# TODO: year_built and floor_count actually are discrete values but contain nans
# test if the 'Int' dtype would work or if it breaks things downstream
logger.info(f'Loading building data: {path}')
df_building = | pd.read_csv(path) | pandas.read_csv |
"""
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Callable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from pandas._libs import reduction as libreduction
from pandas._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
nanops,
)
from pandas.core.apply import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from pandas.core.base import SpecificationError
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
warn_dropping_nuisance_columns_deprecated,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
all_indexes_same,
)
from pandas.core.series import Series
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[DataFrame | Series]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_allowlisted_properties(
klass: type[DataFrame | Series], allowlist: frozenset[str]
):
"""
Create GroupBy member defs for DataFrame/Series names in a allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Series:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.iget(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: caller is responsible for setting ser.index
return ser
def _get_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in assignment (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[assignment]
return ret
else:
cyfunc = com.get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> DataFrame:
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renamer is not supported")
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, DataFrame | Series] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
from pandas import concat
res_df = concat(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_df
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindex_output(output)
return output
def _indexed_output_to_ndframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Series:
"""
Wrap the dict result of a GroupBy aggregation into a Series.
"""
assert len(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Series,
values: list[Any],
not_indexed_same: bool = False,
) -> DataFrame | Series:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
data : Series
Input data for groupby operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if isinstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_df = self.obj._constructor_expanddim(values, index=index)
res_df = self._reindex_output(res_df)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
res_ser = res_df.stack(dropna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_series_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by caller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Callable, *args, **kwargs) -> Series:
"""
Transform with a callable func`.
"""
assert callable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindex_output(result, fill_value=0)
@doc(Series.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val.dtype):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[call-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@ | doc(Series.nlargest) | pandas.util._decorators.doc |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 08:32:48 2016
@module: choice_tools.py
@name: Helpful Tools for Choice Model Estimation
@author: <NAME>
@summary: Contains functions that help prepare one's data for choice model
estimation or helps speed the estimation process (the 'mappings').
"""
from __future__ import absolute_import
import warnings
from collections import OrderedDict
from collections import Iterable
from numbers import Number
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
def get_dataframe_from_data(data):
"""
Parameters
----------
data : string or pandas dataframe.
If string, data should be an absolute or relative path to a CSV file
containing the long format data for this choice model. Note long format
has one row per available alternative for each observation. If pandas
dataframe, the dataframe should be the long format data for the choice
model.
Returns
-------
dataframe : pandas dataframe of the long format data for the choice model.
"""
if isinstance(data, str):
if data.endswith(".csv"):
dataframe = | pd.read_csv(data) | pandas.read_csv |
from abc import abstractmethod
from collections import OrderedDict
import os
import pickle
import re
from typing import Tuple, Union
import pandas as pd
import numpy as np
import gym
from gridworld.log import logger
from gridworld import ComponentEnv
from gridworld.utils import to_scaled, to_raw, maybe_rescale_box_space
from gridworld.agents.buildings.obs_space import make_obs_space
from gridworld.agents.buildings import defaults
from gridworld.agents.buildings import five_zone_rom_dynamics as dyn
# Below are control variables' boundary.
MAX_FLOW_RATE = [2.2, 2.2, 2.2, 2.2, 3.2] # Max flow rate for each individual zone
MIN_FLOW_RATE = [.22, .22, .22, .22, .32] # Min flow rate for each individual zone
MAX_TOTAL_FLOW_RATE = 10.0 # Total flow rate for all zones should be lower than 10 kg/sec.
MAX_DISCHARGE_TEMP = 16.0 # Max temp of air leaving chiller
MIN_DISCHARGE_TEMP = 10.0 # Min temp of air leaving chiller
DEFAULT_COMFORT_BOUNDS = (22., 28.) # Temps between these values are considered "comfortable"
def load_data(start_time: str = None, end_time: str = None) -> Tuple[pd.DataFrame, dict]:
"""Returns exogenous data dataframe, and state space model (per-zone) dict."""
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
df = pd.read_csv(os.path.join(THIS_DIR, "data/exogenous_data.csv"), index_col=0)
df.index = pd.DatetimeIndex(df.index)
start_time = pd.Timestamp(start_time) if start_time else df.index[0]
end_time = pd.Timestamp(end_time) if end_time else df.index[-1]
_df = df.loc[start_time:end_time]
if _df is None or len(_df) == 0:
raise ValueError(
f"start and/or end times ({start_time}, {end_time}) " +
"resulted in empty dataframe. First and last indices are " +
f"({df.index[0]}, {df.index[-1]}), choose values in this range.")
with open(os.path.join(THIS_DIR, "data/state_space_model.p"), "rb") as f:
models = pickle.load(f)
return _df, models
def get_col(df, pattern, index=None):
"""Returns a dataframe with columns matching regex pattern."""
return df[[c for c in df.columns if re.match(pattern, c)]].values
class FiveZoneROMEnv(ComponentEnv):
time: pd.Timestamp = None
time_index: int = None
raw_action: np.ndarray = None
state: OrderedDict = None
def __init__(
self,
name: str = None,
obs_config: dict = None,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
comfort_bounds: Union[tuple, np.ndarray, pd.DataFrame] = None,
zone_temp_init: np.ndarray = None,
max_episode_steps: int = None,
rescale_spaces: bool = True,
**kwargs
):
super().__init__(name=name)
self.rescale_spaces = rescale_spaces
self.num_zones = 5
self.obs_config = obs_config if obs_config is not None else defaults.obs_config
# Set the initial zone temperature profile.
if zone_temp_init is not None:
self.zone_temp_init = zone_temp_init.copy()
else:
self.zone_temp_init = 27. * np.ones(self.num_zones, dtype=np.float64)
# Load exogenous and model data.
self.df, self.models = load_data(start_time, end_time)
# Configure max episode steps.
max_steps = self.df.shape[0] - 3 # due to filter update
if max_episode_steps is None:
self.max_episode_steps = max_steps
else:
self.max_episode_steps = min(max_episode_steps, max_steps)
# The default range on comfort bounds is (lowest of low, highest of high)
self.comfort_bounds = comfort_bounds if comfort_bounds is not None \
else DEFAULT_COMFORT_BOUNDS
# Action space: [zone_flows] + [discharge temp]
self.act_low = np.array(MIN_FLOW_RATE + [MIN_DISCHARGE_TEMP])
self.act_high = np.array(MAX_FLOW_RATE + [MAX_DISCHARGE_TEMP])
self._action_space = gym.spaces.Box(
low=self.act_low,
high=self.act_high,
dtype=np.float64
)
self.action_space = maybe_rescale_box_space(
self._action_space, rescale=self.rescale_spaces)
# State space is configured via obs_config.
self.comfort_bounds_df = self.make_comfort_bounds_df()
self._observation_space, self._obs_labels = make_obs_space(
self.num_zones, self.obs_config)
self.observation_space = maybe_rescale_box_space(
self._observation_space, rescale=self.rescale_spaces)
def make_comfort_bounds_df(self) -> pd.DataFrame:
"""Returns a dataframe containing upper and lower comfort bounds on the
zone temperatures."""
data = np.zeros((self.df.shape[0], 2))
if isinstance(self.comfort_bounds, tuple):
data[:, 0], data[:, 1] = self.comfort_bounds[0], self.comfort_bounds[1]
else:
data[:, 0] = self.comfort_bounds[:data.shape[0], 0]
data[:, 1] = self.comfort_bounds[:data.shape[0], 1]
return | pd.DataFrame(data, columns=["temp_lb", "temp_ub"], index=self.df.index) | pandas.DataFrame |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
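## A minimal sketch of the recursive key search described in the docstring above.
## The name `find_key` is illustrative and not part of the original module; it only
## handles dicts and lists/tuples, per the 'jsonable' simplification.
def find_key(key, obj):
    """Return all values stored under `key` anywhere inside a jsonable structure."""
    hits = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                hits.append(v)
            # recurse into the value as well, since matching keys can be nested deeper
            hits.extend(find_key(key, v))
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            hits.extend(find_key(key, item))
    return hits
## e.g. find_key('url', ex1) -> ['url1', 'url2', 'url3']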
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name'): u('isInstantiated'), u('value'): True}],
u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'),
u('isEnabled'): True},
{u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False},
{u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True},
{u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True},
{u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), u('isEnabled'): True}],
u('fxVersion'): u('9.0'),
u('location'): u('zh-CN'),
u('operatingSystem'): u('WINNT Windows NT 5.1'),
u('surveyAnswers'): u(''),
u('task_guid'): u('d69fbd15-2517-45b5-8a17-bb7354122a75'),
u('tpVersion'): u('1.2'),
u('updateChannel'): | u('beta') | pandas.compat.u |
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import yfinance as yf
from pandas_datareader import data as web
import datetime as dt
from empyrical import *
import quantstats as qs
from darts.models import *
from darts import TimeSeries
from darts.utils.missing_values import fill_missing_values
from darts.metrics import mape
import yahoo_fin.stock_info as si
from yahoofinancials import YahooFinancials
from pypfopt import EfficientFrontier, risk_models, expected_returns, HRPOpt, objective_functions
import logging
import warnings
from warnings import filterwarnings
from IPython.display import display
import copy
# ------------------------------------------------------------------------------------------
today = dt.date.today()
# ------------------------------------------------------------------------------------------
class Engine:
def __init__(self,start_date, portfolio, weights=None, rebalance=None, benchmark=['SPY'], end_date=today, optimizer=None, max_vol=0.15):
self.start_date = start_date
self.end_date = end_date
self.portfolio = portfolio
self.weights = weights
self.benchmark = benchmark
self.optimizer = optimizer
self.max_vol = max_vol
self.rebalance = rebalance
if self.weights==None:
self.weights = [1.0/len(self.portfolio)]*len(self.portfolio)
if self.optimizer=="EF":
self.weights = efficient_frontier(self, perf="False")
if self.optimizer=="MV":
self.weights = mean_var(self, vol_max=max_vol, perf="False")
if self.optimizer=="HRP":
self.weights = hrp(self, perf="False")
if self.rebalance!=None:
self.rebalance = make_rebalance(self.start_date, self.end_date, self.optimizer, self.portfolio, self.rebalance)
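# Illustrative construction (the tickers and dates below are hypothetical, not taken
# from the original source):
#
#     portfolio = Engine(
#         start_date="2018-01-01",
#         portfolio=["AAPL", "MSFT"],
#         weights=[0.5, 0.5],   # defaults to equal weights when omitted
#         benchmark=["SPY"],    # the default benchmark
#         optimizer="HRP",      # one of "EF", "MV", "HRP" or None
#     )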
#-------------------------------------------------------------------------------------------
def get_returns(stocks,wts, start_date, end_date=today):
if len(stocks) > 1:
assets = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)['Adj Close']
ret_data = assets.pct_change()[1:]
returns = (ret_data * wts).sum(axis = 1)
return returns
else:
df = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)['Adj Close']
df = pd.DataFrame(df)
returns = df.pct_change()
return returns
# ------------------------------------------------------------------------------------------
def get_pricing(stocks, start_date, end_date=today, pricing="Adj Close", wts=1):
if len(stocks) > 1:
assets = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)[pricing]
return assets
else:
df = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)[pricing]
return df
# ------------------------------------------------------------------------------------------
def get_data(stocks, period="max", trading_year_days=252):
p = {"period": period}
for stock in stocks:
years = {
'1mo' : math.ceil(trading_year_days/12),
'3mo' : math.ceil(trading_year_days/4),
'6mo' : math.ceil(trading_year_days/2),
'1y': trading_year_days,
'2y' : 2*trading_year_days,
'5y' : 5*trading_year_days,
'10y' : 10*trading_year_days,
'20y' : 20*trading_year_days,
'max' : len(yf.Ticker(stock).history(**p)['Close'].pct_change())
}
df = web.DataReader(stocks, data_source='yahoo', start = "1980-01-01", end= today)
df = pd.DataFrame(df)
df = df.tail(years[period])
df = pd.DataFrame(df)
df = df.drop(['Adj Close'], axis=1)
df = df[["Open", "High", "Low", "Close", "Volume"]]
return df
# ------------------------------------------------------------------------------------------
#reformat
def creturns(stocks,wts=1, period="max", benchmark= None, plot=True, pricing="Adj Close", trading_year_days=252, end_date = today):
p = {"period": period}
for stock in stocks:
years = {
'1mo' : math.ceil(trading_year_days/12),
'3mo' : math.ceil(trading_year_days/4),
'6mo' : math.ceil(trading_year_days/2),
'1y': trading_year_days,
'2y' : 2*trading_year_days,
'5y' : 5*trading_year_days,
'10y' : 10*trading_year_days,
'20y' : 20*trading_year_days,
'max' : len(yf.Ticker(stock).history(**p)['Close'].pct_change())
}
if len(stocks) > 1:
df = web.DataReader(stocks, data_source='yahoo', start = "1980-01-01", end= end_date)[pricing]
if benchmark != None:
df2 = web.DataReader(benchmark, data_source='yahoo', start = "1980-01-01", end= end_date)[pricing]
df = pd.DataFrame(df)
df = df.tail(years[period])
df2 = df2.tail(years[period])
return_df2 = df2.pct_change()[1:]
ret_data = df.pct_change()[1:]
ret_data = (ret_data + 1).cumprod()
port_ret = (ret_data * wts).sum(axis = 1)
return_df2 = (return_df2 + 1).cumprod()
ret_data['Portfolio'] = port_ret
ret_data['Benchmark'] = return_df2
ret_data = pd.DataFrame(ret_data)
else:
df = pd.DataFrame(df)
df = df.tail(years[period])
ret_data = df.pct_change()[1:]
ret_data = (ret_data + 1).cumprod()
port_ret = (ret_data * wts).sum(axis = 1)
ret_data['Portfolio'] = port_ret
ret_data = pd.DataFrame(ret_data)
if plot==True:
ret_data.plot(figsize=(20,10))
plt.xlabel('Date')
plt.ylabel('Returns')
plt.title(period + ' Portfolio Cumulative Returns')
else:
return ret_data
else:
df = web.DataReader(stocks, data_source='yahoo', start = "1980-01-01", end= today)[pricing]
if benchmark != None:
df2 = web.DataReader(benchmark, data_source='yahoo', start = "1980-01-01", end= today)[pricing]
return_df2 = df2.pct_change()[1:]
df = pd.DataFrame(df)
df = df.tail(years[period])
df2 = df2.tail(years[period])
return_df2 = df2.pct_change()[1:]
returns = df.pct_change()
returns = (returns + 1).cumprod()
return_df2 = (return_df2 + 1).cumprod()
returns["benchmark"] = return_df2
returns = pd.DataFrame(returns)
else:
df = pd.DataFrame(df)
df = df.tail(years[period])
returns = df.pct_change()
returns = (returns + 1).cumprod()
returns = pd.DataFrame(returns)
if plot==True:
returns.plot(figsize=(20,10))
plt.axvline(x=1)
plt.xlabel('Date')
plt.ylabel('Returns')
plt.title(stocks[0] +' Cumulative Returns (Period : '+ period+')')
else:
return returns
# ------------------------------------------------------------------------------------------
def information_ratio(returns, benchmark_returns, days=252):
return_difference = returns - benchmark_returns
volatility = return_difference.std() * np.sqrt(days)
information_ratio = return_difference.mean() / volatility
return information_ratio
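# Illustrative call (the return series are hypothetical): given two aligned daily
# return series, the function above divides the mean active return by the
# annualized volatility of the return difference (std * sqrt(days)):
#
#     ir = information_ratio(portfolio_returns, benchmark_returns, days=252)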
def graph_allocation(my_portfolio):
fig1, ax1 = plt.subplots()
ax1.pie(my_portfolio.weights, labels=my_portfolio.portfolio, autopct='%1.1f%%',
shadow=False)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Portfolio's allocation")
plt.show()
#------------------------------------------------------------------------------------------------------------------------------------------------------
#initialize a variable that will be set to False
def empyrial(my_portfolio, rf=0.0, sigma_value=1, confidence_value=0.95, rebalance=False):
#standard empyrial output
if rebalance == False:
#standard returns calculation
returns = get_returns(my_portfolio.portfolio, my_portfolio.weights, start_date=my_portfolio.start_date,end_date=my_portfolio.end_date)
#when we want to do the rebalancing
if rebalance == True:
print("")
print('rebalance hit')
#we want to get the dataframe with the dates and weights
rebalance_schedule = my_portfolio.rebalance
#then want to make a list of the dates and start with our first date
dates = [my_portfolio.start_date]
#then our rebalancing dates into that list
dates = dates + rebalance_schedule.columns.to_list()
#this will hold returns
returns = | pd.Series() | pandas.Series |
from itertools import product
import pandas as pd
from sklearn.datasets import load_boston
from vivid.core import AbstractFeature
from vivid.out_of_fold import EnsembleFeature
from vivid.out_of_fold.boosting import XGBoostRegressorOutOfFold, OptunaXGBRegressionOutOfFold, LGBMRegressorOutOfFold
from vivid.out_of_fold.boosting.block import create_boosting_seed_blocks
from vivid.out_of_fold.ensumble import RFRegressorFeatureOutOfFold
from vivid.out_of_fold.kneighbor import KNeighborRegressorOutOfFold
from vivid.out_of_fold.linear import RidgeOutOfFold
class BostonProcessFeature(AbstractFeature):
def call(self, df_source: pd.DataFrame, y=None, test=False):
out_df = | pd.DataFrame() | pandas.DataFrame |
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables setting track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# Test to make sure the default is to not drop rows with missing values.
# Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_table_mixed_dtypes(setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_calendar_roundtrip_issue(setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
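# Hedged illustration of the custom business-day offset above: with the
# Sun-Thu weekmask and the 2013-05-01 holiday, the five generated dates
# should be 2013-04-30, 2013-05-02, 2013-05-05, 2013-05-06 and 2013-05-07,
# i.e. Friday/Saturday and the holiday are skipped before weekday names are mapped.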
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_remove(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_same_name_scoping(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(20, 2), index=date_range("20130101", periods=20))
store.put("df", df, format="table")
expected = df[df.index > Timestamp("20130105")]
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
# changes what 'datetime' points to in the namespace where
# 'select' does the lookup
from datetime import datetime # noqa:F401
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_store_index_name(setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(table_format, setup_path):
# GH #13492
idx = Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@pytest.mark.filterwarnings("ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning")
def test_overwrite_node(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_coordinates(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame({"A": range(5), "B": range(5)})
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected, check_freq=False)
# FIXME: 2021-01-18 on some (mostly windows) builds we get freq=None
# but expect freq="18B"
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
msg = (
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_start_stop_table(setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
{"A": np.random.rand(20), "B": np.random.rand(20)},
index=date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = [f"{c:3d}" for c in df.index]
df.columns = [f"{c:3d}" for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(start, stop, setup_path):
# GH 17021
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore():
df = tm.makeDataFrame()
def writer(path):
with HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with HDFStore(path) as store:
return read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore():
df = tm.makeDataFrame()
def writer(path):
with HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with HDFStore(path) as store:
return read_hdf(store, "df")
"""
Functions for converting object-dtype arrays to other types
"""
import numpy as np
import pandas as pd
from pandas.core.common import (_possibly_cast_to_datetime, is_object_dtype,
isnull)
import pandas.lib as lib
# TODO: Remove in 0.18 or 2017, whichever is sooner
def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = _possibly_cast_to_datetime(values, 'M8[ns]',
errors='coerce')
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.tseries.timedeltas import to_timedelta
new_values = to_timedelta(values, coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
except:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
values = values.copy() if copy else values
return values
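# Minimal usage sketch for the legacy helper above (assumes an old pandas
# where pandas.lib.maybe_convert_* behave as used here); not executed:
#     raw = np.array(['1', '2', '3.5'], dtype=object)
#     _possibly_convert_objects(raw, convert_dates=False)
# The numeric branch should coerce this to a float64 array; if every value
# failed to parse (all NaN after coercion), the original object array would
# be returned unchanged.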
def _soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
coerce=False, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError('At least one of datetime, numeric or timedelta must '
'be True.')
elif conversion_count > 1 and coerce:
raise ValueError("Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True.")
if isinstance(values, (list, tuple)):
# List or scalar
values = np.array(values, dtype=np.object_)
elif not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
elif not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
# If 1 flag is coerce, ensure 2 others are False
if coerce:
# Immediate return if coerce
if datetime:
return pd.to_datetime(values, errors='coerce', box=False)
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
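# Hedged illustration of the helper above: for two Categorical groupers with
# categories ["a", "b"] and ["c", "d"], the reindexed result should cover all
# four combinations ("a", "c"), ("a", "d"), ("b", "c"), ("b", "d"), with
# combinations missing from `result` filled with `fill_value`.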
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
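# Hedged usage sketch: a parametrized test could look up the expected fill
# value for a given reduction when grouping with observed=False, e.g.
#     _results_for_groupbys_with_missing_categories["sum"]   # 0
#     _results_for_groupbys_with_missing_categories["mean"]  # NaN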
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import warnings
warnings.filterwarnings('ignore')
import pandas as pd, numpy as np
import math, json, gc, random, os, sys
import torch
import logging
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from sklearn.model_selection import train_test_split
from catalyst.dl import SupervisedRunner
from catalyst.contrib.dl.callbacks import WandbLogger
from contextlib import contextmanager
from catalyst.dl.callbacks import AccuracyCallback, F1ScoreCallback, OptimizerCallback
#from pytorch_memlab import profile, MemReporter
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[2]:
def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed) # type: ignore
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = True # type: ignore
# In[3]:
set_seed(2020)
# In[4]:
test = pd.read_json('/kaggle/input/stanford-covid-vaccine/test.json', lines=True)
samplesub = pd.read_csv('/kaggle/input/stanford-covid-vaccine/sample_submission.csv')
import copy
import io
import json
import os
import string
from collections import OrderedDict
from datetime import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
import pytz
from hypothesis import (
given,
settings,
)
from hypothesis.strategies import (
datetimes,
integers,
fixed_dictionaries,
floats,
just,
lists,
sampled_from,
text,
)
from pandas.testing import assert_frame_equal
from tempfile import NamedTemporaryFile
from oasislmf.utils.data import (
factorize_array,
factorize_ndarray,
fast_zip_arrays,
get_dataframe,
get_timestamp,
get_utctimestamp,
get_location_df,
)
from oasislmf.utils.defaults import (
get_loc_dtypes,
)
from oasislmf.utils.exceptions import OasisException
def arrays_are_identical(expected, result):
try:
np.testing.assert_array_equal(expected, result)
except AssertionError:
raise
return True
class TestFactorizeArrays(TestCase):
@settings(max_examples=10)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
num_strs=integers(min_value=10, max_value=100)
)
def test_factorize_1darray(self, num_chars, str_len, num_strs):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(num_strs)]
expected_groups = list(OrderedDict({s: s for s in strings}))
expected_enum = np.array([expected_groups.index(s) + 1 for s in strings])
result_enum, result_groups = factorize_array(strings)
self.assertTrue(arrays_are_identical(expected_groups, result_groups))
self.assertTrue(arrays_are_identical(expected_enum, result_enum))
@settings(max_examples=1)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
rows=integers(min_value=10, max_value=100),
cols=integers(min_value=10, max_value=100)
)
def test_factorize_ndarray__no_row_or_col_indices_provided__raises_oasis_exception(self, num_chars, str_len, rows, cols):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)]
ndarr = np.random.choice(strings, (rows, cols))
with self.assertRaises(OasisException):
factorize_ndarray(ndarr)
@settings(max_examples=10, deadline=None)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
rows=integers(min_value=10, max_value=100),
cols=integers(min_value=10, max_value=100),
num_row_idxs=integers(min_value=2, max_value=10)
)
def test_factorize_ndarray__by_row_idxs(self, num_chars, str_len, rows, cols, num_row_idxs):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)]
ndarr = np.random.choice(strings, (rows, cols))
row_idxs = np.random.choice(range(rows), num_row_idxs, replace=False).tolist()
zipped = list(zip(*(ndarr[i, :] for i in row_idxs)))
groups = list(OrderedDict({x: x for x in zipped}))
expected_groups = np.empty(len(groups), dtype=object)
expected_groups[:] = groups
expected_enum = np.array([groups.index(x) + 1 for x in zipped])
result_enum, result_groups = factorize_ndarray(ndarr, row_idxs=row_idxs)
self.assertTrue(arrays_are_identical(expected_groups, result_groups))
self.assertTrue(arrays_are_identical(expected_enum, result_enum))
@settings(max_examples=10, deadline=None)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
rows=integers(min_value=10, max_value=100),
cols=integers(min_value=10, max_value=100),
num_col_idxs=integers(min_value=2, max_value=10)
)
def test_factorize_ndarray__by_col_idxs(self, num_chars, str_len, rows, cols, num_col_idxs):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)]
ndarr = np.random.choice(strings, (rows, cols))
col_idxs = np.random.choice(range(cols), num_col_idxs, replace=False).tolist()
zipped = list(zip(*(ndarr[:, i] for i in col_idxs)))
groups = list(OrderedDict({x: x for x in zipped}))
expected_groups = np.empty(len(groups), dtype=object)
expected_groups[:] = groups
expected_enum = np.array([groups.index(x) + 1 for x in zipped])
result_enum, result_groups = factorize_ndarray(ndarr, col_idxs=col_idxs)
self.assertTrue(arrays_are_identical(expected_groups, result_groups))
self.assertTrue(arrays_are_identical(expected_enum, result_enum))
class TestFastZipArrays(TestCase):
@settings(max_examples=10)
@given(
array_len=integers(min_value=10, max_value=100),
num_arrays=integers(2, 100)
)
def test_fast_zip_arrays(self, array_len, num_arrays):
arrays = np.random.randint(1, 10**6, (num_arrays, array_len))
li = list(zip(*arrays))
zipped = np.empty(len(li), dtype=object)
zipped[:] = li
result = fast_zip_arrays(*arrays)
self.assertTrue(arrays_are_identical(zipped, result))
def dataframes_are_identical(df1, df2):
try:
assert_frame_equal(df1, df2)
except AssertionError:
return False
return True
class TestGetDataframe(TestCase):
def test_get_dataframe__no_src_fp_or_buf_or_data_provided__oasis_exception_is_raised(self):
with self.assertRaises(OasisException):
get_dataframe(src_fp=None, src_buf=None, src_data=None)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file__use_default_options(self, data):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
result = get_dataframe(src_fp=fp.name)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__use_default_options(self, data):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
dtypes=fixed_dictionaries({
'int_col': sampled_from(['int32', 'int64']),
'float_col': sampled_from(['float32', 'float64'])
})
)
def test_get_dataframe__from_csv_file__set_col_dtypes_option_and_use_defaults_for_all_other_options(self, data, dtypes):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
for col, dtype in dtypes.items():
df[col] = df[col].astype(dtype)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = pd.read_csv(fp.name, dtype=dtypes)
result = get_dataframe(src_fp=fp.name, col_dtypes=dtypes)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'INT_COL': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
dtypes=fixed_dictionaries({
'INT_COL': sampled_from(['int32', 'int64']),
'FloatCol': sampled_from(['float32', 'float64'])
})
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_col_dtypes_option_and_use_defaults_for_all_other_options(self, data, dtypes):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
for col, dtype in dtypes.items():
df[col] = df[col].astype(dtype)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = pd.read_csv(fp.name, dtype=dtypes)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name, col_dtypes=dtypes)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(empty_data_err_msg=text(min_size=1, max_size=10, alphabet=string.ascii_lowercase))
def test_get_dataframe__from_empty_csv_file__set_empty_data_err_msg_and_defaults_for_all_other_options__oasis_exception_is_raised_with_empty_data_err_msg(self, empty_data_err_msg):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame()
df.to_csv(path_or_buf=fp)
fp.close()
with self.assertRaises(OasisException):
try:
get_dataframe(src_fp=fp.name, empty_data_error_msg=empty_data_err_msg)
except OasisException as e:
self.assertEqual(str(e), empty_data_err_msg)
raise e
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
required=just(
np.random.choice(
['str_col', 'int_col', 'float_col', 'bool_col', 'null_col'],
np.random.choice(range(1, 6)),
replace=False
).tolist()
)
)
def test_get_dataframe__from_csv_file__set_required_cols_option_and_use_defaults_for_all_other_options(self, data, required):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
result = get_dataframe(
src_fp=fp.name,
required_cols=required
)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
required=just(
np.random.choice(
['STR_COL', 'int_col', 'FloatCol', 'boolCol', 'null_col'],
np.random.choice(range(1, 6)),
replace=False
).tolist()
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_required_cols_option_and_use_defaults_for_all_other_options(self, data, required):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
expected.columns = expected.columns.str.lower()
result = get_dataframe(
src_fp=fp.name,
required_cols=required
)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
missing_cols=just(
np.random.choice(
['str_col', 'int_col', 'float_col', 'bool_col', 'null_col'],
np.random.choice(range(1, 5)),
replace=False
).tolist()
)
)
def test_get_dataframe__from_csv_file_missing_some_required_cols__set_required_cols_option_and_use_defaults_for_all_other_options__oasis_exception_is_raised(self, data, missing_cols):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.drop(missing_cols, axis=1).to_csv(path_or_buf=fp, encoding='utf-8', index=False)
fp.close()
with self.assertRaises(OasisException):
get_dataframe(
src_fp=fp.name,
required_cols=df.columns.tolist()
)
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
missing=just(
np.random.choice(
['STR_COL', 'int_col', 'FloatCol', 'boolCol', 'null_col'],
np.random.choice(range(1, 5)),
replace=False
).tolist()
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols_and_missing_some_required_cols__set_required_cols_option_and_use_defaults_for_all_other_options__oasis_exception_is_raised(self, data, missing):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.drop(missing, axis=1).to_csv(path_or_buf=fp, encoding='utf-8', index=False)
fp.close()
with self.assertRaises(OasisException):
get_dataframe(
src_fp=fp.name,
required_cols=df.columns.tolist()
)
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
defaults=fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_uppercase),
'int_col': integers(min_value=0, max_value=10),
'float_col': floats(min_value=1.0, allow_infinity=False)
})
)
def test_get_dataframe__from_csv_file__set_col_defaults_option_and_use_defaults_for_all_other_options(self, data, defaults):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
for col, default in defaults.items():
expected.loc[:, col].fillna(defaults[col], inplace=True)
result = get_dataframe(src_fp=fp.name, col_defaults=defaults)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
defaults=fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_uppercase),
'int_col': integers(min_value=0, max_value=10),
'FloatCol': floats(min_value=1.0, allow_infinity=False)
})
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_col_defaults_option_and_use_defaults_for_all_other_options(self, data, defaults):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
expected.columns = expected.columns.str.lower()
for col, default in defaults.items():
expected.loc[:, col.lower()].fillna(defaults[col], inplace=True)
result = get_dataframe(src_fp=fp.name, col_defaults=defaults)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=10, max_size=15, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False])
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file_with_nulls_in_some_columns__set_non_na_cols_option_and_use_defaults_for_all_other_options(self, data):
fp = NamedTemporaryFile('w', delete=False)
try:
data[-1]['int_col'] = np.nan
data[-2]['str_col'] = np.nan
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
non_na_cols = ['int_col', 'str_col']
expected = df.dropna(subset=non_na_cols, axis=0)
result = get_dataframe(src_fp=fp.name, non_na_cols=non_na_cols)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False])
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols_and_nulls_in_some_columns__set_non_na_cols_option_and_use_defaults_for_all_other_options(self, data):
fp = NamedTemporaryFile("w", delete=False)
try:
data[-1]['int_col'] = np.nan
data[-2]['STR_COL'] = np.nan
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
non_na_cols = ['int_col', 'STR_COL']
expected = df.dropna(subset=non_na_cols, axis=0)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name, non_na_cols=non_na_cols)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file__set_sort_cols_option_on_single_col_and_use_defaults_for_all_other_options(self, data):
fp = NamedTemporaryFile("w", delete=False)
try:
data = [{k: (v if k != 'int_col' else np.random.choice(range(10))) for k, v in it.items()} for it in data]
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
sort_cols = ['int_col']
expected = df.sort_values(sort_cols, axis=0)
result = get_dataframe(src_fp=fp.name, sort_cols=sort_cols)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'IntCol': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_sort_cols_option_on_single_col_and_use_defaults_for_all_other_options(self, data):
fp = NamedTemporaryFile("w", delete=False)
try:
data = [{k: (v if k != 'IntCol' else np.random.choice(range(10))) for k, v in it.items()} for it in data]
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
sort_cols = ['IntCol']
expected = df.sort_values(sort_cols, axis=0)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name, sort_cols=sort_cols)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file__set_sort_cols_option_on_two_cols_and_use_defaults_for_all_other_options(self, data):
fp = NamedTemporaryFile("w", delete=False)
try:
data = [
{k: (v if k not in ('int_col', 'str_col') else (np.random.choice(range(10)) if k == 'int_col' else np.random.choice(list(string.ascii_lowercase)))) for k, v in it.items()}
for it in data
]
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
sort_cols = ['int_col', 'str_col']
expected = df.sort_values(sort_cols, axis=0)
result = get_dataframe(src_fp=fp.name, sort_cols=sort_cols)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'IntCol': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_sort_cols_option_on_two_cols_and_use_defaults_for_all_other_options(self, data):
fp = NamedTemporaryFile("w", delete=False)
try:
data = [
{k: (v if k not in ('IntCol', 'STR_COL') else (np.random.choice(range(10)) if k == 'IntCol' else np.random.choice(list(string.ascii_lowercase)))) for k, v in it.items()}
for it in data
]
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
sort_cols = ['IntCol', 'STR_COL']
expected = df.sort_values(sort_cols, axis=0)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name, sort_cols=sort_cols)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=0, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
required=just(
np.random.choice(
['str_col', 'int_col', 'float_col', 'bool_col'],
np.random.choice(range(1, 5)),
replace=False
).tolist()
),
defaults=fixed_dictionaries({
'str_col': just('s'),
'int_col': just(1),
'float_col': just(1.0),
'bool_col': just(False)
})
)
def test_get_dataframe__from_csv_file__set_required_cols_and_col_defaults_options_and_use_defaults_for_all_other_options(self, data, required, defaults):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
            for col, default in defaults.items():
                expected[col] = expected[col].fillna(default)
result = get_dataframe(src_fp=fp.name, required_cols=required, col_defaults=defaults)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'IntCol': integers(min_value=0, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
required=just(
np.random.choice(
['STR_COL', 'IntCol', 'float_col', 'boolCol'],
np.random.choice(range(1, 5)),
replace=False
).tolist()
),
defaults=fixed_dictionaries({
'STR_COL': just('s'),
'IntCol': just(1),
'float_col': just(1.0),
'boolCol': just(False)
})
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_required_cols_and_col_defaults_options_and_use_defaults_for_all_other_options(self, data, required, defaults):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
            for col, default in defaults.items():
                expected[col] = expected[col].fillna(default)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name, required_cols=required, col_defaults=defaults)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=0, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
missing=just(
np.random.choice(
['str_col', 'int_col', 'float_col', 'bool_col', 'null_col'],
np.random.choice(range(1, 5)),
replace=False
).tolist()
),
defaults=fixed_dictionaries({
'str_col': just('s'),
'int_col': just(1),
'float_col': just(1.0),
'bool_col': just(False)
})
)
def test_get_dataframe__from_csv_file_missing_some_required_cols__set_required_cols_and_col_defaults_options_and_use_defaults_for_all_other_options__oasis_exception_is_raised(self, data, missing, defaults):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
df.drop(missing, axis=1).to_csv(path_or_buf=fp, encoding='utf-8', index=False)
fp.close()
with self.assertRaises(OasisException):
get_dataframe(src_fp=fp.name, required_cols=list(df.columns), col_defaults=defaults)
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=0, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
missing=just(
np.random.choice(
['STR_COL', 'int_col', 'FloatCol', 'boolCol', 'null_col'],
np.random.choice(range(1, 5)),
replace=False
).tolist()
),
defaults=fixed_dictionaries({
'STR_COL': just('s'),
'int_col': just(1),
'FloatCol': just(1.0),
'boolCol': just(False)
})
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols_and_missing_some_required_cols__set_required_cols_and_col_defaults_options_and_use_defaults_for_all_other_options__oasis_exception_is_raised(self, data, missing, defaults):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
df.drop(missing, axis=1).to_csv(path_or_buf=fp, encoding='utf-8', index=False)
fp.close()
with self.assertRaises(OasisException):
get_dataframe(src_fp=fp.name, required_cols=list(df.columns), col_defaults=defaults)
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'Float_Col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'NullCol': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_columns___set_lowercase_cols_option_to_false_and_use_defaults_for_all_other_options(self, data):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
result = get_dataframe(src_fp=fp.name, lowercase_cols=False)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'Int_Col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'NullCol': just(np.nan)
}),
min_size=10,
max_size=10
),
dtypes=fixed_dictionaries({
'Int_Col': sampled_from(['int32', 'int64']),
'FloatCol': sampled_from(['float32', 'float64'])
})
)
def test_get_dataframe__from_csv_file_with_mixed_case_columns__set_lowercase_col_option_to_false_and_col_dtypes_option_and_use_defaults_for_all_other_options(self, data, dtypes):
fp = NamedTemporaryFile("w", delete=False)
try:
df = pd.DataFrame(data)
for col, dtype in dtypes.items():
df[col] = df[col].astype(dtype)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = | pd.read_csv(fp.name, dtype=dtypes) | pandas.read_csv |
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import typing
import sys
import sklearn.cluster as sc
try :
from numba import jit
bUseNumba = True
except ImportError :
print ( "ImportError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
except OSError:
print ( "OSError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
# THE FOLLOWING KMEANS ALGORITHM IS THE AUTHOR OWN LOCAL VERSION
if bUseNumba :
@jit(nopython=True)
def seeded_kmeans( dat, cent ):
#
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2345
        # AGAIN CONSIDER USING THE C++ VERSION SINCE IT IS A LOT FASTER
# HERE WE SPEED IT UP USING NUMBA IF THE USER HAS IT INSTALLED AS A MODULE
#
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
# START BC
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
# END BC
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels, centroids )
else :
def seeded_kmeans( dat, cent ):
#
        # SLOW SLUGGISH KMEANS WITH A DOUBLE FOR LOOP
# IN PYTHON! WOW! SUCH SPEED!
#
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
# START BC
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
# END BC
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels, centroids )
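# ILLUSTRATIVE SKETCH (added for clarity; the blob data and the _example_* name are
# stand-ins, not part of the original module). seeded_kmeans takes absolute
# coordinates (N x M) plus initial centroids (K x M) and returns per-point labels
# together with the refined centroids.
def _example_seeded_kmeans () :
    dat  = np.vstack ( [ np.random.randn(20,2) + off for off in ( [0,0] , [5,5] , [0,5] ) ] )
    cent = np.array  ( [ [0.,0.] , [5.,5.] , [0.,5.] ] )
    labels , centroids = seeded_kmeans ( dat , cent )
    return ( labels , centroids )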
from scipy.spatial.distance import squareform , pdist
absolute_coordinates_to_distance_matrix = lambda Q:squareform(pdist(Q))
distance_matrix_to_geometry_conversion_notes = """
*) TAKE NOTE THAT THE OLD ALGORITHM CALLED DISTANCE GEOMETRY EXISTS. IT CAN BE EMPLOYED ON DATA OF ANY DIMENSION. HERE YOU FIND AN SVD BASED ANALOG OF THAT OLD METHOD.
*) PDIST REALLY LIKES TO COMPUTE SQUARE ROOT OF THINGS SO WE SQUARE THE RESULT IF IT IS NOT SQUARED.
*) THE DISTANCE MATRIX CONVERSION ROUTINE BACK TO ABSOLUTE COORDINATES USES R2 DISTANCES.
"""
if bUseNumba :
@jit(nopython=True)
def distance_matrix_to_absolute_coordinates ( D , bSquared = False, n_dimensions=2 ):
# C++ https://github.com/richardtjornhammar/RichTools/commit/be0c4dfa8f61915b0701561e39ca906a9a2e0bae
if not bSquared :
D = D**2.
DIM = n_dimensions
DIJ = D*0.
M = len(D)
for i in range(M) :
for j in range(M) :
DIJ[i,j] = 0.5* (D[i,-1]+D[j,-1]-D[i,j])
D = DIJ
U,S,Vt = np.linalg.svd ( D , full_matrices = True )
S[DIM:] *= 0.
Z = np.diag(S**0.5)[:,:DIM]
xr = np.dot( Z.T,Vt )
return ( xr )
else :
def distance_matrix_to_absolute_coordinates ( D , bSquared = False, n_dimensions=2 ):
# C++ https://github.com/richardtjornhammar/RichTools/commit/be0c4dfa8f61915b0701561e39ca906a9a2e0bae
if not bSquared :
D = D**2.
DIM = n_dimensions
DIJ = D*0.
M = len(D)
for i in range(M) :
for j in range(M) :
DIJ[i,j] = 0.5* (D[i,-1]+D[j,-1]-D[i,j])
D = DIJ
U,S,Vt = np.linalg.svd ( D , full_matrices = True )
S[DIM:] *= 0.
Z = np.diag(S**0.5)[:,:DIM]
xr = np.dot( Z.T,Vt )
return ( xr )
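# ILLUSTRATIVE SKETCH (added): a round trip between absolute coordinates and a
# distance matrix. The reconstruction is only defined up to a rigid transform,
# so the check below compares the distance matrices rather than raw coordinates.
def _example_distance_geometry_roundtrip () :
    P  = np.random.randn ( 10 , 3 )                       # ten random points in 3D
    D  = absolute_coordinates_to_distance_matrix ( P )    # pairwise euclidean distances
    Q  = distance_matrix_to_absolute_coordinates ( D , n_dimensions = 3 ).T
    D2 = absolute_coordinates_to_distance_matrix ( Q )
    return ( np.allclose ( D , D2 , atol = 1.0E-6 ) )     # distances are preserved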
if bUseNumba :
@jit(nopython=True)
def connectivity ( B , val, bVerbose=False ) :
description = """ This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems."""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2277
# CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS
# A LOT FASTER
# FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# ADDED TO RICHTOOLS HERE: https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
print ( 'ERROR: FAILED' )
N = mr_sq
res , nvisi, s, NN, ndx, C = [0], [0], [0], [0], [0], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
res = res[1:]
nvisi = nvisi[1:]
ndx = ndx[1:]
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
# back pop_back
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose : # VERBOSE
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose:
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
else :
def connectivity ( B , val, bVerbose=False ) :
description="""
This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distanc>
"""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2277
# CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS
# A LOT FASTER
# FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#
nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
print ( 'ERROR' )
return ( -1 )
N = mr_sq
res , nvisi, s, NN, ndx, C = [], [], [], [], [], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
# back pop_back
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose : # VERBOSE
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose:
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
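# ILLUSTRATIVE SKETCH (added): connectivity returns the cluster sizes together with
# (cluster id, particle id) assignment pairs. With a cutoff of 1.5 the two pairs of
# nearby points below fall into two separate clusters (sizes e.g. [2, 2]). It is
# exercised via whichever branch was compiled above; the pure python fallback
# definitely behaves as sketched.
def _example_connectivity () :
    P = np.array ( [ [0.,0.] , [1.,0.] , [10.,0.] , [11.,0.] ] )
    D = absolute_coordinates_to_distance_matrix ( P )
    Nc , pairs = connectivity ( D , 1.5 )
    return ( Nc , pairs )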
if bUseNumba :
@jit(nopython=True)
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
#
# AN ALTERNATIVE METHOD
# DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY
# CLUSTERING MODULE (in src/impetuous/clustering.py )
# OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
# THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS
# WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER
#
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
else :
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
#
# AN ALTERNATIVE METHOD
# DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY
# CLUSTERING MODULE (in src/impetuous/clustering.py )
# OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# as of commit https://github.com/richardtjornhammar/RichTools/commit/76201bb07687017ae16a4e57cb1ed9fd8c394f18 2016
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
# THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS
# WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER
#
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
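# ILLUSTRATIVE SKETCH (added): connectedness returns one set of row indices per
# cluster. If the numba branch above rejects the python sets it builds, the pure
# python fallback behaves exactly as sketched here.
def _example_connectedness () :
    P = np.array ( [ [0.,0.] , [1.,0.] , [10.,0.] , [11.,0.] ] )
    D = absolute_coordinates_to_distance_matrix ( P )
    clusters = connectedness ( D , alpha = 1.5 , n_connections = 1 )
    return ( clusters )     # e.g. [ {0, 1}, {2, 3} ]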
clustering_algorithm = None
clustering_algorithm = sc.KMeans(10) # CHOOSE SOMETHING YOU LIKE NOT THIS
class Cluster(object):
def __init__( self, nbins=50, nclusters=-1 , use_ranks = False ) :
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from numpy import histogram2d
from scipy.stats import rankdata
self.use_ranks = use_ranks
self.nclusters = nclusters
self.nbins = nbins
self.histogram2d = histogram2d
self.KMeans = KMeans
self.rankdata = rankdata
self.pca_f = PCA(2)
self.centroids_ = None
self.labels_ = None
self.df_ = None
self.num_index_ = None
self.components_ = None
def approximate_density_clustering( self, df, nbins=None ) :
#
# GENES APPROX 20K OK SO APPROX 50 BINS
# ANALYTES ON ROWS, SAMPLE POINTS ON COLUMNS
if nbins is None :
nbins = self.nbins
self.df_= df
frac_df = df
        if self.use_ranks :
            frac_df = frac_df.apply( lambda x:self.rankdata( x , method='average' )/float(len(x)) )
self.pca_f.fit(frac_df.T.values)
self.components_ = self.pca_f.components_
vals,xe,ye = self.histogram2d(self.pca_f.components_[0],self.pca_f.components_[1],bins=nbins)
mvs, svsx, svsy = np.mean(vals),np.std(vals,0),np.std(vals,1)
svs = np.sqrt(svsx**2+svsy**2)
#
# IS THERE A DENSITY PEAK SEPARABLE FROM THE MEAN
# SHOULD DO GRADIENT REJECTION BASED ON TTEST PVALUES
hits = vals>mvs+0.5*svs
#
        xe_,ye_ = 0.5*(xe[:-1]+xe[1:]) , 0.5*(ye[:-1]+ye[1:])
idx = np.where(hits); xi,yj = idx[0],idx[1]
centroids = [ (xe[ri],ye[rj]) for (ri,rj) in zip(xi,yj) ]
if self.nclusters == -1 :
self.nclusters = len ( centroids )
if self.nclusters < len ( centroids ) :
import heapq
from scipy.spatial import distance as distance_
a = distance_.cdist ( centroids, centroids, 'euclidean' )
cent_idx = heapq.nlargest ( self.nclusters, range(len(a)), a.reshape(-1).__getitem__ )
centroids = [ centroids[ idx ] for idx in cent_idx ]
kmeans = self.KMeans(len(centroids),init=np.array(centroids))
kmeans.fit(self.pca_f.components_.T)
centers = np.array(kmeans.cluster_centers_).T
self.labels_ = kmeans.labels_
self.centroids_ = centers
self.analyte_dict_ = { c:[] for c in self.labels_ }
[self.analyte_dict_[self.labels_[i]].append(df.index[i]) for i in range(len(self.labels_)) ]
return ( self.analyte_dict_ )
def write_gmt(self, filename = './cluster_file.gmt' ) :
with open(filename,'w') as of :
for k,v in self.analyte_dict_.items() :
print ( 'CLU-'+str(k),'\tDESCRIPTION\t'+'\t'.join(v), file=of )
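# ILLUSTRATIVE SKETCH (added): Cluster.approximate_density_clustering expects analytes
# on the rows and samples on the columns and returns a {cluster label: [analyte ids]}
# dict; the random frame below is only a stand-in for real expression-like data.
def _example_density_clustering () :
    df = pd.DataFrame ( np.random.randn ( 500 , 8 ) ,
                        index   = [ 'analyte_'+str(i) for i in range(500) ] ,
                        columns = [ 'sample_'+str(j) for j in range(8) ] )
    clu = Cluster ( nbins = 20 )
    return ( clu.approximate_density_clustering ( df ) )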
class ManifoldClustering ( Cluster ) :
def __init__( self , nbins=50 ) :
from sklearn.cluster import KMeans
from sklearn.manifold import MDS, TSNE
from numpy import histogram2d
from scipy.stats import rankdata
self.nbins = nbins
self.histogram2d = histogram2d
self.KMeans = KMeans
self.rankdata = rankdata
self.mds = MDS ( n_components=2 )
self.tsne = TSNE ( n_components=2 )
self.man = None
self.centroids_ = None
self.labels_ = None
self.df_ = None
self.num_index_ = None
self.components_ = None
def approximate_embedding( self, df, nbins=None , use_tsne=True ) :
self.man = self.tsne
if not use_tsne :
self.man = self.mds
print ( 'WARNING::SLOW AND WASTEFUL' )
if nbins is None :
nbins = self.nbins
self.df_= df
frac_df = df.apply( lambda x:self.rankdata( x , method='average' )/float(len(x)) )
self.components_ = np.array(self.man.fit_transform(frac_df.values)).T
vals,xe,ye = self.histogram2d(self.components_[0],self.components_[1],bins=nbins)
mvs, svsx, svsy = np.mean(vals),np.std(vals,0),np.std(vals,1)
svs = np.sqrt( svsx**2 + svsy**2 )
#
# IS THERE A DENSITY PEAK SEPARABLE FROM THE MEAN
# SHOULD DO GRADIENT REJECTION BASED ON TTEST PVALUES
hits = vals>mvs+0.5*svs
#print(hits,vals)
        xe_,ye_=0.5*(xe[:-1]+xe[1:]),0.5*(ye[:-1]+ye[1:])
idx = np.where(hits); xi,yj = idx[0],idx[1]
centroids = [ (xe[ri],ye[rj]) for (ri,rj) in zip(xi,yj) ]
#
kmeans = self.KMeans(len(centroids),init=np.array(centroids))
kmeans.fit(self.components_.T)
centers = np.array(kmeans.cluster_centers_).T
self.labels_ = kmeans.labels_
self.centroids_ = centers
self.analyte_dict_ = { c:[] for c in self.labels_ }
[self.analyte_dict_[self.labels_[i]].append(df.index[i]) for i in range(len(self.labels_)) ]
return ( self.analyte_dict_ )
def run_clustering_and_write_gmt( df , ca , filename = './approx_cluster_file.gmt' ) :
labels = ca.fit_predict(df.values)
llabs = [ l for l in labels ]; ulabs=set(llabs)
with open(filename,'w') as of :
for ulab in ulabs :
            analytes = df.iloc[labels == ulab].index.values
print ( 'CLU-'+str(ulab),'\tDESCRIPTION\t'+'\t'.join(analytes), file=of )
def projection_knn_assignment ( projected_coords , df , NMaxGuess=-1 , n_dimensions=2 ) :
coords_s = projected_coords.dropna( 0 )
centroid_coordinates = []
for row in df.T :
guess = sorted ( [ (v,i) for (v,i) in zip( df.loc[row].values,df.loc[row].index ) ] ) [::-1][:NMaxGuess]
maxWeights = [ i[1] for i in guess ]
use = df.loc[row,maxWeights]
S = np.sum ( use.values )
S = 1. if S==0 else S
crd = np.dot(use.values,coords_s.loc[use.index.values].values)/S
centroid_coordinates.append(crd)
centroids_df = pd.DataFrame ( centroid_coordinates , index=df.index , columns=[ 'C'+str(i) for i in range(n_dimensions) ] )
labels , centroids = seeded_kmeans( coords_s.values,centroids_df.values )
coords_s.loc[:,'owner'] = centroids_df.iloc[labels].index.values
for i in range(len(centroids.T)) :
centroids_df.loc[:,'E'+str(i) ] = (centroids.T)[i]
return ( centroids_df , coords_s )
def make_clustering_visualisation_df ( CLUSTER , df=None , add_synonyms = False ,
output_name = 'feature_clusters_output.csv'
) :
x_pc1 = CLUSTER.components_[0]
y_pc2 = CLUSTER.components_[1]
L_C = len(CLUSTER.centroids_[0])
#
# MAKE CLUSTER COLORS
make_hex_colors = lambda c : '#%02x%02x%02x' % (c[0]%256,c[1]%256,c[2]%256)
C0 = [255,255,255] ; cluster_colors = []
#
for i in CLUSTER.labels_ :
        C0_ = list(C0) ; C0_[i%3] = int(np.floor(C0[i%3]-(i/float(L_C))*255))  # copy so the base colour is not mutated between iterations
cluster_colors.append(make_hex_colors(C0_))
if not df is None :
if add_synonyms :
            # NOTE: ens2sym / ens2sym_2 are assumed to be externally supplied symbol maps
            synonyms = [ ens2sym[df.index.values[i]][0] if df.index.values[i] in ens2sym \
                else ens2sym_2[df.index.values[i]] if df.index.values[i] in ens2sym_2 \
                else df.index.values[i] for i in range(len(df.index.values))]
else :
synonyms = df.index.values
data = []
for (x,y,t,cl,co) in zip( x_pc1,y_pc2,synonyms , [cl for cl in CLUSTER.labels_] ,
[cluster_colors[cl] for cl in CLUSTER.labels_] ) :
data.append([x,y,t,cl,co])
clustering_df = pd.DataFrame( data , columns = ['X','Y','Type','Cluster','Color'])
if not df is None :
clustering_df.index = df.index.values
clustering_df.to_csv( output_name , '\t' )
return ( clustering_df )
def backprojection_clustering ( analyte_df , bRanked=False , n_dimensions=2 ,
bDoFeatures=True , bDoSamples=True ) :
from scipy.stats import rankdata
if bRanked :
rana_df = analyte_df .apply( lambda x:(rankdata(x,'average')-0.5)/len(x) )
else :
rana_df = analyte_df
dimcrdnames = [ 'd'+str(i) for i in range(n_dimensions) ]
#
# Do backprojection clustering
cluster_coords_f = None
if bDoFeatures :
#
dM1 = absolute_coordinates_to_distance_matrix( rana_df.values )
#pd.DataFrame(dM1,index=rana_df.index,columns=rana_df.index).to_csv('../data/dM1.tsv','\t')
#
# Project it back onto first two components
max_var_projection = distance_matrix_to_absolute_coordinates ( dM1 , n_dimensions=n_dimensions )
cluster_coords_f = pd.DataFrame( max_var_projection ,
columns = rana_df.index ,
index = dimcrdnames ).T
cluster_coords_s = None
if bDoSamples :
#
# And again for all the samples
dM2 = absolute_coordinates_to_distance_matrix( rana_df.T.values )
#pd.DataFrame(dM2,index=rana_df.columns,columns=rana_df.columns).to_csv('../data/dM2.tsv','\t')
#
# This algorithm is exact but scales somewhere between n^2 and n log n
max_var_projection = distance_matrix_to_absolute_coordinates ( dM2 , n_dimensions=n_dimensions )
cluster_coords_s = pd.DataFrame( max_var_projection ,
columns = rana_df.columns ,
index = dimcrdnames ).T
#cluster_coords_s.to_csv('../data/conclust_s.tsv','\t')
return ( cluster_coords_f,cluster_coords_s )
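# ILLUSTRATIVE SKETCH (added): backprojection clustering returns low dimensional
# coordinates for both the features (rows) and the samples (columns) of the input.
def _example_backprojection () :
    adf = pd.DataFrame ( np.random.randn ( 40 , 6 ) )
    feature_coords , sample_coords = backprojection_clustering ( adf , n_dimensions = 2 )
    return ( feature_coords.shape , sample_coords.shape )   # (40, 2) and (6, 2)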
def dbscan ( data_frame = None , distance_matrix = None ,
eps = None, minPts = None , bVerbose = False ) :
if bVerbose :
print ( "THIS IMPLEMENTATION FOR DBSCAN" )
print ( "ASSESSMENT OF NOISE DIFFERS FROM" )
print ( "THE IMPLEMENTATION FOUND IN SKLEARN")
#
# FOR A DESCRIPTION OF THE CONNECTIVITY READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#from impetuous.clustering import absolute_coordinates_to_distance_matrix
#from impetuous.clustering import connectivity
import operator
if not operator.xor( data_frame is None , distance_matrix is None ) :
print ( "ONLY SUPPLY A SINGE DATA FRAME OR A DISTANCE MATRIX" )
print ( "dbscan FAILED" )
print ( "DATA MATRICES NEEDS TO BE SPECIFIED WITH \" distance_matrix = ... \" " )
exit(1)
if not data_frame is None :
if not 'pandas' in str(type(data_frame)) :
print ( "ONLY SUPPLY A SINGE DATA FRAME WITH ABSOLUTE COORDINATES" )
print ( "DATA MATRICES NEEDS TO BE SPECIFIED WITH \" data_frame = ... \" " )
print ( "dbscan FAILED" )
exit ( 1 )
if bVerbose :
print ( data_frame )
distance_matrix = absolute_coordinates_to_distance_matrix(data_frame.values)
if not ( 'float' in str(type(eps)).lower() and 'int' in str(type(minPts)).lower() ) :
print ( "TO CALL THE dbscan PLEASE SPECIFY AT LEAST A DATA FRAME OR")
print ( "ITS CORRESPONDING DISTANCE MATRIX AS WELL AS THE DISTANCE CUTOFF PARAMETER" )
print ( "AND THE MINIMAL AMOUNT OF NEIGHBOUR POINTS TO CONSIDER IT CLUSTERED")
print ( "dbscan ( data_frame = None , distance_matrix = None , eps = None, minPts = None )" )
if 'panda' in str(type(distance_matrix)).lower() :
distance_matrix = distance_matrix.values
distance_matrix_ = distance_matrix.copy()
isNoise = np.sum(distance_matrix_<eps,0)-1 < minPts
i_ = 0
for ib in isNoise :
if ib :
distance_matrix_ [ i_] = ( 1+eps )*10.0
distance_matrix_.T[i_] = ( 1+eps )*10.0
distance_matrix_[i_][i_] = 0.
i_ = i_+1
clustercontent , clustercontacts = connectivity(distance_matrix_,eps)
return ( {'cluster content': clustercontent, 'clusterid-particleid' : clustercontacts, 'is noise':isNoise} )
def reformat_dbscan_results ( results ) :
if True :
clusters = {}
for icontent in range(len(results['cluster content'])) :
content = results[ 'cluster content' ][ icontent ]
for c in results [ 'clusterid-particleid' ] :
if c[0] == icontent :
if results[ 'is noise' ][c[1]] :
icontent=-1
if icontent in clusters:
clusters[ icontent ] .append( c[1] )
else :
clusters[ icontent ] = [ c[1] ]
return ( clusters )
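# ILLUSTRATIVE SKETCH (added): dbscan here takes either absolute coordinates in a
# DataFrame or a precomputed distance matrix; reformat_dbscan_results turns the raw
# output into a {cluster id: [particle ids]} dict with noise collected under -1.
def _example_dbscan () :
    P = pd.DataFrame ( [ [0.,0.] , [0.1,0.] , [0.,0.1] , [9.,9.] ] )
    results  = dbscan ( data_frame = P , eps = 0.5 , minPts = 2 )
    clusters = reformat_dbscan_results ( results )
    return ( clusters )     # e.g. { 0: [0, 1, 2], -1: [3] }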
if bUseNumba :
@jit(nopython=True)
def exclusive_pdist ( P , Q ) :
Np , Nq = len(P), len(Q)
R2 = np.zeros(Np*Nq).reshape(Np,Nq)
for i in range(len(P)):
for j in range(len(Q)):
R2[i][j] = np.sum((P[i]-Q[j])**2)
return ( R2 )
else :
def exclusive_pdist ( P , Q ) :
Np , Nq = len(P), len(Q)
R2 = np.zeros(Np*Nq).reshape(Np,Nq)
for i in range(len(P)):
for j in range(len(Q)):
R2[i][j] = np.sum((P[i]-Q[j])**2)
return ( R2 )
def select_from_distance_matrix(boolean_list,distance_matrix):
return ( np.array( [ d[boolean_list] for d in distance_matrix[boolean_list]] ) )
def diar ( n ):
if n>1:
return ( np.sqrt(n)*diar(n-1) )
else:
return ( 1. )
def calculate_rdf ( particles_i = None , particles_o = None , nbins=100 ,
distance_matrix = None , bInGroup = None , bNotInGroup = None ,
n_dimensions = 3 , xformat="%.3f" ,
constant=4.0/3.0 , rho=1.0 , rmax=None ,
bRemoveZeros = False ) :
import operator
crit0 = particles_i is None
crit1 = particles_i is None and particles_o is None
crit2 = bInGroup is None and distance_matrix is None and bNotInGroup is None
if not crit2 :
particles_i = distance_matrix_to_absolute_coordinates ( \
select_from_distance_matrix ( bInGroup , distance_matrix ) ,
n_dimensions = n_dimensions ).T
particles_o = distance_matrix_to_absolute_coordinates ( \
select_from_distance_matrix ( bNotInGroup , distance_matrix ) ,
n_dimensions = n_dimensions ).T
if operator.xor( (not crit1) or (not crit0) , not crit2 ) :
if not crit0 and particles_o is None :
particles_o = particles_i
bRemoveZeros = True
rdf_p = pd.DataFrame ( exclusive_pdist ( particles_i , particles_o ) ).apply( np.sqrt ).values.reshape(-1)
if bRemoveZeros :
rdf_p = [ r for r in rdf_p if not r==0. ]
if rmax is None :
rmax = np.max ( rdf_p ) / diar( n_dimensions+1 )
rdf_p = np.array ( [ r for r in rdf_p if r < rmax ] )
Y_ , X = np.histogram ( rdf_p , bins=nbins )
X_ = 0.5 * ( X[1:]+X[:-1] )
norm = constant * np.pi * ( ( X_ + np.diff(X) )**(n_dimensions) - X_**(n_dimensions) ) * rho
dd = Y_ / norm
rd = X_
rdf_source = {'density_values': dd, 'density_ids':[ xformat % (d) for d in rd ] }
return ( rdf_source , rdf_p )
else :
print ( """calculate_rdf ( particles_i = None , particles_o = None , nbins=100 ,
distance_matrix = None , bInGroup = None , bNotInGroup = None ,
n_dimensions = 3 , xformat="%.3f" ,
constant=4.0/3.0 , rho=1.0 , rmax=None ,
bRemoveZeros = False )""")
exit ( 1 )
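# ILLUSTRATIVE SKETCH (added): a radial distribution function for uniformly random
# points in a unit box; rho is the number density assumed by the normalisation and
# the returned dict holds binned densities keyed by the formatted radii.
def _example_rdf () :
    pts = np.random.uniform ( 0 , 1 , size = (200,3) )
    rdf_source , distances = calculate_rdf ( particles_i = pts , nbins = 50 ,
                                             n_dimensions = 3 , rho = len(pts) )
    return ( rdf_source )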
def unpack ( seq ) : # seq:Union -> Union
if isinstance ( seq,(list,tuple,set)) :
yield from ( x for y in seq for x in unpack(y) )
elif isinstance ( seq , dict ):
yield from ( x for item in seq.items() for y in item for x in unpack(y) )
else :
yield seq
def rem ( a:list , H:list ) -> list :
h0 = []
for h in H:
hp = h - np.sum(h>np.array(h0))
h0 .append(h)
a .pop(hp)
return(a)
def nppop(A:np.array, irow:int=None, jcol:int=None ) -> list[np.array] :
# ASSUMES ROW MAJOR ORDER
    rrow = None
    rcol = None
N = len(A)
M0,M1 = np.shape(A)
if not irow is None :
rrow = A[irow,:]
A = np.delete(A,range(N*irow,N*(irow+1))).reshape(-1,N)
M0 = M0-1
if not jcol is None :
rcol = A[:,jcol]
A = np.delete(A,range(jcol,len(A.reshape(-1)),N) )
M1 = M1-1
return ( [rrow,rcol,A.reshape(M0,M1)] )
def link1_ ( D:np.array , method:str = 'min' , bDA:bool = False ) -> list :
def func( r:float , c:float , lab:str='min' ) -> float :
if lab == 'max' :
return ( r if r > c else c )
if lab == 'min' :
return ( r if r < c else c )
#
nmind = np.argmin(D) # SIMPLE TIEBREAKER
if bDA :
planar_crds = lambda linear_crd,N : tuple( (int(linear_crd/N) , linear_crd%N) )
#
# HEURISTIC TIEBREAKER
dr = D.reshape(-1)
ties = np.where(dr==dr[nmind])[0]
ties = ties[:int(len(ties)/2)]
if len(ties) > 1 :
nmind = ties[np.argmin( [ np.sum( D[planar_crds(t,len(D))[0],:]) for t in ties ]) ]
( i,j ) = ( int(nmind/len(D)) , nmind%len(D) )
k = j - int(i<j)
l = i - int(j<i)
pop1 = nppop(D,i,j)
pop2 = nppop(pop1[-1],k,l)
lpr = list(pop2[0])
d = lpr.pop(l)
lpr = np.array(lpr)
lpc = pop2[1]
nvec = np.array([*[D[0,0]],*[ func(r,c,method) for (r,c) in zip(lpr,lpc) ]])
DUM = np.eye(len(nvec))*0
DUM[ 0 , : ] = nvec
DUM[ : , 0 ] = nvec
DUM[ 1: , 1:] = pop2[-1]
return ( [ DUM , (i,j) , d ] )
def linkage_dict_tuples ( D:np.array , method:str = 'min' ) -> dict :
N = len(D)
dm = np.max(D)*1.1
idx = list()
for i in range(N): D[i,i] = dm; idx.append(i)
cidx = []
sidx = set()
res = [D]
linkages = dict()
while ( len(res[0]) > 1 ) :
res = link1_ ( res[0] , method )
oidx = tuple ( unpack( tuple( [ idx[i] for i in res[1] ]) ) )
unique_local_clusters = [ c for c in cidx if len( set(c) - set(oidx) ) >0 ]
unique_local_clusters .append( oidx )
cidx .append( oidx )
sidx = sidx|set(oidx)
idx = [*unique_local_clusters[::-1] , *[i for i in range(N) if not i in sidx ]]
linkages[ oidx ] = res[-1]
for i in range(N) :
linkages[ (i,) ] = 0
return ( linkages )
def link0_ ( D:np.array , method:str = 'min' ) -> list :
def func( r:float , c:float , lab:str='min' ) -> float :
if lab == 'max' :
return ( r if r > c else c )
if lab == 'min' :
return ( r if r < c else c )
nmind = np.argmin(D) # SIMPLE TIEBREAKER
( i,j ) = ( int(nmind/len(D)) , nmind%len(D) )
k = j - int(i<j)
l = i - int(j<i)
pop1 = nppop(D,i,j)
pop2 = nppop(pop1[-1],k,l)
lpr = list(pop2[0])
d = lpr.pop(l)
lpr = np.array(lpr)
lpc = pop2[1]
nvec = np.array([*[D[0,0]],*[ func(r,c,method) for (r,c) in zip(lpr,lpc) ]])
DUM = np.eye(len(nvec))*0
DUM[ 0 , : ] = nvec
DUM[ : , 0 ] = nvec
DUM[ 1: , 1:] = pop2[-1]
return ( [ DUM , (i,j) , d ] )
def linkages_tiers ( D:np.array , method:str = 'min' ) -> dict :
N = len(D)
dm = np.max(D)*1.1
idx = list()
for i in range(N): D[i,i] = dm ; idx.append( tuple((i,)) )
cidx = []
sidx = set()
res = [D]
linkages = dict()
while ( len(res[0]) > 1 ) :
res = link0_ ( res[0] , method )
found_cidx = tuple( [ idx[i] for i in res[1] ])
idx = [ *[found_cidx], *[ix_ for ix_ in idx if not ix_ in set(found_cidx) ] ]
linkages[ found_cidx ] = res[-1]
for i in range(N) :
linkages[ (i,) ] = 0
D[i,i] = 0
return ( linkages )
def lint2lstr ( seq:list[int] ) -> list[str] :
#
# DUPLICATED IN special.lint2lstr
if isinstance ( seq,(list,tuple,set)) :
yield from ( str(x) for y in seq for x in lint2lstr(y) )
else :
yield seq
def scipylinkages ( distm ,command='min' , bStrKeys=True ) -> dict :
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage as sclinks
from scipy.cluster.hierarchy import fcluster
Z = sclinks( squareform(distm) , {'min':'single','max':'complete'}[command] )
CL = {}
for d in Z[:,2] :
row = fcluster ( Z ,d, 'distance' )
sv_ = sorted(list(set(row)))
cl = {s:[] for s in sv_}
for i in range( len( row ) ) :
cl[row[i]].append(i)
for v_ in list( cl.values() ) :
if tuple(v_) not in CL:
CL[tuple(v_)] = d
if bStrKeys :
L = {}
for item in CL.items():
L['.'.join( lint2lstr(item[0]) )] = item[1]
CL = L
return ( CL )
def linkages ( distm:np.array , command:str='min' ,
bStrKeys:bool = True , bUseScipy:bool = False ,
bMemSec=True, bLegacy:bool=False ) -> dict :
distm = np.array(distm)
if bMemSec :
distm = distm.copy()
if bUseScipy :
linkages_ = scipylinkages ( distm ,command=command , bStrKeys = False )
else :
linkages_ = linkages_tiers ( D = distm , method = command )
if bLegacy :
return ( linkage( distm = distm , command = command ) )
if bStrKeys :
L = {}
for item in linkages_.items():
L['.'.join( lint2lstr(item[0]) )] = item[1]
linkages_ = L
return ( linkages_ )
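# ILLUSTRATIVE SKETCH (added): linkages produces a dict that maps '.'-joined member
# indices to the distance at which they merge, either via the local linkages_tiers
# routine or, with bUseScipy=True, via scipy single/complete linkage as a cross check.
def _example_linkages () :
    D = np.array ( [ [0,9,3,6,11],[9,0,7,5,10],[3,7,0,9,2],[6,5,9,0,8],[11,10,2,8,0] ] , dtype=float )
    local_links = linkages ( D , command = 'min' )
    scipy_links = linkages ( D , command = 'min' , bUseScipy = True )
    return ( local_links , scipy_links )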
def linkage ( distm:np.array , command:str = 'min' ) -> dict :
print ( 'WARNING! LEGACY CODE!' )
D = distm
N = len(D)
sidx = [ str(i)+'-'+str(j) for i in range(N) for j in range(N) if i<j ]
pidx = [ [i,j] for i in range(N) for j in range(N) if i<j ]
R = [ D[idx[0]][idx[1]] for idx in pidx ]
label_order = lambda i,k: i+'-'+k if '.' in i else k+'-'+i if '.' in k else i+'-'+k if int(i)<int(k) else k+'-'+i
if command == 'max':
func = lambda R,i,j : [(R[i],i),(R[j],j)] if R[i]>R[j] else [(R[j],j),(R[i],i)]
if command == 'min':
func = lambda R,i,j : [(R[i],i),(R[j],j)] if R[i]<R[j] else [(R[j],j),(R[i],i)]
LINKS = {}
cleared = set()
for I in range( N**2 ) : # WORST CASE SCENARIO
clear = []
if len(R) == 0 :
break
nar = np.argmin( R )
clu_idx = sidx[nar].replace('-','.')
LINKS = { **{ clu_idx : R[nar] } , **LINKS }
lp = {}
for l in range(len(sidx)) :
lidx = sidx[l]
lp [ lidx ] = l
pij = sidx[nar].split('-')
cidx = set( unpack( [ s.split('-') for s in [ s for s in sidx if len(set(s.split('-'))-set(pij)) == 1 ] ] ) )-set(pij)
ccidx = set( [ s for s in [ s for s in sidx if len(set(s.split('-'))-set(pij)) == 1 ] ] )
found = {}
i = pij[0]
j = pij[1]
for k in cidx :
h0 , h1 , h2 , q0, J = None , None , None , None, 0
if k == j or k == i :
continue
la1 = label_order(i,k)
if la1 in lp :
J+=1
h1 = lp[ label_order(i,k) ]
h0 = h1 #; q0 = i
la2 = label_order(j,k)
if la2 in lp :
J+=1
h2 = lp[ label_order(j,k) ]
h0 = h2 #; q0 = j
if J == 2 :
Dijk = func ( R , h1 , h2 )
elif J == 1 :
Dijk = [[ R[h0],h0 ]]
else :
continue
nidx = [ s for s in sidx[Dijk[0][1]].split('-') ]
nidx = list( set(sidx[Dijk[0][1]].split('-'))-set(pij) )[0]
nclu_idx = clu_idx + '-' + nidx
found[ nclu_idx ] = Dijk[0][0]
clear = [*[ lp[c] for c in ccidx ],*[nar]]
cleared = cleared | set( unpack( [ sidx[c].split('-') for c in clear ]) )
R = rem(R,clear)
sidx = rem(sidx,clear)
for label,d in found.items() :
R.append(d)
sidx.append(label)
for c in sorted( [ (v,k) for k,v in LINKS.items() ] )[-1][1].split('.'):
LINKS[c] = 0
return ( LINKS )
if __name__=='__main__' :
D = [[0,9,3,6,11],[9,0,7,5,10],[3,7,0,9,2],[6,5,9,0,8],[11,10,2,8,0] ]
print ( np.array(D) )
print ( linkage( D, command='min') )
print ( linkage( D, command='max') )
if False :
#
# TEST DEPENDS ON THE DIABETES DATA FROM BROAD INSTITUTE
filename = './Diabetes_collapsed_symbols.gct'
df_ = pd.read_csv(filename,'\t',index_col=0,header=2)
ddf = df_.loc[:,[ col for col in df_.columns if '_' in col ]]
ddf .index = [idx.split('/')[0] for idx in ddf.index]
run_clustering_and_write_gmt( ddf , clustering_algorithm )
#
CLU = Cluster( )
CLU .approximate_density_clustering(ddf)
CLU .write_gmt()
if True :
A = np.array( [ [0.00, 0.10, 0.10, 9.00, 9.00, 9.00],
[0.10, 0.00, 0.15, 9.00, 9.00, 9.00],
[0.10, 0.15, 0.00, 9.00, 9.00, 9.00],
[9.00, 9.00, 9.00, 0.00, 0.10, 0.10],
[9.10, 9.00, 9.00, 0.10, 0.00, 0.15],
[9.10, 9.00, 9.00, 0.10, 0.15, 0.00] ] )
print ( connectivity(A,0.11) ) #
print ( dbscan(distance_matrix= | pd.DataFrame(A) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
        exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
        if klass is pd.Series:
            # object + bool -> object (Series.where currently casts the bool to int here)
            exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
        else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but placed here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
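    # Illustrative sketch (not part of the original test suite): ``fillna``
    # only replaces missing values, but the fill value can still widen the
    # dtype. For example, assuming a float64 Series with one NaN:
    #
    #   s = pd.Series([1.1, np.nan, 3.3, 4.4])           # dtype: float64
    #   s.fillna(1 + 1j)
    #   # -> [1.1+0j, 1+1j, 3.3+0j, 4.4+0j], dtype: complex128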
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
            raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + int => datetime64tz
# ToDo: must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + object => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
'x',
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_timedelta64(self):
pass
def test_fillna_series_period(self):
pass
def test_fillna_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01',
'2011-01-03', '2011-01-04'])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
exp = pd.Index([pd.Timestamp('2011-01-01'),
1,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# datetime64 + object => object
exp = pd.Index([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_index_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03',
'2011-01-04'], tz=tz)
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01',
'2011-01-03', '2011-01-04'], tz=tz)
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + int => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
1,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# datetime64tz + object => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
'x',
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_index_timedelta64(self):
pass
def test_fillna_index_period(self):
pass
class TestReplaceSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but placed here for consistency
klasses = ['series']
method = 'replace'
def setUp(self):
self.rep = {}
self.rep['object'] = ['a', 'b']
self.rep['int64'] = [4, 5]
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
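    # Illustrative sketch (not part of the original test suite): the two
    # replacer forms exercised below express the same mapping. For example,
    # assuming from_key='object' and to_key='int64':
    #
    #   dict replacer:   {'a': 4, 'b': 5}
    #   Series replacer: pd.Series([4, 5], index=['a', 'b'])
    #
    # ``obj.replace(replacer)`` maps 'a' -> 4 and 'b' -> 5 in either case.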
def _assert_replace_conversion(self, from_key, to_key, how):
index = pd.Index([3, 4], name='xxx')
obj = pd.Series(self.rep[from_key], index=index, name='yyy')
self.assertEqual(obj.dtype, from_key)
if how == 'dict':
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == 'series':
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
# buggy on windows for bool/int64
if (from_key == 'bool' and
to_key == 'int64' and
tm.is_platform_windows()):
pytest.skip("windows platform buggy: {0} -> {1}".format
(from_key, to_key))
if ((from_key == 'float64' and
to_key in ('bool', 'int64')) or
(from_key == 'complex128' and
to_key in ('bool', 'int64', 'float64')) or
(from_key == 'int64' and
to_key in ('bool')) or
# TODO_GH12747 The result must be int?
(from_key == 'bool' and to_key == 'int64')):
# buggy on 32-bit
if tm.is_platform_32bit():
pytest.skip("32-bit platform buggy: {0} -> {1}".format
(from_key, to_key))
# Expected: do not downcast by replacement
exp = pd.Series(self.rep[to_key], index=index,
name='yyy', dtype=from_key)
else:
            exp = pd.Series(self.rep[to_key], index=index, name='yyy')
        tm.assert_series_equal(result, exp)
import pandas as pd
from pandas import DataFrame
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import f_regression
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR, LinearSVR
from metalfi.src.data.dataset import Dataset
from metalfi.src.data.memory import Memory
from metalfi.src.data.metadataset import MetaDataset
from metalfi.src.model.evaluation import Evaluation
from metalfi.src.model.featureselection import MetaFeatureSelection
from metalfi.src.model.metamodel import MetaModel
class Controller:
def __init__(self):
self.__train_data = None
self.__data_names = None
self.__meta_data = list()
self.fetchData()
self.storeMetaData()
self.__targets = ["linSVC_SHAP", "LOG_SHAP", "RF_SHAP", "NB_SHAP", "SVC_SHAP",
"linSVC_LIME", "LOG_LIME", "RF_LIME", "NB_LIME", "SVC_LIME",
"linSVC_PIMP", "LOG_PIMP", "RF_PIMP", "NB_PIMP", "SVC_PIMP",
"linSVC_LOFO", "LOG_LOFO", "RF_LOFO", "NB_LOFO", "SVC_LOFO"]
self.__meta_models = [(RandomForestRegressor(n_estimators=100, n_jobs=4), "RF"),
(SVR(), "SVR"),
(LinearRegression(n_jobs=4), "LIN"),
(LinearSVR(dual=True, max_iter=10000), "linSVR")]
def getTrainData(self):
return self.__train_data
def fetchData(self):
data_frame, target = Memory.loadTitanic()
data_1 = Dataset(data_frame, target)
data_frame_2, target_2 = Memory.loadCancer()
data_2 = Dataset(data_frame_2, target_2)
data_frame_3, target_3 = Memory.loadIris()
data_3 = Dataset(data_frame_3, target_3)
data_frame_4, target_4 = Memory.loadWine()
data_4 = Dataset(data_frame_4, target_4)
data_frame_5, target_5 = Memory.loadBoston()
data_5 = Dataset(data_frame_5, target_5)
open_ml = [(Dataset(data_frame, target), name) for data_frame, name, target in Memory.loadOpenML()]
self.__train_data = [(data_1, "Titanic"), (data_2, "Cancer"), (data_3, "Iris"), (data_4, "Wine"),
(data_5, "Boston")] + open_ml
self.__data_names = dict({})
i = 0
for data, name in self.__train_data:
self.__data_names[name] = i
i += 1
def storeMetaData(self):
for dataset, name in self.__train_data:
if not (Memory.getPath() / ("input/" + name + "meta.csv")).is_file():
print("meta-data calc.: " + name)
meta = MetaDataset(dataset, True)
data = meta.getMetaData()
d_times, t_times = meta.getTimes()
nr_feat, nr_inst = meta.getNrs()
Memory.storeInput(data, name)
Memory.storeDataFrame(DataFrame(data=d_times, index=["Time"], columns=[x for x in d_times]),
name + "XmetaX" + str(nr_feat) + "X" + str(nr_inst), "runtime")
Memory.storeDataFrame(DataFrame(data=t_times, index=["Time"], columns=[x for x in t_times]),
name + "XtargetX" + str(nr_feat) + "X" + str(nr_inst), "runtime")
def loadMetaData(self):
for dataset, name in self.__train_data:
sc = StandardScaler()
data = Memory.load(name + "meta.csv", "input")
fmf = [x for x in data.columns if "." not in x]
dmf = [x for x in data.columns if "." in x]
X_f = DataFrame(data=sc.fit_transform(data[fmf]), columns=fmf)
            X_d = DataFrame(data=data[dmf], columns=dmf)
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
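        # Illustrative sketch (not part of the original test): with the
        # default na_sentinel of -1, NaN maps to the sentinel and never
        # appears among the uniques, e.g.
        #   pd.factorize(np.array([1, 2, 1, np.nan], dtype='O'))
        #   # -> (array([ 0,  1,  0, -1]), array([1, 2], dtype=object))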
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category'),
Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() set an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
@td.skip_if_no_scipy
def test_scipy_compat(self):
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
# coding: utf-8
# # Generates the table of the ontological issues.
#
# ### Note this code assumes that you've already computed psx -- the predicted probabilities for all examples in the training set using four-fold cross-validation. If you have not done that, you will need to use `imagenet_train_crossval.py` to do this!
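# A hedged, illustrative sketch (not the actual `imagenet_train_crossval.py` pipeline): it only
# shows the general idea of producing out-of-fold predicted probabilities (psx) with k-fold
# cross-validation, using scikit-learn on a small synthetic dataset; the classifier, data and
# fold count below are assumptions made purely for this example.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
X_demo, y_demo = make_classification(n_samples=300, n_classes=3, n_informative=6, random_state=0)
psx_demo = cross_val_predict(LogisticRegression(max_iter=1000), X_demo, y_demo,
                             cv=4, method='predict_proba')  # shape: (n_samples, n_classes)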
# In[4]:
# These imports enhance Python2/3 compatibility.
from __future__ import print_function, absolute_import, division, unicode_literals, with_statement
# In[9]:
import cleanlab
import numpy as np
import torch
# For visualizing images of label errors
from PIL import Image
from torchvision import datasets
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix
# urllib2 for python2 and python3
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
# where imagenet dataset is located
train_dir = '/datasets/datasets/imagenet/val/'
# In[6]:
# Set-up name mapping for ImageNet train data
url = 'https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/'
url += 'raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt'
with urlopen(url) as f:
lines = [x.decode('utf-8') for x in f.readlines()]
nid2name = dict([(l.split(" ")[0], l.split(" ")[2][:-1]) for l in lines])
dataset = datasets.ImageFolder(train_dir)
nid2idx = dataset.class_to_idx
idx2nid = {v: k for k, v in nid2idx.items()}
name2nid = {v: k for k, v in nid2name.items()}
idx2name = {k: nid2name[v] for k, v in idx2nid.items()}
# ## Analyze the train set on ImageNet
# In[7]:
# CHANGE THIS TO CHANGE EXPERIMENT
# pyx_file = 'imagenet_val_out.npy' # NO FINE TUNING
pyx_file = 'imagenet__train__model_resnet50__pyx.npy' # trained from scratch with 10fold cv
# where imagenet dataset is located
train_dir = '/datasets/datasets/imagenet/train/'
# Stored results directory
pyx_dir = '/datasets/cgn/pyx/imagenet/'
# Load in data
pyx = np.load(pyx_dir + pyx_file)
imgs, labels = [list(z) for z in zip(*datasets.ImageFolder(train_dir).imgs)]
labels = np.array(labels, dtype=int)
# # A bad way to approach this problem might be to just look at the correlation of every column in the probability matrix. The problem is correlation is symmetric and this will correlate everything that has small probabilities.
# In[143]:
corr = np.corrcoef(pyx.T)
# In[220]:
corr_non_diag = corr - np.eye(len(corr)) * corr.diagonal()
corr_largest_non_diag_raveled = np.argsort(corr_non_diag.ravel())[::-1]
corr_largest_non_diag = np.unravel_index(corr_largest_non_diag_raveled, corr_non_diag.shape)
corr_largest_non_diag = list(zip(*(list(z) for z in corr_largest_non_diag)))
print([(nid2name[idx2nid[z[0]]], nid2name[idx2nid[z[1]]]) for z in corr_largest_non_diag][:5])
print([nid2name[idx2nid[z]] for z in corr.diagonal().argsort()[:10]])
# # Understand ImageNet ontological issues on the TRAIN SET
# ## Uses ARGMAX baseline (not confident learning)
# In[12]:
cj = confusion_matrix(np.argmax(pyx, axis=1), labels).T
# In[13]:
joint = cleanlab.latent_estimation.estimate_joint(labels, pyx, cj)
joint_non_diag = joint - np.eye(len(joint)) * joint.diagonal()
# In[14]:
cj_non_diag = cj - np.eye(len(cj)) * cj.diagonal()
largest_non_diag_raveled = np.argsort(cj_non_diag.ravel())[::-1]
largest_non_diag = np.unravel_index(largest_non_diag_raveled, cj_non_diag.shape)
largest_non_diag = list(zip(*(list(z) for z in largest_non_diag)))
# In[15]:
# Checks that joint correctly has rows that are p(s)
assert(all(joint.sum(axis = 1) - np.bincount(labels) / len(labels) < 1e-4))
# In[16]:
class_name = 'bighorn'
print("Index of '{}' in sorted diagonal of cj: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in cj.diagonal().argsort()].index(class_name))
print("Index of '{}' in sorted diagonal of joint: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in joint.diagonal().argsort()].index(class_name))
print("Index of '{}' in sorted most noisy classes in cj: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(cj_non_diag.sum(axis = 0))[::-1]].index(class_name))
print("Index of '{}' in sorted most noisy classes in joint: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(joint_non_diag.sum(axis = 0))[::-1]].index(class_name))
print("Index of '{}' in sorted most noisy true classes in cj: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(cj_non_diag.sum(axis = 1))[::-1]].index(class_name))
print("Index of '{}' in sorted most noisy true classes in joint: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(joint_non_diag.sum(axis = 1))[::-1]].index(class_name))
idx = cj.diagonal().argmin()
print("Least confident class by diagonal of cj:", nid2name[idx2nid[idx]], idx)
idx = joint.diagonal().argmin()
print("Least confident class by diagonal of joint:", nid2name[idx2nid[idx]], idx)
idx = cj_non_diag.sum(axis = 0).argmax()
print("Least confident class by max sum of row of non-diagonal elements of cj:", nid2name[idx2nid[idx]], idx)
idx = joint_non_diag.sum(axis = 1).argmax()
print("Least confident class by max sum of column of non-diagonal elements of cj:", nid2name[idx2nid[idx]], idx)
print('Largest noise rate:', [(nid2name[idx2nid[z]], z) for z in largest_non_diag[0]])
# In[17]:
cj
# In[18]:
edges = [(
idx2name[i].replace('American_chameleon', 'chameleon').replace('typewriter_keyboard', 'keyboard'),
idx2name[j].replace('American_chameleon', 'chameleon').replace('typewriter_keyboard', 'keyboard'),
idx2nid[i],
idx2nid[j],
int(round(cj[i,j])),
joint[i,j].round(6),
) for i,j in largest_non_diag[:30]]
# nodes = list({z for i,j in largest_non_diag[:30] for z in (idx2name[i], idx2name[j])})
# In[19]:
df = pd.DataFrame(edges)
# In[25]:
df[r"$\tilde{y}$ name"].isin(['a','b'])
# In[29]:
df[df[r"$\tilde{y}$ name"].isin(['projectile','tub', 'breastplate', 'chameleon', 'green_lizard', 'maillot', 'ram', 'missile', 'corn', 'keyboard'])][r"$C(\tilde{y},y^*)$"]
# In[21]:
df = pd.DataFrame(edges, columns = [r"$\tilde{y}$ name", r"$y^*$ name", r"$\tilde{y}$ nid", r"$y^*$ nid", r"$C(\tilde{y},y^*)$", r"$P(\tilde{y},y^*)$"])[:20]
df.insert(loc = 0, column = 'Rank', value = df.index + 1)
tex = df.to_latex(index = False)
orig = '\\$\\textbackslash tilde\\{y\\}\\$ name & \\$y\\textasciicircum *\\$ name & \\$\\textbackslash tilde\\{y\\}\\$ nid & \\$y\\textasciicircum *\\$ nid & \\$C(\\textbackslash tilde\\{y\\},y\\textasciicircum *)\\$ & \\$P(\\textbackslash tilde\\{y\\},y\\textasciicircum *)\\$'
new = '$\\tilde{y}$ name & $y^*$ name & $\\tilde{y}$ nid & $y^*$ nid & $C(\\tilde{y},y^*)$ & $P(\\tilde{y},y^*)$ '
tex = tex.replace(orig, new)
print(tex)
df.style.set_properties(subset=[r"$C(\tilde{y},y^*)$"], **{'width': '50px'})
# # Understand ImageNet ontological issues on the TRAIN SET
# In[9]:
cj = cleanlab.latent_estimation.compute_confident_joint(labels, pyx)
joint = cleanlab.latent_estimation.estimate_joint(labels, pyx, cj)
joint_non_diag = joint - np.eye(len(joint)) * joint.diagonal()
# In[10]:
cj_non_diag = cj - np.eye(len(cj)) * cj.diagonal()
largest_non_diag_raveled = np.argsort(cj_non_diag.ravel())[::-1]
largest_non_diag = np.unravel_index(largest_non_diag_raveled, cj_non_diag.shape)
largest_non_diag = list(zip(*(list(z) for z in largest_non_diag)))
# In[11]:
# Checks that joint correctly has rows that are p(s)
assert(all(joint.sum(axis = 1) - np.bincount(labels) / len(labels) < 1e-4))
# In[12]:
class_name = 'bighorn'
print("Index of '{}' in sorted diagonal of cj: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in cj.diagonal().argsort()].index(class_name))
print("Index of '{}' in sorted diagonal of joint: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in joint.diagonal().argsort()].index(class_name))
print("Index of '{}' in sorted most noisy classes in cj: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(cj_non_diag.sum(axis = 0))[::-1]].index(class_name))
print("Index of '{}' in sorted most noisy classes in joint: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(joint_non_diag.sum(axis = 0))[::-1]].index(class_name))
print("Index of '{}' in sorted most noisy true classes in cj: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(cj_non_diag.sum(axis = 1))[::-1]].index(class_name))
print("Index of '{}' in sorted most noisy true classes in joint: ".format(class_name), end = "")
print([nid2name[idx2nid[i]] for i in np.argsort(joint_non_diag.sum(axis = 1))[::-1]].index(class_name))
idx = cj.diagonal().argmin()
print("Least confident class by diagonal of cj:", nid2name[idx2nid[idx]], idx)
idx = joint.diagonal().argmin()
print("Least confident class by diagonal of joint:", nid2name[idx2nid[idx]], idx)
idx = cj_non_diag.sum(axis = 0).argmax()
print("Least confident class by max sum of row of non-diagonal elements of cj:", nid2name[idx2nid[idx]], idx)
idx = joint_non_diag.sum(axis = 1).argmax()
print("Least confident class by max sum of column of non-diagonal elements of cj:", nid2name[idx2nid[idx]], idx)
print('Largest noise rate:', [(nid2name[idx2nid[z]], z) for z in largest_non_diag[0]])
# In[13]:
cj
# In[14]:
edges = [(
idx2name[i].replace('American_chameleon', 'chameleon').replace('typewriter_keyboard', 'keyboard'),
idx2name[j].replace('American_chameleon', 'chameleon').replace('typewriter_keyboard', 'keyboard'),
idx2nid[i],
idx2nid[j],
int(round(cj[i,j])),
joint[i,j].round(6),
) for i,j in largest_non_diag[:30]]
# nodes = list({z for i,j in largest_non_diag[:30] for z in (idx2name[i], idx2name[j])})
# In[3]:
df = | pd.DataFrame(edges) | pandas.DataFrame |
"""
=======================
Visualizing the Results
=======================
Auto-Pytorch uses SMAC to fit individual machine learning algorithms
and then ensembles them together using `Ensemble Selection
<https://www.cs.cornell.edu/~caruana/ctp/ct.papers/caruana.icml04.icdm06long.pdf>`_.
The following examples shows how to visualize both the performance
of the individual models and their respective ensemble.
Additionally, as we are compatible with scikit-learn,
we show how to further interact with `Scikit-Learn Inspection
<https://scikit-learn.org/stable/inspection.html>`_ support.
"""
import os
import pickle
import tempfile as tmp
import time
import warnings
# The following environment variables are not needed for every unix distribution, but are
# set here to prevent problems with multiprocessing in scikit-learn.
os.environ['JOBLIB_TEMP_FOLDER'] = tmp.gettempdir()
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
warnings.simplefilter(action='ignore', category=UserWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.datasets
import sklearn.model_selection
from sklearn.inspection import permutation_importance
from smac.tae import StatusType
from autoPyTorch.api.tabular_classification import TabularClassificationTask
from autoPyTorch.metrics import accuracy
############################################################################
# Data Loading
# ============
# We will use the iris dataset for this Toy example
seed = 42
X, y = sklearn.datasets.fetch_openml(data_id=61, return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
X,
y,
random_state=42,
)
############################################################################
# Build and fit a classifier
# ==========================
api = TabularClassificationTask(seed=seed)
api.search(
X_train=X_train,
y_train=y_train,
X_test=X_test.copy(),
y_test=y_test.copy(),
optimize_metric=accuracy.name,
total_walltime_limit=200,
func_eval_time_limit_secs=50
)
############################################################################
# One can also save the model for future inference
# ================================================
# For more details on how to deploy a model, please check
# `Scikit-Learn persistence
# <https://scikit-learn.org/stable/modules/model_persistence.html>`_ support.
with open('estimator.pickle', 'wb') as handle:
pickle.dump(api, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Then let us read it back and use it for our analysis
with open('estimator.pickle', 'rb') as handle:
estimator = pickle.load(handle)
############################################################################
# Plotting the model performance
# ==============================
# We will plot the search incumbent through time.
# Collect the performance of individual machine learning algorithms
# found by SMAC
individual_performances = []
for run_key, run_value in estimator.run_history.data.items():
if run_value.status != StatusType.SUCCESS:
# Ignore crashed runs
continue
individual_performances.append({
'Timestamp': pd.Timestamp(
time.strftime(
'%Y-%m-%d %H:%M:%S',
time.localtime(run_value.endtime)
)
),
'single_best_optimization_accuracy': accuracy._optimum - run_value.cost,
'single_best_test_accuracy': np.nan if run_value.additional_info is None else
accuracy._optimum - run_value.additional_info['test_loss'],
})
individual_performance_frame = pd.DataFrame(individual_performances)
# Collect the performance of the ensemble through time
# This ensemble is built from the machine learning algorithms
# found by SMAC
ensemble_performance_frame = | pd.DataFrame(estimator.ensemble_performance_history) | pandas.DataFrame |
from json import load
from matplotlib.pyplot import title
from database.database import DbClient
from discord import Embed
import pandas as pd
from util.data import load_data
class Analytics:
def __init__(self, server_id: str, db):
self.server_id = server_id
self.db = db
@staticmethod
def no_data_embed(topic: str) -> Embed:
"""CREATE AN EMBED IF NO DATA WAS COLLECTED"""
embed = Embed(title="SORRY", description=f"Sorry, but there were no `{topic}` data collected on this server!")
return embed
async def analyze_message(self):
"""ANALYZE THE MESSAGE DATA"""
data = await load_data(self.db, self.server_id)
data = data["message"]
if len(data) == 0:
return self.no_data_embed("message")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
channelid_counts = pd.value_counts(df["channelid"])
role_counts = pd.value_counts(df["roles"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Message counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_delete(self):
"""ANALYZE MESSAGE DELETE"""
data = await load_data(self.db, self.server_id)
data = data["message_delete"]
if len(data) == 0:
return self.no_data_embed("message delete")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message delete ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message delete counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message delete from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message delete counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message delete counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_edit(self):
"""ANALYZE MESSAGE EDIT"""
data = await load_data(self.db, self.server_id)
data = data["message_edit"]
if len(data) == 0:
return self.no_data_embed("message edit")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message edit ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message edits counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message edits from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message edits counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message edits counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_reaction(self):
"""ANALYZE THE REACTION DATA"""
data = await load_data(self.db, self.server_id)
data = data["reaction"]
if len(data) == 0:
return self.no_data_embed("reaction")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
name_count = pd.value_counts(df["reactionname"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Reaction ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed reaction data"),
Embed(title=embed_title, description="Reaction counted by name:\n"f"```{name_count}```"),
Embed(title=embed_title, description="Reaction counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Reaction send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Reaction counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Reaction counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_botrequests(self):
"""ANALYZE THE BOT-REQUESTS DATA"""
data = await load_data(self.db, self.server_id)
data = data["bot_requests"]
if len(data) == 0:
return self.no_data_embed("bot-requests")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
name_count = pd.value_counts(df["cmdname"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
embed_title = "Bot-Requests ~ Analytics"
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = | pd.value_counts(df["hours"]) | pandas.value_counts |
# coding: utf-8
# # Val Strategy
# > A good validation strategy is key to winning a competition
# >
# > — @CPMP
#
# The val strategy should be trusted in and utilized for all feature engineering tasks and for hyper parameter tuning.
#
# Generally, the best val strategy will somehow mimic the train-test (submission) split, especially in cases where the public portion of the LB is split in an orderly (e.g. chronological) fashion. In these cases, the Public LB score can even be used as another 'fold'.
# # What we Know
# PublicLB is the first **20%** of the test set _by number of samples_
#
# - This corresponds to the first 118108 of 590540 samples
# - If we assume 2017-11 start date, 2018-07-10
# - If we assume 2018-12 start date, 2018-08-09
# We also know that the gap between the train and test set is **31 days**:
#
# - If we assume 2017-11 start date, 2018-05-02 through 2018-05-31
# - If we assume 2018-11 start date, 2018-06-01 through 2018-06-31
# If **2017-11-01** is the first day:
# - Train 2017-11-01 through 2018-05-01
# - Test 2018-06-01 through 2018-11-30
# - **Con** Train ends on 2018-05-01 instead of 2018-04-31, which is weird
# - **Con** Train/Test gap: 2018-05-02 through 2018-05-31
# - **Pro** 2018-11-30 is 30 days long so there's a match
# - **Pro** Mitigates some of the issues surrounding OS releases occurring before the transaction date
# If **2017-12-01** is the first day:
# - Train 2017-12-01 through 2018-05-31
# - Test 2018-07-01 through 2018-12-30
# - **Pro** 2018-05-31 is 31 days long
# - **Con** Some OS's used were released after the purported transaction date
# - **Con** Test set ends on 2018-12-30 instead of 2018-12-31
# - **Pro** Christmas alignment
# - **Pro** Train/Test gap: 2018-06-01 through 2018-06-31
# Start date aside, the gap between train and val should mimic the gap between train + test (submission), that is 31 full days worth of seconds.
#
# For the validation set size, we can either build it using the number of samples (118108 = 20% of the test set size), or alternatively using a fixed time window of roughly six weeks. I prefer the time value rather than the sample-size value because the density of samples within the train set varies A LOT, which would mean sometimes that window might be just 3 weeks long and other times it might be 5 weeks. Not a consistent way to test things out.
#
# In summary, my recommendation is to split by time. So _minimally_, we need 73 days (31 day gap plus 6 x 7 day validation) worth of data to be held out for validation purposes. As for the amount of training data we use, it can be whatever size we want. The bigger the size, the fewer splits we can make.
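# A hedged, illustrative sketch (not part of the original analysis): one way to carve out a single
# time-based train/validation split with a fixed 31-day gap and a 6-week validation window from a
# frame whose 'TransactionDT' column holds seconds; the column name and the toy frame below are
# assumptions made only for this example.
import numpy as np
import pandas as pd
def time_gap_split(df, dt_col='TransactionDT', gap_days=31, val_weeks=6):
    day = 24 * 60 * 60
    cutoff = df[dt_col].max() - val_weeks * 7 * day   # validation covers the last 6 weeks
    train = df[df[dt_col] < cutoff - gap_days * day]  # drop the 31-day gap before validation
    valid = df[df[dt_col] >= cutoff]                  # held-out 6-week window
    return train, valid
_toy = pd.DataFrame({'TransactionDT': np.arange(0, 200 * 24 * 60 * 60, 3600)})
_train_demo, _valid_demo = time_gap_split(_toy)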
# In[55]:
START_DATE = '2017-12-01'
# In[85]:
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
traintr = pd.read_csv('input/train_transaction.csv.zip')
trainid = pd.read_csv('input/train_identity.csv.zip')
testtr = pd.read_csv('input/test_transaction.csv.zip')
testid = | pd.read_csv('input/test_identity.csv.zip') | pandas.read_csv |
"""Interface for running a registry of models on a registry of validations."""
from typing import Optional, Tuple
from kotsu.typing import Model, Results, Validation
import functools
import logging
import os
import time
import pandas as pd
from kotsu import store
from kotsu.registration import ModelRegistry, ModelSpec, ValidationRegistry, ValidationSpec
logger = logging.getLogger(__name__)
def run(
model_registry: ModelRegistry,
validation_registry: ValidationRegistry,
results_path: str = "./validation_results.csv",
skip_if_prior_result: bool = True,
artefacts_store_directory: Optional[str] = None,
run_params: dict = {},
):
"""Run a registry of models on a registry of validations.
Args:
model_registry: A ModelRegistry containing the registry of models to be run through
validations.
validation_registry: A ValidationRegistry containing the registry of validations to run
each model through.
results_path: The file path to which the results will be written to, and results from prior
runs will be read from.
skip_if_prior_result: Flag, if True then will not run validation-model combinations
that are found in the results at given results_path. If False then all combinations
will be ran and any prior results in results_path will be completely overwritten.
artefacts_store_directory: A directory path or URI location to store extra output
artefacts of the validations and models.
run_params: A dictionary of optional run parameters.
"""
results_df = pd.DataFrame(columns=["validation_id", "model_id", "runtime_secs"])
results_df["runtime_secs"] = results_df["runtime_secs"].astype(int)
if skip_if_prior_result:
try:
results_df = pd.read_csv(results_path)
except FileNotFoundError:
pass # leave `results_df` as the empty dataframe already defined
results_df = results_df.set_index(["validation_id", "model_id"], drop=False)
results_list = []
for validation_spec in validation_registry.all():
for model_spec in model_registry.all():
if skip_if_prior_result and (validation_spec.id, model_spec.id) in results_df.index:
logger.info(
f"Skipping validation - model: {validation_spec.id} - {model_spec.id}"
", as found prior result in results."
)
continue
logger.info(f"Running validation - model: {validation_spec.id} - {model_spec.id}")
validation = validation_spec.make()
if artefacts_store_directory is not None:
artefacts_directory = os.path.join(
artefacts_store_directory, f"{validation_spec.id}/{model_spec.id}/"
)
os.makedirs(artefacts_directory, exist_ok=True)
validation = functools.partial(validation, artefacts_directory=artefacts_directory)
model = model_spec.make()
results, elapsed_secs = _run_validation_model(validation, model, run_params)
results = _add_meta_data_to_results(results, elapsed_secs, validation_spec, model_spec)
results_list.append(results)
additional_results_df = | pd.DataFrame.from_records(results_list) | pandas.DataFrame.from_records |
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
from sklearn.linear_model import LinearRegression
from hydroDL.data import usgs, gageII, gridMET, ntn, transform
import torch
import os
import json
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
startDate = | pd.datetime(1979, 1, 1) | pandas.datetime |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import pytest
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
from pyarrow.compat import unittest
import pyarrow as pa
class MessagingTest(object):
def setUp(self):
self.sink = self._get_sink()
def _get_sink(self):
return io.BytesIO()
def _get_source(self):
return pa.BufferReader(self.sink.getvalue())
def write_batches(self):
nrows = 5
df = pd.DataFrame({
'one': np.random.randn(nrows),
'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']})
batch = pa.RecordBatch.from_pandas(df)
writer = self._get_writer(self.sink, batch.schema)
num_batches = 5
frames = []
batches = []
for i in range(num_batches):
unique_df = df.copy()
unique_df['one'] = np.random.randn(nrows)
batch = pa.RecordBatch.from_pandas(unique_df)
writer.write_batch(batch)
frames.append(unique_df)
batches.append(batch)
writer.close()
return batches
class TestFile(MessagingTest, unittest.TestCase):
# Also tests writing zero-copy NumPy array with additional padding
def _get_writer(self, sink, schema):
return pa.FileWriter(sink, schema)
def test_simple_roundtrip(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.FileReader(file_contents)
assert reader.num_record_batches == len(batches)
for i, batch in enumerate(batches):
# it works. Must convert back to DataFrame
batch = reader.get_batch(i)
assert batches[i].equals(batch)
def test_read_all(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.FileReader(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
class TestStream(MessagingTest, unittest.TestCase):
def _get_writer(self, sink, schema):
return pa.StreamWriter(sink, schema)
def test_simple_roundtrip(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.StreamReader(file_contents)
assert reader.schema.equals(batches[0].schema)
total = 0
for i, next_batch in enumerate(reader):
assert next_batch.equals(batches[i])
total += 1
assert total == len(batches)
with pytest.raises(StopIteration):
reader.get_next_batch()
def test_read_all(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.StreamReader(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
class TestInMemoryFile(TestFile):
def _get_sink(self):
return pa.InMemoryOutputStream()
def _get_source(self):
return self.sink.get_result()
def test_ipc_zero_copy_numpy():
df = pd.DataFrame({'foo': [1.5]})
batch = pa.RecordBatch.from_pandas(df)
sink = pa.InMemoryOutputStream()
write_file(batch, sink)
buffer = sink.get_result()
reader = pa.BufferReader(buffer)
batches = read_file(reader)
data = batches[0].to_pandas()
rdf = pd.DataFrame(data)
| assert_frame_equal(df, rdf) | pandas.util.testing.assert_frame_equal |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from stockstats import StockDataFrame
import warnings
import traceback
warnings.filterwarnings('ignore')
import argparse
import re
import sys, os
sys.path.append(os.getcwd())
import os
import requests
from requests.exceptions import ConnectionError
import bs4
from bs4 import BeautifulSoup
from fastnumbers import isfloat
from fastnumbers import fast_float
from multiprocessing.dummy import Pool as ThreadPool
import more_itertools
from random import shuffle
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import seaborn as sns
sns.set_style('whitegrid')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib as mplt
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import matplotlib.dates as mdates
import seaborn as sns
import math
import gc
import ipaddress
from urllib.parse import urlparse
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from data_science_utils import dataframe as df_utils
from data_science_utils import models as model_utils
from data_science_utils.dataframe import column as column_utils
from data_science_utils.models.IdentityScaler import IdentityScaler as IdentityScaler
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix,classification_report
import lightgbm as lgb
np.set_printoptions(threshold=np.nan)
import pickle
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.metrics import accuracy_score
import missingno as msno
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
import datetime
from scipy import signal
import matplotlib.pyplot as plt
from datetime import timedelta
from sklearn import linear_model
from sklearn.metrics import roc_auc_score
from IPython.display import display, HTML
import warnings
warnings.filterwarnings('ignore')
from data_science_utils.misc import ffloat
from data_science_utils.misc import is_dataframe
from data_science_utils.misc import ffloat_list
from data_science_utils.misc import remove_multiple_spaces
from datetime import date, timedelta
def prev_weekday(adate):
if adate.weekday() <=4:
return adate
adate -= timedelta(days=1)
while adate.weekday() > 4: # Mon-Fri are 0-4
adate -= timedelta(days=1)
return adate
def get_ci(p,t,r):
return np.abs(np.fv(r/100,t,0,p))
def get_cumulative_amounts(p,t,r):
psum = p
for i in range(1,t):
psum = psum + get_ci(p,i,r)
return psum
def get_year_when_cumulative_profit_over_pe(pe,cpg):
if np.isnan(pe) or np.isnan(cpg):
return np.inf
for i in range(1,int(np.ceil(pe))):
if get_cumulative_amounts(1,i,cpg)>=pe:
return i
return int(np.ceil(pe))
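# Hedged usage sketch of the compound-interest helpers above (the numbers are illustrative only):
# for a stock trading at a P/E of 20 whose profits are assumed to compound at 15% per year,
# estimate after how many years the cumulative profits exceed the price paid (roughly 10 here).
example_payback_years = get_year_when_cumulative_profit_over_pe(pe=20, cpg=15)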
def get_children(html_content):
return [item for item in html_content.children if type(item)==bs4.element.Tag or len(str(item).replace("\n","").strip())>0]
def get_portfolio(mfid):
url = "https://www.moneycontrol.com/india/mutualfunds/mfinfo/portfolio_holdings/" + mfid
page_response = requests.get(url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
portfolio_table = page_content.find('table', attrs={'class': 'tblporhd'})
fund_name = page_content.find('h1').text
return portfolio_table, fund_name
def get_table(portfolio_table):
portfolio_elems = get_children(portfolio_table)
table_data = list()
for row in portfolio_elems:
row_data = list()
row_elems = get_children(row)
for elem in row_elems:
text = elem.text.strip().replace("\n", "")
if len(text) == 0:
continue
elem_descriptor = {'text': text}
elem_children = get_children(elem)
if len(elem_children) > 0:
if elem_children[0].has_attr('href'):
elem_href = elem_children[0]['href']
elem_descriptor['href'] = elem_href
row_data.append(elem_descriptor)
table_data.append(row_data)
return table_data
def get_table_simple(portfolio_table, is_table_tag=True):
portfolio_elems = portfolio_table.find_all('tr') if is_table_tag else get_children(portfolio_table)
table_data = list()
for row in portfolio_elems:
row_data = list()
row_elems = get_children(row)
for elem in row_elems:
text = elem.text.strip().replace("\n", "")
text = remove_multiple_spaces(text)
if len(text) == 0:
continue
row_data.append(text)
table_data.append(row_data)
return table_data
def get_inner_texts_as_array(elem, filter_empty=True):
children = get_children(elem)
tarr = [child.text.strip().replace("\n", "") for child in children]
if filter_empty:
tarr = list(filter(lambda x: x is not None and len(x) > 0, tarr))
return tarr
def get_shareholding_pattern(shareholding_url):
page_response = requests.get(shareholding_url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
tables = page_content.find_all('table')
if len(tables) < 3:
return {}
table_content = page_content.find_all('table')[2]
rows = table_content.find_all('tr')
all_tds = page_content.find_all('td')
idx = list(map(lambda x: x.text, all_tds)).index("Total (A)+(B)+(C)")
promoters = get_inner_texts_as_array(
list(filter(lambda x: "Total shareholding of Promoter and Promoter Group (A)" in x.text, rows))[0],
filter_empty=False)
public = get_inner_texts_as_array(list(filter(lambda x: "Total Public shareholding (B)" in x.text, rows))[0],
filter_empty=False)
all_shares = get_inner_texts_as_array(
list(filter(lambda x: "Total (A)+(B)+(C)" in x.text, page_content.find_all('tr')))[0], filter_empty=False)
promoters_pledging = ffloat(promoters[7])
promoters = ffloat(promoters[5])
public = ffloat(public[5])
total_shares_count = ffloat(all_tds[idx + 2].text)
total_pledging = ffloat(all_tds[idx + 7].text)
return {"promoters": promoters, "public": public, "promoters_pledging": promoters_pledging,
"total_shares_count": total_shares_count, "total_pledging": total_pledging}
def get_fundholding_pattern(fundholding_url):
# Funds holding it or not Y
# Total funds holding currently N
# percent held by funds
# buys last quarter
# sells last quarter
# no change last quarter
# Total change in fund holding by money
# Total change in fund holding by percent shares
page_response = requests.get(fundholding_url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
results = {}
top_tab = page_content.text
# print(top_tab)
if "Not held by Mutual Funds in the last 6 quarters" in top_tab:
results['mf_holding'] = True
else:
results['mf_holding'] = False
bought = np.nan
sold = np.nan
hold = np.nan
if not results['mf_holding']:
bl = top_tab.split("Bought by")
if len(bl) == 2:
bought = ffloat(bl[1].strip().split(" ")[0])
sl = top_tab.split("Sold by")
if len(sl) == 2:
sold = ffloat(sl[1].strip().split(" ")[0])
hl = top_tab.split("No change in")
if len(hl) == 2:
hold = ffloat(hl[1].strip().split(" ")[0])
results['mf_bought'] = bought
results['mf_sold'] = sold
results['mf_hold'] = hold
six_quarter = page_content.find('div', attrs={'id': 'div_0'}).find('table', attrs={'class': 'tblfund2'}).find_all('tr')[-1]
six_quarter = ffloat_list(get_inner_texts_as_array(six_quarter)[1:])
results['mf_share_count'] = six_quarter[0]
results['mf_share_count_last_quarter_change'] = six_quarter[0] - six_quarter[1]
results['mf_six_quarter_share_count'] = six_quarter
return results
def get_ratios(url):
page_response = requests.get(url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
table_content = page_content.find_all('table', attrs={'class': 'table4'})[-1]
if "Data Not Available" in table_content.text:
return {}
dates_html = get_children(get_children(get_children(table_content)[0])[1])[1]
dates = get_inner_texts_as_array(dates_html)
ratios_htmls = get_children(get_children(get_children(get_children(table_content)[0])[1])[2])[1:]
rows = list(map(get_inner_texts_as_array, ratios_htmls))
ratios = {}
ratios['dates'] = dates
for row in rows:
if len(row) > 1:
ratios[row[0]] = ffloat_list(row[1:])
needed_keys = [('dates', 'ratios_dates'),
('Diluted EPS (Rs.)', 'ratios_diluted_eps'),
('Revenue from Operations/Share (Rs.)', 'ratios_revenue_per_share'),
('PBT/Share (Rs.)', 'ratios_pbt_per_share'),
('PBT Margin (%)', 'ratios_pbt_margin_per_share'),
('Total Debt/Equity (X)', 'ratios_de'),
('Asset Turnover Ratio (%)', 'ratios_asset_turnover_ratio'),
('Current Ratio (X)', 'ratios_cr'),
('EV/EBITDA (X)', 'ratios_ev_by_ebitda'),
('Price/BV (X)', 'ratios_pb'),
('MarketCap/Net Operating Revenue (X)','mcap/revenue'),
('Price/Net Operating Revenue','price/revenue')]
ratios = {your_key[1]: ratios[your_key[0]] if your_key[0] in ratios else [] for your_key in needed_keys}
return ratios
def get_min_and_three_year_from_screener(table):
min_value = np.inf
three_year_value = np.inf
for row in table:
if len(row)==2:
if row[0]=='3 Years:':
three_year_value = ffloat(row[1].replace('%',''))
cur_value = ffloat(row[1].replace('%',''))
min_value = min(min_value,cur_value)
return min_value,three_year_value
def get_quarterly_results(quarterly_results_table):
qrt = get_table_simple(quarterly_results_table)
qres = {}
qres['dates'] = qrt[0]
qres['sales'] = ffloat_list(qrt[1][1:])
qres['operating_profit'] = ffloat_list(qrt[3][1:])
qres['opm_percent'] = ffloat_list(qrt[4][1:])
qres['interest'] = ffloat_list(qrt[7][1:])
qres['pbt'] = ffloat_list(qrt[8][1:])
return qres
def get_annual_results(annual_results):
if annual_results is None:
return {}
qrt = get_table_simple(annual_results)
qres = {}
qres['dates'] = qrt[0]
qres['sales'] = ffloat_list(qrt[1][1:])
qres['operating_profit'] = ffloat_list(qrt[3][1:])
qres['opm_percent'] = ffloat_list(qrt[4][1:])
qres['interest'] = ffloat_list(qrt[6][1:])
qres['pbt'] = ffloat_list(qrt[8][1:])
qres['eps'] = ffloat_list(qrt[11][1:])
return qres
def get_balance_sheet(balance_sheet):
if balance_sheet is None:
return {}
qrt = get_table_simple(balance_sheet)
qres = {}
qres['dates'] = qrt[0]
qres['borrowings'] = ffloat_list(qrt[3][1:])
qres['fixed_assets'] = ffloat_list(qrt[6][1:])
qres['total_assets'] = ffloat_list(qrt[10][1:])
return qres
def get_cash_flows(cash_flows):
if cash_flows is None:
return {}
qrt = get_table_simple(cash_flows)
qres = {}
qres['dates'] = qrt[0]
qres['net_cash_flow'] = ffloat_list(qrt[4][1:])
return qres
def get_past_prices(sc_id):
bse_url = "https://www.moneycontrol.com/tech_charts/bse/his/%s.csv" % sc_id
nse_url = "https://www.moneycontrol.com/tech_charts/nse/his/%s.csv" % sc_id
past_prices_nse = pd.read_csv(nse_url, header=None, names=['open', 'high', 'low', 'close', 'volume', 1, 2, 3, 4])[
['open', 'high', 'low', 'close', 'volume']]
past_prices_nse.index = pd.to_datetime(past_prices_nse.index)
past_prices_bse = pd.read_csv(bse_url, header=None, names=['open', 'high', 'low', 'close', 'volume', 1, 2, 3, 4])[
['open', 'high', 'low', 'close', 'volume']]
past_prices_bse.index = pd.to_datetime(past_prices_bse.index)
ly = None
two_year_ago = None
three_year_ago = None
five_year_ago = None
past_prices = past_prices_bse
for i in range(12):
try:
if ly is None:
ly_t = pd.to_datetime(past_prices.iloc[-1:].index.values[0] - | pd.to_timedelta(364 + i, unit='d') | pandas.to_timedelta |
import sys
sys.path.append("../ern/")
sys.path.append("../dies/")
import copy
import torch
import numpy as np
import pandas as pd
from dies.utils import listify
from sklearn.metrics import mean_squared_error as mse
from torch.utils.data.dataloader import DataLoader
from fastai.basic_data import DataBunch
from fastai.basic_data import DatasetType
import glob
def to_short_name(file):
return (
file.split("/")[-1]
.replace(".h5", "")
.replace(".csv", "")
.replace(".pkl", "")
.replace(".pth", "")
.replace("_config", "")
)
def create_databunch(
train_ds, val_ds, test_ds, batch_size, device,
):
train_ds.to_device(device)
tr = DataLoader(
train_ds,
batch_size,
drop_last=True,
shuffle=True,
# num_workers=6,
pin_memory=False,
)
val_ds.to_device(device)
val = DataLoader(val_ds, batch_size, pin_memory=False)
if test_ds is not None:
test_ds.to_device(device)
test = DataLoader(test_ds, batch_size, pin_memory=False)
else:
test = None
data_bunch = DataBunch(tr, val, test_dl=test)
return data_bunch
def get_config(file, include_rmse=False):
df = pd.read_csv(file, sep=",")
min_rmse_idx = df.root_mean_squared_error.idxmin()
relevant_cols = [c for c in df.columns if "config" in c]
rename_cols = {c: c.replace("config/", "") for c in relevant_cols}
if include_rmse:
relevant_cols += ["root_mean_squared_error"]
df = df[relevant_cols].loc[min_rmse_idx]
df = df.rename(rename_cols)
return df
def match_file_names(file_name, file_names):
res = None
file_name = to_short_name(file_name)
for f in file_names:
if file_name == to_short_name(f):
res = f
break
return res
def get_preds(learn, data_type=DatasetType.Test):
y_hats, y = learn.get_preds(data_type)
y_hats = np.clip(y_hats, 0, 1.05)
return y, y_hats
def get_rmse(learn, data_type=DatasetType.Test):
y, y_hats = get_preds(learn, data_type=data_type)
y_hats = np.clip(y_hats, 0, 1.05)
e = mse(y, y_hats) ** 0.5
return e
def get_ds_from_type(data_bunch, data_type):
if data_type == DatasetType.Train:
return data_bunch.train_ds
elif data_type == DatasetType.Valid:
return data_bunch.valid_ds
elif data_type == DatasetType.Test:
return data_bunch.test_ds
def create_rmse_df_lstm(y, y_hat, file, data_bunch, data_type=DatasetType.Test):
res_rmses, park_ids = [], []
pdfs = []
ds = get_ds_from_type(data_bunch, data_type)
y, y_hat = y.ravel(), y_hat.ravel()
res_rmse = mse(y, y_hat) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(file)
df_f = pd.DataFrame({"Y": y, "Yhat": y_hat, "Time": ds.index})
df_f["ParkId"] = to_short_name(file)
pdfs.append(df_f)
df_res = pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids})
pdfs = pd.concat(pdfs, axis=0)
return df_res, pdfs
def create_rmse_df_mtl(y, y_hat, files, data_bunch, data_type=DatasetType.Test):
res_rmses, park_ids = [], []
pdfs = []
ds = get_ds_from_type(data_bunch, data_type)
for i in range(y.shape[1]):
res_rmse = mse(y[:, i], y_hat[:, i]) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(files[i])
df_f = pd.DataFrame({"Y": y[:, i], "Yhat": y_hat[:, i], "Time": ds.index})
df_f["ParkId"] = to_short_name(data_bunch.files[i])
pdfs.append(df_f)
df_res = | pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
:Date: 2018. 6. 19.
"""
import numpy as np
import pandas as pd
from pandas.io.excel import ExcelFile
from preprocess.core.columns import *
KIND = 'Kind'
SYMBOL = 'Symbol'
NAME = 'name'
ITEM_NAME = 'Item Name '
ITEM = 'Item'
FREQUENCY = 'Frequency'
SYMBOL_NAME = 'Symbol Name'
DIR = 'data/{}.xlsx'
COMPANY_UNNECESSARY_COLUMNS = [KIND, NAME, ITEM_NAME, ITEM, FREQUENCY]
BENCHMARK_UNNECESSARY_COLUMNS = [SYMBOL, KIND, ITEM, ITEM_NAME, FREQUENCY]
# noinspection PyShadowingNames
def read_companies(excel_file: ExcelFile) -> pd.DataFrame:
"""
:param excel_file: (ExcelFile)
:return melted_companies: (DataFrame)
code | (String)
date | (Datetime)
name | (String)
...
"""
# Read excel file.
raw_companies = excel_file.parse(COMPANY, skiprows=8)
# Rename Symbol -> code, Symbol Name -> name
raw_companies = raw_companies.rename(columns={
'Symbol': CODE,
'Symbol Name': NAME,
})
# Save symbol names and item names.
names = raw_companies.drop_duplicates(subset=CODE, keep='last').loc[:, [CODE, NAME]]
names = names.set_index(CODE)
item_name_num = len(raw_companies.loc[:1000, ITEM_NAME].unique())
item_names = raw_companies.loc[:item_name_num - 1, ITEM_NAME]
# Remove unnecessary columns, for example, Symbol, Kind, Item, Item Name, Frequency
raw_companies = raw_companies.drop(columns=COMPANY_UNNECESSARY_COLUMNS)
# Melt every items.
melted_companies = pd.DataFrame(columns=[CODE, DATE])
melted_companies = melted_companies.set_index([CODE, DATE])
for index, item_name in enumerate(item_names):
        # Melt raw_companies. Symbol name -> code, column names -> date
item_companies = pd.melt(raw_companies.iloc[index::item_name_num, :], id_vars=[CODE], var_name=DATE,
value_name=item_name)
        item_companies[DATE] = pd.to_datetime(item_companies[DATE], format='%Y-%m-%d')
item_companies = item_companies.set_index([CODE, DATE])
melted_companies = melted_companies.join(item_companies, how='outer')
melted_companies = melted_companies.rename(columns=COMPANY_RENAMES)
# Add the names of company.
melted_companies = melted_companies.join(names)
melted_companies = melted_companies.reset_index()
melted_companies = melted_companies.sort_values([CODE, DATE])
    # IS_MANAGED ('관리') and IS_SUSPENDED ('정지'): flag value -> True, NaN -> False
melted_companies[IS_MANAGED] = melted_companies[IS_MANAGED].replace('관리', True)
melted_companies[IS_MANAGED] = melted_companies[IS_MANAGED].fillna(False)
melted_companies[IS_SUSPENDED] = melted_companies[IS_SUSPENDED].replace('정지', True)
melted_companies[IS_SUSPENDED] = melted_companies[IS_SUSPENDED].fillna(False)
# nan -> 0
to_zero_columns = [
CFO, ALLOWANCE_AR_, TRADING_VOLUME, RES_EXP, AR, DIVP, AP,
NET_PERSONAL_PURCHASE, NET_NATIONAL_PURCHASE, NET_FINANCIAL_INVESTMENT_PURCHASE,
NET_INSTITUTIONAL_FOREIGN_PURCHASE, NET_INSTITUTIONAL_PURCHASE, NET_ETC_FINANCE_PURCHASE,
NET_ETC_CORPORATION_PURCHASE, NET_ETC_FOREIGN_PURCHASE, NET_REGISTERED_FOREIGN_PURCHASE,
NET_INSURANCE_PURCHASE, NET_PRIVATE_FUND_PURCHASE, NET_PENSION_PURCHASE, NET_FOREIGN_PURCHASE,
NET_BANK_PURCHASE, NET_TRUST_PURCHASE, SHORT_SALE_BALANCE, FOREIGN_OWNERSHIP_RATIO
]
melted_companies.loc[:, to_zero_columns] = melted_companies.replace(np.nan, 0.0).loc[:, to_zero_columns]
# There are no SHORT_SALE_BALANCE before 2016-06-30
melted_companies.loc[melted_companies[DATE] < '2016-06-30', SHORT_SALE_BALANCE] = np.nan
# Sort by code and date
melted_companies = melted_companies.sort_values([CODE, DATE]).reset_index(drop=True)
return melted_companies
# noinspection PyShadowingNames
def read_benchmarks(excel_file: ExcelFile) -> pd.DataFrame:
"""
:param excel_file: (ExcelFile)
:return melted_benchmarks: (DataFrame)
code | (String)
date | (Datetime)
price_index | (float)
"""
# Read excel file.
raw_benchmarks = excel_file.parse(BENCHMARK, skiprows=8)
raw_macro_from_monthly = excel_file.parse(MACRO_MONTHLY, skiprows=8)
# Use only CD91
raw_risk_free = raw_macro_from_monthly.loc[raw_macro_from_monthly[ITEM_NAME] == '시장금리:CD유통수익률(91)(%)', :]
# Remove unnecessary columns, for example, Symbol, Kind, Item, Item Name, Frequency
raw_benchmarks = raw_benchmarks.drop(columns=BENCHMARK_UNNECESSARY_COLUMNS)
raw_risk_free = raw_risk_free.drop(columns=BENCHMARK_UNNECESSARY_COLUMNS)
raw_risk_free[SYMBOL_NAME] = CD91
# Melt benchmarks. Symbole name -> code, column names -> date
melted_benchmarks = _melt(raw_benchmarks, PRICE_INDEX)
melted_risk_free = _melt(raw_risk_free, PRICE_INDEX)
# Calculate a risk free rate index
melted_risk_free[PRICE_INDEX] = (((melted_risk_free[PRICE_INDEX] / 100) + 1) ** (1 / 12)).cumprod()
melted_benchmarks = | pd.concat([melted_benchmarks, melted_risk_free]) | pandas.concat |
import os
import numpy as np
import pandas as pd
import consts
first_period='01-2017'
last_period='02-2020'
# Extract the year substring and convert it to int
first_year = int(first_period[3:])
# Extract the year substring and convert it to int
last_year = int(last_period[3:])
# Extract the month substring
first_month = first_period[:2]
# Extract the month substring
last_month = last_period[:2]
df = pd.read_excel(os.path.join(consts.DIRETORIO_RESULTADOS, 'resultados.xlsx'), sheet_name=f'{first_year}-{first_month}')
df['EFICIENCIA'].replace('Unbounded', np.nan, inplace=True)
df['EFICIENCIA'] = df['EFICIENCIA'].astype(float)
df.rename(columns={'EFICIENCIA': f'{first_year}-{first_month}'}, inplace=True)
df = df[['CNES', f'{first_year}-{first_month}']]
# Iterate over the years
for ano in np.arange(first_year, last_year + 1):
print(ano)
if ano == first_year:
        # Iterate over the months of the first year of data
for mes in np.arange(2, 13):
            # Convert the int month value to a string
mes = str(mes)
            # Left-pad the month string with zeros to two digits
mes = mes.zfill(2)
print(mes)
            # Read the xlsx sheet '{ano}-{mes}' from resultados.xlsx into a pandas DataFrame
df_mes = pd.read_excel(os.path.join(consts.DIRETORIO_RESULTADOS, 'resultados.xlsx'), sheet_name=f'{ano}-{mes}')
#
df_mes['EFICIENCIA'].replace('Unbounded', np.nan, inplace=True)
#
df_mes['EFICIENCIA'] = df_mes['EFICIENCIA'].astype(float)
#
df_mes.rename(columns={'EFICIENCIA': f'{first_year}-{mes}'}, inplace=True)
#
df = pd.merge(df, df_mes[['CNES', f'{first_year}-{mes}']], how='outer', left_on='CNES', right_on='CNES')
elif first_year < ano < last_year:
        # Iterate over the months of the intermediate years of data
for mes in np.arange(1, 13):
            # Convert the int month value to a string
mes = str(mes)
            # Left-pad the month string with zeros to two digits
mes = mes.zfill(2)
print(mes)
            # Read the xlsx sheet '{ano}-{mes}' from resultados.xlsx into a pandas DataFrame
df_mes = pd.read_excel(os.path.join(consts.DIRETORIO_RESULTADOS, 'resultados.xlsx'), sheet_name = f'{ano}-{mes}')
#
df_mes['EFICIENCIA'].replace('Unbounded', np.nan, inplace=True)
#
df_mes['EFICIENCIA'] = df_mes['EFICIENCIA'].astype(float)
#
df_mes.rename(columns={'EFICIENCIA': f'{ano}-{mes}'}, inplace=True)
#
df = | pd.merge(df, df_mes[['CNES', f'{ano}-{mes}']], how='outer', left_on='CNES', right_on='CNES') | pandas.merge |
import pandas as pd
import numpy as np
import os
path='D:\sufe\A'
files=os.listdir(path)
train_data=pd.read_csv('D:\sufe\A\data_train_changed.csv')
data1=pd.read_csv('D:\sufe\A\contest_ext_crd_cd_ln.tsv',sep='\t')
data2=pd.read_csv('D:\sufe\A\contest_ext_crd_cd_ln_spl.tsv',sep='\t')
p=pd.merge(train_data,data1,on='REPORT_ID',how='left')
p= | pd.merge(p,data2,on='REPORT_ID',how='left') | pandas.merge |
#%%
import pandas as pd
import json
import os
class Preprocessing:
optional = object()
"""
Helper class for Preprocessing Data
"""
@staticmethod
def extract_str_dict_df(df, column):
"""
To parse data that have rows that look like:
"{"/m/01jfsb": "Thriller", "/m/06n90": "Science Fiction", "/m/03npn": "Horror", "/m/03k9fj": "Adventure"}"
:param df:DataFrame Df containing rows that contain string values in a dictionary format
:param column:str Column name
        :return: tuple
            a tuple of (list of unique values in the column, list of per-row value lists)
"""
s = set()
lists = []
for x in range(len(df[column])):
values = list(json.loads(df[column][x]).values())
lists.append(values)
for value in values:
s.add(value)
unique_list = list(s)
return unique_list, lists
@staticmethod
def create_row(c_v, values, c_dv, dv_values):
"""
@Precondition: dv is unique list
Essentially one hot encodes the dependent values
# Any other relevant data
:param c_v: [str], column names (ordered with values)
:param values: [any] - List of values of columns
# Categorical data
:param c_dv: [str] (unique list of dependent_variables)
:param dv_values: [str] (list of dependent variables)
:return:
            dictionary that will be appended to a DataFrame
"""
assert (len(c_dv) == len(set(c_dv))), "Not a unique list"
# Any other relevant data
dictionary = dict(zip(c_v, values))
# One hot encoding
dictionary_2 = dict(zip(c_dv, [0] * len(c_dv)))
for v in dv_values:
dictionary_2[v] = 1
row = {**dictionary_2, **dictionary}
return row
@staticmethod
def create_cleaned_df(meta_df, plot_df, columns, genre_list):
"""
@precondition: Must be for the CMU Corpus Data Frame
:param meta_df: DataFrame for movie.metadata.tsv
:param plot_df: DataFrame for plot-summaries.txt
:param columns: Columns for the cleaned DataFrame
:param genre_list: List of (genres for each movie)
:return:
            DataFrame, but cleaned and arranged properly
"""
n_df = pd.DataFrame(columns=columns)
plot_ids = plot_df['wikiid'].tolist()
plot_by_id = [x.lower().replace(",", "") for x in plot_df["summary"]]
for ind, id in enumerate(meta_df['wikiid']):
if id in plot_ids:
                row = Preprocessing.create_row(["Title", "Summary"], [meta_df["title"][ind], plot_by_id[plot_ids.index(id)]], columns, genre_list[ind])  # look up the summary by the position of this wikiid
n_df = n_df.append(row, ignore_index=True)
return n_df
@staticmethod
def merge_data(df_1, df_2, co_to_keep, rn_co_1=optional, rn_co_2=optional):
"""
Formats the columns and merges the dataframes based on those columns
:param df_1: Data Frame 1
:param df_2: Data Frame 2
:param co_to_keep: [str] - List of columns to keep
:param rn_co_1: Dictionary - Renamed columns for DF1
:param rn_co_2: Dictionary - Renamed columns for DF2
:return:
merged DataFrame
"""
if rn_co_1 != Preprocessing.optional:
df_1.rename(columns=rn_co_1, inplace=True)
if rn_co_2 != Preprocessing.optional:
df_2.rename(columns=rn_co_2, inplace=True)
return | pd.concat(df_1[co_to_keep], df_2[co_to_keep]) | pandas.concat |
import Orange
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from parameters import output_dir, rank_dir, input_dir
from classifiers import classifiers_list
from datasets import dataset_biclass, dataset_multiclass
# geometry
order = ['area',
'volume',
'area_volume_ratio',
'edge_ratio',
'radius_ratio',
'aspect_ratio',
'max_solid_angle',
'min_solid_angle',
'solid_angle']
# Dirichlet Distribution alphas
alphas = np.arange(1, 10, 0.5)
class Performance:
def __init__(self):
pass
def average_results(self, rfile, kind, release):
'''
Calculates average results
:param rfile: filename with results
:param kind: biclass or multiclass
        :return: average results written to another csv file
'''
df = pd.read_csv(rfile)
t = pd.Series(data=np.arange(0, df.shape[0], 1))
if kind == 'biclass':
dfr = pd.DataFrame(columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER',
'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO', 'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
else:
dfr = pd.DataFrame(columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER',
'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO', 'IBA'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
if kind == 'biclass':
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print('Total lines in a file: ', i)
dfr.to_csv(input_dir + 'average_results_' + kind + '_' + str(release) + '.csv', index=False)
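# Equivalent aggregation sketch (standalone, not part of the class): the per-group
# means computed by the loop above can also be obtained with a single groupby call.
# Column names follow the ones used above; the data here is made up.
import pandas as pd

demo = pd.DataFrame({
    "MODE": ["cv"] * 4,
    "DATASET": ["iris"] * 4,
    "PREPROC": ["_SMOTE", "_SMOTE", "_train", "_train"],
    "ALGORITHM": ["RF"] * 4,
    "PRE": [0.80, 0.90, 0.70, 0.75],
    "AUC": [0.85, 0.95, 0.72, 0.78],
})
averaged = (demo.groupby(["MODE", "DATASET", "PREPROC", "ALGORITHM"], as_index=False)
            [["PRE", "AUC"]].mean())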
def rank_by_algorithm(self, df, kind, order, alpha, release, smote=False):
'''
Computes the rank of each preprocessing method for every measure, per algorithm.
:param df: DataFrame with the averaged results
:param kind: biclass or multiclass
:param order: geometry used by the Delaunay-based (DTO) preprocessing
:param alpha: Dirichlet distribution alpha
:param release: release identifier used in the output filenames
:param smote: whether the results come from the SMOTE-preprocessed runs
:return: None - writes rank csv files and CD diagrams
'''
biclass_measures = ['PRE', 'REC', 'SPE', 'F1', 'GEO', 'IBA', 'AUC']
multiclass_measures = ['PRE', 'REC', 'SPE', 'F1', 'GEO', 'IBA']
df_table = pd.DataFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE', 'DELAUNAY', 'RANK_DELAUNAY', 'DELAUNAY_TYPE',
'ALPHA', 'unit'])
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
if smote == False:
df.to_csv(rank_dir + release + '_' + kind + '_' + order + '_' + str(alpha) + '.csv', index=False)
else:
df.to_csv(rank_dir + release + '_smote_' + kind + '_' + order + '_' + str(alpha) + '.csv', index=False)
j = 0
if kind == 'biclass':
dataset = dataset_biclass
measures = biclass_measures
else:
dataset = dataset_multiclass
measures = multiclass_measures
for d in dataset:
for m in measures:
aux = group[group['DATASET'] == d]
aux = aux.reset_index()
df_table.at[j, 'DATASET'] = d
df_table.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.tolist()[0]
df_table.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.tolist()[0]
df_table.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.tolist()[0]
df_table.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.tolist()[0]
df_table.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.tolist()[0]
df_table.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.tolist()[0]
df_table.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.tolist()[0]
df_table.at[j, 'DELAUNAY'] = aux.at[indice, m]
df_table.at[j, 'DELAUNAY_TYPE'] = order
df_table.at[j, 'ALPHA'] = alpha
df_table.at[j, 'unit'] = m
j += 1
df_pre = df_table[df_table['unit'] == 'PRE']
df_rec = df_table[df_table['unit'] == 'REC']
df_spe = df_table[df_table['unit'] == 'SPE']
df_f1 = df_table[df_table['unit'] == 'F1']
df_geo = df_table[df_table['unit'] == 'GEO']
df_iba = df_table[df_table['unit'] == 'IBA']
if kind == 'biclass':
df_auc = df_table[df_table['unit'] == 'AUC']
pre = df_pre[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
rec = df_rec[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
spe = df_spe[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
f1 = df_f1[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
geo = df_geo[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
iba = df_iba[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
if kind == 'biclass':
auc = df_auc[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
'DELAUNAY']]
pre = pre.reset_index()
pre.drop('index', axis=1, inplace=True)
rec = rec.reset_index()
rec.drop('index', axis=1, inplace=True)
spe = spe.reset_index()
spe.drop('index', axis=1, inplace=True)
f1 = f1.reset_index()
f1.drop('index', axis=1, inplace=True)
geo = geo.reset_index()
geo.drop('index', axis=1, inplace=True)
iba = iba.reset_index()
iba.drop('index', axis=1, inplace=True)
if kind == 'biclass':
auc = auc.reset_index()
auc.drop('index', axis=1, inplace=True)
# compute the rank row by row
pre_rank = pre.rank(axis=1, ascending=False)
rec_rank = rec.rank(axis=1, ascending=False)
spe_rank = spe.rank(axis=1, ascending=False)
f1_rank = f1.rank(axis=1, ascending=False)
geo_rank = geo.rank(axis=1, ascending=False)
iba_rank = iba.rank(axis=1, ascending=False)
if kind == 'biclass':
auc_rank = auc.rank(axis=1, ascending=False)
df_pre = df_pre.reset_index()
df_pre.drop('index', axis=1, inplace=True)
df_pre['RANK_ORIGINAL'] = pre_rank['ORIGINAL']
df_pre['RANK_SMOTE'] = pre_rank['SMOTE']
df_pre['RANK_SMOTE_SVM'] = pre_rank['SMOTE_SVM']
df_pre['RANK_BORDERLINE1'] = pre_rank['BORDERLINE1']
df_pre['RANK_BORDERLINE2'] = pre_rank['BORDERLINE2']
df_pre['RANK_GEOMETRIC_SMOTE'] = pre_rank['GEOMETRIC_SMOTE']
df_pre['RANK_DELAUNAY'] = pre_rank['DELAUNAY']
df_rec = df_rec.reset_index()
df_rec.drop('index', axis=1, inplace=True)
df_rec['RANK_ORIGINAL'] = rec_rank['ORIGINAL']
df_rec['RANK_SMOTE'] = rec_rank['SMOTE']
df_rec['RANK_SMOTE_SVM'] = rec_rank['SMOTE_SVM']
df_rec['RANK_BORDERLINE1'] = rec_rank['BORDERLINE1']
df_rec['RANK_BORDERLINE2'] = rec_rank['BORDERLINE2']
df_rec['RANK_GEOMETRIC_SMOTE'] = rec_rank['GEOMETRIC_SMOTE']
df_rec['RANK_DELAUNAY'] = rec_rank['DELAUNAY']
df_spe = df_spe.reset_index()
df_spe.drop('index', axis=1, inplace=True)
df_spe['RANK_ORIGINAL'] = spe_rank['ORIGINAL']
df_spe['RANK_SMOTE'] = spe_rank['SMOTE']
df_spe['RANK_SMOTE_SVM'] = spe_rank['SMOTE_SVM']
df_spe['RANK_BORDERLINE1'] = spe_rank['BORDERLINE1']
df_spe['RANK_BORDERLINE2'] = spe_rank['BORDERLINE2']
df_spe['RANK_GEOMETRIC_SMOTE'] = spe_rank['GEOMETRIC_SMOTE']
df_spe['RANK_DELAUNAY'] = spe_rank['DELAUNAY']
df_f1 = df_f1.reset_index()
df_f1.drop('index', axis=1, inplace=True)
df_f1['RANK_ORIGINAL'] = f1_rank['ORIGINAL']
df_f1['RANK_SMOTE'] = f1_rank['SMOTE']
df_f1['RANK_SMOTE_SVM'] = f1_rank['SMOTE_SVM']
df_f1['RANK_BORDERLINE1'] = f1_rank['BORDERLINE1']
df_f1['RANK_BORDERLINE2'] = f1_rank['BORDERLINE2']
df_f1['RANK_GEOMETRIC_SMOTE'] = f1_rank['GEOMETRIC_SMOTE']
df_f1['RANK_DELAUNAY'] = f1_rank['DELAUNAY']
df_geo = df_geo.reset_index()
df_geo.drop('index', axis=1, inplace=True)
df_geo['RANK_ORIGINAL'] = geo_rank['ORIGINAL']
df_geo['RANK_SMOTE'] = geo_rank['SMOTE']
df_geo['RANK_SMOTE_SVM'] = geo_rank['SMOTE_SVM']
df_geo['RANK_BORDERLINE1'] = geo_rank['BORDERLINE1']
df_geo['RANK_BORDERLINE2'] = geo_rank['BORDERLINE2']
df_geo['RANK_GEOMETRIC_SMOTE'] = geo_rank['GEOMETRIC_SMOTE']
df_geo['RANK_DELAUNAY'] = geo_rank['DELAUNAY']
df_iba = df_iba.reset_index()
df_iba.drop('index', axis=1, inplace=True)
df_iba['RANK_ORIGINAL'] = iba_rank['ORIGINAL']
df_iba['RANK_SMOTE'] = iba_rank['SMOTE']
df_iba['RANK_SMOTE_SVM'] = iba_rank['SMOTE_SVM']
df_iba['RANK_BORDERLINE1'] = iba_rank['BORDERLINE1']
df_iba['RANK_BORDERLINE2'] = iba_rank['BORDERLINE2']
df_iba['RANK_GEOMETRIC_SMOTE'] = iba_rank['GEOMETRIC_SMOTE']
df_iba['RANK_DELAUNAY'] = iba_rank['DELAUNAY']
if kind == 'biclass':
df_auc = df_auc.reset_index()
df_auc.drop('index', axis=1, inplace=True)
df_auc['RANK_ORIGINAL'] = auc_rank['ORIGINAL']
df_auc['RANK_SMOTE'] = auc_rank['SMOTE']
df_auc['RANK_SMOTE_SVM'] = auc_rank['SMOTE_SVM']
df_auc['RANK_BORDERLINE1'] = auc_rank['BORDERLINE1']
df_auc['RANK_BORDERLINE2'] = auc_rank['BORDERLINE2']
df_auc['RANK_GEOMETRIC_SMOTE'] = auc_rank['GEOMETRIC_SMOTE']
df_auc['RANK_DELAUNAY'] = auc_rank['DELAUNAY']
# average rank
media_pre_rank = pre_rank.mean(axis=0)
media_rec_rank = rec_rank.mean(axis=0)
media_spe_rank = spe_rank.mean(axis=0)
media_f1_rank = f1_rank.mean(axis=0)
media_geo_rank = geo_rank.mean(axis=0)
media_iba_rank = iba_rank.mean(axis=0)
if kind == 'biclass':
media_auc_rank = auc_rank.mean(axis=0)
media_pre_rank_file = media_pre_rank.reset_index()
media_pre_rank_file = media_pre_rank_file.sort_values(by=0)
media_rec_rank_file = media_rec_rank.reset_index()
media_rec_rank_file = media_rec_rank_file.sort_values(by=0)
media_spe_rank_file = media_spe_rank.reset_index()
media_spe_rank_file = media_spe_rank_file.sort_values(by=0)
media_f1_rank_file = media_f1_rank.reset_index()
media_f1_rank_file = media_f1_rank_file.sort_values(by=0)
media_geo_rank_file = media_geo_rank.reset_index()
media_geo_rank_file = media_geo_rank_file.sort_values(by=0)
media_iba_rank_file = media_iba_rank.reset_index()
media_iba_rank_file = media_iba_rank_file.sort_values(by=0)
if kind == 'biclass':
media_auc_rank_file = media_auc_rank.reset_index()
media_auc_rank_file = media_auc_rank_file.sort_values(by=0)
if smote == False:
# Write the relevant output files
df_pre.to_csv(
rank_dir + release + '_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv', index=False)
df_rec.to_csv(
rank_dir + release + '_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv', index=False)
df_spe.to_csv(
rank_dir + release + '_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv', index=False)
df_f1.to_csv(
rank_dir + release + '_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv', index=False)
df_geo.to_csv(
rank_dir + release + '_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv', index=False)
df_iba.to_csv(
rank_dir + release + '_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv', index=False)
if kind == 'biclass':
df_auc.to_csv(
rank_dir + release + '_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
media_pre_rank_file.to_csv(
rank_dir + release + '_' + 'media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
media_rec_rank_file.to_csv(
rank_dir + release + '_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
media_spe_rank_file.to_csv(
rank_dir + release + '_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
media_f1_rank_file.to_csv(
rank_dir + release + '_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
media_geo_rank_file.to_csv(
rank_dir + release + '_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
media_iba_rank_file.to_csv(
rank_dir + release + '_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
if kind == 'biclass':
media_auc_rank_file.to_csv(
rank_dir + release + '_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
delaunay_type = order + '_' + str(alpha)
# CD (critical difference) diagram
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
delaunay_type]
avranks = list(media_pre_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + kind + '_' + delaunay_type + '_' + name + '_pre.pdf')
plt.close()
avranks = list(media_rec_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + kind + '_' + delaunay_type + '_' + name + '_rec.pdf')
plt.close()
avranks = list(media_spe_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + kind + '_' + delaunay_type + '_' + name + '_spe.pdf')
plt.close()
avranks = list(media_f1_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_' + '_' + kind + '_' + delaunay_type + '_' + name + '_f1.pdf')
plt.close()
avranks = list(media_geo_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + kind + '_' + delaunay_type + '_' + name + '_geo.pdf')
plt.close()
avranks = list(media_iba_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + kind + '_' + delaunay_type + '_' + name + '_iba.pdf')
plt.close()
if kind == 'biclass':
avranks = list(media_auc_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + kind + '_' + delaunay_type + '_' + name + '_auc.pdf')
plt.close()
print('Delaunay Type= ', delaunay_type)
print('Algorithm= ', name)
else:
# Write the relevant output files
df_pre.to_csv(
rank_dir + release + '_smote_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv', index=False)
df_rec.to_csv(
rank_dir + release + '_smote_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv', index=False)
df_spe.to_csv(
rank_dir + release + '_smote_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv', index=False)
df_f1.to_csv(
rank_dir + release + '_smote_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv', index=False)
df_geo.to_csv(
rank_dir + release + '_smote_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv', index=False)
df_iba.to_csv(
rank_dir + release + '_smote_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv', index=False)
if kind == 'biclass':
df_auc.to_csv(
rank_dir + release + '_smote_' + kind + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
media_pre_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
media_rec_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
media_spe_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
media_f1_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
media_geo_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
media_iba_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
if kind == 'biclass':
media_auc_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + kind + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
delaunay_type = order + '_' + str(alpha)
# CD (critical difference) diagram
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
delaunay_type]
avranks = list(media_pre_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + kind + '_' + delaunay_type + '_' + name + '_pre.pdf')
plt.close()
avranks = list(media_rec_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + kind + '_' + delaunay_type + '_' + name + '_rec.pdf')
plt.close()
avranks = list(media_spe_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + kind + '_' + delaunay_type + '_' + name + '_spe.pdf')
plt.close()
avranks = list(media_f1_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_smote' + '_' + kind + '_' + delaunay_type + '_' + name + '_f1.pdf')
plt.close()
avranks = list(media_geo_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + kind + '_' + delaunay_type + '_' + name + '_geo.pdf')
plt.close()
avranks = list(media_iba_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + kind + '_' + delaunay_type + '_' + name + '_iba.pdf')
plt.close()
if kind == 'biclass':
avranks = list(media_auc_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + kind + '_' + delaunay_type + '_' + name + '_auc.pdf')
plt.close()
print('SMOTE Delaunay Type= ', delaunay_type)
print('SMOTE Algorithm= ', name)
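# Small standalone sketch of the row-wise ranking used above: rank(axis=1,
# ascending=False) gives rank 1 to the best (highest) score in each row and
# averages ranks on ties. The scores below are made up.
import pandas as pd

scores = pd.DataFrame({"ORIGINAL": [0.70, 0.80],
                       "SMOTE": [0.75, 0.80],
                       "DELAUNAY": [0.90, 0.85]})
ranks = scores.rank(axis=1, ascending=False)
# row 0 -> DELAUNAY 1.0, SMOTE 2.0, ORIGINAL 3.0
# row 1 -> DELAUNAY 1.0, ORIGINAL 2.5, SMOTE 2.5 (tie averaged)
mean_rank = ranks.mean(axis=0)   # average rank per method, as in the code above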
def rank_dto_by(self, geometry, kind, release, smote=False):
if kind == 'biclass':
M = ['_pre.csv', '_rec.csv', '_spe.csv', '_f1.csv', '_geo.csv', '_iba.csv', '_auc.csv']
else:
M = ['_pre.csv', '_rec.csv', '_spe.csv', '_f1.csv', '_geo.csv', '_iba.csv']
df_media_rank = pd.DataFrame(columns=['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE',
'RANK_SMOTE_SVM', 'RANK_BORDERLINE1', 'RANK_BORDERLINE2',
'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY', 'unit'])
if smote == False:
name = rank_dir + release + '_' + kind + '_total_rank_' + geometry + '_'
else:
name = rank_dir + release + '_smote_' + kind + '_total_rank_' + geometry + '_'
for m in M:
i = 0
for c in classifiers_list:
df = pd.read_csv(name + c + m)
rank_original = df.RANK_ORIGINAL.mean()
rank_smote = df.RANK_SMOTE.mean()
rank_smote_svm = df.RANK_SMOTE_SVM.mean()
rank_b1 = df.RANK_BORDERLINE1.mean()
rank_b2 = df.RANK_BORDERLINE2.mean()
rank_geo_smote = df.RANK_GEOMETRIC_SMOTE.mean()
rank_dto = df.RANK_DELAUNAY.mean()
df_media_rank.loc[i, 'ALGORITHM'] = df.loc[0, 'ALGORITHM']
df_media_rank.loc[i, 'RANK_ORIGINAL'] = rank_original
df_media_rank.loc[i, 'RANK_SMOTE'] = rank_smote
df_media_rank.loc[i, 'RANK_SMOTE_SVM'] = rank_smote_svm
df_media_rank.loc[i, 'RANK_BORDERLINE1'] = rank_b1
df_media_rank.loc[i, 'RANK_BORDERLINE2'] = rank_b2
df_media_rank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = rank_geo_smote
df_media_rank.loc[i, 'RANK_DELAUNAY'] = rank_dto
df_media_rank.loc[i, 'unit'] = df.loc[0, 'unit']
i += 1
dfmediarank = df_media_rank.copy()
dfmediarank = dfmediarank.sort_values('RANK_DELAUNAY')
dfmediarank.loc[i, 'ALGORITHM'] = 'average'
dfmediarank.loc[i, 'RANK_ORIGINAL'] = df_media_rank['RANK_ORIGINAL'].mean()
dfmediarank.loc[i, 'RANK_SMOTE'] = df_media_rank['RANK_SMOTE'].mean()
dfmediarank.loc[i, 'RANK_SMOTE_SVM'] = df_media_rank['RANK_SMOTE_SVM'].mean()
dfmediarank.loc[i, 'RANK_BORDERLINE1'] = df_media_rank['RANK_BORDERLINE1'].mean()
dfmediarank.loc[i, 'RANK_BORDERLINE2'] = df_media_rank['RANK_BORDERLINE2'].mean()
dfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = df_media_rank['RANK_GEOMETRIC_SMOTE'].mean()
dfmediarank.loc[i, 'RANK_DELAUNAY'] = df_media_rank['RANK_DELAUNAY'].mean()
dfmediarank.loc[i, 'unit'] = df.loc[0, 'unit']
i += 1
dfmediarank.loc[i, 'ALGORITHM'] = 'std'
dfmediarank.loc[i, 'RANK_ORIGINAL'] = df_media_rank['RANK_ORIGINAL'].std()
dfmediarank.loc[i, 'RANK_SMOTE'] = df_media_rank['RANK_SMOTE'].std()
dfmediarank.loc[i, 'RANK_SMOTE_SVM'] = df_media_rank['RANK_SMOTE_SVM'].std()
dfmediarank.loc[i, 'RANK_BORDERLINE1'] = df_media_rank['RANK_BORDERLINE1'].std()
dfmediarank.loc[i, 'RANK_BORDERLINE2'] = df_media_rank['RANK_BORDERLINE2'].std()
dfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = df_media_rank['RANK_GEOMETRIC_SMOTE'].std()
dfmediarank.loc[i, 'RANK_DELAUNAY'] = df_media_rank['RANK_DELAUNAY'].std()
dfmediarank.loc[i, 'unit'] = df.loc[0, 'unit']
dfmediarank['RANK_ORIGINAL'] = pd.to_numeric(dfmediarank['RANK_ORIGINAL'], downcast="float").round(2)
dfmediarank['RANK_SMOTE'] = pd.to_numeric(dfmediarank['RANK_SMOTE'], downcast="float").round(2)
dfmediarank['RANK_SMOTE_SVM'] = pd.to_numeric(dfmediarank['RANK_SMOTE_SVM'], downcast="float").round(2)
dfmediarank['RANK_BORDERLINE1'] = pd.to_numeric(dfmediarank['RANK_BORDERLINE1'], downcast="float").round(2)
dfmediarank['RANK_BORDERLINE2'] = pd.to_numeric(dfmediarank['RANK_BORDERLINE2'], downcast="float").round(2)
dfmediarank['RANK_GEOMETRIC_SMOTE'] = pd.to_numeric(dfmediarank['RANK_GEOMETRIC_SMOTE'],
downcast="float").round(2)
dfmediarank['RANK_DELAUNAY'] = pd.to_numeric(dfmediarank['RANK_DELAUNAY'], downcast="float").round(2)
if smote == False:
dfmediarank.to_csv(output_dir + release + '_' + kind + '_results_media_rank_' + geometry + m,
index=False)
else:
dfmediarank.to_csv(output_dir + release + '_smote_' + kind + '_results_media_rank_' + geometry + m,
index=False)
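# Standalone note on the pd.to_numeric(...).round(2) calls above: filling a DataFrame
# cell by cell with .loc typically leaves the columns with object dtype, which is why
# they are converted back to numeric before rounding. Minimal sketch with made-up values:
import pandas as pd

t = pd.DataFrame(columns=["RANK_DELAUNAY"])
t.loc[0, "RANK_DELAUNAY"] = 1.6667
t.loc[1, "RANK_DELAUNAY"] = 2.3333
t["RANK_DELAUNAY"] = pd.to_numeric(t["RANK_DELAUNAY"], downcast="float").round(2)
# t["RANK_DELAUNAY"] -> approximately [1.67, 2.33], now with a float dtype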
def grafico_variacao_alpha(self, kind, release):
if kind == 'biclass':
M = ['_geo', '_iba', '_auc']
else:
M = ['_geo', '_iba']
order = ['area', 'volume', 'area_volume_ratio', 'edge_ratio', 'radius_ratio', 'aspect_ratio', 'max_solid_angle',
'min_solid_angle', 'solid_angle']
# Dirichlet Distribution alphas
alphas = np.arange(1, 10, 0.5)
df_alpha_variations_rank = pd.DataFrame()
df_alpha_variations_rank['alphas'] = alphas
df_alpha_variations_rank.index = alphas
df_alpha_all = pd.DataFrame()
df_alpha_all['alphas'] = alphas
df_alpha_all.index = alphas
for m in M:
for o in order:
for a in alphas:
filename = output_dir + release + '_' + kind + '_results_media_rank_' + o + '_' + str(
a) + m + '.csv'
print(filename)
df = pd.read_csv(filename)
mean = df.loc[8, 'RANK_DELAUNAY']
df_alpha_variations_rank.loc[a, 'AVERAGE_RANK'] = mean
if m == '_geo':
measure = 'GEO'
if m == '_iba':
measure = 'IBA'
if m == '_auc':
measure = 'AUC'
df_alpha_all[o + '_' + measure] = df_alpha_variations_rank['AVERAGE_RANK'].copy()
fig, ax = plt.subplots()
ax.set_title('DTO AVERAGE RANK\n ' + 'GEOMETRY = ' + o + '\nMEASURE = ' + measure, fontsize=10)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
ax.plot(df_alpha_variations_rank['AVERAGE_RANK'], marker='d', label='Average Rank')
ax.legend(loc="upper right")
plt.xticks(range(11))
fig.savefig(output_dir + release + '_' + kind + '_pic_' + o + '_' + measure + '.png', dpi=125)
plt.show()
plt.close()
# figure(num=None, figsize=(10, 10), dpi=800, facecolor='w', edgecolor='k')
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = GEO', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
t4 = df_alpha_all['alphas']
t5 = df_alpha_all['alphas']
t6 = df_alpha_all['alphas']
t7 = df_alpha_all['alphas']
t8 = df_alpha_all['alphas']
t9 = df_alpha_all['alphas']
ft1 = df_alpha_all['area_GEO']
ft2 = df_alpha_all['volume_GEO']
ft3 = df_alpha_all['area_volume_ratio_GEO']
ft4 = df_alpha_all['edge_ratio_GEO']
ft5 = df_alpha_all['radius_ratio_GEO']
ft6 = df_alpha_all['aspect_ratio_GEO']
ft7 = df_alpha_all['max_solid_angle_GEO']
ft8 = df_alpha_all['min_solid_angle_GEO']
ft9 = df_alpha_all['solid_angle_GEO']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + release + '_' + kind + '_pic_all_geo.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(output_dir + release + '_' + kind + '_pic_all_geo.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = IBA', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
t4 = df_alpha_all['alphas']
t5 = df_alpha_all['alphas']
t6 = df_alpha_all['alphas']
t7 = df_alpha_all['alphas']
t8 = df_alpha_all['alphas']
t9 = df_alpha_all['alphas']
ft1 = df_alpha_all['area_IBA']
ft2 = df_alpha_all['volume_IBA']
ft3 = df_alpha_all['area_volume_ratio_IBA']
ft4 = df_alpha_all['edge_ratio_IBA']
ft5 = df_alpha_all['radius_ratio_IBA']
ft6 = df_alpha_all['aspect_ratio_IBA']
ft7 = df_alpha_all['max_solid_angle_IBA']
ft8 = df_alpha_all['min_solid_angle_IBA']
ft9 = df_alpha_all['solid_angle_IBA']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + release + '_' + kind + '_pic_all_iba.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(output_dir + release + '_' + kind + '_pic_all_iba.csv', index=False)
if kind == 'biclass':
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = AUC', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
t4 = df_alpha_all['alphas']
t5 = df_alpha_all['alphas']
t6 = df_alpha_all['alphas']
t7 = df_alpha_all['alphas']
t8 = df_alpha_all['alphas']
t9 = df_alpha_all['alphas']
ft1 = df_alpha_all['area_AUC']
ft2 = df_alpha_all['volume_AUC']
ft3 = df_alpha_all['area_volume_ratio_AUC']
ft4 = df_alpha_all['edge_ratio_AUC']
ft5 = df_alpha_all['radius_ratio_AUC']
ft6 = df_alpha_all['aspect_ratio_AUC']
ft7 = df_alpha_all['max_solid_angle_AUC']
ft8 = df_alpha_all['min_solid_angle_AUC']
ft9 = df_alpha_all['solid_angle_AUC']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + release + '_' + kind + '_pic_all_auc.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(output_dir + release + '_' + kind + '_pic_all_auc.csv', index=False)
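# Standalone sketch: the repeated t1..t9 / ft1..ft9 blocks above plot the same x axis
# ('alphas') against one geometry column each, so the same figure can be produced by
# looping over (column, color) pairs. Column names follow the ones used above; the
# rank values here are made up.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

geometry_colors = {"area_GEO": "tab:blue", "volume_GEO": "tab:red"}
demo = pd.DataFrame({"alphas": np.arange(1, 10, 0.5)})
for col in geometry_colors:
    demo[col] = np.random.uniform(1, 7, len(demo))   # made-up average ranks
fig, ax = plt.subplots(figsize=(10, 7))
for col, color in geometry_colors.items():
    ax.plot(demo["alphas"], demo[col], color=color, marker="o",
            label=col.replace("_GEO", ""))
ax.set_xlabel("Alpha")
ax.set_ylabel("Rank")
ax.legend(loc="upper right")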
def best_alpha(self, kind):
# Best alpha calculation
# GEO
df1 = pd.read_csv(output_dir + 'v1' + '_' + kind + '_pic_all_geo.csv')
df2 = pd.read_csv(output_dir + 'v2' + '_' + kind + '_pic_all_geo.csv')
df3 = pd.read_csv(output_dir + 'v3' + '_' + kind + '_pic_all_geo.csv')
if kind == 'biclass':
col = ['area_GEO', 'volume_GEO', 'area_volume_ratio_GEO',
'edge_ratio_GEO', 'radius_ratio_GEO', 'aspect_ratio_GEO',
'max_solid_angle_GEO', 'min_solid_angle_GEO', 'solid_angle_GEO',
'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA', 'edge_ratio_IBA',
'radius_ratio_IBA', 'aspect_ratio_IBA', 'max_solid_angle_IBA',
'min_solid_angle_IBA', 'solid_angle_IBA', 'area_AUC', 'volume_AUC',
'area_volume_ratio_AUC', 'edge_ratio_AUC', 'radius_ratio_AUC',
'aspect_ratio_AUC', 'max_solid_angle_AUC', 'min_solid_angle_AUC',
'solid_angle_AUC']
else:
col = ['area_GEO', 'volume_GEO',
'area_volume_ratio_GEO', 'edge_ratio_GEO', 'radius_ratio_GEO',
'aspect_ratio_GEO', 'max_solid_angle_GEO', 'min_solid_angle_GEO',
'solid_angle_GEO', 'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA',
'edge_ratio_IBA', 'radius_ratio_IBA', 'aspect_ratio_IBA',
'max_solid_angle_IBA', 'min_solid_angle_IBA', 'solid_angle_IBA']
df_mean = pd.DataFrame()
df_mean['alphas'] = df1.alphas
for c in col:
for i in np.arange(0, df1.shape[0]):
df_mean.loc[i, c] = (df1.loc[i, c] + df2.loc[i, c] + df3.loc[i, c]) / 3.0
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = GEO', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_GEO']
ft2 = df_mean['volume_GEO']
ft3 = df_mean['area_volume_ratio_GEO']
ft4 = df_mean['edge_ratio_GEO']
ft5 = df_mean['radius_ratio_GEO']
ft6 = df_mean['aspect_ratio_GEO']
ft7 = df_mean['max_solid_angle_GEO']
ft8 = df_mean['min_solid_angle_GEO']
ft9 = df_mean['solid_angle_GEO']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_geo.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_geo.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = IBA', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_IBA']
ft2 = df_mean['volume_IBA']
ft3 = df_mean['area_volume_ratio_IBA']
ft4 = df_mean['edge_ratio_IBA']
ft5 = df_mean['radius_ratio_IBA']
ft6 = df_mean['aspect_ratio_IBA']
ft7 = df_mean['max_solid_angle_IBA']
ft8 = df_mean['min_solid_angle_IBA']
ft9 = df_mean['solid_angle_IBA']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_iba.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_iba.csv', index=False)
if kind == 'biclass':
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = AUC', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_AUC']
ft2 = df_mean['volume_AUC']
ft3 = df_mean['area_volume_ratio_AUC']
ft4 = df_mean['edge_ratio_AUC']
ft5 = df_mean['radius_ratio_AUC']
ft6 = df_mean['aspect_ratio_AUC']
ft7 = df_mean['max_solid_angle_AUC']
ft8 = df_mean['min_solid_angle_AUC']
ft9 = df_mean['solid_angle_AUC']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_auc.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_auc.csv', index=False)
def run_rank_choose_parameters(self, filename, kind, release):
df_best_dto = pd.read_csv(filename)
df_B1 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline1'].copy()
df_B2 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline2'].copy()
df_GEO = df_best_dto[df_best_dto['PREPROC'] == '_Geometric_SMOTE'].copy()
df_SMOTE = df_best_dto[df_best_dto['PREPROC'] == '_SMOTE'].copy()
df_SMOTEsvm = df_best_dto[df_best_dto['PREPROC'] == '_smoteSVM'].copy()
df_original = df_best_dto[df_best_dto['PREPROC'] == '_train'].copy()
for o in order:
for a in alphas:
GEOMETRY = '_delaunay_' + o + '_' + str(a)
df_dto = df_best_dto[df_best_dto['PREPROC'] == GEOMETRY].copy()
df = pd.concat([df_B1, df_B2, df_GEO, df_SMOTE, df_SMOTEsvm, df_original, df_dto])
self.rank_by_algorithm(df, kind, o, str(a), release)
self.rank_dto_by(o + '_' + str(a), kind, release)
def run_global_rank(self, filename, kind, release):
df_best_dto = pd.read_csv(filename)
df_B1 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline1'].copy()
df_B2 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline2'].copy()
df_GEO = df_best_dto[df_best_dto['PREPROC'] == '_Geometric_SMOTE'].copy()
df_SMOTE = df_best_dto[df_best_dto['PREPROC'] == '_SMOTE'].copy()
df_SMOTEsvm = df_best_dto[df_best_dto['PREPROC'] == '_smoteSVM'].copy()
df_original = df_best_dto[df_best_dto['PREPROC'] == '_train'].copy()
o = 'solid_angle'
if kind == 'biclass':
a = 7.0
else:
a = 7.5
GEOMETRY = '_delaunay_' + o + '_' + str(a)
df_dto = df_best_dto[df_best_dto['PREPROC'] == GEOMETRY].copy()
df = | pd.concat([df_B1, df_B2, df_GEO, df_SMOTE, df_SMOTEsvm, df_original, df_dto]) | pandas.concat |
import unittest
import numpy as np
import pandas as pd
from haychecker.chc.metrics import constraint
class TestConstraint(unittest.TestCase):
def test_empty(self):
df = pd.DataFrame()
df["c1"] = []
df["c2"] = []
condition1 = {"column": "c1", "operator": "lt", "value": 1000}
condition2 = {"column": "c1", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0], [1], conditions, df)[0]
self.assertEqual(r, 100.)
def test_allnull(self):
df = pd.DataFrame()
df["c1"] = [None for _ in range(100)]
df["c2"] = [np.NaN for _ in range(100)]
df["c3"] = [None for _ in range(100)]
r = constraint([0, 1], [2], df=df)[0]
self.assertEqual(r, 100.0)
def test_allnull_with_conditions(self):
df = | pd.DataFrame() | pandas.DataFrame |
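# Illustrative sketch (not haychecker's implementation): the condition dictionaries used
# in the tests above map naturally onto pandas boolean masks. Only 'lt' and 'gt' appear
# in the tests; the 'eq' entry and the helper name below are assumptions.
import operator
import pandas as pd

_OPS = {"lt": operator.lt, "gt": operator.gt, "eq": operator.eq}

def conditions_to_mask(df, conditions):
    """Combine {'column', 'operator', 'value'} dicts into a single boolean mask."""
    mask = pd.Series(True, index=df.index)
    for cond in conditions:
        mask &= _OPS[cond["operator"]](df[cond["column"]], cond["value"])
    return mask

demo = pd.DataFrame({"c1": [10, 2000, 500]})
mask = conditions_to_mask(demo, [{"column": "c1", "operator": "lt", "value": 1000},
                                 {"column": "c1", "operator": "gt", "value": 0}])
# mask -> [True, False, True]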
import os
from multiprocessing import Pool, cpu_count
from itertools import repeat
import pandas as pd
from solvers.solvers import SOLVER_MAP
from problem_classes.random_qp import RandomQPExample
from problem_classes.eq_qp import EqQPExample
from problem_classes.portfolio import PortfolioExample
from problem_classes.lasso import LassoExample
from problem_classes.svm import SVMExample
from problem_classes.huber import HuberExample
from problem_classes.control import ControlExample
from utils.general import make_sure_path_exists
examples = [RandomQPExample,
EqQPExample,
PortfolioExample,
LassoExample,
SVMExample,
HuberExample,
ControlExample]
EXAMPLES_MAP = {example.name(): example for example in examples}
class Example(object):
'''
Examples runner
'''
def __init__(self, name,
dims,
solvers,
settings,
n_instances=10):
self.name = name
self.dims = dims
self.n_instances = n_instances
self.solvers = solvers
self.settings = settings
def solve(self, parallel=True):
'''
Solve problems of type example
The results are stored as
./results/benchmark_problems/{solver}/{class}/n{dimension}.csv
using a pandas table with fields
- 'class': example class
- 'solver': solver name
- 'status': solver status
- 'run_time': execution time
- 'iter': number of iterations
- 'obj_val': objective value
- 'n': leading dimension
- 'N': nnz dimension (nnz(P) + nnz(A))
'''
print("Solving %s" % self.name)
print("-----------------")
if parallel:
pool = Pool(processes=min(self.n_instances, cpu_count()))
# Iterate over all solvers
for solver in self.solvers:
settings = self.settings[solver]
# Initialize solver results
results_solver = []
# Solution directory
path = os.path.join('.', 'results', 'benchmark_problems',
solver,
self.name
)
# Create directory for the results
make_sure_path_exists(path)
# Get solver file name
solver_file_name = os.path.join(path, 'full.csv')
for n in self.dims:
# Check if solution already exists
n_file_name = os.path.join(path, 'n%i.csv' % n)
if not os.path.isfile(n_file_name):
if parallel:
instances_list = list(range(self.n_instances))
n_results = pool.starmap(self.solve_single_example,
zip(repeat(n),
instances_list,
repeat(solver),
repeat(settings)))
else:
n_results = []
for instance in range(self.n_instances):
n_results.append(
self.solve_single_example(n,
instance,
solver,
settings)
)
# Combine n_results
df = | pd.concat(n_results) | pandas.concat |
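# Standalone sketch of the Pool.starmap + itertools.repeat pattern used above: it calls
# a worker once per instance while broadcasting the fixed arguments. The worker below
# is a placeholder, not the project's solve_single_example.
from itertools import repeat
from multiprocessing import Pool

def _worker(n, instance, solver):
    return {"n": n, "instance": instance, "solver": solver}

if __name__ == "__main__":
    with Pool(processes=2) as pool:
        results = pool.starmap(_worker, zip(repeat(100), range(4), repeat("OSQP")))
    # results -> one dict per instance, all sharing n=100 and solver='OSQP'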
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 19:00'),
Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 19:00'),
pd.NaT, Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# different tz coerces tz-naive to tz-aware
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# a tz mismatch with tz-aware data raises TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaises(TypeError):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[D]')
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
if method is not None:
self.assertEqual(idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')),
1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('1999', method='nearest'), 0)
self.assertEqual(idx.get_loc('2001', method='nearest'), 2)
with tm.assertRaises(KeyError):
idx.get_loc('1999', method='pad')
with tm.assertRaises(KeyError):
idx.get_loc('2001', method='backfill')
with tm.assertRaises(KeyError):
idx.get_loc('foobar')
with tm.assertRaises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12], dtype=np.int64))
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([], dtype=np.int64))
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2]))
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1]))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2]))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1]))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1]))
with tm.assertRaises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = self.round_trip_pickle(index)
self.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
def test_time_loc(self): # GH8667
from datetime import time
from pandas.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
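        # Concretely (sketch of the arithmetic): 1000 periods * 10**9 ns stride
        # = 1_000_000_000_000, while np.iinfo(np.int32).max == 2_147_483_647,
        # so the product only fits once the operands are promoted to np.int64.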
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
self.assertEqual(len(idx1), periods)
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
self.assertEqual(len(idx2), periods)
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
self.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '3D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
def test_fillna_datetime64(self):
# GH 11343
for tz in ['US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'])
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'])
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# tz mismatch
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00', tz=tz),
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
idx = pd.DatetimeIndex(
['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], tz=tz)
exp = pd.DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], tz=tz)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
'x',
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makePeriodIndex(10))
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
# Hack because of lack of support for Period null checking (GH12759)
tm.assert_index_equal(result[:1], expected[:1])
result_arr = np.asarray([p.ordinal for p in result], dtype=np.int64)
expected_arr = np.asarray([p.ordinal for p in expected],
dtype=np.int64)
tm.assert_numpy_array_equal(result_arr, expected_arr)
# TODO: When GH12759 is resolved, change the above hack to:
# tm.assert_index_equal(result, expected) # now, it raises.
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_index_equal(result, Index(idx.asi8))
self.assert_numpy_array_equal(result.values, idx.values)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')
def test_shift(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_get_loc(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(
idx.get_loc(idx[1].asfreq('H', how='start'), method), 1)
self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1)
self.assertEqual(
idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
idx = pd.period_range('2000-01-01', periods=5)[::2]
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
            result = i.where(notnull(i), other=arr)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.int_))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.int_))
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with self.assertRaisesRegexp(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.int_))
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
self.assert_index_equal(res, exp)
self.assertEqual(res.freqstr, 'D')
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
self.assert_frame_equal(df, df.ix[idx])
self.assert_frame_equal(df, df.ix[list(idx)])
self.assert_frame_equal(df, df.loc[list(idx)])
self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
self.assert_frame_equal(df, df.loc[list(idx)])
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(
['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H')
self.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
with tm.assertRaisesRegexp(
ValueError,
'Input has different freq=D from PeriodIndex\\(freq=H\\)'):
idx.fillna(pd.Period('2011-01-01', freq='D'))
def test_no_millisecond_field(self):
with self.assertRaises(AttributeError):
DatetimeIndex.millisecond
with self.assertRaises(AttributeError):
DatetimeIndex([]).millisecond
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_shift(self):
# test shift for TimedeltaIndex
# err8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
self.assert_index_equal(result, expected)
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)
self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.int_))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.int_))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.int_))
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
self.assertRaises(TypeError, lambda: idx * idx)
self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4H')
for result in [idx / 2, np.divide(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'H')
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2H')
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = | pd.TimedeltaIndex(['1 day', '2 day', '3 day']) | pandas.TimedeltaIndex |
# pylint: disable=redefined-outer-name
import itertools
import time
import pytest
import math
import flask
import pandas as pd
import numpy as np
import json
import psutil # noqa # pylint: disable=unused-import
from bentoml.utils.dataframe_util import _csv_split, _guess_orient
from bentoml.adapters import DataframeInput
from bentoml.adapters.dataframe_input import (
check_dataframe_column_contains,
read_dataframes_from_json_n_csv,
)
from bentoml.exceptions import BadInput
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def test_dataframe_request_schema():
input_adapter = DataframeInput(
input_dtypes={"col1": "int", "col2": "float", "col3": "string"}
)
schema = input_adapter.request_schema["application/json"]["schema"]
assert "object" == schema["type"]
assert 3 == len(schema["properties"])
assert "array" == schema["properties"]["col1"]["type"]
assert "integer" == schema["properties"]["col1"]["items"]["type"]
assert "number" == schema["properties"]["col2"]["items"]["type"]
assert "string" == schema["properties"]["col3"]["items"]["type"]
def test_dataframe_handle_cli(capsys, tmpdir):
def test_func(df):
return df["name"][0]
input_adapter = DataframeInput()
json_file = tmpdir.join("test.json")
with open(str(json_file), "w") as f:
f.write('[{"name": "john","game": "mario","city": "sf"}]')
test_args = ["--input={}".format(json_file)]
input_adapter.handle_cli(test_args, test_func)
out, _ = capsys.readouterr()
assert out.strip().endswith("john")
def test_dataframe_handle_aws_lambda_event():
test_content = '[{"name": "john","game": "mario","city": "sf"}]'
def test_func(df):
return df["name"][0]
input_adapter = DataframeInput()
event = {
"headers": {"Content-Type": "application/json"},
"body": test_content,
}
response = input_adapter.handle_aws_lambda_event(event, test_func)
assert response["statusCode"] == 200
assert response["body"] == '"john"'
input_adapter = DataframeInput()
event_without_content_type_header = {
"headers": {},
"body": test_content,
}
response = input_adapter.handle_aws_lambda_event(
event_without_content_type_header, test_func
)
assert response["statusCode"] == 200
assert response["body"] == '"john"'
with pytest.raises(BadInput):
event_with_bad_input = {
"headers": {},
"body": "bad_input_content",
}
input_adapter.handle_aws_lambda_event(event_with_bad_input, test_func)
def test_check_dataframe_column_contains():
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=["a", "b", "c"]
)
# this should pass
check_dataframe_column_contains({"a": "int", "b": "int", "c": "int"}, df)
check_dataframe_column_contains({"a": "int"}, df)
check_dataframe_column_contains({"a": "int", "c": "int"}, df)
# this should raise exception
with pytest.raises(BadInput) as e:
check_dataframe_column_contains({"required_column_x": "int"}, df)
assert "Missing columns: required_column_x" in str(e.value)
with pytest.raises(BadInput) as e:
check_dataframe_column_contains(
{"a": "int", "b": "int", "d": "int", "e": "int"}, df
)
assert "Missing columns:" in str(e.value)
assert "required_column:" in str(e.value)
def test_dataframe_handle_request_csv():
def test_function(df):
return df["name"][0]
input_adapter = DataframeInput()
csv_data = 'name,game,city\njohn,mario,sf'
request = MagicMock(spec=flask.Request)
request.headers = (('orient', 'records'),)
request.content_type = 'text/csv'
request.get_data.return_value = csv_data
result = input_adapter.handle_request(request, test_function)
assert result.get_data().decode('utf-8') == '"john"'
def assert_df_equal(left: pd.DataFrame, right: pd.DataFrame):
'''
Compare two instances of pandas.DataFrame ignoring index and columns
'''
try:
left_array = left.values
right_array = right.values
        if np.issubdtype(right_array.dtype, np.floating):
np.testing.assert_array_almost_equal(left_array, right_array)
else:
np.testing.assert_array_equal(left_array, right_array)
except AssertionError:
raise AssertionError(
f"\n{left.to_string()}\n is not equal to \n{right.to_string()}\n"
)
DF_CASES = (
pd.DataFrame(np.random.rand(1, 3)),
pd.DataFrame(np.random.rand(2, 3)),
pd.DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C']),
pd.DataFrame(["str1", "str2", "str3"]), # single dim sting array
pd.DataFrame([np.nan]), # special values
pd.DataFrame([math.nan]), # special values
pd.DataFrame([" ", 'a"b', "a,b", "a\nb"]), # special values
pd.DataFrame({"test": [" ", 'a"b', "a,b", "a\nb"]}), # special values
# pd.Series(np.random.rand(2)), # TODO: Series support
# pd.DataFrame([""]), # TODO: -> NaN
)
@pytest.fixture(params=DF_CASES)
def df(request):
return request.param
@pytest.fixture(params=pytest.DF_ORIENTS)
def orient(request):
return request.param
def test_batch_read_dataframes_from_mixed_json_n_csv(df):
test_datas = []
test_types = []
# test content_type=application/json with various orients
for orient in pytest.DF_ORIENTS:
try:
assert_df_equal(df, pd.read_json(df.to_json(orient=orient)))
except (AssertionError, ValueError):
# skip cases not supported by official pandas
continue
test_datas.extend([df.to_json(orient=orient).encode()] * 3)
test_types.extend(['application/json'] * 3)
df_merged, slices = read_dataframes_from_json_n_csv(
test_datas, test_types, orient=None
) # auto detect orient
test_datas.extend([df.to_csv(index=False).encode()] * 3)
test_types.extend(['text/csv'] * 3)
df_merged, slices = read_dataframes_from_json_n_csv(test_datas, test_types)
for s in slices:
assert_df_equal(df_merged[s], df)
def test_batch_read_dataframes_from_csv_other_CRLF(df):
csv_str = df.to_csv(index=False)
if '\r\n' in csv_str:
csv_str = '\n'.join(_csv_split(csv_str, '\r\n')).encode()
else:
csv_str = '\r\n'.join(_csv_split(csv_str, '\n')).encode()
df_merged, _ = read_dataframes_from_json_n_csv([csv_str], ['text/csv'])
assert_df_equal(df_merged, df)
def test_batch_read_dataframes_from_json_of_orients(df, orient):
test_datas = [df.to_json(orient=orient).encode()] * 3
test_types = ['application/json'] * 3
df_merged, slices = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
for s in slices:
assert_df_equal(df_merged[s], df)
def test_batch_read_dataframes_from_json_with_wrong_orients(df, orient):
test_datas = [df.to_json(orient='table').encode()] * 3
test_types = ['application/json'] * 3
with pytest.raises(BadInput):
read_dataframes_from_json_n_csv(test_datas, test_types, orient)
def test_batch_read_dataframes_from_json_in_mixed_order():
# different column order when orient=records
df_json = b'[{"A": 1, "B": 2, "C": 3}, {"C": 6, "A": 2, "B": 4}]'
df_merged, slices = read_dataframes_from_json_n_csv([df_json], ['application/json'])
for s in slices:
assert_df_equal(df_merged[s], pd.read_json(df_json))
# different row/column order when orient=columns
df_json1 = b'{"A": {"1": 1, "2": 2}, "B": {"1": 2, "2": 4}, "C": {"1": 3, "2": 6}}'
df_json2 = b'{"B": {"1": 2, "2": 4}, "A": {"1": 1, "2": 2}, "C": {"1": 3, "2": 6}}'
df_json3 = b'{"A": {"1": 1, "2": 2}, "B": {"2": 4, "1": 2}, "C": {"1": 3, "2": 6}}'
df_merged, slices = read_dataframes_from_json_n_csv(
[df_json1, df_json2, df_json3], ['application/json'] * 3
)
for s in slices:
assert_df_equal(
df_merged[s][["A", "B", "C"]], pd.read_json(df_json1)[["A", "B", "C"]]
)
def test_guess_orient(df, orient):
json_str = df.to_json(orient=orient)
guessed_orient = _guess_orient(json.loads(json_str))
assert orient == guessed_orient or orient in guessed_orient
@pytest.mark.skipif('not psutil.POSIX')
def test_benchmark_load_dataframes():
'''
read_dataframes_from_json_n_csv should be 30x faster than pd.read_json + pd.concat
'''
test_count = 50
dfs = [pd.DataFrame(np.random.rand(10, 100)) for _ in range(test_count)]
inputs = [df.to_json().encode() for df in dfs]
time_st = time.time()
dfs = [pd.read_json(i) for i in inputs]
result1 = | pd.concat(dfs) | pandas.concat |
import pandas as pd
import numpy as np
from os.path import join
import sys
sys.path.append('../utils')
import preproc_utils
class CsvLoaderMain:
def __init__(self, data_path):
self.data_path = data_path
def LoadCsv2HDF5(self, tbl_name, write_path = './'):
if tbl_name in ['monvals', 'comprvals', 'dervals']:
fields = ['Value', 'VariableID', 'PatientID', 'Datetime', 'Status',
'Entertime' if tbl_name=='monvals' else 'EnterTime']
dtype = {'Value': np.float64, 'VariableID': np.int64, 'PatientID': np.int64,
'Datetime': np.str_, 'Status': np.int64, 'EnterTime': np.str_}
elif tbl_name == 'generaldata':
fields = ['PatientID', 'birthYear', 'Sex', 'AdmissionTime', 'Status', 'PatGroup']
dtype = {'PatientID': np.int64, 'birthYear': np.int64, 'Sex': np.str_,
'AdmissionTime': np.str_, 'Status': np.int64, 'PatGroup': np.int64}
elif tbl_name == 'labres':
fields = ['ResultID', 'Value', 'VariableID', 'PatientID', 'SampleTime', 'Status', 'EnterTime']
dtype = {'Value': np.float64, 'ResultID': np.int64, 'VariableID': np.int64,
'PatientID': np.int64, 'SampleTime': np.str_, 'Status': np.int64, 'EnterTime': np.str_}
elif tbl_name == 'observrec':
fields = ['Value', 'VariableID', 'PatientID', 'DateTime', 'Status', 'EnterTime']
dtype = {'Value': np.float64, 'VariableID': np.int64, 'PatientID': np.int64,
'DateTime': np.str_, 'Status': np.int64, 'EnterTime': np.str_}
elif tbl_name == 'pharmarec':
fields = ['CumulDose', 'GivenDose', 'Rate', 'PharmaID', 'InfusionID', 'Route',
'Status', 'PatientID', 'DateTime', 'EnterTime']
dtype = {'CumulDose': np.float64, 'GivenDose': np.float64, 'Rate': np.float64,
'PharmaID': np.int64, 'InfusionID': np.int64, 'Route': np.int64,
'Status': np.int64, 'PatientID': np.int64, 'DateTime': np.str_,
'EnterTime': np.str_}
else:
raise Exception('Wrong table name.')
filepath_csv = join(self.data_path, 'expot-%s.csv'%tbl_name)
filepath_hdf5 = join(write_path, '%s.h5'%tbl_name)
pID_set = set(preproc_utils.get_consent_patient_ids().tolist())
if tbl_name in ['monvals', 'comprvals', 'dervals']:
chunksize = 10 ** 7
iter_csv = pd.read_csv(filepath_csv, encoding='cp1252', na_values='(null)',
sep=';', low_memory=True, usecols=fields, dtype=dtype,
chunksize=chunksize)
vID_set = []
pID_consent_set = []
for i, chunk in enumerate(iter_csv):
print(i)
chunk.Datetime = | pd.to_datetime(chunk.Datetime) | pandas.to_datetime |
"""
Nothing but various helper functions gathered in one module
"""
# from config import *
import matplotlib.pyplot as plt
from pathlib import Path
import numpy.ma as ma
import math
import pandas as pd
import skgstat as skg
import numpy as np
import glob
import geopandas
import os
# NOTE: raster2line() and create_path_array() below also rely on a project geo-helper
# module (referenced as `gt`) and on coords2offset(), which are not imported in this file.
from osgeo import ogr
from skimage.graph import route_through_array
from sklearn.model_selection import StratifiedKFold
import itertools
import collections
from sklearn.metrics import confusion_matrix, classification_report, plot_confusion_matrix, accuracy_score, make_scorer
from numpy import savetxt
from datetime import datetime
from matplotlib.ticker import MaxNLocator
import joblib
from sklearn.preprocessing import StandardScaler, LabelEncoder
def create_path_array(raster_array, geo_transform, start_coord, stop_coord):
# transform coordinates to array index
start_index_x, start_index_y = coords2offset(geo_transform, start_coord[0], start_coord[1])
stop_index_x, stop_index_y = coords2offset(geo_transform, stop_coord[0], stop_coord[1])
# replace np.nan with max raised by an order of magnitude to exclude pixels from least cost
raster_array[np.isnan(raster_array)] = np.nanmax(raster_array) * 10
# create path and costs
index_path, cost = route_through_array(raster_array, (start_index_y, start_index_x),
(stop_index_y, stop_index_x),
geometric=False, fully_connected=False)
index_path = np.array(index_path).T
path_array = np.zeros_like(raster_array)
path_array[index_path[0], index_path[1]] = 1
return path_array
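# Usage sketch for create_path_array (the file name and coordinates below are
# hypothetical; gt.raster2array is the project's raster reader assumed to be available):
#   raster, cost_array, geo_transform = gt.raster2array("cost_surface.tif")
#   path_array = create_path_array(cost_array, geo_transform,
#                                  start_coord=(764430.0, 5215650.0),
#                                  stop_coord=(764830.0, 5215250.0))
#   # path_array holds 1 along the least-cost route and 0 elsewhere.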
def raster2line(raster_file_name, out_shp_fn, pixel_value):
"""
Convert a raster to a line shapefile, where pixel_value determines line start and end points
:param raster_file_name: STR of input raster file name, including directory; must end on ".tif"
:param out_shp_fn: STR of target shapefile name, including directory; must end on ".shp"
:param pixel_value: INT/FLOAT of a pixel value
:return: None (writes new shapefile).
"""
# calculate max. distance between points
# ensures correct neighbourhoods for start and end pts of lines
raster, array, geo_transform = gt.raster2array(raster_file_name)
pixel_width = geo_transform[1]
# max_distance = np.ceil(np.sqrt(2 * pixel_width**2))
# _______ Ricardos change
max_distance = pixel_width
sum = np.sum(array)
# _______
# extract pixels with the user-defined pixel value from the raster array
trajectory = np.where(array == pixel_value)
    if np.count_nonzero(trajectory) == 0:
print("ERROR: The defined pixel_value (%s) does not occur in the raster band." % str(pixel_value))
return None
# convert pixel offset to coordinates and append to nested list of points
points = []
count = 0
for offset_y in trajectory[0]:
offset_x = trajectory[1][count]
points.append(gt.offset2coords(geo_transform, offset_x, offset_y))
count += 1
# create multiline (write points dictionary to line geometry (wkbMultiLineString)
multi_line = ogr.Geometry(ogr.wkbMultiLineString)
for i in gt.itertools.combinations(points, 2):
point1 = ogr.Geometry(ogr.wkbPoint)
point1.AddPoint(i[0][0], i[0][1])
point2 = ogr.Geometry(ogr.wkbPoint)
point2.AddPoint(i[1][0], i[1][1])
distance = point1.Distance(point2)
if distance <= max_distance:
line = ogr.Geometry(ogr.wkbLineString)
line.AddPoint(i[0][0], i[0][1])
line.AddPoint(i[1][0], i[1][1])
multi_line.AddGeometry(line)
# write multiline (wkbMultiLineString2shp) to shapefile
new_shp = gt.create_shp(out_shp_fn, layer_name="raster_pts", layer_type="line")
lyr = new_shp.GetLayer()
feature_def = lyr.GetLayerDefn()
new_line_feat = ogr.Feature(feature_def)
new_line_feat.SetGeometry(multi_line)
lyr.CreateFeature(new_line_feat)
# create projection file
srs = gt.get_srs(raster)
gt.make_prj(out_shp_fn, int(srs.GetAuthorityCode(None)))
print("Success: Wrote %s" % str(out_shp_fn))
def compute_intensity(r, g, b):
return 1 / 3 * (r + g + b)
def plot_intensity(array):
# reshape array to work in the histogram
array = array.reshape(array.size, 1)
fig = plt.figure(figsize=(6.18, 3.82), dpi=150, facecolor="w", edgecolor="gray")
axes = fig.add_subplot(1, 1, 1)
axes.hist(array, 20)
# text
plt.ylim([0, 25])
plt.xlim([160, 255])
plt.text(180, 21, "LI mean = " + str(truncate(np.nanmean(array), 2)))
plt.text(180, 19, "LI std = " + str(truncate(np.nanstd(array), 2)))
plt.xlabel('LI [--]',size=12)
plt.ylabel('number of pixels')
plt.grid(axis="y")
# text
# plt.xlabel('intensity value')
# plt.title('Histogram of Intensity $\mu=$' + str(truncate(np.nanmean(array), 2)) + '$,\ \sigma=$' + str(
# truncate(np.nanstd(array), 2)))
# plt.text(100, 300,
# '$\mu=$' + str(truncate(np.nanmean(array), 2)) + '$,\ \sigma=$' + str(truncate(np.nanstd(array), 2)))
# </editor-fold>
# show graph
plt.show()
# save figure
# plt.savefig(str(Path("geodata_example/plots/{} from {}")).format(type_, title))
def plot_distribution(array, label, riverbank, predictor):
# reshape array to work in the histogram
array = array.reshape(array.size, 1)
fig = plt.figure(figsize=(6.18, 3.82), dpi=150, facecolor="w", edgecolor="gray")
axes = fig.add_subplot(1, 1, 1)
axes.hist(array, 20)
# text
plt.xlabel('predictor')
plt.ylabel('number of pixels')
plt.title('Riverbank ' + str(riverbank) + ' / label ' + str(label) + " / predictor " + str(predictor))
# plt.text(100, 300,
# '$\mu=$' + str(truncate(np.nanmean(array), 2)) + '$,\ \sigma=$' + str(truncate(np.nanstd(array), 2)))
# </editor-fold>
# show graph
plt.show()
# save figure
# plt.savefig(str(Path("geodata_example/plots/{} from {}")).format(type_, title))
def plot_intensity_std(array, radius):
# reshape array to work in the histogram
array = array.reshape(array.size, 1)
fig = plt.figure(figsize=(6.18, 3.82), dpi=150, facecolor="w", edgecolor="gray")
axes = fig.add_subplot(1, 1, 1)
axes.hist(array, 20)
# text
# plt.ylim([0, 30])
# plt.xlim([9, 13])
plt.text(11.5, 25.2, "LISD mean = " + str(truncate(np.nanmean(array), 2)))
plt.text(11.5, 23, "LISD std = " + str(truncate(np.nanstd(array), 2)))
plt.xlabel('LISD [--]')
plt.ylabel('number of pixels')
plt.grid(axis="y")
# plt.title(
# 'Histogram of Intensity_std $\mu=$' + str(truncate(np.nanmean(array), 2)) + '$,\ \sigma of \sigma = $' + str(
# truncate(np.nanstd(array), 2)))
# plt.text(100, 300,
# '$\mu=$' + str(truncate(np.nanmean(array), 2)) + '$,\ \sigma=$' + str(truncate(np.nanstd(array), 2)))
# </editor-fold>
# show graph
# plt.savefig(str(Path("std_distributions/vari_fine_lm.png")))
plt.show()
# save figure
# plt.savefig(str(Path("geodata_example/plots/{} from {}")).format(type_, title))
def truncate(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
s = '{}'.format(f)
if 'e' in s or 'E' in s:
return '{0:.{1}f}'.format(f, n)
i, p, d = s.partition('.')
return '.'.join([i, (d + '0' * n)[:n]])
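# Examples for truncate (returns a string, never rounds):
#   truncate(3.14159, 2) -> '3.14'
#   truncate(2.5, 4)     -> '2.5000'   (padded with zeros)
#   truncate(1e-07, 3)   -> '0.000'    (scientific notation is formatted instead)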
def shift_rgb(red, green, blue, shift):
red += shift
green += shift
blue += shift
return red, green, blue
def set_rgb_boundery(rgb_array):
rgb_array = np.where(rgb_array > 255, 255, rgb_array)
rgb_array = np.where(rgb_array < 0, 0, rgb_array)
return rgb_array
def std_constructor_improv(radius_m, cellsize, array, nodatavalue):
""" Captures the neighbours and their memberships
:param radius_m: float, radius in meters to consider
:param array: numpy array
:return: np.array (float) membership of the neighbours (without mask), np.array (float) neighbours' cells (without mask)
"""
# Calcultes the number of cells correspoding to the given radius in meters
radius = int(np.around(radius_m / cellsize, 0))
array = ma.masked_where(array == nodatavalue, array, copy=True)
# Creates number array with same shape as the input array with filling values as the given nodatavalue
    std_array = np.full(np.shape(array), np.nan, dtype=np.float64)
# Loops through the numpy array
for index, central in np.ndenumerate(array):
if not array.mask[index]:
# Determines the cells that are within the window given by the radius (in cells)
x_up = max(index[0] - radius, 0)
x_lower = min(index[0] + radius + 1, array.shape[0])
y_up = max(index[1] - radius, 0)
y_lower = min(index[1] + radius + 1, array.shape[1])
# neighborhood array (window)
neigh_array = array[x_up: x_lower, y_up: y_lower]
# Distance (in cells) of all neighbours to the cell in x,y in analysis
i, j = np.indices(neigh_array.shape)
i = i.flatten() - (index[0] - x_up)
j = j.flatten() - (index[1] - y_up)
d = np.reshape((i ** 2 + j ** 2) ** 0.5, neigh_array.shape)
# Unraveling of arrays as the order doesnt matter
d = np.ravel(d)
neigh_array = np.ravel(neigh_array)
neigh_array_filtered = neigh_array[d <= radius]
# Test to check if results are correct
# np.savetxt('neigharrayignore.csv', neigh_array_filtered, delimiter=',')
std_array[index] = np.nanstd(neigh_array_filtered)
# std_array = ma.masked_equal(std_array, np.nan)
# std_return = ma.masked_where(array == nodatavalue, std_array, copy=True)
return std_array
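# Minimal usage sketch (synthetic values; intensity_array is hypothetical): a 3 m
# moving-window standard deviation on a 1 m-resolution band with 9999 as no-data:
#   lisd = std_constructor_improv(radius_m=3.0, cellsize=1.0,
#                                 array=intensity_array, nodatavalue=9999)
#   # result has the same shape as the input; no-data cells remain np.nan.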
def variogram_constructor(radius_m, cellsize, array, nodatavalue, raster):
""" Captures the neighbours and their memberships
:param raster: instantiated object raster
:param nodatavalue: 9999
:param cellsize: pixel size
:param radius_m: float, radius in meters to consider
:param array: numpy array
:return: np.array (float) membership of the neighbours (without mask), np.array (float) neighbours' cells (without mask)
"""
# Calculate the number of cells correspoding to the given radius in meters
radius = int(np.around(radius_m / cellsize, 0))
array = ma.masked_where(array == nodatavalue, array, copy=True)
# get array coordinates
intensity_df = raster.coord_dataframe(array.reshape(array.size, 1))
# turn the frame in a three dimensional array
position_x = intensity_df["x"].to_numpy().reshape(array.shape[0], array.shape[1])
position_y = intensity_df["y"].to_numpy().reshape(array.shape[0], array.shape[1])
# Creates number array with same shape as the input array with filling values as the given nodatavalue
    v_array = np.full(np.shape(array), np.nan, dtype=np.float64)
# Loops through the numpy array
for index, central in np.ndenumerate(array):
if not array.mask[index]:
# Determines the cells that are within the window given by the radius (in cells)
x_up = max(index[0] - radius, 0)
x_lower = min(index[0] + radius + 1, array.shape[0])
y_up = max(index[1] - radius, 0)
y_lower = min(index[1] + radius + 1, array.shape[1])
# neighborhood array (window)
neigh_array = array[x_up: x_lower, y_up: y_lower]
neigh_array_x = position_x[x_up: x_lower, y_up: y_lower]
neigh_array_y = position_y[x_up: x_lower, y_up: y_lower]
# Distance (in cells) of all neighbours to the cell in x,y in analysis
i, j = np.indices(neigh_array.shape)
i = i.flatten() - (index[0] - x_up)
j = j.flatten() - (index[1] - y_up)
d = np.reshape((i ** 2 + j ** 2) ** 0.5, neigh_array.shape)
# Unraveling of arrays as the order doesnt matter
d = np.ravel(d)
neigh_array = np.ravel(neigh_array)
neigh_array_x = np.ravel(neigh_array_x)
neigh_array_y = np.ravel(neigh_array_y)
neigh_array = neigh_array[d <= radius]
neigh_array_x = neigh_array_x[d <= radius]
neigh_array_y = neigh_array_y[d <= radius]
# create dataframe of local variogram
v_df = pd.DataFrame({"x": neigh_array_x, "y": neigh_array_y, "int": neigh_array})
v_df = v_df.dropna(how='any', axis=0)
# Instantiate variogram
v_obj = skg.Variogram(np.array(v_df[["x", "y"]]),
np.array(v_df[["int"]]).reshape(len(np.array(v_df[["int"]])))
, fit_method='trf', model='exponential') # reshape the array from (R,1) to (R,)
# return lag values
v_lag_value = v_obj.data(n=2)[1][1] # return the last
# plot semivariogram
# v_obj.plot()
# fill v_array fit variogram values for the defines lag(radius)
v_array[index] = v_lag_value
del v_obj
# plot_intensity_std(v_array, radius_m)
return v_array
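# Minimal usage sketch (assumes `raster_obj` is this project's raster wrapper exposing
# coord_dataframe(), and that scikit-gstat is installed):
#   v = variogram_constructor(radius_m=3.0, cellsize=1.0, array=intensity_array,
#                             nodatavalue=9999, raster=raster_obj)
#   # each unmasked cell holds the fitted exponential-variogram value for its window.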
def tri_constructor(radius_m, cellsize, array, nodatavalue):
"""
:param cellsize: pixel width of the raster object
    :param radius_m: radius around pixel to be taken into account to compute TRI
    :param nodatavalue: value to be masked (no-data cells)
    :param array: numpy array of the raster band values
:return: array with the TRI value for each pixel
"""
# Calculate the number of cells corresponding to the given radius in meters
radius = int(np.around(radius_m / cellsize, 0))
array = ma.masked_where(array == nodatavalue, array, copy=True)
# Creates number array with same shape as the input array with filling values as the given nodatavalue
    tri_array = np.full(np.shape(array), np.nan, dtype=np.float64)
# Loops through the numpy array
for index, central in np.ndenumerate(array):
if not array.mask[index]:
# Determines the cells that are within the window given by the radius (in cells)
x_up = max(index[0] - radius, 0)
x_lower = min(index[0] + radius + 1, array.shape[0])
y_up = max(index[1] - radius, 0)
y_lower = min(index[1] + radius + 1, array.shape[1])
# neighborhood array (window)
neigh_array = array[x_up: x_lower, y_up: y_lower]
# Distance (in cells) of all neighbours to the cell in x,y in analysis
i, j = np.indices(neigh_array.shape)
i = i.flatten() - (index[0] - x_up)
j = j.flatten() - (index[1] - y_up)
d = np.reshape((i ** 2 + j ** 2) ** 0.5, neigh_array.shape)
# Unraveling of arrays as the order doesnt matter
d = np.ravel(d)
neigh_array = np.ravel(neigh_array)
neigh_array = neigh_array[d <= radius]
# Compute TRI for one pixel
tri_pixel = np.nanstd(neigh_array)
# fill tri_array fit variogram values for the defines lag(radius)
tri_array[index] = tri_pixel
return tri_array
def find_files(directory=None):
"""It finds all the .tif or .shp files inside a folder and
create list of strings with their raw names
:param directory: string of directory's address
:return: list of strings from addresses of all files inside the directory
"""
# Set up variables
is_raster = False
is_shape = False
# raster_folder, shape_folder = verify_folders(directory)
# terminate the code if there is no directory address
if directory is None:
print("Any directory was given")
sys.exit()
    # Append "/" to the directory name if it does not already end with a path separator
if not str(directory).endswith("/") and not str(directory).endswith("\\"):
directory = Path(str(directory) + "/")
# Find out if there is shape or raster file inside the folder
try:
for file_name in os.listdir(directory):
if file_name.endswith('.tif'):
is_raster = True
break
if file_name.endswith('.shp'):
is_shape = True
break
except:
print("Input directory {} was not found".format(directory))
# Create a list of shape files or raster files names
if is_shape:
file_list = glob.glob(str(directory) + "/*.shp")
elif is_raster:
file_list = glob.glob(str(directory) + "/*.tif")
else:
print("There is no valid file inside the folder {}".format(directory))
exit()
return file_list
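# Example (hypothetical directory): collect every GeoTIFF (or shapefile) in a folder:
#   file_list = find_files("D:/project/rasters")
#   # -> list of '*.tif' (or '*.shp') paths; the script exits if neither type is found.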
def find_probes_info(file_list):
numbers_list = []
predictors_names = []
probes_names = []
for i, file_raster in enumerate(file_list):
numbers_list.append(int(file_raster.split(str(Path("/")))[-1].split("_")[1]))
predictors_names.append(file_raster.split(str(Path("/")))[-1].split("_")[2].split(".")[0])
probes_names.append(file_raster.split(str(Path("/")))[-1].split("_")[0] + "_" +
file_raster.split(str(Path("/")))[-1].split("_")[1])
# # remove repeted string in the list
# seen = set()
# result = []
# for item in probes_names:
# if item not in seen:
# seen.add(item)
# result.append(item)
# arrange return values
# probes_names = result
probes_list = [numbers_list[0]]
for i, p in enumerate(numbers_list): probes_list.append(p) if (i > 1 and p != numbers_list[i - 1]) else None
probes_number = len(probes_list)
predictors_number = int(len(numbers_list) / probes_number)
predictors_names = predictors_names[0:predictors_number]
return probes_number, predictors_number, predictors_names, probes_names
def sample_band(band_array, raster, radius):
"""
:param band_array: array to be reduced
:param raster: object raster to get pixel size
:param radius: real radius in meters to be sampled
    :return: np.array, square window of band_array clipped around the centre pixel
"""
# compute matrix distance
pixel_width = raster.transform[1]
matrix_radius = int(radius / pixel_width)
index = (int(band_array.shape[0] / 2), int(band_array.shape[1] / 2))
# find index of the m
# Determines the cells that are within the window given by the radius (in cells)
x_up = max(index[0] - matrix_radius, 0)
x_lower = min(index[0] + matrix_radius + 1, band_array.shape[0])
y_up = max(index[1] - matrix_radius, 0)
y_lower = min(index[1] + matrix_radius + 1, band_array.shape[1])
# neighborhood array (window)
band_array = band_array[x_up: x_lower, y_up: y_lower]
return band_array
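# Usage sketch (raster_obj is hypothetical): clip a 0.5 m window around the centre
# pixel of a probe raster band; raster_obj.transform[1] supplies the pixel width:
#   clipped = sample_band(band_array, raster_obj, radius=0.5)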
def find_colm_classes_in(filename=""):
file_list_raster = find_files(filename)
df_shapes = pd.DataFrame()
for shape in file_list_raster:
df_shape_temp = geopandas.read_file(shape)
if "Innere_Kol" in df_shape_temp.columns:
df_shape_temp.rename(columns={'Innere_Kol': 'Innere_K_1'}, inplace=True) # correct QGIS columns names
# create a colunm with the name of the bank an ID
bank_name = shape.split("\\")[-1].split("_")[0]
df_shape_temp["name"] = bank_name + "_" + df_shape_temp["ID"].astype(str)
df_shape_temp.drop(df_shape_temp.columns.difference(['name', 'Innere_K_1']), 1, inplace=True)
df_shapes = df_shapes.append(df_shape_temp, ignore_index=True)
return df_shapes
def find_colm_classes_out(filename=""):
file_list_raster = find_files(filename)
df_shapes = pd.DataFrame()
for shape in file_list_raster:
df_shape_temp = geopandas.read_file(shape)
if "Stufe_AK" in df_shape_temp.columns:
df_shape_temp.rename(columns={'Stufe_AK': 'AK'}, inplace=True) # correct QGIS columns names
if "STUFE_AK" in df_shape_temp.columns:
df_shape_temp.rename(columns={'STUFE_AK': 'AK'}, inplace=True) # correct QGIS columns names
# create a colunm with the name of the bank an ID
bank_name = shape.split("\\")[-1].split("_")[0]
df_shape_temp["name"] = bank_name + "_" + df_shape_temp["i"].astype(str)
df_shape_temp.drop(df_shape_temp.columns.difference(['name', 'AK']), 1, inplace=True)
df_shapes = df_shapes.append(df_shape_temp, ignore_index=True)
# Get names of indexes
indexes = df_shapes.index[df_shapes['AK'] == "Flimz"].tolist()
indexes.append(df_shapes.index[df_shapes['AK'] == "Bewuchs"].tolist()[0])
# Delete these row indexes from dataFrame
df_shapes.drop(indexes, inplace=True)
return df_shapes
def exclude_bank(df=None, kiesbank=None, status=None):
if df is None or kiesbank is None or status is None:
print("Give the name of the bank correctly")
quit()
if kiesbank == "kb05" and status is "in":
df = df[(df["name"] == "kb05_10") | (df["name"] == "kb05_11") |
(df["name"] == "kb05_12") | (df["name"] == "kb05_13") |
(df["name"] == "kb05_14") | (df["name"] == "kb05_15") |
(df["name"] == "kb05_16") | (df["name"] == "kb05_17") |
(df["name"] == "kb05_18") | (df["name"] == "kb05_19") |
(df["name"] == "kb05_1") | (df["name"] == "kb05_20") |
(df["name"] == "kb05_4") | (df["name"] == "kb05_5") |
(df["name"] == "kb05_6") | (df["name"] == "kb05_7") |
(df["name"] == "kb05_8") | (df["name"] == "kb05_9")
]
if kiesbank == "kb05" and status is "out":
df = df[(df["name"] != "kb05_10") & (df["name"] != "kb05_11") &
(df["name"] != "kb05_12") & (df["name"] != "kb05_13") &
(df["name"] != "kb05_14") & (df["name"] != "kb05_15") &
(df["name"] != "kb05_16") & (df["name"] != "kb05_17") &
(df["name"] != "kb05_18") & (df["name"] != "kb05_19") &
(df["name"] != "kb05_1") & (df["name"] != "kb05_20") &
(df["name"] != "kb05_4") & (df["name"] != "kb05_5") &
(df["name"] != "kb05_6") & (df["name"] != "kb05_7") &
(df["name"] != "kb05_3") & (df["name"] != "kb05_21") &
(df["name"] != "kb05_8") & (df["name"] != "kb05_9")
]
# kb07
if kiesbank is "kb07" and status is "out":
df = df[~df["name"].str.contains("kb07")]
if kiesbank is "kb07" and status is "in":
df = df[df["name"].str.contains("kb07")]
# kb08
if kiesbank is "kb08" and status is "out":
df = df[~df["name"].str.contains("kb08")]
if kiesbank is "kb08" and status is "in":
df = df[df["name"].str.contains("kb08")]
# kb13
if kiesbank is "kb13" and status is "out":
df = df[~df["name"].str.contains("kb13")]
if kiesbank is "kb13" and status is "in":
df = df[df["name"].str.contains("kb13")]
# kb19
if kiesbank is "kb19" and status is "out":
df = df[~df["name"].str.contains("kb19")]
if kiesbank is "kb19" and status is "in":
df = df[df["name"].str.contains("kb19")]
return df
def split_test_samples(df=None, split=None, list_drop=None):
# take randomly a percentage of the samples
df_save = df
num_samples = len(df["name"].unique())
num_samples_test = int(num_samples * split)
samples = df["name"].unique().to_numpy().tolist()
samples_test = []
for i in range(1, num_samples_test + 1):
samples_length = len(samples)
rand_num = np.random.randint(0, samples_length)
randsample = samples[rand_num]
df = df[df["name"] != randsample]
samples.remove(randsample)
samples_test.append(randsample)
# create test set
X = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2019 <NAME> GmbH
# Copyright 2020-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS 2 data model."""
import numpy as np
import pandas as pd
from . import DataModel
from . import DataModelIntermediateStorage
class Ros2DataModel(DataModel):
"""
Container to model pre-processed ROS 2 data for analysis.
This aims to represent the data in a ROS 2-aware way.
"""
def __init__(self) -> None:
"""Create a Ros2DataModel."""
super().__init__()
# Objects (one-time events, usually when something is created)
self._contexts: DataModelIntermediateStorage = []
self._nodes: DataModelIntermediateStorage = []
self._rmw_publishers: DataModelIntermediateStorage = []
self._rcl_publishers: DataModelIntermediateStorage = []
self._rmw_subscriptions: DataModelIntermediateStorage = []
self._rcl_subscriptions: DataModelIntermediateStorage = []
self._subscription_objects: DataModelIntermediateStorage = []
self._services: DataModelIntermediateStorage = []
self._clients: DataModelIntermediateStorage = []
self._timers: DataModelIntermediateStorage = []
self._timer_node_links: DataModelIntermediateStorage = []
self._callback_objects: DataModelIntermediateStorage = []
self._callback_symbols: DataModelIntermediateStorage = []
self._lifecycle_state_machines: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
self._rclcpp_publish_instances: DataModelIntermediateStorage = []
self._rcl_publish_instances: DataModelIntermediateStorage = []
self._rmw_publish_instances: DataModelIntermediateStorage = []
self._rmw_take_instances: DataModelIntermediateStorage = []
self._rcl_take_instances: DataModelIntermediateStorage = []
self._rclcpp_take_instances: DataModelIntermediateStorage = []
self._callback_instances: DataModelIntermediateStorage = []
self._lifecycle_transitions: DataModelIntermediateStorage = []
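    # Typical flow (sketch, not part of the upstream docs): a trace-event handler calls
    # the add_* methods below while events are parsed, e.g.
    #   model.add_node(node_handle, stamp, tid, rmw_handle, 'talker', '/')
    #   model.add_callback_instance(callback_object, stamp, duration, False)
    # and _finalize() then assembles the accumulated dicts into pandas DataFrames.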
def add_context(
self, context_handle, timestamp, pid, version
) -> None:
self._contexts.append({
'context_handle': context_handle,
'timestamp': timestamp,
'pid': pid,
'version': version,
})
def add_node(
self, node_handle, timestamp, tid, rmw_handle, name, namespace
) -> None:
self._nodes.append({
'node_handle': node_handle,
'timestamp': timestamp,
'tid': tid,
'rmw_handle': rmw_handle,
'name': name,
'namespace': namespace,
})
def add_rmw_publisher(
self, handle, timestamp, gid,
) -> None:
self._rmw_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_publisher(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_publish_instance(
self, timestamp, message,
) -> None:
self._rclcpp_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rcl_publish_instance(
self, publisher_handle, timestamp, message,
) -> None:
self._rcl_publish_instances.append({
'publisher_handle': publisher_handle,
'timestamp': timestamp,
'message': message,
})
def add_rmw_publish_instance(
self, timestamp, message,
) -> None:
self._rmw_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rmw_subscription(
self, handle, timestamp, gid
) -> None:
self._rmw_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_subscription(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_subscription(
self, subscription_pointer, timestamp, subscription_handle
) -> None:
self._subscription_objects.append({
'subscription': subscription_pointer,
'timestamp': timestamp,
'subscription_handle': subscription_handle,
})
def add_service(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._services.append({
            'service_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_client(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._clients.append({
'client_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_timer(
self, handle, timestamp, period, tid
) -> None:
self._timers.append({
'timer_handle': handle,
'timestamp': timestamp,
'period': period,
'tid': tid,
})
def add_timer_node_link(
self, handle, timestamp, node_handle
) -> None:
self._timer_node_links.append({
'timer_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
})
def add_callback_object(
self, reference, timestamp, callback_object
) -> None:
self._callback_objects.append({
'reference': reference,
'timestamp': timestamp,
'callback_object': callback_object,
})
def add_callback_symbol(
self, callback_object, timestamp, symbol
) -> None:
self._callback_symbols.append({
'callback_object': callback_object,
'timestamp': timestamp,
'symbol': symbol,
})
def add_callback_instance(
self, callback_object, timestamp, duration, intra_process
) -> None:
self._callback_instances.append({
'callback_object': callback_object,
'timestamp': np.datetime64(timestamp, 'ns'),
'duration': np.timedelta64(duration, 'ns'),
'intra_process': intra_process,
})
def add_rmw_take_instance(
self, subscription_handle, timestamp, message, source_timestamp, taken
) -> None:
self._rmw_take_instances.append({
'subscription_handle': subscription_handle,
'timestamp': timestamp,
'message': message,
'source_timestamp': source_timestamp,
'taken': taken,
})
def add_rcl_take_instance(
self, timestamp, message
) -> None:
self._rcl_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rclcpp_take_instance(
self, timestamp, message
) -> None:
self._rclcpp_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_lifecycle_state_machine(
self, handle, node_handle
) -> None:
self._lifecycle_state_machines.append({
'state_machine_handle': handle,
'node_handle': node_handle,
})
def add_lifecycle_state_transition(
self, state_machine_handle, start_label, goal_label, timestamp
) -> None:
self._lifecycle_transitions.append({
'state_machine_handle': state_machine_handle,
'start_label': start_label,
'goal_label': goal_label,
'timestamp': timestamp,
})
def _finalize(self) -> None:
# Some of the lists of dicts might be empty, and setting
# the index for an empty dataframe leads to an error
self.contexts = pd.DataFrame.from_dict(self._contexts)
if self._contexts:
self.contexts.set_index('context_handle', inplace=True, drop=True)
self.nodes = pd.DataFrame.from_dict(self._nodes)
if self._nodes:
self.nodes.set_index('node_handle', inplace=True, drop=True)
self.rmw_publishers = pd.DataFrame.from_dict(self._rmw_publishers)
if self._rmw_publishers:
self.rmw_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rcl_publishers = pd.DataFrame.from_dict(self._rcl_publishers)
if self._rcl_publishers:
self.rcl_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rmw_subscriptions = pd.DataFrame.from_dict(self._rmw_subscriptions)
if self._rmw_subscriptions:
self.rmw_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.rcl_subscriptions = pd.DataFrame.from_dict(self._rcl_subscriptions)
if self._rcl_subscriptions:
self.rcl_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.subscription_objects = pd.DataFrame.from_dict(self._subscription_objects)
if self._subscription_objects:
self.subscription_objects.set_index('subscription', inplace=True, drop=True)
self.services = pd.DataFrame.from_dict(self._services)
if self._services:
self.services.set_index('service_handle', inplace=True, drop=True)
self.clients = pd.DataFrame.from_dict(self._clients)
if self._clients:
self.clients.set_index('client_handle', inplace=True, drop=True)
self.timers = | pd.DataFrame.from_dict(self._timers) | pandas.DataFrame.from_dict |
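
# Illustrative sketch (not part of the trace model above): the guards in
# _finalize() exist because DataFrame.from_dict([]) yields a frame with no
# columns at all, so calling set_index('context_handle') on it would fail
# with a KeyError. Once at least one row is present, set_index() is safe.
def _demo_set_index_guard():
    import pandas as pd

    empty = pd.DataFrame.from_dict([])             # no rows and no columns
    assert list(empty.columns) == []               # 'context_handle' is absent
    filled = pd.DataFrame.from_dict([{'context_handle': 1, 'timestamp': 2}])
    filled.set_index('context_handle', inplace=True, drop=True)
    return filled
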
# -*- coding: utf-8 -*-
import re
import pandas as pd
import numpy as np
from pandas import DataFrame
import os
pathDir=os.listdir(r'C:\Users\aklasim\Desktop\Py6.11 Pdf\t1')
pt=(r'C:\Users\aklasim\Desktop\Py6.11 Pdf\t1')
cols=['工单编号','上级工单编号','项目编号','工单描述','上级工单描述','施工单位','合同号','计划服务费','开工日期','完工日期','作业类型','通知单创建','通知单批准','计划','待审','下达','验收确认','完工确认','完工时间','打印者','打印日期','工序号','工作中心','控制码','工序内容','计划量','签证','物料编码','物料描述','单位计划量','出库量','签证']
l=[]
x=0
l1=[]
dfb = pd.DataFrame(columns=['工单编号', '上级工单编号', '项目编号', '工单描述', '上级工单描述', '施工单位', '合同号', '计划服务费','开工日期', '完工日期', '作业类型', '通知单创建', '通知单批准', '计划', '待审', '下达', '验收确认','完工确认', '完工时间', '打印者', '打印日期', '工序号', '工作中心', '控制码', '工序内容', '计划量',
'签证', '物料编码', '物料描述', '单位计划量', '出库量', '签证', '单位', '数量确认'])
for filename in pathDir:
x=x+1
df = pd.DataFrame(index=range(30), columns=cols)
def gg(rg,n):
e=[]
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
d=re.search(rg,line)
if d:
d=str(d.group())
e.append(d)
print(e)
df[n]=pd.Series(e)
f.close()
desc=gg('工单描述\s\S+','工单描述')#desc = re.findall('工单描述\s\S+', line)
n=gg('工单编号\s\d+','工单编号')
up_n=gg('上级工单编号\s\d+','上级工单编号') #sup_desc = re.findall('上级工单描述\s\d+', line)
pro_n=gg('项目编号\s\d+','项目编号') #pro_co=re.findall('项目编号\s\d+',line)
unit=gg('施工单位\s\S+','施工单位')#unit= re.findall('施工单位\s\S+', line)
contr_co=gg('合同号\s\d+','合同号') #contr_co = re.findall('合同号\s\d+', line)
cost=gg('计划服务费\s+\d+\,*\d*\.\d+','计划服务费')#cost = re.findall('计划服务费\s+\d+\,*\d*\.\d+', line)
#if len(cost)>0:
# money=cost[0].split()[1]
start_d=gg('开工日期\s\S+','开工日期')#start_d = re.findall('开工日期\s\S+', line)
over_d=gg('完工日期\s\S+','完工日期')#over_d = re.findall('完工日期\s\S+', line)
worktp = gg('作业类型\s\S+', '作业类型')#worktp = re.findall('作业类型\s\S+', line)
#ntc_crt = re.findall('通知单创建\s\S+', line)
#ntc_pmt = re.findall('通知单批准\s\S+', line)
#plan = re.findall('计划\s\S+', line)
#ass= re.findall('待审\s\S+', line)
#order= re.findall('下达\s\S+', line)
#acpt_ck = re.findall('验收确认\s\S+', line)
#fns_ck = re.findall('完工确认\s\S+', line)
#fns_tm = re.findall('完工时间\s\S+', line)
#printer = re.findall('打印者:\S+', line)
#prt_d = re.findall('打印日期:\d+-\d+-\d+', line)
ntc_crt = gg('通知单创建\s\S+', '通知单创建')
ntc_pmt = gg('通知单批准\s\S+', '通知单批准')
plan = gg('计划\s\S+', '计划')
ass= gg('待审\s\S+', '待审')
order= gg('下达\s\S+', '下达')
acpt_ck = gg('验收确认\s\S+', '验收确认')
fns_ck = gg('完工确认\s\S+', '完工确认')
fns_tm = gg('完工时间\s\S+', '完工时间')
printer = gg('打印者:\S+', '打印者')
prt_d = gg('打印日期:\d+-\d+-\d+', '打印日期')
wp_num = []
wk_ctr = []
ctr_code = []
wp_contts = []
cert = []
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
proc_set = re.findall('(^\d+)\s(\D+\d*)(\D+\d*)\s((\S*\d*\s*\.*)+)(\d+\.*\d*\D+)+\n', line)#426
if proc_set:# 工序号/工作中心/控制码/工序内容/签证
sets=list(proc_set[0])
wp_num.append(sets[0])
wk_ctr.append (sets[1])
ctr_code.append (sets[2])
wp_contts.append (sets[3])
cert.append (sets[5])
df['工序号']=pd.Series(wp_num)
df['工作中心']=pd.Series(wk_ctr)
df['控制码']=pd.Series(ctr_code)
df['工序内容']=pd.Series(wp_contts)
df['签证']=pd.Series(cert)
wp_num = []
mat_code = []
mat_descr = []
msr_unit = []
all_num = []
cert=[]
f.close()
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
mat_set = re.findall('(^\d+)\s(\d+)\s((\S*\s*)+)\s(\D)\s((\d\.*\d*\s*)+)\n', line) # 140
if mat_set: # 工序号/物料编码/物料描述/单位/数量确认/计划量/出库量/签证
sets = list(mat_set[0])
wp_num.append(sets[0])
mat_code.append(sets[1])
mat_descr.append(sets[2])
msr_unit.append(sets[4])
all_num.append(sets[5])
cert.append(sets[6])
df['工序号']=pd.Series(wp_num)
df['物料编码']=pd.Series(mat_code)
df['物料描述']=pd.Series(mat_descr)
df['单位']=pd.Series(msr_unit)
df['数量确认']=pd.Series(all_num)
df['签证']=pd.S | eries(cert) | pandas.Series |
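
# Illustrative sketch of the extraction pattern used by gg() above (the sample
# lines are made up): scan each line with a regex, keep the matches in order,
# and assign them to a DataFrame column as a positional pd.Series; rows beyond
# the number of matches are simply left as NaN.
def _demo_regex_column(lines=('工单编号 1001', 'no match here', '工单编号 1002')):
    import re
    import pandas as pd

    matches = [m.group() for m in (re.search(r'工单编号\s\d+', ln) for ln in lines) if m]
    out = pd.DataFrame(index=range(5), columns=['工单编号'])
    out['工单编号'] = pd.Series(matches)   # shorter series -> trailing rows stay NaN
    return out
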
from datetime import datetime
from sqlite3 import connect
from typing import Dict, NamedTuple, Optional, Mapping
import json
from black import line_to_string
import kfp.dsl as dsl
import kfp
from kfp.components import func_to_container_op, InputPath, OutputPath
import kfp.compiler as compiler
from kfp.dsl.types import Dict as KFPDict, List as KFPList
from kubernetes import client, config
import pprint
from numpy import testing
import pandas as pd
from pandas import DataFrame
from requests import head
def python_function_factory(
function_name: str,
packages: Optional[list] = [],
base_image_name: Optional[str] = "python:3.9-slim-buster",
annotations: Optional[Mapping[str, str]] = [],
):
return func_to_container_op(
func=function_name,
base_image=base_image_name,
packages_to_install=packages,
annotations=annotations,
)
def load_secret(
keyvault_url: str = "",
keyvault_credentials_b64: str = "",
connection_string_secret_name: str = "",
) -> str:
import os
import json
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient
if (
keyvault_url == ""
or keyvault_credentials_b64 == ""
or connection_string_secret_name == ""
):
return ""
def base64_decode_to_dict(b64string: str) -> dict:
import base64
decode_secret_b64_bytes = b64string.encode("utf-8")
decode_secret_raw_bytes = base64.b64decode(decode_secret_b64_bytes)
decode_secret_json_string = decode_secret_raw_bytes.decode("utf-8")
return json.loads(decode_secret_json_string)
secret_name_string = str(connection_string_secret_name)
keyvault_credentials_dict = base64_decode_to_dict(str(keyvault_credentials_b64))
os.environ["AZURE_CLIENT_ID"] = keyvault_credentials_dict["appId"]
os.environ["AZURE_CLIENT_SECRET"] = keyvault_credentials_dict["password"]
os.environ["AZURE_TENANT_ID"] = keyvault_credentials_dict["tenant"]
credential = DefaultAzureCredential()
secret_client = SecretClient(vault_url=keyvault_url, credential=credential)
retrieved_secret_b64 = secret_client.get_secret(secret_name_string)
return retrieved_secret_b64.value
def load_secret_dapr(connection_string_secret_name: str) -> str:
import os
import json
from dapr.clients import DaprClient
    with DaprClient() as d:
        key = "POSTGRES_CONNECTION_STRING_B64"
        storeName = "kubernetes-secret-store"
        print(f"Requesting secret from vault: {key}")
        resp = d.get_secret(store_name=storeName, key=key)
        secret_value = resp.secret[key]
        print(f"Secret retrieved from vault: {secret_value}", flush=True)
        return secret_value
def print_metrics(
training_dataframe_string: str,
testing_dataframe_string: str,
mlpipeline_metrics_path: OutputPath("Metrics"),
output_path: str,
):
score = 1337
metrics = {
"metrics": [
{
"name": "rmsle", # The name of the metric. Visualized as the column name in the runs table.
"numberValue": score, # The value of the metric. Must be a numeric value.
"format": "RAW", # The optional format of the metric. Supported values are "RAW" (displayed in raw format) and "PERCENTAGE" (displayed in percentage format).
}
]
}
with open(mlpipeline_metrics_path, "w") as f:
json.dump(metrics, f)
def download_data(url: str, output_text_path: OutputPath(str)) -> None:
import requests
req = requests.get(url)
url_content = req.content
with open(output_text_path, "wb") as writer:
writer.write(url_content)
def get_dataframes_development(
training_csv: InputPath(str),
testing_csv: InputPath(str),
cache_buster: str = "",
) -> NamedTuple(
"DataframeOutputs",
[
("training_dataframe_string", str),
("testing_dataframe_string", str),
],
):
import pandas as pd
from pandas import DataFrame
from collections import namedtuple
training_dataframe = DataFrame
testing_dataframe = DataFrame
training_dataframe = pd.read_csv(training_csv)
testing_dataframe = pd.read_csv(testing_csv)
dataframe_outputs = namedtuple(
"DataframeOutputs",
["training_dataframe_string", "testing_dataframe_string"],
)
return dataframe_outputs(training_dataframe.to_json(), testing_dataframe.to_json())
def get_dataframes_live(
postgres_connection_string_b64: str,
percent_to_withhold_for_test: float,
cache_buster: str = "",
) -> NamedTuple(
"DataframeOutputs",
[
("training_dataframe_string", str),
("testing_dataframe_string", str),
],
):
import psycopg2
import base64
import json
from sqlalchemy import create_engine
import pandas as pd
from pprint import pp
print(f"Inbound PSQL: {postgres_connection_string_b64}")
decode_secret_b64_bytes = postgres_connection_string_b64.encode("ascii")
decode_secret_raw_bytes = base64.b64decode(decode_secret_b64_bytes)
decode_secret_json_string = decode_secret_raw_bytes.decode("ascii")
connection_string_dict = json.loads(decode_secret_json_string)
pp(f"Conn string dict: {connection_string_dict}")
engine = create_engine(
f'postgresql://{connection_string_dict["user"]}:{connection_string_dict["password"]}@{connection_string_dict["host"]}:{connection_string_dict["port"]}/{connection_string_dict["database"]}'
)
df = | pd.read_sql_query(f"select * from drug_classification_staging", con=engine) | pandas.read_sql_query |
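
# Illustrative sketch only (placeholder values, not a real database):
# get_dataframes_live() above expects postgres_connection_string_b64 to be
# base64-encoded JSON carrying exactly the keys it reads when building the
# SQLAlchemy engine: user, password, host, port and database.
def _encode_postgres_connection_string(user, password, host, port, database) -> str:
    import base64
    import json

    payload = {"user": user, "password": password, "host": host,
               "port": port, "database": database}
    return base64.b64encode(json.dumps(payload).encode("ascii")).decode("ascii")
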
"""Process the USCRN station table
ftp://ftp.ncdc.noaa.gov/pub/data/uscrn/products/stations.tsv
"""
import pandas as pd
from pyiem.util import get_dbconn
def main():
"""Go"""
pgconn = get_dbconn('mesosite', user='mesonet')
cursor = pgconn.cursor()
df = pd.read_csv('stations.tsv', sep=r'\t', engine='python')
df['stname'] = df['LOCATION'] + " " + df['VECTOR']
for _, row in df.iterrows():
station = row['WBAN']
if station == 'UN' or | pd.isnull(station) | pandas.isnull |
# -*- coding: utf-8 -*-
from datetime import datetime
import pandas as pd
import numpy as np
from findy.database.schema.fundamental.finance import BalanceSheet
from findy.database.plugins.eastmoney.common import to_report_period_type
from findy.database.plugins.eastmoney.finance.base_china_stock_finance_recorder import BaseChinaStockFinanceRecorder
from findy.utils.convert import to_float
balance_sheet_map = {
# 流动资产
#
# 货币资金
"Monetaryfund": "cash_and_cash_equivalents",
# 应收票据
"Billrec": "note_receivable",
# 应收账款
"Accountrec": "accounts_receivable",
# 预付款项
"Advancepay": "advances_to_suppliers",
# 其他应收款
"Otherrec": "other_receivables",
# 存货
"Inventory": "inventories",
# 一年内到期的非流动资产
"Nonlassetoneyear": "current_portion_of_non_current_assets",
# 其他流动资产
"Otherlasset": "other_current_assets",
# 流动资产合计
"Sumlasset": "total_current_assets",
# 非流动资产
#
# 可供出售金融资产
"Saleablefasset": "fi_assets_saleable",
# 长期应收款
"Ltrec": "long_term_receivables",
# 长期股权投资
"Ltequityinv": "long_term_equity_investment",
# 投资性房地产
"Estateinvest": "real_estate_investment",
# 固定资产
"Fixedasset": "fixed_assets",
# 在建工程
"Constructionprogress": "construction_in_process",
# 无形资产
"Intangibleasset": "intangible_assets",
# 商誉
"Goodwill": "goodwill",
# 长期待摊费用
"Ltdeferasset": "long_term_prepaid_expenses",
# 递延所得税资产
"Deferincometaxasset": "deferred_tax_assets",
# 其他非流动资产
"Othernonlasset": "other_non_current_assets",
# 非流动资产合计
"Sumnonlasset": "total_non_current_assets",
# 资产总计
"Sumasset": "total_assets",
# 流动负债
#
# 短期借款
"Stborrow": "short_term_borrowing",
# 吸收存款及同业存放
"Deposit": "accept_money_deposits",
# 应付账款
"Accountpay": "accounts_payable",
# 预收款项
"Advancereceive": "advances_from_customers",
# 应付职工薪酬
"Salarypay": "employee_benefits_payable",
# 应交税费
"Taxpay": "taxes_payable",
# 应付利息
"Interestpay": "interest_payable",
# 其他应付款
"Otherpay": "other_payable",
# 一年内到期的非流动负债
"Nonlliaboneyear": "current_portion_of_non_current_liabilities",
# 其他流动负债
"Otherlliab": "other_current_liabilities",
# 流动负债合计
"Sumlliab": "total_current_liabilities",
# 非流动负债
#
# 长期借款
"Ltborrow": "long_term_borrowing",
# 长期应付款
"Ltaccountpay": "long_term_payable",
# 递延收益
"Deferincome": "deferred_revenue",
# 递延所得税负债
"Deferincometaxliab": "deferred_tax_liabilities",
# 其他非流动负债
"Othernonlliab": "other_non_current_liabilities",
# 非流动负债合计
"Sumnonlliab": "total_non_current_liabilities",
# 负债合计
"Sumliab": "total_liabilities",
# 所有者权益(或股东权益)
#
# 实收资本(或股本)
"Sharecapital": "capital",
# 资本公积
"Capitalreserve": "capital_reserve",
# 专项储备
"Specialreserve": "special_reserve",
# 盈余公积
"Surplusreserve": "surplus_reserve",
# 未分配利润
"Retainedearning": "undistributed_profits",
# 归属于母公司股东权益合计
"Sumparentequity": "equity",
# 少数股东权益
"Minorityequity": "equity_as_minority_interest",
# 股东权益合计
"Sumshequity": "total_equity",
# 负债和股东权益合计
"Sumliabshequity": "total_liabilities_and_equity",
# 银行相关
# 资产
# 现金及存放中央银行款项
"Cashanddepositcbank": "fi_cash_and_deposit_in_central_bank",
# 存放同业款项
"Depositinfi": "fi_deposit_in_other_fi",
# 贵金属
"Preciousmetal": "fi_expensive_metals",
# 拆出资金
"Lendfund": "fi_lending_to_other_fi",
# 以公允价值计量且其变动计入当期损益的金融资产
"Fvaluefasset": "fi_financial_assets_effect_current_income",
# 衍生金融资产
"Derivefasset": "fi_financial_derivative_asset",
# 买入返售金融资产
"Buysellbackfasset": "fi_buying_sell_back_fi__asset",
# 应收账款
#
# 应收利息
"Interestrec": "fi_interest_receivable",
# 发放贷款及垫款
"Loanadvances": "fi_disbursing_loans_and_advances",
# 可供出售金融资产
#
# 持有至到期投资
"Heldmaturityinv": "fi_held_to_maturity_investment",
# 应收款项类投资
"Investrec": "fi_account_receivable_investment",
# 投资性房地产
#
# 固定资产
#
# 无形资产
#
# 商誉
#
# 递延所得税资产
#
# 其他资产
"Otherasset": "fi_other_asset",
# 资产总计
#
# 负债
#
# 向中央银行借款
"Borrowfromcbank": "fi_borrowings_from_central_bank",
# 同业和其他金融机构存放款项
"Fideposit": "fi_deposit_from_other_fi",
# 拆入资金
"Borrowfund": "fi_borrowings_from_fi",
# 以公允价值计量且其变动计入当期损益的金融负债
"Fvaluefliab": "fi_financial_liability_effect_current_income",
# 衍生金融负债
"Derivefliab": "fi_financial_derivative_liability",
# 卖出回购金融资产款
"Sellbuybackfasset": "fi_sell_buy_back_fi_asset",
# 吸收存款
"Acceptdeposit": "fi_savings_absorption",
# 存款证及应付票据
"Cdandbillrec": "fi_notes_payable",
# 应付职工薪酬
#
# 应交税费
#
# 应付利息
#
# 预计负债
"Anticipateliab": "fi_estimated_liabilities",
# 应付债券
"Bondpay": "fi_bond_payable",
# 其他负债
"Otherliab": "fi_other_liability",
# 负债合计
#
# 所有者权益(或股东权益)
# 股本
"Shequity": "fi_capital",
# 其他权益工具
"Otherequity": "fi_other_equity_instruments",
# 其中:优先股
"Preferredstock": "fi_preferred_stock",
# 资本公积
#
# 盈余公积
#
# 一般风险准备
"Generalriskprepare": "fi_generic_risk_reserve",
# 未分配利润
#
# 归属于母公司股东权益合计
#
# 股东权益合计
#
# 负债及股东权益总计
# 券商相关
# 资产
#
# 货币资金
#
# 其中: 客户资金存款
"Clientfund": "fi_client_fund",
# 结算备付金
"Settlementprovision": "fi_deposit_reservation_for_balance",
# 其中: 客户备付金
"Clientprovision": "fi_client_deposit_reservation_for_balance",
# 融出资金
"Marginoutfund": "fi_margin_out_fund",
# 以公允价值计量且其变动计入当期损益的金融资产
#
# 衍生金融资产
#
# 买入返售金融资产
#
# 应收利息
#
# 应收款项
"Receivables": "fi_receivables",
# 存出保证金
"Gdepositpay": "fi_deposit_for_recognizance",
# 可供出售金融资产
#
# 持有至到期投资
#
# 长期股权投资
#
# 固定资产
#
# 在建工程
#
# 无形资产
#
# 商誉
#
# 递延所得税资产
#
# 其他资产
#
# 资产总计
#
# 负债
#
# 短期借款
#
# 拆入资金
#
# 以公允价值计量且其变动计入当期损益的金融负债
#
# 衍生金融负债
#
# 卖出回购金融资产款
#
# 代理买卖证券款
"Agenttradesecurity": "fi_receiving_as_agent",
# 应付账款
#
# 应付职工薪酬
#
# 应交税费
#
# 应付利息
#
# 应付短期融资款
"Shortfinancing": "fi_short_financing_payable",
# 预计负债
#
# 应付债券
#
# 递延所得税负债
#
# 其他负债
#
# 负债合计
#
# 所有者权益(或股东权益)
#
# 股本
#
# 资本公积
#
# 其他权益工具
#
# 盈余公积
#
# 一般风险准备
#
# 交易风险准备
"Traderiskprepare": "fi_trade_risk_reserve",
# 未分配利润
#
# 归属于母公司股东权益合计
#
# 少数股东权益
#
# 股东权益合计
#
# 负债和股东权益总计
# 保险相关
# 应收保费
"Premiumrec": "fi_premiums_receivable",
"Rirec": "fi_reinsurance_premium_receivable",
# 应收分保合同准备金
"Ricontactreserverec": "fi_reinsurance_contract_reserve",
# 保户质押贷款
"Insuredpledgeloan": "fi_policy_pledge_loans",
# 定期存款
"Tdeposit": "fi_time_deposit",
# 可供出售金融资产
#
# 持有至到期投资
#
# 应收款项类投资
#
# 应收账款
#
# 长期股权投资
#
# 存出资本保证金
"Capitalgdepositpay": "fi_deposit_for_capital_recognizance",
# 投资性房地产
#
# 固定资产
#
# 无形资产
#
# 商誉
#
# 递延所得税资产
#
# 其他资产
#
# 独立账户资产
"Independentasset": "fi_capital_in_independent_accounts",
# 资产总计
#
# 负债
#
# 短期借款
#
# 同业及其他金融机构存放款项
#
# 拆入资金
#
# 以公允价值计量且其变动计入当期损益的金融负债
#
# 衍生金融负债
#
# 卖出回购金融资产款
#
# 吸收存款
#
# 代理买卖证券款
#
# 应付账款
#
# 预收账款
"Advancerec": "fi_advance_from_customers",
# 预收保费
"Premiumadvance": "fi_advance_premium",
# 应付手续费及佣金
"Commpay": "fi_fees_and_commissions_payable",
# 应付分保账款
"Ripay": "fi_dividend_payable_for_reinsurance",
# 应付职工薪酬
#
# 应交税费
#
# 应付利息
#
# 预计负债
#
# 应付赔付款
"Claimpay": "fi_claims_payable",
# 应付保单红利
"Policydivipay": "fi_policy_holder_dividend_payable",
# 保户储金及投资款
"Insureddepositinv": "fi_policy_holder_deposits_and_investment_funds",
# 保险合同准备金
"Contactreserve": "fi_contract_reserve",
# 长期借款
#
# 应付债券
#
# 递延所得税负债
#
# 其他负债
#
# 独立账户负债
"Independentliab": "fi_independent_liability",
# 负债合计
#
# 所有者权益(或股东权益)
#
# 股本
#
# 资本公积
#
# 盈余公积
#
# 一般风险准备
#
# 未分配利润
#
# 归属于母公司股东权益总计
#
# 少数股东权益
#
# 股东权益合计
#
# 负债和股东权益总计
}
class ChinaStockBalanceSheetRecorder(BaseChinaStockFinanceRecorder):
data_schema = BalanceSheet
url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetZiChanFuZhaiBiaoList'
finance_report_type = 'ZiChanFuZhaiBiaoList'
data_type = 3
def format(self, entity, df):
cols = list(df.columns)
str_cols = ['Title']
date_cols = [self.get_original_time_field()]
float_cols = list(set(cols) - set(str_cols) - set(date_cols))
for column in float_cols:
df[column] = df[column].apply(lambda x: to_float(x[0]))
df.rename(columns=balance_sheet_map, inplace=True)
df.update(df.select_dtypes(include=[np.number]).fillna(0))
if 'timestamp' not in df.columns:
df['timestamp'] = pd.to_datetime(df[self.get_original_time_field()])
elif not isinstance(df['timestamp'].dtypes, datetime):
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['report_period'] = df['timestamp'].apply(lambda x: to_report_period_type(x))
df['report_date'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
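
# Illustrative sketch (synthetic one-row frame): the renaming step inside
# format() above maps EastMoney's field names to the schema's English column
# names via balance_sheet_map, e.g. 'Monetaryfund' -> 'cash_and_cash_equivalents'
# and 'Sumasset' -> 'total_assets'.
def _demo_balance_sheet_rename():
    import pandas as pd

    raw = pd.DataFrame([{'Monetaryfund': 1000.0, 'Sumasset': 5000.0}])
    return raw.rename(columns=balance_sheet_map)
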
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, pd.Series(object, index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": | date_range("20130101", periods=3) | pandas.date_range |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.ticker import FuncFormatter
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
params = {
"axes.titlesize": 14,
"axes.labelsize": 14,
"font.size": 14,
"xtick.labelsize": 14,
"ytick.labelsize": 14,
"legend.fontsize": 14,
"savefig.bbox": "tight",
}
plt.rcParams.update(params)
class RATPMetroTweetsAnalyzer:
"""
Class for analyzing Paris RATP metro line incidents, using their
official Twitter accounts
    To be able to download tweets you need to obtain your Twitter developer API keys
    (`consumer_key`, `consumer_secret`, `access_key` and `access_secret`). Be aware
    that it may not be possible to download all 14 lines in a row: the Twitter API
    limits how many requests you can make.
Args:
api (dict): Dictionary containing Twitter developer API keys: ``consumer_key``, ``consumer_secret``, ``access_key``, ``access_secret``
"""
def __init__(self, api=None):
self.df = None
self.df_processed = None
self._define_incidents()
if api is not None:
assert type(api) == dict
keys = ["consumer_key", "consumer_secret", "access_key", "access_secret"]
for key in keys:
assert key in api
assert type(api[key]) == str
self.api = api
def load(
self, line, number_of_tweets=3200, folder_tweets="tweets", force_download=False
):
"""
Download the tweets from the official RATP Twitter account.
Some code is adapted from https://github.com/gitlaura/get_tweets
Args:
line (int or str): RATP metro line number (1 to 14), or ``"A"``, ``"B"`` for RER lines
number_of_tweets (int): Number of tweets to download, must be smaller than 3200 due to some limitation of the Twitter API
folder_tweets (str): Folder to store the downloaded tweets as a ``.csv`` file
force_download (bool): If ``False``, it will directly load the already downloaded file without re-downloading it. You can force downloading by using ``force_download = True``
"""
import os
username = self._twitter_account(line)
outfile = os.path.join(folder_tweets, username + ".csv")
if not os.path.isfile(outfile) or force_download:
os.makedirs(os.path.dirname(outfile), exist_ok=True)
import csv
import tweepy
auth = tweepy.OAuthHandler(
self.api["consumer_key"], self.api["consumer_secret"]
)
auth.set_access_token(self.api["access_key"], self.api["access_secret"])
api = tweepy.API(auth, wait_on_rate_limit=True)
tweets_for_csv = [["time", "tweet"]]
print(f"Downloading tweets for {username}")
for tweet in tweepy.Cursor(api.user_timeline, screen_name=username).items(
number_of_tweets
):
tweets_for_csv.append([tweet.created_at, tweet.text])
with open(outfile, "w", newline="") as f:
writer = csv.writer(f, delimiter=",")
writer.writerows(tweets_for_csv)
self.df = | pd.read_csv(outfile) | pandas.read_csv |
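
# Minimal usage sketch (placeholders only, not real credentials), assuming the
# rest of the original RATPMetroTweetsAnalyzer module is available:
_example_api = {
    'consumer_key': '<consumer_key>',
    'consumer_secret': '<consumer_secret>',
    'access_key': '<access_key>',
    'access_secret': '<access_secret>',
}
# analyzer = RATPMetroTweetsAnalyzer(api=_example_api)
# analyzer.load(line=1)  # downloads (or reloads) tweets for metro line 1
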
# Imports
import pandas as pd
from edbo.utils import Data
# import pdb
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import matplotlib
from sklearn.cluster import KMeans
import numpy as np
from sklearn import metrics
from edbo.bro import BO_express
from gpytorch.priors import GammaPrior
import random
###
# Constants
###
MASTER_SEED = 213090120
# Colors for the different clusters
COLORS = ['#F72585', '#7209B7', '#3A0CA3', '#4361EE', '#4CC9F0']
# Colors for the different arrows indicating moving in the search space.
# Black is the explorer, red is the exploiter, yellow is the optimiser.
COLORS_2 = ['black', 'red', 'yellow']
# Temporary storage here.
FOLDER_PATH = "test_bo_suzuki/temp/"
# Number of TSNE plots to make. Each plot follows 10 experiments.
# Can have up to a maximum of 5 plots.
NUM_PLOTS = 1
#############################
#############################
##### REACTION ENCODING #####
#############################
#############################
print("Starting Reaction Encoding!")
# Load DFT descriptor CSV files computed with auto-qchem using pandas
# Instantiate a Data object
# Suzuki here
electrophiles = Data(pd.read_csv('data/suzuki/electrophile_dft.csv'))
nucleophiles = Data(pd.read_csv('data/suzuki/nucleophile_dft.csv'))
ligands = Data(pd.read_csv('data/suzuki/ligand-random_dft.csv'))
bases = Data(pd.read_csv('data/suzuki/base_dft.csv'))
solvents = Data(pd.read_csv('data/suzuki/solvent_dft.csv'))
reactants = [electrophiles, nucleophiles, ligands, bases, solvents]
print("Loaded csv files...")
# Use Data.drop method to drop descriptors containing some unwanted keywords
for data in reactants:
data.drop(['file_name', 'vibration', 'correlation', 'Rydberg',
'correction', 'atom_number', 'E-M_angle', 'MEAN', 'MAXG',
'STDEV'])
print("Dropped unnecessary data...")
# Parameters in reaction space
# Suzuki here
components = {
'electrophile': 'DFT',
'nucleophile': 'DFT',
'ligand': 'DFT',
'base': 'DFT',
'solvent': 'DFT'
}
# External descriptor matrices override specified encoding
dft = {
'electrophile':electrophiles.data,
'nucleophile':nucleophiles.data,
'ligand':ligands.data,
'base':bases.data,
'solvent':solvents.data
}
encoding = {}
############################
############################
#### Instantiating EDBO ####
############################
############################
def instantiate_bo(acquisition_func: str, batch_size: int, init_method='rand'):
bo = BO_express(
components,
encoding=encoding,
descriptor_matrices=dft,
acquisition_function=acquisition_func,
init_method=init_method,
batch_size=batch_size,
target='yield'
)
# BO_express actually automatically chooses priors
# We can reset them manually to make sure they match the ones from our paper
bo.lengthscale_prior = [GammaPrior(2.0, 0.2), 5.0]
bo.outputscale_prior = [GammaPrior(5.0, 0.5), 8.0]
bo.noise_prior = [GammaPrior(1.5, 0.5), 1.0]
return bo
# We instantiate a particular instance of the optimiser so that
# we can see how the optimiser encodes the search space,
# and then consequently encode the search space ourselves using TSNE.
bo = instantiate_bo('VarMax', 1)
#####
# Constructing kmeans clusters
####
data_embedded = TSNE(init='pca').fit_transform(bo.reaction.data)
N_CLUSTERS = 5
kmeans = KMeans(n_clusters=N_CLUSTERS).fit_predict(bo.reaction.data)
colors = [COLORS[thing] for thing in kmeans]
plots = [plt.subplots(1) for i in range(NUM_PLOTS)]
fig_clusters = [item[0] for item in plots]
axs_clusters = [item[1] for item in plots]
for cluster in axs_clusters:
cluster.scatter([item[0] for item in data_embedded], [item[1] for item in data_embedded], c=colors)
cluster.set_xlabel('t-SNE1')
cluster.set_ylabel('t-SNE2')
cluster.set_title('Paths taken in reaction space')
fig_3b, (axs_r2, axs_yield) = plt.subplots(nrows=2, ncols=1, sharex=True)
axs_yield.set_xlabel('Experiment')
axs_r2.set_ylabel('Model fit score')
axs_yield.set_ylabel('Observed yield')
####################################
####################################
#### Bayesian Optimization Loop ####
####################################
####################################
RESULT_PATH = 'data/suzuki/experiment_index.csv'
NUM_ROUNDS = 10
# Colormap for indicating color of points on path. Turned out to be unnecessary,
# went with arrows instead for indicating order of points. Kept for
# completeness.
path_cm = matplotlib.cm.get_cmap(name='Reds')
path_norm = matplotlib.colors.Normalize(vmin=0.0, vmax=NUM_ROUNDS)
with open(RESULT_PATH) as f:
FULL_RESULT_DICT = {",".join(line.split(",")[1:-1]): float(line.split(",")[-1][:-1]) for line in f.readlines()[1:]}
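
# Illustrative sketch mirroring the dict comprehension above; the CSV row is
# hypothetical. experiment_index.csv rows have the shape
# <index>,<component values...>,<yield>, so each experiment is keyed by the
# comma-joined component values and mapped to its numeric yield.
def _demo_result_key(line='12,aryl_halide_1,boronate_2,ligand_3,KOH,MeOH,85.0\n'):
    key = ','.join(line.split(',')[1:-1])             # drop leading index and trailing yield
    observed_yield = float(line.split(',')[-1][:-1])  # strip the newline
    return key, observed_yield                        # ('aryl_halide_1,...,MeOH', 85.0)
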
def fill_in_experiment_values(input_path):
# Reading in values
newfile = ""
with open(input_path) as f:
# In this case f is a csv file
first_line = True
for line in f:
original_line = line
if first_line:
newfile += line
first_line = False
continue
line = line.split(",")
search_string = ",".join(line[1:-1])
input_yield = FULL_RESULT_DICT[search_string]
line = ",".join(original_line.split(",")[:-1]) + "," + str(input_yield) + "\n"
newfile += line
with open(input_path, 'w') as f:
f.write(newfile)
return input_yield
# Refer to matplotlib linestyles for an explanation of these.
styles = [(0, (1, 4)), 'solid', (0, (5, 15))]
def workflow(export_path, count=0, indices=None, fig=0, plot=None):
if indices is None:
indices = []
bo.run()
new_experiment_index = bo.get_experiments().index[0]
indices.append(new_experiment_index)
if len(indices) > 1 and plot is not None:
axs_clusters[plot].scatter(
[data_embedded[new_experiment_index][0]],
[data_embedded[new_experiment_index][1]],
color=path_cm(path_norm(count)),
s=9 # The size
)
x, y = data_embedded[indices[count - 1]] # Previous point.
x_new, y_new = data_embedded[indices[count]]
dx, dy = x_new - x, y_new - y
arr = axs_clusters[plot].arrow(
x, y, dx, dy, # plot an arrow from previous to current point.
width=0.3,
length_includes_head=True,
head_width = 3,
head_length = 3,
linestyle=styles[fig],
color=COLORS_2[fig]
)
if len(indices) == 2:
# So it's the first one
arr.set_label(['Explorer', 'Exploiter', 'Edbo optimiser'][fig])
bo.export_proposed(export_path)
return indices
human_readable_domain_data = bo.reaction.base_data[bo.reaction.index_headers]
results_array = np.array([FULL_RESULT_DICT[",".join(human_readable_domain_data.iloc[i].tolist())] for i in range(len(human_readable_domain_data))])
# The point of the ",".join is that the .tolist() returns all the descriptors in order as a list
# And then we join them with commas to form the search key for the results dict
def simulate_bo(bo, fig_num):
indices = None
obs_yields = []
bo.init_sample(seed=MASTER_SEED) # Initialize
indices = [bo.get_experiments().index[0]]
bo.export_proposed(FOLDER_PATH + 'init.csv') # Export design to a CSV file
obs_yields.append(fill_in_experiment_values(FOLDER_PATH + 'init.csv'))
bo.add_results(FOLDER_PATH + 'init.csv')
r2_values = list()
for num in range(NUM_ROUNDS):
print("Starting round ", num)
try:
indices = workflow(
FOLDER_PATH + 'round' + str(num) + '.csv',
count=num,
indices=indices,
fig=fig_num,
plot=[None, 0][num < 10] # This is for a single plot
# For the first 10 rounds, it plots the arrows
# beyond that, it doesn't plot anything.
# for multiple plots (e.g. 5), just use
# plot = num // 10
)
except RuntimeError as e:
print(e)
print("No idea how to fix this, seems to occur randomly for different seeds...")
break
obs_yields.append(fill_in_experiment_values(FOLDER_PATH + 'round' + str(num) + '.csv'))
bo.add_results(FOLDER_PATH + "round" + str(num) + ".csv")
print("Finished round ", num)
pred = np.array(bo.obj.scaler.unstandardize(bo.model.predict(bo.obj.domain.values)))
print(f"Current R^2 value is {metrics.r2_score(results_array, pred)}")
r2_values.append(metrics.r2_score(results_array, pred))
# The very first score tends to be very negative, so instead
# we will ignore the first one
axs_r2.plot(list(range(NUM_ROUNDS))[1:], r2_values[1:], color=COLORS[fig_num])
axs_yield.plot(list(range(NUM_ROUNDS + 1)), obs_yields, color=COLORS[fig_num])
simulate_bo(bo, 0)
bo = instantiate_bo('MeanMax', 1)
print("Instantiated BO object...")
simulate_bo(bo, 1)
bo = instantiate_bo('EI', 2)
print("Instantiated BO object...")
simulate_bo(bo, 2)
axs_clusters[0].legend()
############
############
# The following code is for producing fig_3c.csv,
# which is needed to produce the diagrams for figure 3c.
NUM_ROUNDS = 10
NUM_AVG = 50
fig_max_yield, axs_max_yield = plt.subplots(1)
random.seed(MASTER_SEED)
# Ensure consistency across methods
seeds = random.sample(range(10 ** 6), NUM_AVG)
def simulate_bo_2(method):
"""
Simulate the optimiser given the acquisition function method.
The idea is to run the optimiser NUM_AVG times,
with a batch size of 5 and NUM_ROUNDS rounds each time.
Each time you run the optimiser, at the end,
we take the data in the round1.csv, round2.csv etc.,
and calculate at each round what the maximum observed yield was thus far.
This data is then put into a pandas dataframe,
    where each row corresponds to a run of the optimiser,
    and each column corresponds to a round number,
    to make a table with NUM_AVG rows and NUM_ROUNDS + 1 columns
    (the initial design plus one column per round).
"""
full_yields = []
for seed in seeds:
bo = instantiate_bo(method, 5)
if method == 'greedy':
bo.eps = 0.1 # To match the value used in the paper
bo.init_sample(seed=seed) # Initialize
bo.export_proposed(FOLDER_PATH + 'init.csv') # Export design to a CSV file
fill_in_experiment_values(FOLDER_PATH + 'init.csv')
bo.add_results(FOLDER_PATH + 'init.csv')
for num in range(NUM_ROUNDS):
print("Starting round ", num)
bo.run()
bo.export_proposed(FOLDER_PATH + 'round' + str(num) + '.csv')
fill_in_experiment_values(FOLDER_PATH + 'round' + str(num) + '.csv')
bo.add_results(FOLDER_PATH + "round" + str(num) + ".csv")
print("Finished round ", num)
max_yields = []
results = pd.DataFrame(columns=bo.reaction.index_headers + ['yield'])
for index, path in enumerate([FOLDER_PATH + 'init'] + [FOLDER_PATH + 'round' + str(num) for num in range(NUM_ROUNDS)]):
results = pd.concat([results, pd.read_csv(path + '.csv', index_col=0)], sort=False)
results = results.sort_values('yield', ascending=False)
max_yields.append(results['yield'].tolist()[0])
# At each round, we determine the maximum observed yield thus far,
# recording this in a dataframe.
# This is appended to once for every seed in seeds, so a total
# of NUM_AVG times.
full_yields.append(max_yields)
return pd.DataFrame.from_records(full_yields)
methods = ['EI', 'TS', 'greedy', 'MeanMax', 'VarMax']
yield_df = pd.DataFrame(columns=['method'] + list(range(11)))
yield_df['method'] = methods
# yield_df will have len(methods) rows,
# and 11 columns.
# Column x corresponds to the average maximal observed yield observed at
# round x, for the method, averaged over all NUM_AVG optimiser runs.
# e.g. if NUM_AVG was 2, and for EI at round 5 the maximal yields observed
# were 30 and 40, then the corresponding yield_df entry would be 35.
yield_dict = {}
for method in methods:
    print("TRYING OUT METHOD ", method)
    result = simulate_bo_2(method)
yield_dict[method] = result
for key, value in yield_dict.items():
value.insert(0, 'method', [key for num in range(NUM_AVG)], allow_duplicates=True)
full_yield_df = pd.DataFrame()
for value in yield_dict.values():
full_yield_df = | pd.concat([full_yield_df, value]) | pandas.concat |
import requests
import json
import os
from dotenv import load_dotenv
import pandas as pd
import pickle
import time
# Getting the api key from .env
load_dotenv()
API_KEY = os.getenv("RIOT_API_KEY")
# Getting the data from a json file to a string
while True:
try:
matchesID = pickle.load(open("matchesId.p", "rb"))
break
except:
matchData = requests.get('http://canisback.com/matchId/matchlist_na1.json')
matchesID = json.loads(matchData.text)
break
while True:
try:
matchesData = pickle.load(open("matchesData.p", "rb"))
break
except:
matchesData = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append((Week(-2, weekday=1), # n=0 -> roll forward. Mon
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
( | Week(weekday=0) | pandas.core.datetools.Week |
# -*- coding: utf-8 -*-
import logging
from dotenv import find_dotenv, load_dotenv
import pickle
import os
import numpy as np
import pandas as pd
from goactiwe import GoActiwe
from goactiwe.steps import remove_drops
import dask.dataframe as dd
from fastparquet import write, ParquetFile
def fill_df_with_datetime_vars(df):
df['5min'] = df.index.hour * 12 + df.index.minute // 5
df['quarter_hour'] = df.index.hour * 4 + df.index.minute // 15
df['hour'] = df.index.hour
df['day'] = df.index.weekday
df['month'] = df.index.month
df['date'] = df.index.date
df['date'] = df['date'].apply(lambda x: x.strftime('%Y-%m-%d'))
return df
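
# A small self-contained check (not from the original module) that makes the
# bin arithmetic above concrete: 13:37 falls in 5-minute bin 13*12 + 37//5 = 163
# and quarter-hour bin 13*4 + 37//15 = 54; 2017-02-22 is a Wednesday (day == 2).
def _demo_datetime_bins():
    df = pd.DataFrame(index=pd.to_datetime(['2017-02-22 13:37:00']))
    df = fill_df_with_datetime_vars(df)
    assert df['5min'].iloc[0] == 163
    assert df['quarter_hour'].iloc[0] == 54
    assert df['day'].iloc[0] == 2
    return df
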
class DataLoader:
def __init__(self, logger=None):
        self.log = logger.info if logger is not None else print
self._ga = GoActiwe()
self._location = self._ga.get_location()
self._activity = self._ga.get_activity()
self._steps = self._ga.get_steps()
self._screen = self._ga.get_screen()
self.modalities = ('cpm', 'steps', 'activity', 'screen', 'location_lat', 'location_lon')
@staticmethod
def save_scalers(user, scalers):
        pickle.dump(scalers, open('scalers_{}.pkl'.format(user), 'wb'))
@staticmethod
def load_scalers():
        return pickle.load(open('scalers.pkl', 'rb'))
@staticmethod
def load_cpm(user):
def load_smartphone_cpm(user):
cpm_root_dir = '/home/sdka/data/cpm/smartphone/20170222110955/'
df = dd.read_csv(os.path.join(cpm_root_dir, str(user), '*.csv'))
df = df.compute().set_index('timestamp')
df.index = pd.to_datetime(df.index)
return df.sort_index()
cpm = load_smartphone_cpm(user)
cpm = fill_df_with_datetime_vars(cpm)
cpm = cpm.pivot_table(index='date', columns='5min', values='cpm', aggfunc='sum')
return cpm
def load_location_lat(self, user):
location = self._location.copy()
location = location[location.user == user]
location = location[location.accuracy < 50]
location = fill_df_with_datetime_vars(location)
location_pt_lat = location.pivot_table(index='date', columns='5min', values='lat', aggfunc='median')
return location_pt_lat
def load_location_lon(self, user):
location = self._location.copy()
location = location[location.user == user]
location = location[location.accuracy < 50]
location = fill_df_with_datetime_vars(location)
location_pt_lon = location.pivot_table(index='date', columns='5min', values='lon', aggfunc='median')
return location_pt_lon
def load_activity(self, user):
activity = self._activity.copy()
activity = activity[activity.user == user]
activity = activity[activity.confidence > 70]
# Remove still, tilting and unknown
activity[activity == 3] = np.nan
activity[activity == 4] = np.nan
activity[activity == 5] = np.nan
activity[activity == 6] = np.nan
activity = activity.dropna()
# Reduce walking and running indices
activity = activity.replace({'activity': {7.: 3., 8.: 4.}})
# Add descriptions
# act_labels = ['IN_VEHICLE', 'ON_BICYCLE', 'ON_FOOT', 'STILL', 'UNKNOWN', 'TILTING', 'UNKNOWN2', 'WALKING',
# 'RUNNING']
# activity['activity_str'] = [act_labels[int(activity)] for activity in activity['activity']]
activity = fill_df_with_datetime_vars(activity)
activity = activity.pivot_table(index='date', columns='5min', values='activity', aggfunc='median')
return activity
def load_steps(self, user):
steps = self._steps.copy()
steps = steps[steps.user == user]
steps = remove_drops(steps.step_count).to_frame()
# steps = steps[steps.step_count < steps.step_count.mean() + 2 * steps.step_count.std()]
steps = fill_df_with_datetime_vars(steps)
steps = steps.pivot_table(index='date', columns='5min', values='step_count', aggfunc='sum')
steps = steps.clip(0, 2000)
return steps
def load_screen(self, user):
screen = self._screen.copy()
screen = screen[screen.user == user]
screen = screen.groupby( | pd.TimeGrouper('15min') | pandas.TimeGrouper |
import os
from nose.tools import *
import unittest
import pandas as pd
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.io.parsers import read_csv_metadata
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
catalog_datasets_path = os.sep.join([get_install_path(), 'tests',
'test_datasets', 'catalog'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class CatalogManagerTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_get_property_valid_df_name_1(self):
# cm.del_catalog()
df = read_csv_metadata(path_a)
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
def test_get_property_valid_df_name_2(self):
# cm.del_catalog()
self.assertEqual(cm.get_catalog_len(), 0)
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_property(C, 'key'), '_id')
self.assertEqual(cm.get_property(C, 'fk_ltable'), 'ltable_ID')
self.assertEqual(cm.get_property(C, 'fk_rtable'), 'rtable_ID')
self.assertEqual(cm.get_property(C, 'ltable').equals(A), True)
self.assertEqual(cm.get_property(C, 'rtable').equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_property_invalid_df_1(self):
cm.get_property(10, 'key')
@raises(AssertionError)
def test_get_property_invalid_path_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
cm.get_property(A, None)
# cm.del_catalog()
@raises(KeyError)
def test_get_property_df_notin_catalog(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.get_property(A, 'key')
# cm.del_catalog()
def test_set_property_valid_df_name_value(self):
# cm.del_catalog()
df = pd.read_csv(path_a)
cm.set_property(df, 'key', 'ID')
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_invalid_df(self):
# cm.del_catalog()
cm.set_property(None, 'key', 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_valid_df_invalid_prop(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.set_property(A, None, 'ID')
# cm.del_catalog()
def test_init_properties_valid(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.init_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), True)
# cm.del_catalog()
@raises(AssertionError)
def test_init_properties_invalid_df(self):
cm.init_properties(None)
def test_get_all_properties_valid_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
m = cm.get_all_properties(A)
self.assertEqual(len(m), 1)
self.assertEqual(m['key'], 'ID')
# cm.del_catalog()
def test_get_all_properties_valid_2(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
m = cm.get_all_properties(C)
self.assertEqual(len(m), 5)
self.assertEqual(m['key'], '_id')
self.assertEqual(m['fk_ltable'], 'ltable_ID')
self.assertEqual(m['fk_rtable'], 'rtable_ID')
self.assertEqual(m['ltable'].equals(A), True)
self.assertEqual(m['rtable'].equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_all_properties_invalid_df_1(self):
# cm.del_catalog()
C = cm.get_all_properties(None)
@raises(KeyError)
def test_get_all_properties_invalid_df_2(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
C = cm.get_all_properties(A)
def test_del_property_valid_df_name(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key')
self.assertEqual(len(cm.get_all_properties(A)), 0)
@raises(AssertionError)
def test_del_property_invalid_df(self):
cm.del_property(None, 'key')
@raises(AssertionError)
def test_del_property_invalid_property(self):
A = read_csv_metadata(path_a)
cm.del_property(A, None)
@raises(KeyError)
def test_del_property_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_property(A, 'key')
@raises(KeyError)
def test_del_property_prop_notin_catalog(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key1')
def test_del_all_properties_valid_1(self):
A = read_csv_metadata(path_a)
cm.del_all_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), False)
def test_del_all_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.del_all_properties(C)
self.assertEqual(cm.is_dfinfo_present(C), False)
@raises(AssertionError)
def test_del_all_properties_invalid_df(self):
cm.del_all_properties(None)
@raises(KeyError)
def test_del_all_properties_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_all_properties(A)
def test_get_catalog_valid(self):
A = read_csv_metadata(path_a)
cg = cm.get_catalog()
self.assertEqual(len(cg), 1)
def test_del_catalog_valid(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
cg = cm.get_catalog()
self.assertEqual(len(cg), 0)
def test_is_catalog_empty(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
self.assertEqual(cm.is_catalog_empty(), True)
def test_is_dfinfo_present_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, True)
def test_is_dfinfo_present_valid_2(self):
A = pd.read_csv(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_dfinfo_present_invalid(self):
cm.is_dfinfo_present(None)
def test_is_property_present_for_df_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key')
self.assertEqual(status, True)
def test_is_property_present_for_df_valid_2(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key1')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_property_present_for_df_invalid_df(self):
cm.is_property_present_for_df(None, 'key')
@raises(KeyError)
def test_is_property_present_for_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.is_property_present_for_df(A, 'key')
def test_catalog_len(self):
A = read_csv_metadata(path_a)
self.assertEqual(cm.get_catalog_len(), 1)
def test_set_properties_valid_1(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.init_properties(B)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
def test_set_properties_valid_2(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
@raises(AssertionError)
def test_set_properties_invalid_df_1(self):
cm.set_properties(None, {})
@raises(AssertionError)
def test_set_properties_invalid_dict_1(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, None)
def test_set_properties_df_notin_catalog_replace_false(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, {}, replace=False)
self.assertEqual(cm.get_key(A), 'ID')
# def test_has_property_valid_1(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key'), True)
#
# def test_has_property_valid_2(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key1'), False)
#
# @raises(AssertionError)
# def test_has_property_invalid_df(self):
# cm.has_property(None, 'key')
#
# @raises(AssertionError)
# def test_has_property_invalid_prop_name(self):
# A = read_csv_metadata(path_a)
# cm.has_property(A, None)
#
# @raises(KeyError)
# def test_has_property_df_notin_catalog(self):
# A = pd.read_csv(path_a)
# cm.has_property(A, 'key')
def test_copy_properties_valid_1(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
self.assertEqual(cm.is_dfinfo_present(A1), True)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
def test_copy_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
C1 = pd.read_csv(path_c)
cm.copy_properties(C, C1)
self.assertEqual(cm.is_dfinfo_present(C1), True)
p = cm.get_all_properties(C1)
p1 = cm.get_all_properties(C1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(C1), cm.get_key(C))
self.assertEqual(cm.get_ltable(C1).equals(A), True)
self.assertEqual(cm.get_rtable(C1).equals(B), True)
self.assertEqual(cm.get_fk_ltable(C1), cm.get_fk_ltable(C))
self.assertEqual(cm.get_fk_rtable(C1), cm.get_fk_rtable(C))
@raises(AssertionError)
def test_copy_properties_invalid_tar_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(A, None)
@raises(AssertionError)
def test_copy_properties_invalid_src_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(None, A)
def test_copy_properties_update_false_1(self):
A = read_csv_metadata(path_a)
A1 = read_csv_metadata(path_a)
status=cm.copy_properties(A, A1, replace=False)
self.assertEqual(status, False)
def test_copy_properties_update_false_2(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1, replace=False)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
@raises(KeyError)
def test_copy_properties_src_df_notin_catalog(self):
A = pd.read_csv(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
def test_get_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_get_key_invalid_df(self):
cm.get_key(None)
@raises(KeyError)
def test_get_key_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.get_key(A)
def test_set_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_set_key_invalid_df(self):
cm.set_key(None, 'ID')
@raises(KeyError)
def test_set_key_notin_df(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID1')
def test_set_key_with_dupids(self):
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_set_key_with_mvals(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_get_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_ltable(C), cm.get_property(C, 'fk_ltable'))
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_get_fk_ltable_invalid_df(self):
cm.get_fk_ltable(None)
def test_get_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_rtable(C), cm.get_property(C, 'fk_rtable'))
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_get_fk_rtable_invalid_df(self):
cm.get_fk_rtable(None)
def test_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_set_fk_ltable_invalid_df(self):
cm.set_fk_ltable(None, 'ltable_ID')
@raises(KeyError)
def test_set_fk_ltable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID1')
def test_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID')
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_set_fk_rtable_invalid_df(self):
cm.set_fk_rtable(None, 'rtable_ID')
@raises(KeyError)
def test_set_fk_rtable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID1')
def test_validate_and_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
def test_validate_and_set_fk_ltable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_ltable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_rtable(C), 'ltable_ID')
def test_validate_and_set_fk_rtable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
# def test_get_reqd_metadata_from_catalog_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, 'key')
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, ['key'])
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_3(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable', 'fk_rtable', 'ltable', 'rtable'])
# self.assertEqual(d['key'], cm.get_key(C))
# self.assertEqual(d['fk_ltable'], cm.get_fk_ltable(C))
# self.assertEqual(d['fk_rtable'], cm.get_fk_rtable(C))
# self.assertEqual(cm.get_ltable(C).equals(A), True)
# self.assertEqual(cm.get_rtable(C).equals(B), True)
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_1(self):
# cm.get_reqd_metadata_from_catalog(None, ['key'])
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_2(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable1', 'fk_rtable', 'ltable', 'rtable'])
#
#
# def test_update_reqd_metadata_with_kwargs_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key'])
# self.assertEqual(metadata['key'], d['key'])
#
# def test_update_reqd_metadata_with_kwargs_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, 'key')
# self.assertEqual(metadata['key'], d['key'])
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(None, d, 'key')
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(d, None, 'key')
#
# @raises(AssertionError)
# def test_update_reqd_metadata_with_kwargs_invalid_elts(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key1'])
# def test_get_diff_with_reqd_metadata_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, 'key1')
# self.assertEqual(len(d1), 1)
#
# def test_get_diff_with_reqd_metadata_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, ['key1'])
# self.assertEqual(len(d1), 1)
#
# @raises(AssertionError)
# def test_get_diff_with_reqd_metadata_invalid_dict(self):
# d1 = cm._get_diff_with_required_metadata(None, ['key1'])
# def test_is_all_reqd_metadata_present_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, 'key'),True)
#
# def test_is_all_reqd_metadata_present_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key']),True)
#
# def test_is_all_reqd_metadata_present_valid_3(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key1']), False)
#
# @raises(AssertionError)
# def test_is_all_reqd_metadata_present_invalid_dict(self):
# cm.is_all_reqd_metadata_present(None, 'key')
def test_show_properties_for_df_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_2(self):
A = pd.read_csv(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties(C)
def test_show_properties_for_objid_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties_for_id(id(A))
@raises(KeyError)
def test_show_properties_for_objid_err_1(self):
A = pd.read_csv(path_a)
cm.show_properties_for_id(id(A))
def test_show_properties_for_objid_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties_for_id(id(C))
def test_validate_metadata_for_table_valid_1(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', None, False)
self.assertEqual(status, True)
def test_validate_metadata_for_table_valid_2(self):
import logging
logger = logging.getLogger(__name__)
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', logger, True)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_table_invalid_df(self):
status = cm._validate_metadata_for_table(None, 'ID', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notin_catalog(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID1', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notstring(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, None, 'table', None, False)
@raises(AssertionError)
def test_validate_metadata_for_table_key_notunique(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'zipcode', 'table', None, False)
def test_validate_metadata_for_candset_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_df(self):
status = cm._validate_metadata_for_candset(None, '_id', 'ltable_ID', 'rtable_ID', None, None,
'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_id_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, 'id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_ltable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltableID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_rtable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtableID', A, B, 'ID', 'ID', None, False)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_ltable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', None, B, 'ID', 'ID', None, False)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_rtable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', B, None, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_lkey_notin_ltable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID1', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_rkey_notin_rtable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID1', None, False)
def test_get_keys_for_ltable_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, B, None, False)
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_ltable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(None, B, None, False)
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_rtable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, None, None, False)
def test_get_metadata_for_candset_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(C, None, False)
self.assertEqual(key, '_id')
self.assertEqual(fk_ltable, 'ltable_ID')
self.assertEqual(fk_rtable, 'rtable_ID')
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
self.assertEqual(ltable.equals(A), True)
self.assertEqual(rtable.equals(B), True)
@raises(AssertionError)
def test_get_metadata_for_candset_invalid_df(self):
cm.get_metadata_for_candset(None, None, False)
#--- catalog ---
def test_catalog_singleton_isinstance(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__instancecheck__(object)
@raises(TypeError)
def test_catalog_singleton_call(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__call__()
# -- catalog helper --
def test_check_attrs_present_valid_1(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, 'ID')
self.assertEqual(status, True)
def test_check_attrs_present_valid_2(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, ['ID'])
self.assertEqual(status, True)
def test_check_attrs_present_valid_3(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, ['_ID'])
self.assertEqual(status, False)
@raises(AssertionError)
def test_check_attrs_present_invalid_df(self):
ch.check_attrs_present(None, 'ID')
def test_check_attrs_invalid_None(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, None)
self.assertEqual(status, False)
@raises(AssertionError)
def test_are_all_attrs_present_invalid_df(self):
ch.are_all_attrs_in_df(None, 'id')
def test_are_all_attrs_present_invalid_None(self):
A = pd.read_csv(path_a)
status = ch.are_all_attrs_in_df(A, None)
self.assertEqual(status, False)
def test_is_attr_unique_valid_1(self):
A = pd.read_csv(path_a)
status = ch.is_attr_unique(A, 'ID')
self.assertEqual(status, True)
def test_is_attr_unique_valid_2(self):
A = pd.read_csv(path_a)
status = ch.is_attr_unique(A, 'zipcode')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_attr_unique_invalid_df(self):
ch.is_attr_unique(None, 'zipcode')
@raises(AssertionError)
def test_is_attr_unique_invalid_attr(self):
A = pd.read_csv(path_a)
ch.is_attr_unique(A, None)
def test_does_contain_missing_values_valid_1(self):
A = pd.read_csv(path_a)
status = ch.does_contain_missing_vals(A, 'ID')
self.assertEqual(status, False)
def test_does_contain_missing_values_valid_2(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = ch.does_contain_missing_vals(A, 'ID')
self.assertEqual(status, True)
@raises(AssertionError)
def test_does_contain_missing_values_invalid_df(self):
ch.does_contain_missing_vals(None, 'zipcode')
@raises(AssertionError)
def test_does_invalid_attr(self):
A = pd.read_csv(path_a)
ch.does_contain_missing_vals(A, None)
def test_is_key_attribute_valid_1(self):
A = pd.read_csv(path_a)
status = ch.is_key_attribute(A, 'ID', True)
self.assertEqual(status, True)
def test_is_key_attribute_valid_2(self):
A = pd.read_csv(path_a)
status = ch.is_key_attribute(A, 'zipcode', True)
self.assertEqual(status, False)
def test_is_key_attribute_valid_3(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = ch.is_key_attribute(A, 'ID', True)
self.assertEqual(status, False)
def test_is_key_attribute_valid_4(self):
A = pd.DataFrame(columns=['id', 'name'])
status = ch.is_key_attribute(A, 'id')
self.assertEqual(status, True)
@raises(AssertionError)
def test_is_key_attribute_invalid_df(self):
ch.is_key_attribute(None, 'id')
@raises(AssertionError)
def test_is_key_attribute_invalid_attr(self):
A = pd.read_csv(path_a)
ch.is_key_attribute(A, None)
def test_check_fk_constraint_valid_1(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = ch.check_fk_constraint(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, True)
status = ch.check_fk_constraint(C, 'rtable_ID', B, 'ID')
self.assertEqual(status, True)
@raises(AssertionError)
def test_check_fk_constraint_invalid_foreign_df(self):
ch.check_fk_constraint(None, 'rtable_ID', pd.DataFrame(), 'ID')
@raises(AssertionError)
def test_check_fk_constraint_invalid_base_df(self):
ch.check_fk_constraint(pd.DataFrame(), 'rtable_ID', None, 'ID')
@raises(AssertionError)
def test_check_fk_constraint_invalid_base_attr(self):
ch.check_fk_constraint(pd.DataFrame(), 'rtable_ID', pd.DataFrame(), None)
@raises(AssertionError)
def test_check_fk_constraint_invalid_foreign_attr(self):
ch.check_fk_constraint(pd.DataFrame(), None, | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Vatsal's Code
# This notebook shows you how to build a model for predicting degradation at various locations along an RNA sequence.
# * We will first pre-process and tokenize the sequence, secondary structure and loop type.
# * Then, we will use all the information to train a model on degradations recorded by the researchers from OpenVaccine.
# * Finally, we run our model on the public test set (shorter sequences) and the private test set (longer sequences), and submit the predictions.
#
# In[1]:
# %%capture
# !pip install forgi
# !yes Y |conda install -c bioconda viennarna
# In[2]:
import json,os, math
import subprocess
# from forgi.graph import bulge_graph
# import forgi.visual.mplotlib as fvm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow.keras.backend as K
import plotly.express as px
import tensorflow.keras.layers as L
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')
import tensorflow_addons as tfa
from itertools import combinations_with_replacement
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold,GroupKFold
from keras.utils import plot_model
from colorama import Fore, Back, Style
# ### Configuration
# In[3]:
###### USE DIFFERENT SEED FOR DIFFERENT STRATIFIED KFOLD
SEED = 53
###### NUMBER OF FOLDS. USE 3, 5, 7,...
n_folds=5
###### TRAIN DEBUG
debug=True
###### APPLY WINDOW FEATURES
Window_features = True
###### Number of Feature Given to Model
# cat_feature = 3 ## ( Categorical Features Only)
# num_features = 1 ## ( Numerical Features Only)
###### Model Configuration ######
model_name="GG" ## MODEL NAME (Files will save according to this )
epochs=100 ## NUMBER OF EPOCHS MODEL TRAIN IN EACH FOLD. USE 3, 5, 7,...
BATCH_SIZE = 32 ## NUMBER OF BATCH_SIZE USE 16, 32, 64, 128,...
n_layers = 2 ## Number of Layers Present in model # ex. 3 Layer of GRU Model
layers = ["GRU","GRU"] ## Stacking sequence of GRU and LSTM (list of length == n_layers)
hidden_dim = [128, 128] ## Hidden Dimension in Model (Default : [128,128]) (list of length == n_layers)
dropout = [0.5, 0.5] ## Fraction of the units to drop in each recurrent layer (0.0 means no dropout).
sp_dropout = 0.2 ## SpatialDropout1D (Fraction of the input units to drop) [https://stackoverflow.com/a/55244985]
embed_dim = 250 ## Output Dimention of Embedding Layer (Default : 75)
num_hidden_units = 8 ## Number of GRU units after num_input layer
###### LR Scheduler ######
Cosine_Schedule = True ## Cosine schedule with warmup
Rampup_decy_lr = False ## Ramp-up / decay lr schedule
# ### Set Seed
# In[4]:
def seed_everything(seed=1234):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed_everything(SEED)
# ### Used Columns
#
# In[5]:
target_cols = ['reactivity', 'deg_Mg_pH10', 'deg_Mg_50C', 'deg_pH10', 'deg_50C']
window_columns = ['sequence','structure','predicted_loop_type']
categorical_features = ['sequence', 'structure', 'predicted_loop_type',]
# 'predicted_loop_index']
cat_feature = len(categorical_features)
if Window_features:
cat_feature += len(window_columns)
numerical_features = ['BPPS_Max','BPPS_nb', 'BPPS_sum',
'positional_entropy',
'stems', 'interior_loops', 'multiloops',#'hairpin loops', 'fiveprimes', 'threeprimes',
'A_percent', 'G_percent','C_percent', 'U_percent',
'U-G', 'C-G', 'U-A', 'G-C', 'A-U', 'G-U',
# 'E', 'S', 'H', 'B', 'X', 'I', 'M',
'pair_map', 'pair_distance', ]
num_features = len(numerical_features) ## ( Numerical Features Only)
feature_cols = categorical_features + numerical_features
pred_col_names = ["pred_"+c_name for c_name in target_cols]
target_eval_col = ['reactivity','deg_Mg_pH10','deg_Mg_50C']
pred_eval_col = ["pred_"+c_name for c_name in target_eval_col]
# ### Load and preprocess data
# In[6]:
data_dir = '/kaggle/input/stanford-covid-vaccine/'
fearure_data_path = '../input/openvaccine/'
# train = pd.read_csv(fearure_data_path+'train.csv')
# test = pd.read_csv(fearure_data_path+'test.csv')
train = pd.read_json(fearure_data_path+'train.json')
test = pd.read_json(fearure_data_path+'test.json')
# train_j = pd.read_json(data_dir + 'train.json', lines=True)
# test_j = pd.read_json(data_dir + 'test.json', lines=True)
sample_sub = pd.read_csv(data_dir + 'sample_submission.csv')
# In[7]:
train[target_cols] = train[target_cols].applymap(lambda x: x[1:-1].split(", "))
# In[8]:
# train = train[train['SN_filter'] == 1]
train = train[train['signal_to_noise'] >= 0.5]
# In[9]:
def pair_feature(row):
arr = list(row)
its = [iter(['_'] + arr[:]), iter(arr[1:] + ['_'])]
pairs = list(zip(*its))
return list(map("".join, pairs))
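# Quick illustration (demo only, not part of the pipeline): each position is paired
# with its left and right neighbour, with '_' padding the two boundaries.
print("pair_feature demo:", pair_feature('GGAA'))  # ['_G', 'GA', 'GA', 'A_']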
# In[10]:
def preprocess_categorical_inputs(df, cols=categorical_features,Window_features=Window_features):
if Window_features:
for c in window_columns:
df["pair_"+c] = df[c].apply(pair_feature)
cols.append("pair_"+c)
cols = list(set(cols))
return np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
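# Note on shapes: applymap turns every cell into a list of token ids, np.array gives
# (samples, n_columns, seq_len), and the (0, 2, 1) transpose yields
# (samples, seq_len, n_columns), so the per-position token features sit on the last axis.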
# In[11]:
def preprocess_numerical_inputs(df, cols=numerical_features):
return np.transpose(
np.array(
df[cols].values.tolist()
),
(0, 2, 1)
)
# In[12]:
# We will use this dictionary to map each character to an integer
# so that it can be used as an input in keras
# ().ACGUBEHIMSXshftim0123456789[]{}'_,
token_list = list("().<KEY>")
if Window_features:
comb = combinations_with_replacement(list('_().<KEY>'*2), 2)
token_list += list(set(list(map("".join,comb))))
token2int = {x:i for i, x in enumerate(list(set(token_list)))}
print("token_list Size :",len(token_list))
train_inputs_all_cat = preprocess_categorical_inputs(train,cols=categorical_features)
train_inputs_all_num = preprocess_numerical_inputs(train,cols=numerical_features)
train_labels_all = np.array(train[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
print("Train categorical Features Shape : ",train_inputs_all_cat.shape)
print("Train numerical Features Shape : ",train_inputs_all_num.shape)
print("Train labels Shape : ",train_labels_all.shape)
# ### Reduce Train Data
# In[13]:
# train_inputs_all_cat = train_inputs_all_cat[:,:68,:]
# train_inputs_all_num = train_inputs_all_num[:,:68,:]
# train_labels_all = train_labels_all[:,:68,:]
# print("Train categorical Features Shape : ",train_inputs_all_cat.shape)
# print("Train numerical Features Shape : ",train_inputs_all_num.shape)
# print("Train labels Shape : ",train_labels_all.shape)
# #### Public and private sets have different sequence lengths, so we will preprocess them separately and load models of different tensor shapes.
# In[14]:
public_df = test.query("seq_length == 107")
private_df = test.query("seq_length == 130")
print("public_df : ",public_df.shape)
print("private_df : ",private_df.shape)
public_inputs_cat = preprocess_categorical_inputs(public_df)
private_inputs_cat = preprocess_categorical_inputs(private_df)
public_inputs_num = preprocess_numerical_inputs(public_df,cols=numerical_features)
private_inputs_num = preprocess_numerical_inputs(private_df,cols=numerical_features)
print("Public categorical Features Shape : ",public_inputs_cat.shape)
print("Public numerical Features Shape : ",public_inputs_num.shape)
print("Private categorical Features Shape : ",private_inputs_cat.shape)
print("Private numerical Features Shape : ",private_inputs_num.shape)
# ### loss Function
# In[15]:
### Custom Loss Function for ['reactivity','deg_Mg_pH10','deg_Mg_50C'] target Columns
# def rmse(y_actual, y_pred):
# mse = tf.keras.losses.mean_squared_error(y_actual, y_pred)
# return K.sqrt(mse)
# def MCRMSE(y_actual, y_pred, num_scored=3):
# score = 0
# for i in range(num_scored):
# score += rmse(y_actual[:,:, i], y_pred[:,:, i]) / num_scored
# return score
def MCRMSE(y_true, y_pred):
colwise_mse = tf.reduce_mean(tf.square(y_true[:,:,:3] - y_pred[:,:,:3]), axis=1)
return tf.reduce_mean(tf.sqrt(colwise_mse), axis=1)
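# Sanity check (demo tensors, assumed shapes): with predictions off by exactly 1
# everywhere, each scored column has RMSE 1, so MCRMSE returns 1.0 per sample.
print("MCRMSE demo:", MCRMSE(tf.zeros((2, 68, 5)), tf.ones((2, 68, 5))).numpy())  # [1. 1.]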
# ### Learning Rate Schedulers
# ### Ramp-up / decay LR schedule
# In[16]:
def get_lr_callback(batch_size=8):
lr_start = 0.00001
lr_max = 0.004
lr_min = 0.00005
lr_ramp_ep = 45
lr_sus_ep = 2
lr_decay = 0.8
def lrfn(epoch):
if epoch < lr_ramp_ep:
lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start
elif epoch < lr_ramp_ep + lr_sus_ep:
lr = lr_max
else:
lr = (lr_max - lr_min) * lr_decay**(epoch - lr_ramp_ep - lr_sus_ep) + lr_min
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
return lr_callback
# ### Cosine schedule with warmup
# In[17]:
def get_cosine_schedule_with_warmup(lr,num_warmup_steps, num_training_steps, num_cycles=3.5):
"""
Modified version of the get_cosine_schedule_with_warmup from huggingface.
(https://huggingface.co/transformers/_modules/transformers/optimization.html#get_cosine_schedule_with_warmup)
Create a schedule with a learning rate that decreases following the
values of the cosine function between 0 and `pi * cycles` after a warmup
period during which it increases linearly between 0 and 1.
"""
def lrfn(epoch):
if epoch < num_warmup_steps:
return (float(epoch) / float(max(1, num_warmup_steps))) * lr
progress = float(epoch - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) * lr
return tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
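# Worked example (with the settings used below: lr=0.001, 20 warmup steps, `epochs`
# training steps): epoch 10 sits halfway through warmup, so the schedule returns
# 0.001 * 10/20 = 0.0005; after warmup the rate follows 0.5*(1+cos(.))*lr, oscillating
# between lr and 0 over num_cycles=3.5 cosine cycles.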
# ### Different Layers
# In[18]:
def lstm_layer(hidden_dim, dropout):
return tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(hidden_dim,
dropout=dropout,
return_sequences=True,
kernel_initializer = 'orthogonal'))
# In[19]:
def gru_layer(hidden_dim, dropout):
return L.Bidirectional(
L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal')
)
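# Shape sketch (demo tensor, assumed sizes): Bidirectional concatenates the forward and
# backward passes, so gru_layer(128, 0.5) maps (batch, seq, feat) to (batch, seq, 256).
print("gru_layer demo output shape:", gru_layer(128, 0.5)(tf.zeros((2, 107, 16))).shape)  # (2, 107, 256)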
# ### Model Building
# In[20]:
# def build_model(embed_size,
# seq_len = 107,
# pred_len = 68,
# dropout = dropout,
# sp_dropout = sp_dropout,
# num_features = num_features,
# num_hidden_units = num_hidden_units,
# embed_dim = embed_dim,
# layers = layers,
# hidden_dim = hidden_dim,
# n_layers = n_layers,
# cat_feature = cat_feature):
# inputs = L.Input(shape=(seq_len, cat_feature),name='category_input')
# embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs)
# reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
# reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped)
# numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input')
# n_Dense_1 = L.Dense(64)(numerical_input)
# n_Dense_2 = L.Dense(128)(n_Dense_1)
# numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2)
# hidden = L.concatenate([reshaped_conv, numerical_conv])
# hidden = L.SpatialDropout1D(sp_dropout)(hidden)
# for x in range(n_layers):
# if layers[x] == "GRU":
# hidden = gru_layer(hidden_dim[x], dropout[x])(hidden)
# else:
# hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden)
# # Since we are only making predictions on the first part of each sequence,
# # we have to truncate it
# truncated = hidden[:, :pred_len]
# out = L.Dense(5)(truncated)
# model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out)
# adam = tf.optimizers.Adam()
# radam = tfa.optimizers.RectifiedAdam()
# lookahead = tfa.optimizers.Lookahead(adam, sync_period=6)
# ranger = tfa.optimizers.Lookahead(radam, sync_period=6)
# model.compile(optimizer=radam, loss=MCRMSE)
# return model
# In[21]:
def build_model(embed_size,
seq_len = 107,
pred_len = 68,
dropout = dropout,
sp_dropout = sp_dropout,
num_features = num_features,
num_hidden_units = num_hidden_units,
embed_dim = embed_dim,
layers = layers,
hidden_dim = hidden_dim,
n_layers = n_layers,
cat_feature = cat_feature):
inputs = L.Input(shape=(seq_len, cat_feature),name='category_input')
embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs)
reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped = L.SpatialDropout1D(sp_dropout)(reshaped)
reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped)
numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input')
# n_Dense_1 = L.Dense(64)(numerical_input)
# n_Dense_2 = L.Dense(128)(n_Dense_1)
# numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2)
hidden = L.concatenate([reshaped_conv, numerical_input])
hidden_1 = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(hidden)
hidden = gru_layer(128, 0.5)(hidden_1)
hidden = L.concatenate([hidden, hidden_1])
# hidden = L.SpatialDropout1D(sp_dropout)(hidden)
for x in range(n_layers):
if layers[x] == "GRU":
hidden = gru_layer(hidden_dim[x], dropout[x])(hidden)
else:
hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden)
hidden = L.concatenate([hidden, hidden_1])
# Since we are only making predictions on the first part of each sequence,
# we have to truncate it
truncated = hidden[:, :pred_len]
out = L.Dense(5)(truncated)
model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out)
adam = tf.optimizers.Adam()
radam = tfa.optimizers.RectifiedAdam()
lookahead = tfa.optimizers.Lookahead(adam, sync_period=6)
ranger = tfa.optimizers.Lookahead(radam, sync_period=6)
model.compile(optimizer=radam, loss=MCRMSE)
return model
# ### Build and train model
#
# We will train a bi-directional GRU model. It has three recurrent layers and dropout. To learn more about RNNs, LSTMs and GRUs, please see [this blog post](https://colah.github.io/posts/2015-08-Understanding-LSTMs/).
# In[22]:
model = build_model(embed_size=len(token_list))
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# ### Add Augmentation Data
# ### stratify_group Based on structure and SN_Filter
# In[23]:
def get_stratify_group(row):
snf = row['SN_filter']
snr = row['signal_to_noise']
cnt = row['cnt']
id_ = row['id']
structure = row['structure']
if snf == 0:
if snr<0:
snr_c = 0
elif 0<= snr < 2:
snr_c = 1
elif 2<= snr < 4:
snr_c = 2
elif 4<= snr < 5.5:
snr_c = 3
elif 5.5<= snr < 10:
snr_c = 4
elif snr >= 10:
snr_c = 5
else: # snf == 1
if snr<0:
snr_c = 6
elif 0<= snr < 1:
snr_c = 7
elif 1<= snr < 2:
snr_c = 8
elif 2<= snr < 3:
snr_c = 9
elif 3<= snr < 4:
snr_c = 10
elif 4<= snr < 5:
snr_c = 11
elif 5<= snr < 6:
snr_c = 12
elif 6<= snr < 7:
snr_c = 13
elif 7<= snr < 8:
snr_c = 14
elif 8<= snr < 9:
snr_c = 15
elif 9<= snr < 10:
snr_c = 15
elif snr >= 10:
snr_c = 16
return '{}_{}'.format(id_,snr_c)
train['stratify_group'] = train.apply(get_stratify_group, axis=1)
train['stratify_group'] = train['stratify_group'].astype('category').cat.codes
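# Note: the group key embeds the sequence id, so the GroupKFold splits below keep all
# rows that share an id (and SNR bin) -- e.g. augmented copies -- in the same fold;
# the "Data Leakage" check inside the loop verifies exactly that.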
skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED)
gkf = GroupKFold(n_splits=n_folds)
fig, ax = plt.subplots(n_folds,3,figsize=(20,5*n_folds))
for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])):
print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL)
train_data = train.iloc[train_index]
val_data = train.iloc[val_index]
print("Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1]))
print("Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1]))
val_data = val_data[val_data['cnt'] == 1]
print("Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])]))
# print(train_data['stratify_group'].unique(),val_data['stratify_group'].unique())
print("number of Train Data points : ",len(train_data))
print("number of val_data Data points : ",len(val_data))
print("number of unique Structure in Train data : ", len(train_data.structure.unique()))
print("number of unique Structure in val data : ",len(val_data.structure.unique()), val_data.structure.value_counts()[:5].values)
print("Train SN_Filter == 1 : ", len(train_data[train_data['SN_filter']==1]))
print("val_data SN_Filter == 1 : ", len(val_data[val_data['SN_filter']==1]))
print("Train SN_Filter == 0 : ", len(train_data[train_data['SN_filter']==0]))
print("val_data SN_Filter == 0 : ", len(val_data[val_data['SN_filter']==0]))
print("Unique ID :",len(train_data.id.unique()))
sns.kdeplot(train[train['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Red",label='Train All')
sns.kdeplot(train_data[train_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Blue",label='Train')
sns.kdeplot(val_data[val_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Green",label='Validation')
ax[Fold][0].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 0')
sns.kdeplot(train[train['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Red",label='Train All')
sns.kdeplot(train_data[train_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Blue",label='Train')
sns.kdeplot(val_data[val_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Green",label='Validation')
ax[Fold][1].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 1')
sns.kdeplot(train['signal_to_noise'],ax=ax[Fold][2],color="Red",label='Train All')
sns.kdeplot(train_data['signal_to_noise'],ax=ax[Fold][2],color="Blue",label='Train')
sns.kdeplot(val_data['signal_to_noise'],ax=ax[Fold][2],color="Green",label='Validation')
ax[Fold][2].set_title(f'Fold : {Fold+1} Signal/Noise')
plt.show()
# In[24]:
submission = pd.DataFrame(index=sample_sub.index, columns=target_cols).fillna(0) # test dataframe with 0 values
val_losses = []
historys = []
oof_preds_all = []
stacking_pred_all = []
kf = KFold(n_folds, shuffle=True, random_state=SEED)
skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED)
gkf = GroupKFold(n_splits=n_folds)
for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])):
print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL)
print(f"|| Batch_size: {BATCH_SIZE} \n|| n_layers: {n_layers} \n|| embed_dim: {embed_dim}")
print(f"|| cat_feature: {cat_feature} \n|| num_features: {num_features}")
print(f"|| layers : {layers} \n|| hidden_dim: {hidden_dim} \n|| dropout: {dropout} \n|| sp_dropout: {sp_dropout}")
train_data = train.iloc[train_index]
val_data = train.iloc[val_index]
print("|| number Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1]))
print("|| number Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1]))
print("|| Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])]))
val_data = val_data[val_data['cnt'] == 1]
model_train = build_model(embed_size=len(token_list))
model_short = build_model(embed_size=len(token_list),seq_len=107, pred_len=107)
model_long = build_model(embed_size=len(token_list),seq_len=130, pred_len=130)
train_inputs_cat = preprocess_categorical_inputs(train_data,cols=categorical_features)
train_inputs_num = preprocess_numerical_inputs(train_data,cols=numerical_features)
train_labels = np.array(train_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
val_inputs_cat = preprocess_categorical_inputs(val_data,cols=categorical_features)
val_inputs_num = preprocess_numerical_inputs(val_data,cols=numerical_features)
val_labels = np.array(val_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
# train_inputs_cat, train_labels = train_inputs_all_cat[train_index], train_labels_all[train_index]
# val_inputs_cat, val_labels = train_inputs_all_cat[val_index], train_labels_all[val_index]
# train_inputs_num, val_inputs_num = train_inputs_all_num[train_index],train_inputs_all_num[val_index]
# csv_logger
csv_logger = tf.keras.callbacks.CSVLogger(f'Fold_{Fold}_log.csv', separator=',', append=False)
# SAVE BEST MODEL EACH FOLD
checkpoint = tf.keras.callbacks.ModelCheckpoint(f'{model_name}_Fold_{Fold}.h5',
monitor='val_loss',
verbose=0,
mode='min',
save_freq='epoch')
if Cosine_Schedule:
# Cosine schedule callback
lr_schedule = get_cosine_schedule_with_warmup(lr=0.001, num_warmup_steps=20, num_training_steps=epochs)
elif Rampup_decy_lr:
# Ramp-up / decay lr schedule
lr_schedule = get_lr_callback(BATCH_SIZE)
else:
lr_schedule = tf.keras.callbacks.ReduceLROnPlateau()
history = model_train.fit(
{'numeric_input': train_inputs_num,
'category_input': train_inputs_cat} , train_labels,
validation_data=({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat}
,val_labels),
batch_size=BATCH_SIZE,
epochs=epochs,
callbacks=[lr_schedule, checkpoint, csv_logger],
verbose=1 if debug else 0
)
print("Min Validation Loss : ", min(history.history['val_loss']))
print("Min Validation Epoch : ",np.argmin( history.history['val_loss'] )+1)
val_losses.append(min(history.history['val_loss']))
historys.append(history)
model_short.load_weights(f'{model_name}_Fold_{Fold}.h5')
model_long.load_weights(f'{model_name}_Fold_{Fold}.h5')
public_preds = model_short.predict({'numeric_input': public_inputs_num,
'category_input': public_inputs_cat})
private_preds = model_long.predict({'numeric_input': private_inputs_num,
'category_input': private_inputs_cat})
oof_preds = model_train.predict({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat})
stacking_pred = model_short.predict({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat})
preds_model = []
for df, preds in [(public_df, public_preds), (private_df, private_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_df = pd.DataFrame(single_pred, columns=target_cols)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
preds_model.append(single_df)
preds_model_df = pd.concat(preds_model)
preds_model_df = preds_model_df.groupby(['id_seqpos'],as_index=True).mean()
submission[target_cols] += preds_model_df[target_cols].values / n_folds
for df, preds in [(val_data, oof_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_label = val_labels[i]
single_label_df = pd.DataFrame(single_label, columns=target_cols)
single_label_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_label_df.shape[0])]
single_label_df['id'] = [f'{uid}' for x in range(single_label_df.shape[0])]
single_label_df['s_id'] = [x for x in range(single_label_df.shape[0])]
single_df = pd.DataFrame(single_pred, columns=pred_col_names)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
single_df = pd.merge(single_label_df,single_df, on="id_seqpos", how="left")
oof_preds_all.append(single_df)
for df, preds in [(val_data, stacking_pred)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
# single_label = val_labels[i]
# single_label_df = pd.DataFrame(single_label, columns=target_cols)
# single_label_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_label_df.shape[0])]
# single_label_df['id'] = [f'{uid}' for x in range(single_label_df.shape[0])]
# single_label_df['s_id'] = [x for x in range(single_label_df.shape[0])]
single_df = pd.DataFrame(single_pred, columns=pred_col_names)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
single_df['id'] = [uid for x in range(single_df.shape[0])]
stacking_pred_all.append(single_df)
# PLOT TRAINING
history_data = pd.read_csv(f'Fold_{Fold}_log.csv')
EPOCHS = len(history_data['epoch'])
history = pd.DataFrame({'history':history_data.to_dict('list')})
fig = plt.figure(figsize=(15,5))
plt.plot(np.arange(EPOCHS),history.history['lr'],'-',label='Learning Rate',color='#ff7f0e')
x = np.argmax( history.history['lr'] ); y = np.max( history.history['lr'] )
xdist = plt.xlim()[1] - plt.xlim()[0]; ydist = plt.ylim()[1] - plt.ylim()[0]
plt.scatter(x,y,s=200,color='#1f77b4'); plt.text(x-0.03*xdist,y-0.13*ydist,f'Max Learning Rate : {y}' ,size=12)
plt.ylabel('Learning Rate',size=14); plt.xlabel('Epoch',size=14)
plt.legend(loc=1)
plt2 = plt.gca().twinx()
plt2.plot(np.arange(EPOCHS),history.history['loss'],'-o',label='Train Loss',color='#2ca02c')
plt2.plot(np.arange(EPOCHS),history.history['val_loss'],'-o',label='Val Loss',color='#d62728')
x = np.argmin( history.history['val_loss'] ); y = np.min( history.history['val_loss'] )
ydist = plt.ylim()[1] - plt.ylim()[0]
plt.scatter(x,y,s=200,color='#d62728'); plt.text(x-0.03*xdist,y+0.05*ydist,'min loss',size=14)
plt.ylabel('Loss',size=14)
fig.text(s=f"Model Name : {model_name}" , x=0.5, y=1.08, fontsize=18, ha='center', va='center',color="green")
fig.text(s=f"|| Fold : {Fold+1} | Batch_size: {BATCH_SIZE} | num_features: {num_features} | cat_feature: {cat_feature} |n_layers: {n_layers} | embed_dim: {embed_dim} ||", x=0.5, y=1.0, fontsize=15, ha='center', va='center',color="red")
fig.text(s=f"|| layers : {layers} | hidden_dim: {hidden_dim} | dropout: {dropout} | sp_dropout: {sp_dropout} ||", x=0.5, y=0.92, fontsize=15, ha='center', va='center',color="blue")
plt.legend(loc=3)
plt.savefig(f'Fold_{Fold+1}.png', bbox_inches='tight')
plt.show()
submission["id_seqpos"] = preds_model_df.index
submission = pd.merge(sample_sub["id_seqpos"], submission, on="id_seqpos", how="left")
OOF = | pd.concat(oof_preds_all) | pandas.concat |
import biom
import skbio
import numpy as np
import pandas as pd
from deicode.matrix_completion import MatrixCompletion
from deicode.preprocessing import rclr
from deicode._rpca_defaults import (DEFAULT_RANK, DEFAULT_MSC, DEFAULT_MFC,
DEFAULT_ITERATIONS)
from scipy.linalg import svd
def rpca(table: biom.Table,
n_components: int = DEFAULT_RANK,
min_sample_count: int = DEFAULT_MSC,
min_feature_count: int = DEFAULT_MFC,
max_iterations: int = DEFAULT_ITERATIONS) -> (
skbio.OrdinationResults,
skbio.DistanceMatrix):
"""Runs RPCA with an rclr preprocessing step.
This code will be run by both the standalone and QIIME 2 versions of
DEICODE.
"""
# filter sample to min depth
def sample_filter(val, id_, md): return sum(val) > min_sample_count
def observation_filter(val, id_, md): return sum(val) > min_feature_count
# filter and import table
table = table.filter(observation_filter, axis='observation')
table = table.filter(sample_filter, axis='sample')
table = table.to_dataframe().T
if len(table.index) != len(set(table.index)):
raise ValueError('Data-table contains duplicate indices')
if len(table.columns) != len(set(table.columns)):
raise ValueError('Data-table contains duplicate columns')
# rclr preprocessing and OptSpace (RPCA)
opt = MatrixCompletion(n_components=n_components,
max_iterations=max_iterations).fit(rclr(table))
rename_cols = ['PC' + str(i+1) for i in range(n_components)]
X = opt.sample_weights @ opt.s @ opt.feature_weights.T
X = X - X.mean(axis=0)
X = X - X.mean(axis=1).reshape(-1, 1)
u, s, v = svd(X)
u = u[:, :n_components]
v = v.T[:, :n_components]
p = s**2 / np.sum(s**2)
p = p[:n_components]
s = s[:n_components]
feature_loading = pd.DataFrame(v, index=table.columns, columns=rename_cols)
sample_loading = pd.DataFrame(u, index=table.index, columns=rename_cols)
# % var explained
proportion_explained = pd.Series(p, index=rename_cols)
# get eigenvalues
eigvals = | pd.Series(s, index=rename_cols) | pandas.Series |
# Indicator calculators
import pandas as pd
import talib
def talib_OBV(DataFrame):
res = talib.OBV(DataFrame.close.values, DataFrame.volume.values)
return pd.DataFrame({'OBV': res}, index=DataFrame.index)
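# Usage sketch (hypothetical OHLCV frame): every wrapper in this module expects a
# DataFrame exposing the relevant open/high/low/close/volume columns and returns a
# DataFrame of indicator values aligned to the same index, e.g.:
# demo = pd.DataFrame({'close': [10.0, 10.5, 10.2, 10.8],
#                      'volume': [100.0, 120.0, 90.0, 150.0]})
# talib_OBV(demo)  # -> DataFrame with a single 'OBV' column, same index as demo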
def talib_DEMA(DataFrame, N=30):
res = talib.DEMA(DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'DEMA': res}, index=DataFrame.index)
def talib_KAMA(DataFrame, N=30):
res = talib.KAMA(DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'KAMA': res}, index=DataFrame.index)
def talib_MIDPOINT(DataFrame, N=14):
res = talib.MIDPOINT(DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'MIDPOINT': res}, index=DataFrame.index)
def talib_MIDPRICE(DataFrame, N=14):
res = talib.MIDPRICE(DataFrame.high.values, DataFrame.low.values,
timeperiod=N)
return pd.DataFrame({'MIDPRICE': res}, index=DataFrame.index)
def talib_T3(DataFrame, N=5, vfactor=0):
res = talib.T3(DataFrame.close.values, timeperiod=N, vfactor=vfactor)
return pd.DataFrame({'T3': res}, index=DataFrame.index)
def talib_TEMA(DataFrame, N=30):
res = talib.TEMA(DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'TEMA': res}, index=DataFrame.index)
def talib_TRIMA(DataFrame, N=30):
res = talib.TRIMA(DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'TRIMA': res}, index=DataFrame.index)
def talib_WMA(DataFrame, N=30):
res = talib.WMA(DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'WMA': res}, index=DataFrame.index)
def talib_MOM(DataFrame, N=10):
res = talib.MOM(DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'MOM': res}, index=DataFrame.index)
def talib_BBANDS(DataFrame, N=5, nbdevup=2, nbdevdn=2, matype=0):
upperband, middleband, lowerband = talib.BBANDS(DataFrame.close.values,
timeperiod=N,
nbdevup=nbdevup,
nbdevdn=nbdevdn,
matype=matype)
return pd.DataFrame(
{'UPPER': upperband, 'MIDDLE': middleband, 'LOWER': lowerband},
index=DataFrame.index)
def talib_AVGPRICE(DataFrame):
"""AVGPRICE - Average Price 平均价格函数"""
res = talib.AVGPRICE(DataFrame.open.values, DataFrame.high.values,
DataFrame.low.values, DataFrame.close.values)
return pd.DataFrame({'AVGPRICE': res}, index=DataFrame.index)
def talib_MEDPRICE(DataFrame):
"""MEDPRICE - Median Price 中位数价格"""
res = talib.MEDPRICE(DataFrame.high.values, DataFrame.low.values)
return pd.DataFrame({'MEDPRICE': res}, index=DataFrame.index)
def talib_TYPPRICE(DataFrame):
"""TYPPRICE - Typical Price 代表性价格"""
res = talib.TYPPRICE(DataFrame.high.values, DataFrame.low.values,
DataFrame.close.values)
return pd.DataFrame({'TYPPRICE': res}, index=DataFrame.index)
def talib_WCLPRICE(DataFrame):
"""WCLPRICE - Weighted Close Price 加权收盘价"""
res = talib.WCLPRICE(DataFrame.high.values, DataFrame.low.values,
DataFrame.close.values)
return pd.DataFrame({'WCLPRICE': res}, index=DataFrame.index)
def talib_NATR(DataFrame, N=14):
res = talib.NATR(DataFrame.high.values, DataFrame.low.values,
DataFrame.close.values, timeperiod=N)
return pd.DataFrame({'NATR': res}, index=DataFrame.index)
def talib_TRANGE(DataFrame):
res = talib.TRANGE(DataFrame.high.values, DataFrame.low.values,
DataFrame.close.values)
return | pd.DataFrame({'TRANGE': res}, index=DataFrame.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels don't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names don't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index( | lrange(4) | pandas.compat.lrange |
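# For reference, pandas.compat.lrange (the API completed above) is essentially a
# list-returning wrapper around range; a minimal sketch, assuming the Python 3 behaviour:
def lrange(*args, **kwargs):
    return list(range(*args, **kwargs))
# e.g. lrange(4) == [0, 1, 2, 3], which is what each Index(...) above receives.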
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
from RLC.real_chess import agent, environment, learn, tree
import chess
from chess.pgn import Game
opponent = agent.GreedyAgent()
env = environment.Board(opponent, FEN=None)
player = agent.Agent(lr=0.01, network='big')
learner = learn.TD_search(env, player, gamma=0.8, search_time=2)
node = tree.Node(learner.env.board, gamma=learner.gamma)
player.model.summary()
learner.learn(iters=1000, timelimit_seconds=3600)
reward_smooth = | pd.DataFrame(learner.reward_trace) | pandas.DataFrame |
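# A minimal sketch of a likely next step, assuming the reward trace wrapped in a DataFrame
# above is then smoothed and plotted; the rolling window size is an assumption, not a value
# taken from the original script.
reward_smooth = pd.DataFrame(learner.reward_trace)
reward_smooth.rolling(window=125, min_periods=0).mean().plot(figsize=(16, 9))
plt.show()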
import os
import numpy as np
import pandas as pd
from time import time
from lr.models.transformers.RobertaWrapper import RobertaWrapper
from lr.models.transformers.processor import clean_df
from lr.training.util import filter_df_by_label
from tqdm import tqdm
import glob
import argparse
import logging
# Help Functions
def clean_folder(folder):
cacheds = glob.glob('data/{}/cached_*'.format(folder))
for path in cacheds:
os.remove(path)
def clean_folder_log(output_dir_name):
cacheds = glob.glob(output_dir_name + '/*_log.csv')
for path in cacheds:
os.remove(path)
def search(train_path,
dev_path,
folder,
random_state,
n_cores,
n_iter,
output_dir_name,
verbose,
max_range=10):
# Get data
train = pd.read_csv(train_path)
dev = pd.read_csv(dev_path)
# dev = dev.sample(1000) # debug
if verbose:
print("clean train")
train = clean_df(train, n_cores=n_cores)
if verbose:
print("clean dev")
dev = clean_df(dev, n_cores=n_cores)
if verbose:
print("train.shape", train.shape)
print("dev.shape", dev.shape)
# Get hyperparams
basic_hyperparams = {"local_rank": -1,
"overwrite_cache": False,
"per_gpu_train_batch_size": 32,
"per_gpu_eval_batch_size": 50,
"gradient_accumulation_steps": 1,
"max_steps": -1,
# "max_steps": 100, # debug
"warmup_steps": 0,
"save_steps": 80580,
"no_cuda": False,
"n_gpu": 1,
"model_name_or_path": "roberta",
"output_dir": output_dir_name,
"random_state": random_state,
"fp16": False,
"fp16_opt_level": "01",
"device": "cpu",
"verbose": True,
"model_type": "roberta",
"pad_on_left": False,
"pad_token": 0,
"n_cores": n_cores,
'eval_sample_size': 200,
"pad_token_segment_id": 0,
"mask_padding_with_zero": True,
"base_path": "data/{}/cached_".format(folder),
"pretrained_weights": 'roberta-large-mnli'}
choice_0 = {'num_train_epochs': 3.0,
"max_seq_length": 200,
"learning_rate": 5e-5,
"weight_decay": 0.0,
"adam_epsilon": 1e-8,
"max_grad_norm": 1.0}
param_grid = {"max_seq_length": range(50, 210, max_range),
"num_train_epochs": [1.0, 2.0, 3.0],
"learning_rate": np.linspace(0.00005, 0.0001, max_range),
"weight_decay": np.linspace(0, 0.01, max_range),
"adam_epsilon": np.linspace(1e-8, 1e-7, max_range),
"max_grad_norm": np.linspace(0.9, 1.0, max_range)}
np.random.seed(random_state)
choices = []
for i in range(n_iter):
hyper_choice = {}
for k in param_grid:
hyper_choice[k] = np.random.choice(param_grid[k])
choices.append(hyper_choice)
# choices.append(choice_0)
# Search
all_accs = []
all_train_times = []
init_search = time()
for hyper_choice in tqdm(choices):
hyperparams = basic_hyperparams.copy()
hyperparams.update(hyper_choice)
model = RobertaWrapper(hyperparams)
init = time()
model.fit(train)
train_time = time() - init
result = model.get_results(dev, mode="dev")
acc = result.indicator.mean()
all_accs.append(acc)
all_train_times.append(train_time)
# log partial Results
logging.info("\n\n\n***** acc = {:.1%} *****\n".format(acc))
logging.info(
"***** train_time = {:.2f} *****\n".format(train_time / 3600))
for k in hyper_choice:
logging.info("***** {} = {} *****\n".format(k, hyper_choice[k]))
logging.info("\n\n\n")
clean_folder(folder)
search_time = time() - init_search
search_time = search_time / 3600
# Store Results
best_id = np.argmax(all_accs)
best_score = all_accs[best_id]
param_df = pd.DataFrame(choices[best_id], index=[0])
dict_ = {"search_random_state": [random_state],
"number_of_search_trails": [n_iter],
"expected_val_score": [np.mean(all_accs)],
"best_val_score": [best_score],
"mean_fit_time": [np.mean(all_train_times) / 3600],
"search_time": [search_time]}
search_results = | pd.DataFrame(dict_) | pandas.DataFrame |
import numpy as np
import pandas as pd
class NB:
def __init__(self):
self.target = "" # name of the label
self.columns = pd.Index([]) # name of the features
self.num_cols = pd.Index([]) # name of numerical features
self.cat_cols = pd.Index([]) # name of categorical features
self.py = {} # P(y)
self.px = {} # P(xi|y)
def train(self, X: pd.DataFrame, y: pd.Series):
# Sanity check
assert all(X.index == y.index), "Indices mismatch"
# Drop rows with missing data
Xy = pd.concat([X, y], axis=1).dropna(axis=0, how='any')
_X, _y = Xy[X.columns], Xy[y.name]
# Initialization
self.target = _y.name
self.columns = _X.columns
self.num_cols = _X.select_dtypes(include='number').columns
self.cat_cols = _X.select_dtypes(exclude='number').columns
self.cat_cols = self.columns.drop(self.num_cols)
# Estimate log P(y)
y_counts = _y.value_counts()
y_total = y_counts.sum()
self.py = {y_val: y_count / y_total for y_val, y_count in y_counts.iteritems()}
# Estimate log P(xi|y)
for y_val, py in self.py.items():
self.px[y_val] = {}
X_given_y = _X[_y == y_val]
# Split X_given_y into numerical and categorical parts
X_num_given_y = X_given_y[self.num_cols]
X_cat_given_y = X_given_y[self.cat_cols]
# Numerical: mean and standard deviation
self.px[y_val]['numerical'] = X_num_given_y.describe().loc[['mean', 'std'], :]
# Categorical: frequency
self.px[y_val]['categorical'] = {feature: xi.value_counts(normalize=True)
for feature, xi in X_cat_given_y.iteritems()}
def predict(self, X: pd.DataFrame, return_LL: bool = False):
r"""Predict the labels of all the instances in a feature matrix
Args:
X: pd.DataFrame
return_LL: bool
If set to True, return the log-posterior
Returns:
pred (return_LL=False)
pred, LL (return_LL=True)
"""
pred = []
LL = []
for index, x in X.iterrows():
# Compute log-likelihood
ll = self.LL_numerical(x) + self.LL_categorical(x)
# Find the most likely label
ll.sort_values(ascending=False, inplace=True)
LL.append(ll)
if -np.inf in ll.values: # xi contains values not included by the training set (their log-probability is -inf)
# Break ties by comparing P(y)
pred.append(pd.Series(self.py).sort_values(ascending=False).index[0])
else:
pred.append(ll.index[0])
# Clean up LL and pred
LL = pd.concat(LL, axis=1).T
LL.index = X.index
pred = pd.Series(pred, index=X.index, name=self.target)
if return_LL:
return pred, LL
else:
return pred
def LL_numerical(self, x: pd.Series) -> pd.Series:
r"""Log-likelihood of all numerical features of a given instance
Args:
x: pd.Series
Returns:
pd.Series
"""
_num_cols = self.num_cols.drop(x.index[x.isna()], errors='ignore')
_x = x[_num_cols].to_numpy()
_ll = {}
for (y_val, px), py in zip(self.px.items(), self.py.values()):
_mu = px['numerical'].loc['mean', _num_cols].to_numpy()
_sigma = px['numerical'].loc['std', _num_cols].to_numpy()
_ll[y_val] = np.sum(self.log_gaussian(_x, _mu, _sigma))
return pd.Series(_ll)
def LL_categorical(self, x: pd.Series) -> pd.Series:
r"""Log-posterior of all categorical features of a given instance
Args:
x: pd.Series
Returns:
pd.Series
"""
_cat_cols = self.cat_cols.drop(x.index[x.isna()], errors='ignore')
_x = x[_cat_cols]
_ll = {}
for (y_val, px), py in zip(self.px.items(), self.py.values()):
px_given_y = [px['categorical'][feature].get(xi, 0) for feature, xi in _x.iteritems()]
_ll[y_val] = np.sum(np.log(px_given_y)) + np.log(py)
return | pd.Series(_ll) | pandas.Series |
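# NB.LL_numerical above relies on self.log_gaussian, which is not defined in this excerpt.
# A minimal sketch, assuming it computes the element-wise Gaussian log-density
# log N(x | mu, sigma^2):
def log_gaussian(x, mu, sigma):
    return -0.5 * np.log(2 * np.pi * sigma ** 2) - ((x - mu) ** 2) / (2 * sigma ** 2)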
import pandas as pd
import numpy as np
import category_encoders as ce
from sklearn.preprocessing import normalize
from sklearn.utils import resample
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.under_sampling import NearMiss
from scipy.stats import skew, kurtosis
from src.utils.common.common_helper import read_config
from loguru import logger
import os
from from_root import from_root
config_args = read_config("./config.yaml")
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
class Preprocessing:
@staticmethod
def get_data(filepath):
try:
data = filepath
df = pd.read_csv(data)
logger.info("Data successfully loaded into data frame")
return df
except Exception as e:
logger.info(e)
@staticmethod
def col_seperator(df, typ: str):
try:
logger.info("Column Separator Type {typ}")
if typ == 'Numerical_columns':
Numerical_columns = df.select_dtypes(exclude='object')
logger.info("Successfully Implemented")
return Numerical_columns
elif typ == 'Categorical_columns':
Categorical_columns = df.select_dtypes(include='object')
logger.info("Successfully Implemented")
return Categorical_columns
else:
logger.error("Type Not Found")
except Exception as e:
logger.info(e)
@staticmethod
def delete_col(df, cols: list):
temp_list = []
for i in cols:
if i in df.columns:
temp_list.append(i)
else:
raise Exception('Column Not Found')
try:
df = df.drop(temp_list, axis=1)
logger.info("Column Successfully Dropped!")
return df
except Exception as e:
logger.info(e)
raise e
@staticmethod
def missing_values(df):
try:
columns = df.isnull().sum()[df.isnull().sum() > 0].sort_values(ascending=False).index
values = df.isnull().sum()[df.isnull().sum() > 0].sort_values(ascending=False).values
mv_df = pd.DataFrame(columns, columns=['Columns'])
mv_df['Missing_Values'] = values
mv_df['Percentage'] = np.round((values / len(df)) * 100, 2)
logger.info("Missing Values Successfully Implemented")
return columns, values, mv_df
except Exception as e:
logger.info(e)
@staticmethod
def find_skewness(x):
try:
logger.info(f"Skewness : {skew(x)}")
return skew(x)
except Exception as e:
logger.error(e)
@staticmethod
def find_kurtosis(x):
try:
logger.info(f"Skewness : {kurtosis(x)}")
return kurtosis(x)
except Exception as e:
logger.error(e)
@staticmethod
def fill_numerical(df, typ, cols, value=None):
for i in cols:
if i in df.columns:
continue
else:
return 'Column Not Found'
if typ == 'Mean':
try:
logger.info("Missing Values Filled with Mean")
return df[cols].fillna(df[cols].mean())
except Exception as e:
logger.info(e)
elif typ == 'Median':
try:
logger.info("Missing Values Filled with Mean")
return df[cols].fillna(df[cols].median())
except Exception as e:
logger.info(e)
elif typ == 'Arbitrary Value':
try:
logger.info("Missing Values Filled with Arbitrary Value")
return df[cols].fillna(value)
except Exception as e:
logger.info(e)
elif typ == 'Interpolate':
try:
logger.info("Missing Values Filled with Interpolate")
return df[cols].interpolate(value)
except Exception as e:
logger.info(e)
else:
logger.error("Invalid Input")
return 'Type Not present'
@staticmethod
def fill_categorical(df, typ=None, col=None, value=None):
# Replace na with some meaning of na
try:
if typ == 'replace':
temp_list = []
for i in col:
if i in df.columns:
temp_list.append(i)
else:
return 'Column Not Found'
if col and value is not None:
logger.info("Categorical Values Filled with Replace")
return df[col].fillna(value)
else:
return 'Please provide values and columns'
elif typ == 'Mode':
if col is not None:
logger.info("Categorical Values Filled with Mode")
return df[col].fillna(df.mode()[col][0])
else:
return 'Please provide values and columns'
elif typ == 'New Category':
if col is not None:
logger.info("Categorical Values Filled with New Category")
return df[col].fillna(value)
else:
return 'Please provide values and columns'
else:
logger.error("Invalid Input")
return 'Type not found'
except Exception as e:
logger.error(e)
@staticmethod
def Unique(df, percent):
try:
percent = percent / 25
holder = []
for column in df.columns:
if df[column].nunique() > int(len(df) * percent / 4):
print(column, '+', df[column].unique())
holder.append(column)
logger.info(f"Found {holder} Unique elements!")
return holder
except Exception as e:
logger.error(e)
@staticmethod
def encodings(df, cols, kind: str):
try:
if kind == 'One Hot Encoder':
onehot = ce.OneHotEncoder(cols=cols)
onehot_df = onehot.fit_transform(df)
logger.info("One Hot Encoding Implemented!")
return onehot_df
elif kind == 'Dummy Encoder':
dummy_df = pd.get_dummies(data=df, columns=cols, drop_first=True)
logger.info("Dummy Encoding Implemented!")
return dummy_df
elif kind == 'Effective Encoder':
target = ce.TargetEncoder(cols=cols)
target_df = target.fit_transform(df)
logger.info("Effective Encoding Implemented!")
return target_df
elif kind == 'Binary Encoder':
binary = ce.BinaryEncoder(cols=cols, return_df=True)
binary_df = binary.fit_transform(df)
logger.info("Binary Encoding Implemented!")
return binary_df
elif kind == 'Base N Encoder':
basen = ce.BaseNEncoder(cols=cols)
basen_df = basen.fit_transform(df)
logger.info("Base N Encoding Implemented!")
return basen_df
else:
logger.error("Wrong Input!")
except Exception as e:
logger.error(e)
@staticmethod
def balance_data(df, kind: str, target):
try:
if len(df[(df[target] == 0)]) >= len(df[(df[target] == 1)]):
df_majority = df[(df[target] == 0)]
df_minority = df[(df[target] == 1)]
else:
df_majority = df[(df[target] == 1)]
df_minority = df[(df[target] == 0)]
logger.info("Found Majority and Minority CLasses")
if kind == 'UnderSampling':
df_majority_undersampled = resample(df_majority,
replace=True,
n_samples=len(df_minority),
random_state=42)
logger.info("UnderSampling Implemented")
return pd.concat([df_majority_undersampled, df_minority])
elif kind == 'UpSampling':
df_minority_upsampled = resample(df_minority,
replace=True,
n_samples=len(df_majority),
random_state=42)
logger.info("UpSampling Implemented")
return | pd.concat([df_minority_upsampled, df_majority]) | pandas.concat |
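# A minimal usage sketch of the Preprocessing helpers above; the file path and the target
# column name are illustrative assumptions, not values from the original code.
df = Preprocessing.get_data('data/train.csv')
numerical = Preprocessing.col_seperator(df, 'Numerical_columns')
columns, values, mv_df = Preprocessing.missing_values(df)
balanced = Preprocessing.balance_data(df, kind='UpSampling', target='target')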
"""
Code for "How Is Earnings News Transmitted to Stock Prices?" by
<NAME> and <NAME>.
Python 2
The main function takes the TAS (Time and Sales) file for one exchange on one
month and extracts only the trades from daily files, creating trade files.
"""
from os import listdir
import os
import pandas as pd
from datetime import datetime
import gzip
import shutil
import hashlib
import sys
outdir = 'M:\\vgregoire\\TRTH_Trades\\'
# Checks the md5 has to make sure the raw file is not corrupted
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
# This function takes the TAS (Time and Sales) file for one exchange on one
# month and extracts only the trades from daily files, creating trade files.
def process_task(exch, y, m):
mdir = 'Y:\\' + exch + '\\TAS\\' + str(y) + '\\' + str(m).zfill(2) + '\\'
# List dates in the monthly directory
ls = listdir(mdir)
ls = [fn for fn in ls if fn[15:23] == 'TAS-Data' and fn.endswith('.gz')]
ls_df = | pd.DataFrame(ls, columns=['Filename']) | pandas.DataFrame |
# Regular Imports
from geojson.feature import *
from src.h3_utils import *
import geopandas as gpd
import pandas as pd
def generate_hourly_charges(charges):
# Create a unique identifier
charges['ID'] = [i for i in range(0, charges.shape[0])]
# Create dataframe by minutes in this datetime range
start = charges['start_time'].min()
end = charges['end_time'].max()
index = pd.date_range(start=start, end=end, freq='1T')
df2 = pd.DataFrame(index=index, columns=[
'minutes', 'ID', 'latitude', 'longitude', 'delta_soc', 'energy', 'start_soc'])
# Spread the events across minutes
for index, row in charges.iterrows():
df2['minutes'][row['start_time']:row['end_time']] = 1
df2['ID'][row['start_time']:row['end_time']] = row['ID']
df2['latitude'][row['start_time']:row['end_time']] = row['latitude']
df2['longitude'][row['start_time']:row['end_time']] = row['longitude']
df2['delta_soc'][row['start_time']:row['end_time']] = row['delta_soc']
df2['energy'][row['start_time']:row['end_time']] = row['energy']
df2['start_soc'][row['start_time']:row['end_time']] = row['start_soc']
# Clean up dataframe
df2 = df2[df2.ID.notna()]
df2['time'] = df2.index
df2['hour'] = df2['time'].apply(lambda x: x.hour)
# GroupBy ID and hour
df3 = df2.groupby(['ID', 'hour']).agg(
{'minutes': 'count', 'time': 'first', 'latitude': 'first', 'longitude': 'first', 'delta_soc': 'first',
'energy': 'first', 'start_soc': 'first'}).reset_index()
# Recreate time index
df3['time'] = df3['time'].apply(lambda x: | pd.datetime(year=x.year, month=x.month, day=x.day, hour=x.hour) | pandas.datetime |
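# pandas.datetime (the API completed above) is, as far as can be told, an alias for the
# standard-library datetime.datetime that newer pandas versions have removed; a minimal
# equivalent sketch using the standard library directly:
from datetime import datetime
example = pd.Timestamp('2021-03-05 14:37')
hour_floor = datetime(year=example.year, month=example.month, day=example.day, hour=example.hour)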
import pandas as pd
class PassHash:
def __init__(self):
# Combinations of header labels
self.base = ['Rk', 'Date', 'G#', 'Age', 'Tm', 'Home', 'Opp', 'Result', 'GS']
self.passing = ['pass_cmp', 'pass_att', 'Cmp%', 'pass_yds', 'pass_td', 'Int', 'Rate', 'Sk', 'Sk-Yds',
'pass_Y/A', 'AY/A']
self.rushing = ['rush_att', 'rush_yds', 'rush_Y/A', 'rush_TD']
self.rush_sk = ['rush_sk', 'tkl', 'Ast']
self.receiving = ['Rec_Tgt', 'Rec_Rec', 'Rec_Yds', 'Rec_Y/R', 'Rec_TD', 'Rec_Ctch%', 'Rec_Y/Tgt']
self.scoring2p = ['2pt']
self.scoring = ['Any_TD', 'Any_Pts']
self.punting = ['Pnt', 'Pnt_Yds', 'Y/P', 'Blck']
def md564b4c5df667e588d59b856ae9d724c7d(self, df):
cols = self.base + self.passing
# Rename columns
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.rushing + self.receiving + self.rush_sk + self.scoring2p +
self.scoring + self.punting)],axis=1)
# set all the new columns to zero
df.loc[:, self.rushing + self.receiving + self.rush_sk + self.scoring2p + self.scoring + self.punting] = 0
return df
def md5b06cd4dff23f7376af6a879f99cc5a1c(self, df):
# Rename columns
df.columns = self.base + self.passing + self.rushing
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.rush_sk + self.scoring2p + self.scoring + self.punting)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.rush_sk + self.scoring2p + self.scoring + self.punting] = 0
return df
def md5677c3564a754183605775bac5aba623d(self, df):
# rename colsums
cols = self.base + self.passing + self.rushing + self.scoring
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.rush_sk + self.scoring2p + self.punting)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.rush_sk + self.scoring2p + self.punting] = 0
return df
def md51609c51a70ab5e3d763c0d698e00eb16(self, df):
# Rename columns
cols = self.base + self.passing + self.rushing + self.rush_sk
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.scoring2p + self.scoring + self.punting)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.scoring2p + self.scoring + self.punting] = 0
return df
def md59d7339709d13dc7484e7090522596eda(self, df):
# Rename columns
cols = self.base + self.passing + self.rushing + self.punting
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.rush_sk + self.scoring2p + self.scoring)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.rush_sk + self.scoring2p + self.scoring] = 0
return df
def md547963026aa9103eea8203b6717da2caf(self, df):
cols = self.base + self.passing + self.rushing + self.scoring2p + self.scoring
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.rush_sk + self.punting)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.rush_sk + self.punting] = 0
return df
def md50feae896081ca775b997f372f93d1977(self, df):
cols = self.base + self.passing + self.rushing + self.rush_sk + self.scoring2p + self.scoring
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.punting)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.punting] = 0
return df
def md5366e35869d52cf189f6575ef82c562e1(self, df):
# Rename columns
cols = self.base + self.passing + self.rushing + self.scoring + self.rush_sk
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.scoring2p + self.punting)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.scoring2p + self.punting] = 0
return df
def md5c34721f06f1a5aad95fab7fc16577538(self, df):
# Rename columns
cols = self.base + self.passing + self.rushing + self.scoring + self.punting
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.receiving + self.scoring2p + self.rush_sk)], axis=1)
# set all the new columns to zero
df.loc[:, self.receiving + self.scoring2p + self.rush_sk] = 0
return df
def md5afa7cf6859400c6023d114abc175c24d(self, df):
# rename cols
cols = self.base + self.passing + self.rushing + self.receiving
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.rush_sk + self.scoring2p + self.scoring + self.punting)],
axis=1)
# set all the new columns to zero
df.loc[:, self.rush_sk + self.scoring2p + self.scoring + self.punting] = 0
return df
def md560befa83b7115d584e02dea9908a707d(self, df):
# pandas on travis loads the above md5afa7cf6859400c6023d114abc175c24d with 56 cols vs 31 that most users
# will get locally, so we create another lookup function with the hash of all 56 and slice it down to size.
df = df.iloc[:, :31]
# rename cols
cols = self.base + self.passing + self.rushing + self.receiving
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.rush_sk + self.scoring2p + self.scoring + self.punting)],
axis=1)
# set all the new columns to zero
df.loc[:, self.rush_sk + self.scoring2p + self.scoring + self.punting] = 0
return df
def md52451894bb088c27b0a02ad350e35b9ad(self, df):
# rename cols
cols = self.base + self.passing + self.rushing + self.receiving + self.scoring
df.columns = cols
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.rush_sk + self.scoring2p + self.punting)], axis=1)
# set all the new columns to zero
df.loc[:, self.rush_sk + self.scoring2p + self.punting] = 0
return df
def md5e22db471405382c6d4e868c4d29d9cb5(self, df):
# rename cols
cols = self.base + self.passing + self.rushing + self.receiving + self.scoring + self.rush_sk
df.columns = cols
# add missing cols
df = pd.concat([df, | pd.DataFrame(columns=self.punting + self.scoring2p) | pandas.DataFrame |
import pandas as pd
class FeatureExtractor():
def __init__(self):
pass
def fit(self, X_df, y):
pass
def transform(self, X_df):
X_df.index = range(len(X_df))
X_df_new = pd.concat(
[X_df.get(['instant_t', 'windspeed', 'latitude', 'longitude',
'hemisphere', 'Jday_predictor', 'initial_max_wind',
'max_wind_change_12h', 'dist2land']),
| pd.get_dummies(X_df.nature, prefix='nature', drop_first=True) | pandas.get_dummies |
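# A minimal, self-contained sketch of the pd.get_dummies call completed above; the
# 'nature' categories used here are assumptions for demonstration only.
example = pd.DataFrame({'nature': ['DS', 'TS', 'TS', 'NR']})
dummies = pd.get_dummies(example.nature, prefix='nature', drop_first=True)
# drop_first=True drops the first category level, leaving k-1 indicator columns
# (here 'nature_NR' and 'nature_TS'), which avoids perfect collinearity.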
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
# process_texas_7k_data.py: adaptation of the default Prescient script to process the
# Texas 7k data. Attempted to keep it general where possible to allow for the possibility
# of different data in the future.
# Author: <NAME>
# Email: <EMAIL>
# Date: July 27, 2021
import os
import pandas as pd
import numpy as np
import glob
# hard coded the zones here
tx_zones = ["Coast", "East", "Far_West", "North", "North_Central", "South_Central",
"South", "West"]
def gmlc_to_prescient(source, aggregate=False, forecast_error=False):
"""
This takes the Texas-7k time series data and
puts it into the format required for prescient
    :param source: options are WIND, PV, RTPV, or Hydro
    :param aggregate: Aggregate all sites within the file or not?
    :param forecast_error: if True, read actuals from the REAL_TIME file; otherwise the forecast values are reused as actuals
    :return: writes csv files of forecast/actual in prescient format
"""
# Find the files you want, forecast and actual:
data_folder = os.path.join(path,source)
print(data_folder)
forecast_file = glob.glob(os.path.join(data_folder,'DAY_AHEAD*'))[0]
actual_file = glob.glob(os.path.join(data_folder,'REAL_TIME*'))[0]
# Read in forecast data, identify site names, collect datetime
forecast = pd.read_csv(forecast_file)
site_list = forecast.columns.values.tolist()[4:forecast.shape[1]]
dt = pd.to_datetime({'year':forecast.Year, 'month':forecast.Month, 'day':forecast.Day, 'hour':forecast.Period})
# Read in actual data, create 5-min datetime, re-sample to hourly
if forecast_error == True:
"""
actual_raw = pd.read_csv(actual_file)
dt_5min = pd.date_range(dt[0], periods=actual_raw.shape[0], freq='5Min')
actual = actual_raw[site_list]
actual = actual.assign(time5min = dt_5min.values)
actual = actual.set_index('time5min')
actual = actual.resample('H').mean()
actual = actual.reset_index()
"""
actual = pd.read_csv(actual_file)
else:
actual = forecast.copy()
# If you want to combine all sites (or regions, whatever), write one file for all data:
if aggregate == True:
agg_forecast = forecast[site_list].sum(axis=1)
agg_actual = actual[site_list].sum(axis=1)
prescient_format = pd.DataFrame({'datetime': dt, 'forecasts': agg_forecast, 'actuals': agg_actual})
prescient_format = prescient_format[['datetime', 'forecasts', 'actuals']]
prescient_format.to_csv(os.path.join(write_path,source + '_forecasts_actuals' + '.csv'), index=False)
# If not, write separate files for each site
elif aggregate == False:
for site in site_list:
prescient_format = | pd.DataFrame({'datetime':dt, 'forecasts':forecast[site], 'actuals':actual[site]}) | pandas.DataFrame |
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.5** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # Assignment 3 - More Pandas
# This assignment requires more individual learning than the last one did - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.
# ### Question 1 (20%)
# Load the energy data from the file `Energy Indicators.xls`, which is a list of indicators of [energy supply and renewable electricity production](Energy%20Indicators.xls) from the [United Nations](http://unstats.un.org/unsd/environment/excel_file_tables/2013/Energy%20Indicators.xls) for the year 2013, and should be put into a DataFrame with the variable name of **energy**.
#
# Keep in mind that this is an Excel file, and not a comma separated values file. Also, make sure to exclude the footer and header information from the datafile. The first two columns are unnecessary, so you should get rid of them, and you should change the column labels so that the columns are:
#
# `['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable']`
#
# Convert `Energy Supply` to gigajoules (there are 1,000,000 gigajoules in a petajoule). For all countries which have missing data (e.g. data with "...") make sure this is reflected as `np.NaN` values.
#
# Rename the following list of countries (for use in later questions):
#
# ```"Republic of Korea": "South Korea",
# "United States of America": "United States",
# "United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
# "China, Hong Kong Special Administrative Region": "Hong Kong"```
#
# There are also several countries with numbers and/or parentheses in their name. Be sure to remove these,
#
# e.g.
#
# `'Bolivia (Plurinational State of)'` should be `'Bolivia'`,
#
# `'Switzerland17'` should be `'Switzerland'`.
#
# <br>
#
# Next, load the GDP data from the file `world_bank.csv`, which is a csv containing countries' GDP from 1960 to 2015 from [World Bank](http://data.worldbank.org/indicator/NY.GDP.MKTP.CD). Call this DataFrame **GDP**.
#
# Make sure to skip the header, and rename the following list of countries:
#
# ```"Korea, Rep.": "South Korea",
# "Iran, Islamic Rep.": "Iran",
# "Hong Kong SAR, China": "Hong Kong"```
#
# <br>
#
# Finally, load the [Scimago Journal and Country Rank data for Energy Engineering and Power Technology](http://www.scimagojr.com/countryrank.php?category=2102) from the file `scimagojr-3.xlsx`, which ranks countries based on their journal contributions in the aforementioned area. Call this DataFrame **ScimEn**.
#
# Join the three datasets: GDP, Energy, and ScimEn into a new dataset (using the intersection of country names). Use only the last 10 years (2006-2015) of GDP data and only the top 15 countries by Scimagojr 'Rank' (Rank 1 through 15).
#
# The index of this DataFrame should be the name of the country, and the columns should be ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations',
# 'Citations per document', 'H index', 'Energy Supply',
# 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008',
# '2009', '2010', '2011', '2012', '2013', '2014', '2015'].
#
# *This function should return a DataFrame with 20 columns and 15 entries.*
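# Below is a small, self-contained sketch (not part of the graded solution) of the country-name
# cleanup described above; the `raw_names` Series and the exact regex patterns are illustrative
# assumptions rather than code taken from the assignment files.
import pandas as pd
raw_names = pd.Series(['Bolivia (Plurinational State of)', 'Switzerland17'])
cleaned_names = (raw_names
                 .str.replace(r'\d+$', '', regex=True)        # drop trailing footnote digits
                 .str.replace(r'\s*\(.*\)$', '', regex=True)  # drop parenthetical qualifiers
                 .str.strip())
# cleaned_names now holds ['Bolivia', 'Switzerland']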
# In[14]:
import numpy as np
import pandas as pd
import re
def answer_one():
energy = pd.read_excel('Energy Indicators.xls',skiprows = 17, skipfooter = 38, na_values = "...")
energy.drop(energy.columns[[0,1]],axis=1,inplace=True)
energy.columns=['Country','Energy Supply','Energy Supply per Capita','% Renewable']
energy['Energy Supply']*=1000000
    energy['Country'] = energy['Country'].str.replace(r"[0-9()]+$", "", regex=True)
#energy['Country'] = energy['Country'].str.replace('()\d.+', '')
#energy['Country'] = energy['Country'].str.replace('()\(.+\)', '')
#energy['Country']=energy['Country'].str.replace('()\(.+\)|()\d.+', '')
lst_countries = {"Republic of Korea": "South Korea","United States of America": "United States",
"United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
"China, Hong Kong Special Administrative Region": "Hong Kong"}
for oldname in lst_countries.keys():
energy.loc[energy['Country']== oldname,'Country'] = lst_countries[oldname]
energy['Country'] = energy['Country'].apply(lambda x: re.split('\(',x)[0]).str.strip()
GDP = pd.read_csv('world_bank.csv',skiprows = 4)
GDP = GDP.rename(columns={'Country Name': 'Country'})
lst_countries2 = {"Korea, Rep.": "South Korea",
"Iran, Islamic Rep.": "Iran",
"Hong Kong SAR, China": "Hong Kong"}
for oldname in lst_countries2.keys():
GDP.loc[GDP['Country']== oldname,'Country'] = lst_countries2[oldname]
ScimEn = pd.read_excel("scimagojr-3.xlsx")
df_merge1=pd.merge(energy,GDP,on='Country')
df_merged_full=pd.merge(df_merge1,ScimEn,on='Country')
    df_full_sorted=df_merged_full.sort_values(['Rank'],ascending=True)
df_top15=df_full_sorted[df_full_sorted['Rank']<16]
column_names=[i for i in df_top15.columns.values]
column_years=[j for j in column_names if j.isdigit() ]
last_ten_years = df_top15[['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']]
result = pd.concat([df_top15.drop(column_years,axis = 1),last_ten_years],axis = 1).drop(['Country Code','Indicator Name','Indicator Code','Country'],axis=1)
result.index = df_top15['Country']
print(result.shape)
return result
answer_one()
# ### Question 2 (6.6%)
# The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?
#
# *This function should return a single number.*
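# A toy illustration (not the graded answer) of the counting idea behind this question: the number
# of entries "lost" is the row-count difference between an outer join and an inner join. The mini
# frames below are made-up stand-ins for energy/GDP/ScimEn.
import pandas as pd
_left = pd.DataFrame({'Country': ['A', 'B'], 'x': [1, 2]})
_right = pd.DataFrame({'Country': ['B', 'C'], 'y': [3, 4]})
_outer = pd.merge(_left, _right, on='Country', how='outer')
_inner = pd.merge(_left, _right, on='Country', how='inner')
_lost = len(_outer) - len(_inner)  # 3 - 1 = 2 rows fall outside the intersection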
# In[9]:
get_ipython().run_cell_magic('HTML', '', '<svg width="800" height="300">\n <circle cx="150" cy="180" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="blue" />\n <circle cx="200" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="red" />\n <circle cx="100" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="green" />\n <line x1="150" y1="125" x2="300" y2="150" stroke="black" stroke-width="2" fill="black" stroke-dasharray="5,3"/>\n <text x="300" y="165" font-family="Verdana" font-size="35">Everything but this!</text>\n</svg>')
# In[34]:
def answer_two():
energy = pd.read_excel('Energy Indicators.xls',skiprows = 17, skipfooter = 38, na_values = "...")
energy.drop(energy.columns[[0,1]],axis=1,inplace=True)
energy.columns=['Country','Energy Supply','Energy Supply per Capita','% Renewable']
energy['Energy Supply']*=1000000
    energy['Country'] = energy['Country'].str.replace(r"[0-9()]+$", "", regex=True)
#energy['Country'] = energy['Country'].str.replace('()\d.+', '')
#energy['Country'] = energy['Country'].str.replace('()\(.+\)', '')
#energy['Country']=energy['Country'].str.replace('()\(.+\)|()\d.+', '')
lst_countries = {"Republic of Korea": "South Korea","United States of America": "United States",
"United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
"China, Hong Kong Special Administrative Region": "Hong Kong"}
for oldname in lst_countries.keys():
energy.loc[energy['Country']== oldname,'Country'] = lst_countries[oldname]
energy['Country'] = energy['Country'].apply(lambda x: re.split('\(',x)[0]).str.strip()
GDP = pd.read_csv('world_bank.csv',skiprows = 4)
GDP = GDP.rename(columns={'Country Name': 'Country'})
lst_countries2 = {"Korea, Rep.": "South Korea",
"Iran, Islamic Rep.": "Iran",
"Hong Kong SAR, China": "Hong Kong"}
for oldname in lst_countries2.keys():
GDP.loc[GDP['Country']== oldname,'Country'] = lst_countries2[oldname]
ScimEn = pd.read_excel("scimagojr-3.xlsx")
df_merge1= | pd.merge(energy,GDP,on='Country') | pandas.merge |
import io
import numpy as np
import pytest
from pandas.compat._optional import VERSIONS
from pandas import (
DataFrame,
date_range,
read_csv,
read_excel,
read_feather,
read_json,
read_parquet,
read_pickle,
read_stata,
read_table,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
text = str(df1.to_csv(index=False)).encode()
@pytest.fixture
def cleared_fs():
fsspec = pytest.importorskip("fsspec")
memfs = fsspec.filesystem("memory")
yield memfs
memfs.store.clear()
def test_read_csv(cleared_fs):
with cleared_fs.open("test/test.csv", "wb") as w:
w.write(text)
df2 = read_csv("memory://test/test.csv", parse_dates=["dt"])
tm.assert_frame_equal(df1, df2)
def test_reasonable_error(monkeypatch, cleared_fs):
from fsspec import registry
from fsspec.registry import known_implementations
registry.target.clear()
with pytest.raises(ValueError, match="nosuchprotocol"):
read_csv("nosuchprotocol://test/test.csv")
err_msg = "test error message"
monkeypatch.setitem(
known_implementations,
"couldexist",
{"class": "unimportable.CouldExist", "err": err_msg},
)
with pytest.raises(ImportError, match=err_msg):
read_csv("couldexist://test/test.csv")
def test_to_csv(cleared_fs):
df1.to_csv("memory://test/test.csv", index=True)
df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@pytest.mark.parametrize("ext", ["xls", "xlsx"])
def test_to_excel(cleared_fs, ext):
if ext == "xls":
pytest.importorskip("xlwt")
else:
pytest.importorskip("openpyxl")
path = f"memory://test/test.{ext}"
df1.to_excel(path, index=True)
df2 = read_excel(path, parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@pytest.mark.parametrize("binary_mode", [False, True])
def test_to_csv_fsspec_object(cleared_fs, binary_mode):
fsspec = pytest.importorskip("fsspec")
path = "memory://test/test.csv"
mode = "wb" if binary_mode else "w"
fsspec_object = fsspec.open(path, mode=mode).open()
df1.to_csv(fsspec_object, index=True)
assert not fsspec_object.closed
fsspec_object.close()
mode = mode.replace("w", "r")
fsspec_object = fsspec.open(path, mode=mode).open()
df2 = read_csv(
fsspec_object,
parse_dates=["dt"],
index_col=0,
)
assert not fsspec_object.closed
fsspec_object.close()
tm.assert_frame_equal(df1, df2)
def test_csv_options(fsspectest):
df = DataFrame({"a": [0]})
df.to_csv(
"testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
)
assert fsspectest.test[0] == "csv_write"
read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"})
assert fsspectest.test[0] == "csv_read"
def test_read_table_options(fsspectest):
# GH #39167
df = DataFrame({"a": [0]})
df.to_csv(
"testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
)
assert fsspectest.test[0] == "csv_write"
read_table("testmem://test/test.csv", storage_options={"test": "csv_read"})
assert fsspectest.test[0] == "csv_read"
@pytest.mark.parametrize("extension", ["xlsx", "xls"])
def test_excel_options(fsspectest, extension):
if extension == "xls":
pytest.importorskip("xlwt")
else:
pytest.importorskip("openpyxl")
df = DataFrame({"a": [0]})
path = f"testmem://test/test.{extension}"
df.to_excel(path, storage_options={"test": "write"}, index=False)
assert fsspectest.test[0] == "write"
read_excel(path, storage_options={"test": "read"})
assert fsspectest.test[0] == "read"
@td.skip_if_no("fastparquet")
def test_to_parquet_new_file(cleared_fs):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df1.to_parquet(
"memory://test/test.csv", index=True, engine="fastparquet", compression=None
)
@td.skip_if_no("pyarrow", min_version="2")
def test_arrowparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df = DataFrame({"a": [0]})
df.to_parquet(
"testmem://test/test.csv",
engine="pyarrow",
compression=None,
storage_options={"test": "parquet_write"},
)
assert fsspectest.test[0] == "parquet_write"
read_parquet(
"testmem://test/test.csv",
engine="pyarrow",
storage_options={"test": "parquet_read"},
)
assert fsspectest.test[0] == "parquet_read"
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
@td.skip_if_no("fastparquet")
def test_fastparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df = DataFrame({"a": [0]})
df.to_parquet(
"testmem://test/test.csv",
engine="fastparquet",
compression=None,
storage_options={"test": "parquet_write"},
)
assert fsspectest.test[0] == "parquet_write"
read_parquet(
"testmem://test/test.csv",
engine="fastparquet",
storage_options={"test": "parquet_read"},
)
assert fsspectest.test[0] == "parquet_read"
@pytest.mark.single_cpu
@td.skip_if_no("s3fs")
def test_from_s3_csv(s3_resource, tips_file, s3so):
tm.assert_equal(
read_csv("s3://pandas-test/tips.csv", storage_options=s3so), read_csv(tips_file)
)
# the following are decompressed by pandas, not fsspec
tm.assert_equal(
| read_csv("s3://pandas-test/tips.csv.gz", storage_options=s3so) | pandas.read_csv |
#! /usr/bin/env python
from datetime import datetime, timedelta
import hb_config
import mwapi
from mwapi.errors import APIError
import requests
from requests_oauthlib import OAuth1
import pandas as pd
import json
#TODO
#encapsulate what's in MAIN
#pull hard-coded vals to hb_config
#docstrings
#rmv my dumb API function
#code from https://github.com/mediawiki-utilities/python-mwapi
def get_template_mems(template):
# If passed a `continuation` parameter, returns an iterable over a continued query.
# On each iteration, a new request is made for the next portion of the results.
continued = session.get(
formatversion=2,
action='query',
generator='transcludedin',
gtinamespace = "0",
gtiprop= "title",
gtishow = "!redirect",
titles= template,
gtilimit=500, # 100 results per request
continuation=True)
pages = []
try:
for portion in continued:
if 'query' in portion:
for page in portion['query']['pages']:
pages.append(page['title'])
else:
print("MediaWiki returned empty result batch.")
except APIError as error:
raise ValueError(
"MediaWiki returned an error:", str(error)
)
return pages
def api_call(endpoint, parameters): #I don't need this
try:
call = requests.get(endpoint, params=parameters)
response = call.json()
except:
response = None
return response
def get_latest_rev(page_title):
#https://en.wikipedia.org/w/api.php?action=parse&prop=sections&format=json&formatversion=2&page=Whidbey_Island
ENDPOINT = 'https://en.wikipedia.org/w/api.php'
params = {'action' : 'query',
'prop' : 'revisions',
'titles' : page_title,
'format' : 'json',
'formatversion' : 2,
}
page_data = api_call(ENDPOINT, params)
# pprint(page_data)
try:
latest_rev = page_data['query']['pages'][0]['revisions'][0]['revid']
except:
print("unable to retrieve latest revision for " + page_title)
latest_rev = None
return latest_rev
def get_quality_score(revision):
#https://ores.wikimedia.org/v3/scores/enwiki/866126465/wp10?features=true
ENDPOINT = 'https://ores.wikimedia.org/v3/scores/enwiki/'
params = {'models' : 'wp10',
'revids' : revision,
}
page_data = api_call(ENDPOINT, params)
# pprint(page_data)
try:
prediction = page_data['enwiki']['scores'][str(revision)]['wp10']['score']['prediction']
# print(prediction)
except:
print("unable to retrieve ORES score for " + str(revision))
prediction = None
return prediction
def get_pageviews(article_params):
#sample https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia.org/all-access/user/Zeng_Guang/daily/20200314/20200314
q_template= "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia.org/all-access/user/{title}/daily/{startdate}/{enddate}"
q_string = q_template.format(**article_params)
# print(q_string)
# r = requests.get(q_string)
r = requests.get(
url = q_string,
headers = {'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, <EMAIL>"},
)
# print(r.headers)
# print(r.text)
# print(r.url)
response = r.json()
# print(response)
try:
views = response['items'][0]['views']
except:
views = None
return views
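# Hypothetical usage of get_pageviews, kept as a comment so the script's behaviour is unchanged.
# The keys mirror the format string above; the title and dates are made-up examples:
#   example_params = {'title': 'Whidbey_Island', 'startdate': '20200314', 'enddate': '20200314'}
#   views = get_pageviews(example_params)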
def get_yesterdates():
"""
Returns month, day year for yesterday; month and day for day before
"""
date_parts = {'year': datetime.strftime(datetime.now() - timedelta(1), '%Y'),
'month' : datetime.strftime(datetime.now() - timedelta(1), '%m'),
'day': datetime.strftime(datetime.now() - timedelta(1), '%d'),
'month2' : datetime.strftime(datetime.now() - timedelta(2), '%m'),
'day2': datetime.strftime(datetime.now() - timedelta(2), '%d'),
}
return date_parts
def get_total_pageviews(df, column_key):
"""
Return sum of numeric column from dataframe based on column title
"""
total_views = df[column_key].sum()
return total_views
def format_row(rank, title, views, prediction, row_template):
table_row = {'view rank': rank,
'title' : title.replace("_"," "),
'views' : views,
'prediction' : prediction,
}
row = row_template.format(**table_row)
# print(row)
return(row)
def get_token(auth1):
"""
Accepts an auth object for a user
Returns an edit token for the specified wiki
"""
result = requests.get(
url="https://en.wikipedia.org/w/api.php", #TODO add to config
params={
'action': "query",
'meta': "tokens",
'type': "csrf",
'format': "json"
},
headers={'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, <EMAIL>"}, #TODO add to config
auth=auth1,
).json()
# print(result)
edit_token = result['query']['tokens']['csrftoken']
# print(edit_token)
return(edit_token)
def publish_report(output, edit_sum, auth1, edit_token):
"""
Accepts the page text, credentials and edit token
Publishes the formatted page text to the specified wiki
"""
response = requests.post(
url = "https://en.wikipedia.org/w/api.php", #TODO add to config
data={
'action': "edit",
'title': "Wikipedia:WikiProject_COVID-19/Article_report", #TODO add to config
'section': "1",
# 'summary': edit_sum,
'summary': edit_sum,
'text': output,
'bot': 1,
'token': edit_token,
'format': "json"
},
headers={'User-Agent': "<EMAIL>"}, #TODO add to config
auth=auth1
)
if __name__ == "__main__":
auth1 = OAuth1("b5d87cbe96174f9435689a666110159c",
hb_config.client_secret,
"<KEY>",
hb_config.resource_owner_secret)
session = mwapi.Session('https://en.wikipedia.org/', user_agent="hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, <EMAIL>") #add ua to config
#get yesterday's date info for queries and reporting
date_parts = get_yesterdates()
cat = 'Template:COVID-19_pandemic'
mems = get_template_mems(cat)
# print(mems)
#put these in a dataframe
df_pandemic = | pd.DataFrame(mems) | pandas.DataFrame |
"""
this is a compilation of functions to analyse BEAM-related data for the NYC simulation
"""
from urllib.error import HTTPError
import matplotlib.pyplot as plt
import numpy as np
import time
import datetime as dt
import pandas as pd
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from io import StringIO
def get_output_path_from_s3_url(s3_url):
"""
    transform an s3 output url (from the beam runs spreadsheet) into an s3 output path
    that may be used as part of the path to a file, e.g.
s3path = get_output_path_from_s3_url(s3url)
beam_log_path = s3path + '/beamLog.out'
"""
return s3_url \
.strip() \
.replace("s3.us-east-2.amazonaws.com/beam-outputs/index.html#", "beam-outputs.s3.amazonaws.com/")
def parse_config(s3url, complain=True):
"""
    parse the beam config of a beam run.
    :param s3url: url to s3 output
    :param complain: if True, print a complaint when multiple different values are found for the same config key
:return: dictionary config key -> config value
"""
s3path = get_output_path_from_s3_url(s3url)
url = s3path + "/fullBeamConfig.conf"
config = urllib.request.urlopen(url)
config_keys = ["flowCapacityFactor", "speedScalingFactor", "quick_fix_minCarSpeedInMetersPerSecond",
"activitySimEnabled", "transitCapacity",
"minimumRoadSpeedInMetersPerSecond", "fractionOfInitialVehicleFleet",
"agentSampleSizeAsFractionOfPopulation",
"simulationName", "directory", "generate_secondary_activities", "lastIteration",
"fractionOfPeopleWithBicycle",
"parkingStallCountScalingFactor", "parkingPriceMultiplier", "parkingCostScalingFactor", "queryDate",
"transitPrice", "transit_crowding", "transit_crowding_percentile",
"maxLinkLengthToApplySpeedScalingFactor", "max_destination_distance_meters",
"max_destination_choice_set_size",
"transit_crowding_VOT_multiplier", "transit_crowding_VOT_threshold",
"activity_file_path", "intercept_file_path", "additional_trip_utility",
"ModuleProbability_1", "ModuleProbability_2", "ModuleProbability_3", "ModuleProbability_4",
"BUS-DEFAULT", "RAIL-DEFAULT", "SUBWAY-DEFAULT"]
intercept_keys = ["bike_intercept", "car_intercept", "drive_transit_intercept", "ride_hail_intercept",
"ride_hail_pooled_intercept", "ride_hail_transit_intercept", "walk_intercept",
"walk_transit_intercept", "transfer"]
config_map = {}
default_value = ""
for conf_key in config_keys:
config_map[conf_key] = default_value
def set_value(key, line_value):
value = line_value.strip().replace("\"", "")
if key not in config_map:
config_map[key] = value
else:
old_val = config_map[key]
if old_val == default_value or old_val.strip() == value.strip():
config_map[key] = value
else:
if complain:
print("an attempt to rewrite config value with key:", key)
print(" value in the map \t", old_val)
print(" new rejected value\t", value)
physsim_names = ['JDEQSim', 'BPRSim', 'PARBPRSim', 'CCHRoutingAssignment']
def look_for_physsim_type(config_line):
for physsim_name in physsim_names:
if 'name={}'.format(physsim_name) in config_line:
set_value("physsim_type", "physsim_type = {}".format(physsim_name))
for b_line in config.readlines():
line = b_line.decode("utf-8").strip()
look_for_physsim_type(line)
for ckey in config_keys:
if ckey + "=" in line or ckey + "\"=" in line or '"' + ckey + ":" in line:
set_value(ckey, line)
for ikey in intercept_keys:
if ikey in line:
set_value(ikey, line)
return config_map
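# Hypothetical usage of parse_config (the s3url below is a placeholder, not a real run); kept as a
# comment so that importing this module stays side-effect free:
#   cfg = parse_config("https://s3.us-east-2.amazonaws.com/beam-outputs/index.html#output/some-run")
#   print(cfg.get("lastIteration"), cfg.get("flowCapacityFactor"))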
def get_from_s3(s3url, file_name,
s3_additional_output='scripts_output'):
s3path = get_output_path_from_s3_url(s3url)
path = "{}/{}/{}".format(s3path, s3_additional_output, file_name)
df = None
try:
df = pd.read_csv(path, low_memory=False)
except HTTPError:
print('File does not exist by path:', path)
return df
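# Hypothetical usage of get_from_s3 (the file name follows the pattern used further below in
# calculate_ridership_and_fake_walkers_for_s3urls):
#   walkers = get_from_s3(s3url, "0.fake_real_walkers.csv.gz")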
def plot_fake_real_walkers(title, fake_walkers, real_walkers, threshold):
fig, axs = plt.subplots(2, 2, figsize=(24, 4 * 2))
fig.tight_layout()
fig.subplots_adjust(wspace=0.05, hspace=0.2)
fig.suptitle(title, y=1.11)
ax1 = axs[0, 0]
ax2 = axs[0, 1]
fake_walkers['length'].hist(bins=50, ax=ax1, alpha=0.3, label='fake walkers')
real_walkers['length'].hist(bins=50, ax=ax1, alpha=0.3, label='real walkers')
ax1.legend(loc='upper right', prop={'size': 10})
ax1.set_title("Trip length histogram. Fake vs Real walkers. Min length of trip is {0}".format(threshold))
ax1.axvline(5000, color="black", linestyle="--")
fake_walkers['length'].hist(bins=50, ax=ax2, log=True, alpha=0.3, label='fake walkers')
real_walkers['length'].hist(bins=50, ax=ax2, log=True, alpha=0.3, label='real walkers')
ax2.legend(loc='upper right', prop={'size': 10})
ax2.set_title(
"Trip length histogram. Fake vs Real walkers. Logarithmic scale. Min length of trip is {0}".format(threshold))
ax2.axvline(5000, color="black", linestyle="--")
ax1 = axs[1, 0]
ax2 = axs[1, 1]
long_real_walkers = real_walkers[real_walkers['length'] >= threshold]
number_of_top_alternatives = 5
walkers_by_alternative = long_real_walkers.groupby('availableAlternatives')['length'].count().sort_values(
ascending=False)
top_alternatives = set(
walkers_by_alternative.reset_index()['availableAlternatives'].head(number_of_top_alternatives))
for alternative in top_alternatives:
label = str(list(set(alternative.split(':')))).replace('\'', '')[1:-1]
selected = long_real_walkers[long_real_walkers['availableAlternatives'] == alternative]['length']
selected.hist(bins=50, ax=ax1, alpha=0.4, linewidth=4, label=label)
selected.hist(bins=20, ax=ax2, log=True, histtype='step', linewidth=4, label=label)
ax1.set_title("Length histogram of top {} alternatives of real walkers".format(number_of_top_alternatives))
ax1.legend(loc='upper right', prop={'size': 10})
ax2.set_title(
"Length histogram of top {} alternatives of real walkers. Logarithmic scale".format(number_of_top_alternatives))
ax2.legend(loc='upper right', prop={'size': 10})
def get_fake_real_walkers(s3url, iteration, threshold=2000):
s3path = get_output_path_from_s3_url(s3url)
events_file_path = s3path + "/ITERS/it.{0}/{0}.events.csv.gz".format(iteration)
start_time = time.time()
modechoice = pd.concat([df[(df['type'] == 'ModeChoice') | (df['type'] == 'Replanning')]
for df in pd.read_csv(events_file_path, low_memory=False, chunksize=100000)])
print("events file url:", events_file_path)
print("loading took %s seconds" % (time.time() - start_time))
count_of_replanning = modechoice[modechoice['type'] == 'Replanning'].shape[0]
modechoice = modechoice[modechoice['type'] == 'ModeChoice']
count_of_modechouces = len(modechoice) - count_of_replanning
walk_modechoice = modechoice[modechoice['mode'] == 'walk'].copy()
def is_real(row):
if row['length'] < threshold:
return True
alternatives = set(row['availableAlternatives'].split(':'))
if len(alternatives) == 0:
print('+1')
return False
if len(alternatives) == 1 and ('WALK' in alternatives or 'NaN' in alternatives):
return False
return True
walk_modechoice[['availableAlternatives']] = walk_modechoice[['availableAlternatives']].fillna('NaN')
walk_modechoice['isReal'] = walk_modechoice.apply(is_real, axis=1)
fake_walkers = walk_modechoice[~walk_modechoice['isReal']]
real_walkers = walk_modechoice[walk_modechoice['isReal']]
plot_fake_real_walkers(s3url, fake_walkers, real_walkers, threshold)
columns = ['real_walkers', 'real_walkers_ratio', 'fake_walkers', 'fake_walkers_ratio', 'total_modechoice']
values = [len(real_walkers), len(real_walkers) / count_of_modechouces,
len(fake_walkers), len(fake_walkers) / count_of_modechouces, count_of_modechouces]
walkers = pd.DataFrame(np.array([values]), columns=columns)
return walkers
def save_to_s3(s3url, df, file_name,
aws_access_key_id, aws_secret_access_key,
output_bucket='beam-outputs', s3_additional_output='scripts_output'):
import boto3
s3 = boto3.resource('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
require_string = 'index.html#'
if require_string not in s3url:
print(
's3url does not contain "{}". That means there is no way to save df. Cancelled.'.format(
require_string))
else:
df.to_csv(file_name)
folder_path = s3url.split('#')[1].strip()
out_path = "{}/{}/{}".format(folder_path, s3_additional_output, file_name)
s3.meta.client.upload_file(file_name, output_bucket, out_path)
print('saved to s3: ', out_path)
def read_traffic_counts(df):
df['date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, "%m/%d/%Y"))
df['hour_0'] = df['12:00-1:00 AM']
df['hour_1'] = df['1:00-2:00AM']
df['hour_2'] = df['2:00-3:00AM']
    df['hour_3'] = df['3:00-4:00AM']
    df['hour_4'] = df['4:00-5:00AM']
    df['hour_5'] = df['5:00-6:00AM']
    df['hour_6'] = df['6:00-7:00AM']
    df['hour_7'] = df['7:00-8:00AM']
    df['hour_8'] = df['8:00-9:00AM']
df['hour_9'] = df['9:00-10:00AM']
df['hour_10'] = df['10:00-11:00AM']
df['hour_11'] = df['11:00-12:00PM']
df['hour_12'] = df['12:00-1:00PM']
df['hour_13'] = df['1:00-2:00PM']
df['hour_14'] = df['2:00-3:00PM']
df['hour_15'] = df['3:00-4:00PM']
df['hour_16'] = df['4:00-5:00PM']
df['hour_17'] = df['5:00-6:00PM']
df['hour_18'] = df['6:00-7:00PM']
df['hour_19'] = df['7:00-8:00PM']
df['hour_20'] = df['8:00-9:00PM']
df['hour_21'] = df['9:00-10:00PM']
df['hour_22'] = df['10:00-11:00PM']
df['hour_23'] = df['11:00-12:00AM']
df = df.drop(['Date', '12:00-1:00 AM', '1:00-2:00AM', '2:00-3:00AM', '3:00-4:00AM', '4:00-5:00AM', '5:00-6:00AM',
'6:00-7:00AM', '7:00-8:00AM', '8:00-9:00AM',
'9:00-10:00AM', '10:00-11:00AM', '11:00-12:00PM', '12:00-1:00PM', '1:00-2:00PM', '2:00-3:00PM',
'3:00-4:00PM', '4:00-5:00PM', '5:00-6:00PM',
'6:00-7:00PM', '7:00-8:00PM', '8:00-9:00PM', '9:00-10:00PM', '10:00-11:00PM', '11:00-12:00AM'],
axis=1)
return df
def aggregate_per_hour(traffic_df, date):
wednesday_df = traffic_df[traffic_df['date'] == date]
agg_df = wednesday_df.groupby(['date']).sum()
agg_list = []
for i in range(0, 24):
xs = [i, agg_df['hour_%d' % i][0]]
agg_list.append(xs)
return pd.DataFrame(agg_list, columns=['hour', 'count'])
def plot_traffic_count(date):
# https://data.cityofnewyork.us/Transportation/Traffic-Volume-Counts-2014-2018-/ertz-hr4r
path_to_csv = 'https://data.cityofnewyork.us/api/views/ertz-hr4r/rows.csv?accessType=DOWNLOAD'
df = read_traffic_counts(pd.read_csv(path_to_csv))
agg_per_hour_df = aggregate_per_hour(df, date)
agg_per_hour_df.plot(x='hour', y='count', title='Date is %s' % date)
def get_volume_reference_values():
nyc_volumes_benchmark_date = '2018-04-11'
nyc_volumes_benchmark_raw = read_traffic_counts(
pd.read_csv('https://data.cityofnewyork.us/api/views/ertz-hr4r/rows.csv?accessType=DOWNLOAD'))
nyc_volumes_benchmark = aggregate_per_hour(nyc_volumes_benchmark_raw, nyc_volumes_benchmark_date)
return nyc_volumes_benchmark
def plot_simulation_volumes_vs_bench(s3url, iteration, ax, title="Volume SUM comparison with reference.",
simulation_volumes=None, s3path=None, nyc_volumes_reference_values=None):
if s3path is None:
s3path = get_output_path_from_s3_url(s3url)
if nyc_volumes_reference_values is None:
nyc_volumes_reference_values = get_volume_reference_values()
def calc_sum_of_link_stats(link_stats_file_path, chunksize=100000):
start_time = time.time()
df = pd.concat([df.groupby('hour')['volume'].sum() for df in
pd.read_csv(link_stats_file_path, low_memory=False, chunksize=chunksize)])
df = df.groupby('hour').sum().to_frame(name='sum')
# print("link stats url:", link_stats_file_path)
print("link stats downloading and calculation took %s seconds" % (time.time() - start_time))
return df
if simulation_volumes is None:
linkstats_path = s3path + "/ITERS/it.{0}/{0}.linkstats.csv.gz".format(iteration)
simulation_volumes = calc_sum_of_link_stats(linkstats_path)
color_reference = 'tab:red'
color_volume = 'tab:green'
ax1 = ax
ax1.set_title('{} iter {}'.format(title, iteration))
ax1.set_xlabel('hour of day')
ax1.plot(range(0, 24), nyc_volumes_reference_values['count'], color=color_reference, label="reference")
ax1.plot(np.nan, color=color_volume, label="simulation volume") # to have both legends on same axis
ax1.legend(loc="upper right")
ax1.xaxis.set_ticks(np.arange(0, 24, 1))
ax1.tick_params(axis='y', labelcolor=color_reference)
volume_per_hour = simulation_volumes[0:23]['sum']
volume_hours = list(volume_per_hour.index)
shifted_hours = list(map(lambda x: x + 1, volume_hours))
ax12 = ax1.twinx() # to plot things on the same graph but with different Y axis
ax12.plot(shifted_hours, volume_per_hour, color=color_volume)
ax12.tick_params(axis='y', labelcolor=color_volume)
return simulation_volumes
# index is hour
nyc_activity_ends_reference = [0.010526809, 0.007105842, 0.003006647, 0.000310397, 0.011508960, 0.039378258,
0.116178879, 0.300608907, 0.301269741, 0.214196234, 0.220456846, 0.237608230,
0.258382041, 0.277933413, 0.281891163, 0.308248524, 0.289517677, 0.333402259,
0.221353890, 0.140322664, 0.110115403, 0.068543370, 0.057286657, 0.011845660]
def plot_activities_ends_vs_bench(s3url, iteration, ax, ax2=None, title="Activity ends comparison.", population_size=1,
activity_ends=None, s3path=None):
if s3path is None:
s3path = get_output_path_from_s3_url(s3url)
def load_activity_ends(events_file_path, chunksize=100000):
start_time = time.time()
try:
df = pd.concat([df[df['type'] == 'actend']
for df in pd.read_csv(events_file_path, low_memory=False, chunksize=chunksize)])
except HTTPError:
raise NameError('can not download file by url:', events_file_path)
df['hour'] = (df['time'] / 3600).astype(int)
print("activity ends loading took %s seconds" % (time.time() - start_time))
return df
if activity_ends is None:
events_path = s3path + "/ITERS/it.{0}/{0}.events.csv.gz".format(iteration)
activity_ends = load_activity_ends(events_path)
color_act_ends = 'tab:blue'
ax.set_title('{} iter {} [{} total act ends]'.format(title, iteration, activity_ends.shape[0]))
ax.set_xlabel('hour of day')
ax.xaxis.set_ticks(np.arange(0, 24, 1))
act_ends_24 = activity_ends[activity_ends['hour'] <= 24].copy()
act_ends_total = act_ends_24.groupby('hour')['hour'].count() / population_size
act_ends_hours = list(act_ends_total.index)
def plot_act_ends(ax_to_plot, act_type):
df = act_ends_24[act_ends_24['actType'] == act_type].groupby('hour')['hour'].count() / population_size
ax_to_plot.plot(df.index, df, label='# of {} ends'.format(act_type))
def plot_benchmark_and_legend(ax_to_plot):
color_benchmark = 'black'
ax_to_plot.plot(np.nan, color=color_benchmark,
label='benchmark (right scale)') # to have both legends on same axis
ax_to_plot.legend(loc="upper right")
ax_to_plot.tick_params(axis='y', labelcolor=color_act_ends)
ax_twinx = ax_to_plot.twinx() # to plot things on the same graph but with different Y axis
ax_twinx.plot(range(0, 24), nyc_activity_ends_reference, color=color_benchmark)
ax_twinx.tick_params(axis='y', labelcolor=color_benchmark)
ax.plot(act_ends_hours, act_ends_total, color=color_act_ends, label='# of activity ends', linewidth=3)
plot_act_ends(ax, 'Work')
plot_act_ends(ax, 'Home')
plot_benchmark_and_legend(ax)
if ax2 is not None:
ax2.set_title('other activities')
ax2.set_xlabel('hour of day')
ax2.xaxis.set_ticks(np.arange(0, 24, 1))
plot_act_ends(ax2, 'Meal')
plot_act_ends(ax2, 'SocRec')
plot_act_ends(ax2, 'Shopping')
plot_act_ends(ax2, 'Other')
plot_benchmark_and_legend(ax2)
return activity_ends
def plot_volumes_comparison_on_axs(s3url, iteration, suptitle="", population_size=1,
simulation_volumes=None, activity_ends=None,
plot_simulation_volumes=True, plot_activities_ends=True):
fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(25, 7))
fig1.tight_layout(pad=0.1)
fig1.subplots_adjust(wspace=0.25, hspace=0.1)
plt.xticks(np.arange(0, 24, 2))
plt.suptitle(suptitle, y=1.05, fontsize=17)
if plot_simulation_volumes:
plot_simulation_volumes_vs_bench(s3url, iteration=iteration, ax=ax1,
title="Volume SUM comparison with benchmark.",
simulation_volumes=simulation_volumes)
if plot_activities_ends:
plot_activities_ends_vs_bench(s3url, iteration=iteration, ax=ax2, title="Activity ends comparison.",
population_size=population_size, activity_ends=activity_ends)
def read_nyc_ridership_counts_absolute_numbers_for_mta_comparison(s3url, iteration=0):
holland_tunnel = {1110292, 1110293, 1110294, 1110295, 540918, 540919, 782080, 782081}
linkoln_tunnel = {1057628, 1057629, 1057630, 1057631, 308, 309, 817812, 817813, 817814, 817815, 87180, 87181}
george_washingtone_bridge = {735454, 735455, 767820, 767821, 781014, 781015, 781086, 781087, 781156, 781157, 782128,
782129, 796856, 796857, 796858, 796859, 796870, 796871, 866324, 866325, 87174, 87175,
87176, 87177, 88110, 88111, 886008, 886009, 968272, 968273, 781094, 781095}
henry_hudson_bridge = {1681043, 1681042, 542015, 542014, 88230, 88231}
robert_f_kennedy_bridge = {1235912, 1235913, 1247588, 1247589, 21094, 21095, 23616, 23617, 29774, 29775, 30814,
30815, 763932, 763933, 782436, 782437, 782438, 782439, 782440, 782441, 782560, 782561,
782570, 782571, 782702, 782703, 782706, 782707, 782708, 782709, 782718, 782719, 870348,
870349, 782720, 782721, 782722, 782723, 782724, 782725, 782726, 782727, 782728, 782729,
782914, 782915, 853900, 853901, 1230075, 1233314, 1233315, 1299262, 1299263, 1299264,
1299265, 1299266, 1299267, 1299268, 1299269, 1299274, 1299275, 1299278, 1299279, 958834,
958835, 958836, 958837, 916655, 1041132, 1041133, 1078046, 1078047, 1078048, 1078049,
1078050, 1078051, 1078052, 1078053, 1078056, 1078057, 1078058, 1078059, 1078060, 1078061,
1089632, 1089633, 1089634, 1089635, 1101864, 1101865, 1101866, 1101867, 1230068, 1230069,
1230070, 1230071, 1230072, 1230073, 1230074, 916652, 916653, 916654, 757589, 757588,
853929, 853928, 779898, 779899, 1339888, 1339889, 1339890, 1339891, 1433020, 1433021,
154, 155, 731748, 731749, 731752, 731753, 731754, 731755, 731766, 731767, 731768, 731769,
731770, 731771, 731786, 731787, 853892, 853893, 868400, 868401, 868410, 868411}
queens_midtown_tunnel = {1367889, 1367888, 487778, 487779}
hugh_l_carey_tunnel = {1071576, 1071577, 1109400, 1109401, 13722, 13723, 1658828, 1658829, 19836, 19837}
bronx_whitestone_bridge = {62416, 62417, 729848, 729849, 765882, 765883, 853914, 853915}
throgs_neck_bridge = {1090614, 1090615, 1090616, 1090617, 1090618, 1090619, 765880, 765881}
varrazzano_narrows_bridge = {788119, 788118, 1341065, 1341064, 788122, 788123, 788140, 788141}
marine_parkwaygil_hodges_memorial_bridge = {1750240, 1750241, 53416, 53417, 732358, 732359, 761184, 761185, 761186,
761187, 793744, 793745}
cross_bay_veterans_memorial_bridge = {1139186, 1139187, 1139198, 1139199, 1139200, 1139201, 1139208, 1139209,
1139214, 1139215, 1139222, 1139223, 1139300, 1139301, 1139302, 1139303,
1517804, 1517805, 1517806, 1517807, 1517808, 1517809, 1743514, 1743515,
1749330, 1749331, 1749332, 1749333, 48132, 48133, 51618, 51619, 51620, 51621,
59452, 59453, 68364, 68365, 793786, 793787, 865036, 865037, 865060, 865061,
865062, 865063, 953766, 953767, 953768, 953769, 999610, 999611, 999626,
999627, 999628, 999629, 1297379}
mta_briges_tunnels_links = holland_tunnel \
.union(linkoln_tunnel) \
.union(george_washingtone_bridge) \
.union(henry_hudson_bridge) \
.union(robert_f_kennedy_bridge) \
.union(queens_midtown_tunnel) \
.union(hugh_l_carey_tunnel) \
.union(bronx_whitestone_bridge) \
.union(throgs_neck_bridge) \
.union(varrazzano_narrows_bridge) \
.union(marine_parkwaygil_hodges_memorial_bridge) \
.union(cross_bay_veterans_memorial_bridge)
s3path = get_output_path_from_s3_url(s3url)
events_file_path = "{0}/ITERS/it.{1}/{1}.events.csv.gz".format(s3path, iteration)
columns = ['type', 'person', 'vehicle', 'vehicleType', 'links', 'time', 'driver']
pte = pd.concat([df[(df['type'] == 'PersonEntersVehicle') | (df['type'] == 'PathTraversal')][columns]
for df in pd.read_csv(events_file_path, chunksize=100000, low_memory=False)])
print('read pev and pt events of shape:', pte.shape)
pev = pte[(pte['type'] == 'PersonEntersVehicle')][['type', 'person', 'vehicle', 'time']]
pte = pte[(pte['type'] == 'PathTraversal')][['type', 'vehicle', 'vehicleType', 'links', 'time', 'driver']]
walk_transit_modes = {'BUS-DEFAULT', 'RAIL-DEFAULT', 'SUBWAY-DEFAULT'}
drivers = set(pte[pte['vehicleType'].isin(walk_transit_modes)]['driver'])
pev = pev[~pev['person'].isin(drivers)]
def get_gtfs_agency(row):
veh_id = row['vehicle'].split(":")
if len(veh_id) > 1:
agency = veh_id[0]
return agency
return ""
def car_by_mta_bridges_tunnels(row):
if pd.isnull(row['links']):
return False
for link_str in row['links'].split(","):
link = int(link_str)
if link in mta_briges_tunnels_links:
return True
return False
pte['carMtaRelated'] = pte.apply(car_by_mta_bridges_tunnels, axis=1)
pte['gtfsAgency'] = pte.apply(get_gtfs_agency, axis=1)
vehicle_info = pte.groupby('vehicle')[['vehicleType', 'gtfsAgency']].first().reset_index()
pev_advanced = pd.merge(pev, vehicle_info, on='vehicle')
pev_advanced = pev_advanced.sort_values('time', ignore_index=True)
gtfs_agency_to_count = pev_advanced.groupby('gtfsAgency')['person'].count()
# calculate car
car_mode = {'Car', 'Car-rh-only', 'PHEV', 'BUS-DEFAULT'}
car_mta_related = pte[(pte['vehicleType'].isin(car_mode)) &
(pte['carMtaRelated'])]['time'].count()
transit_car_to_count = gtfs_agency_to_count.append(pd.Series([car_mta_related], index=['Car']))
# calculate subway
person_pevs = pev_advanced.groupby('person').agg(list)[['vehicleType', 'gtfsAgency']]
def calc_number_of_subway_trips(row):
vehicle_list = row['vehicleType']
count_of_trips = 0
last_was_subway = False
for vehicle in vehicle_list:
if vehicle == 'SUBWAY-DEFAULT':
if not last_was_subway:
count_of_trips = count_of_trips + 1
last_was_subway = True
else:
last_was_subway = False
return count_of_trips
person_pevs['subway_trips'] = person_pevs.apply(calc_number_of_subway_trips, axis=1)
subway_trips = person_pevs['subway_trips'].sum()
triptype_to_count = transit_car_to_count.append(pd.Series([subway_trips], index=['Subway']))
triptype_to_count = triptype_to_count.to_frame().reset_index()
print('calculated:\n', pev_advanced.groupby('vehicleType')['person'].count())
return triptype_to_count
def calculate_nyc_ridership_and_save_to_s3_if_not_calculated(s3url, iteration, aws_access_key_id, aws_secret_access_key,
force=False, output_bucket='beam-outputs'):
if force:
print('"force" set to True, so, ridership will be recalculated independent of it existence in s3')
else:
print('"force" set to False (by default) so, ridership will be calculated only if it does not exist in s3')
import boto3
s3 = boto3.resource('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
s3_additional_output = 'scripts_output'
ridership = None
require_string = 'index.html#'
if require_string not in s3url:
print(
's3url does not contain "{}". That means there is no way to save result of the function. Calculation '
'cancelled.'.format(
require_string))
else:
ridership_file_name = '{}.nyc_mta_ridership.csv.gz'.format(iteration)
folder_path = s3url.split('#')[1].strip()
s3path = get_output_path_from_s3_url(s3url)
path = "{}/{}/{}".format(s3path, s3_additional_output, ridership_file_name)
def calculate():
print("Ridership calculation...")
ridership_df = read_nyc_ridership_counts_absolute_numbers_for_mta_comparison(s3url, iteration)
ridership_df.to_csv(ridership_file_name)
out_path = "{}/{}/{}".format(folder_path, s3_additional_output, ridership_file_name)
s3.meta.client.upload_file(ridership_file_name, output_bucket, out_path)
print('\nuploaded\nto: backet {}, path {}\n\n'.format(output_bucket, out_path))
return ridership_df
if force:
ridership = calculate()
else:
try:
ridership = pd.read_csv(path, low_memory=False)
print("file exist with path '{}'".format(path))
except HTTPError:
print("Looks like file does not exits with path '{}'".format(path))
ridership = calculate()
return ridership
def calculate_ridership_and_fake_walkers_for_s3urls(s3urls, iteration, aws_access_key_id, aws_secret_access_key):
for s3url in s3urls:
print(s3url)
ridership = calculate_nyc_ridership_and_save_to_s3_if_not_calculated(s3url, iteration, aws_access_key_id,
aws_secret_access_key)
print('ridership done\n')
for s3url in s3urls:
print(s3url)
fake_walkers_file_name = "{}.fake_real_walkers.csv.gz".format(iteration)
walkers = get_from_s3(s3url, fake_walkers_file_name)
if walkers is None:
walkers = get_fake_real_walkers(s3url, iteration)
save_to_s3(s3url, walkers, fake_walkers_file_name, aws_access_key_id, aws_secret_access_key)
else:
print('file {} already exist for url {}'.format(fake_walkers_file_name, s3url))
print(walkers)
def read_nyc_gtfs_trip_id_to_route_id():
base_path = "https://beam-outputs.s3.us-east-2.amazonaws.com/new_city/newyork/gtfs_trips_only_per_agency/"
files = ['MTA_Bronx_20200121_trips.csv.gz', 'MTA_Brooklyn_20200118_trips.csv.gz',
'MTA_Manhattan_20200123_trips.csv.gz', 'MTA_Queens_20200118_trips.csv.gz',
'MTA_Staten_Island_20200118_trips.csv.gz', 'NJ_Transit_Bus_20200210_trips.csv.gz']
urls = map(lambda file_name: base_path + file_name, files)
trip_id_to_route_id = {}
for url in urls:
trips = pd.read_csv(url.strip(), low_memory=False)[['route_id', 'trip_id']]
for index, row in trips.iterrows():
trip_id_to_route_id[str(row['trip_id'])] = row['route_id']
print(len(trip_id_to_route_id))
return trip_id_to_route_id
def read_bus_ridership_by_route_and_hour(s3url, gtfs_trip_id_to_route_id=None, iteration=0):
if not gtfs_trip_id_to_route_id:
gtfs_trip_id_to_route_id = read_nyc_gtfs_trip_id_to_route_id()
s3path = get_output_path_from_s3_url(s3url)
events_file_path = "{0}/ITERS/it.{1}/{1}.events.csv.gz".format(s3path, iteration)
columns = ['type', 'person', 'vehicle', 'vehicleType', 'time', 'driver']
pte = pd.concat([df[(df['type'] == 'PersonEntersVehicle') | (df['type'] == 'PathTraversal')][columns]
for df in pd.read_csv(events_file_path, chunksize=100000, low_memory=False)])
print('read PEV and PT events of shape:', pte.shape)
pev = pte[(pte['type'] == 'PersonEntersVehicle')][['person', 'vehicle', 'time']]
pev['hour'] = pev['time'] // 3600
pte = pte[(pte['type'] == 'PathTraversal') & (pte['vehicleType'] == 'BUS-DEFAULT')]
drivers = set(pte['driver'])
pev = pev[~pev['person'].isin(drivers)]
print('got PEV {} and PT {}'.format(pev.shape, pte.shape))
def get_gtfs_agency_trip_id_route_id(row):
agency = ""
trip_id = ""
route_id = ""
veh_id = row['vehicle'].split(":")
if len(veh_id) > 1:
agency = veh_id[0]
trip_id = str(veh_id[1])
route_id = gtfs_trip_id_to_route_id.get(trip_id, "")
return [agency, trip_id, route_id]
pte[['gtfsAgency', 'gtfsTripId', 'gtfsRouteId']] = pte \
.apply(get_gtfs_agency_trip_id_route_id, axis=1, result_type="expand")
print('calculated gtfs agency, tripId and routeId')
columns = ['vehicleType', 'gtfsAgency', 'gtfsTripId', 'gtfsRouteId']
vehicle_info = pte.groupby('vehicle')[columns].first().reset_index()
pev = pd.merge(pev, vehicle_info, on='vehicle')
print('got advanced version of PEV:', pev.shape, 'with columns:', pev.columns)
walk_transit_modes = {'BUS-DEFAULT'} # ,'RAIL-DEFAULT', 'SUBWAY-DEFAULT'
bus_to_agency_to_trip_to_hour = pev[(pev['vehicleType'].isin(walk_transit_modes))] \
.groupby(['gtfsAgency', 'gtfsRouteId', 'hour'])['person'].count()
return bus_to_agency_to_trip_to_hour
def plot_nyc_ridership(s3url_to_ridership, function_get_run_name_from_s3url, names_to_plot_separately=None, multiplier=20, figsize=(20, 7)):
columns = ['date', 'subway', 'bus', 'rail', 'car', 'transit (bus + subway)']
suffix = '\n mta.info'
reference_mta_info = [['09 2020' + suffix, 1489413, 992200, 130600, 810144, 2481613],
['08 2020' + suffix, 1348202, 1305000, 94900, 847330, 2653202],
['07 2020' + suffix, 1120537, 1102200, 96500, 779409, 2222737],
['06 2020' + suffix, 681714, 741200, 56000, 582624, 1422914],
['05 2020' + suffix, 509871, 538800, 29200, 444179, 1048671],
['04 2020' + suffix, 516174, 495400, 24100, 342222, 1011574],
[' 2019' + suffix, 5491213, 2153913, 622000, 929951, 7645126]]
def get_graph_data_row_from_dataframe(triptype_to_count_df, run_name, agency_column='index', value_column='0'):
def get_agency_data(agency):
return triptype_to_count_df[triptype_to_count_df[agency_column] == agency][value_column].values[0]
def get_sum_agency_data(agencies):
agencies_sum = 0
for agency in agencies:
agencies_sum = agencies_sum + get_agency_data(agency)
return agencies_sum
mta_bus = get_sum_agency_data(['MTA_Bronx_20200121', 'MTA_Brooklyn_20200118',
'MTA_Manhattan_20200123', 'MTA_Queens_20200118',
'MTA_Staten_Island_20200118'])
mta_rail = get_sum_agency_data(['Long_Island_Rail_20200215',
'Metro-North_Railroad_20200215'])
mta_subway = get_agency_data('Subway')
car = get_agency_data('Car')
transit = mta_subway + mta_bus
return [run_name,
mta_subway * multiplier,
mta_bus * multiplier,
mta_rail * multiplier,
car * multiplier,
transit * multiplier]
graph_data = []
for s3url, triptype_to_count in s3url_to_ridership.items():
title = function_get_run_name_from_s3url(s3url)
row = get_graph_data_row_from_dataframe(triptype_to_count, title)
graph_data.append(row)
result = pd.DataFrame(graph_data, columns=columns)
reference_df = pd.DataFrame(reference_mta_info, columns=columns)
result = result.append(reference_df).groupby('date').sum()
def plot_bars(df, ax, ax_title, columns_to_plot):
df[columns_to_plot].plot(kind='bar', ax=ax)
# ax.grid('on', which='major', axis='y')
ax.set_title(ax_title)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.7))
fig, axs = plt.subplots(1, 1, sharex='all', figsize=figsize)
ax_main = axs
plot_bars(result, ax_main,
'reference from mta.info vs BEAM simulation\nrun data multiplied by {}'.format(multiplier),
['subway', 'bus', 'rail', 'car', 'transit (bus + subway)'])
if names_to_plot_separately:
def plot_bars_2(df, ax, ax_title, columns_to_plot):
df[columns_to_plot].plot(kind='bar', ax=ax)
ax.set_title(ax_title)
# ax.legend(loc='upper right')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.7))
result_t = result[['subway', 'bus', 'rail', 'car']].transpose()
fig, axs = plt.subplots(1, len(names_to_plot_separately), sharey='all', figsize=figsize)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
if len(names_to_plot_separately) == 1:
axs = [axs]
for (name, ax) in zip(names_to_plot_separately, axs):
selected_columns = []
for column in result_t.columns:
if str(column).startswith(name):
selected_columns.append(column)
plot_bars_2(result_t, ax, "", selected_columns)
plt.suptitle('reference from mta.info vs BEAM simulation\nrun data multiplied by {}'.format(20))
def read_ridership_from_s3_output(s3url, iteration):
ridership = None
s3_additional_output = 'scripts_output'
require_string = 'index.html#'
if require_string not in s3url:
print(
's3url does not contain "{}". That means there is no way read prepared output.'.format(require_string))
else:
ridership_file_name = '{}.nyc_mta_ridership.csv.gz'.format(iteration)
s3path = get_output_path_from_s3_url(s3url)
path = "{}/{}/{}".format(s3path, s3_additional_output, ridership_file_name)
try:
ridership = pd.read_csv(path, low_memory=False)
print("downloaded ridership from ", path)
except HTTPError:
print("Looks like file does not exits -> '{}'".format(path))
return ridership
def compare_riderships_vs_baserun_and_benchmark(title_to_s3url, iteration, s3url_base_run, date_to_calc_diff=None,
figsize=(20, 5), rot=15, suptitle="",
plot_columns=None, plot_reference=True):
columns = ['date', 'subway', 'bus', 'rail', 'car', 'transit']
suffix = '\n mta.info'
benchmark_mta_info = [['09 2020' + suffix, -72.90, -54.00, -78.86, -12.90, -68.42],
['08 2020' + suffix, -75.50, -40.00, -83.32, -08.90, -66.68],
['07 2020' + suffix, -79.60, -49.00, -83.91, -16.20, -71.90],
['06 2020' + suffix, -87.60, -66.00, -90.95, -37.40, -82.17],
['05 2020' + suffix, -90.70, -75.00, -95.00, -52.30, -86.89],
['04 2020' + suffix, -90.60, -77.00, -96.13, -63.20, -87.47]]
if not plot_columns:
plot_columns = columns[1:]
date_to_benchmark = {}
for row in benchmark_mta_info:
date_to_benchmark[row[0]] = row[1:]
print('reference dates:', date_to_benchmark.keys())
def column_name_to_passenger_multiplier(column_name):
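        # Column headers of the passengerPerTrip files are occupancy buckets: '0' is treated as a
        # multiplier of 1, a plain number is used as-is, and a range like '1-5' is mapped to the
        # integer midpoint of its bounds.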
if column_name == '0':
return 1
delimeter = '-'
if delimeter in column_name:
nums = column_name.split(delimeter)
return (int(nums[0]) + int(nums[1])) // 2
else:
return int(column_name)
def get_sum_of_passenger_per_trip(df, ignore_hour_0=True):
sum_df = df.sum()
total_sum = 0
for column in df.columns:
if column == 'hours':
continue
if ignore_hour_0 and column == '0':
continue
multiplier = column_name_to_passenger_multiplier(column)
total_sum = total_sum + sum_df[column] * multiplier
return total_sum
def get_car_bus_subway_trips(beam_s3url):
s3path = get_output_path_from_s3_url(beam_s3url)
def read_csv(filename):
file_url = s3path + "/ITERS/it.{0}/{0}.{1}.csv".format(iteration, filename)
try:
return pd.read_csv(file_url)
except HTTPError:
print('was not able to download', file_url)
sub_trips = read_csv('passengerPerTripSubway')
bus_trips = read_csv('passengerPerTripBus')
car_trips = read_csv('passengerPerTripCar')
rail_trips = read_csv('passengerPerTripRail')
sub_trips_sum = get_sum_of_passenger_per_trip(sub_trips, ignore_hour_0=True)
bus_trips_sum = get_sum_of_passenger_per_trip(bus_trips, ignore_hour_0=True)
car_trips_sum = get_sum_of_passenger_per_trip(car_trips, ignore_hour_0=False)
rail_trips_sum = get_sum_of_passenger_per_trip(rail_trips, ignore_hour_0=True)
return car_trips_sum, bus_trips_sum, sub_trips_sum, rail_trips_sum
(base_car, base_bus, base_sub, base_rail) = get_car_bus_subway_trips(s3url_base_run)
graph_data = []
for (run_title, s3url_run) in title_to_s3url:
(minus_car, minus_bus, minus_sub, minus_rail) = get_car_bus_subway_trips(s3url_run)
def calc_diff(base_run_val, minus_run_val):
return (minus_run_val - base_run_val) / base_run_val * 100
diff_transit = calc_diff(base_sub + base_bus + base_rail, minus_sub + minus_bus + minus_rail)
diff_sub = calc_diff(base_sub, minus_sub)
diff_bus = calc_diff(base_bus, minus_bus)
diff_car = calc_diff(base_car, minus_car)
diff_rail = calc_diff(base_rail, minus_rail)
graph_data.append(['{0}'.format(run_title), diff_sub, diff_bus, diff_rail, diff_car, diff_transit])
def plot_bars(df, ax, title, columns_to_plot):
df.groupby('date').sum()[columns_to_plot].plot(kind='bar', ax=ax, rot=rot)
ax.grid('on', which='major', axis='y')
ax.set_title(title)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.7))
if date_to_calc_diff:
fig, axs = plt.subplots(1, 2, sharey='all', figsize=figsize)
ax_main = axs[0]
else:
fig, axs = plt.subplots(1, 1, sharey='all', figsize=figsize)
ax_main = axs
fig.tight_layout(pad=0.1)
fig.subplots_adjust(wspace=0.25, hspace=0.1)
    plt.suptitle('Comparison of difference vs baseline and vs real data from mta.info\n{}'.format(suptitle), y=1.2,
fontsize=17)
result = pd.DataFrame(graph_data, columns=columns)
if plot_reference:
reference_df = pd.DataFrame(benchmark_mta_info, columns=columns)
result = result.append(reference_df)
plot_bars(result, ax_main, 'reference from mta.info vs BEAM simulation', plot_columns)
if date_to_calc_diff:
df_to_compare = pd.DataFrame(graph_data, columns=columns)
diff = df_to_compare[columns[1:]].sub(date_to_benchmark[date_to_calc_diff + suffix], axis=1)
diff[columns[0]] = df_to_compare[columns[0]]
plot_bars(diff, axs[1], 'runs minus reference at {}'.format(date_to_calc_diff), plot_columns)
def people_flow_in_cbd_s3(s3url, iteration):
s3path = get_output_path_from_s3_url(s3url)
events_file_path = s3path + "/ITERS/it.{0}/{0}.events.csv.gz".format(iteration)
return people_flow_in_cbd_file_path(events_file_path)
def people_flow_in_cbd_file_path(events_file_path, chunksize=100000):
events = pd.concat([events[events['type'] == 'PathTraversal'] for events in
pd.read_csv(events_file_path, low_memory=False, chunksize=chunksize)])
return people_flow_in_cdb(events)
def diff_people_flow_in_cbd_s3(s3url, iteration, s3url_base, iteration_base):
s3path = get_output_path_from_s3_url(s3url)
events_file_path = s3path + "/ITERS/it.{0}/{0}.events.csv.gz".format(iteration)
s3path_base = get_output_path_from_s3_url(s3url_base)
events_file_path_base = s3path_base + "/ITERS/it.{0}/{0}.events.csv.gz".format(iteration_base)
return diff_people_flow_in_cbd_file_path(events_file_path, events_file_path_base)
def diff_people_flow_in_cbd_file_path(events_file_path, events_file_path_base, chunksize=100000):
events = pd.concat([events[events['type'] == 'PathTraversal'] for events in
pd.read_csv(events_file_path, low_memory=False, chunksize=chunksize)])
events_base = pd.concat([events[events['type'] == 'PathTraversal'] for events in
pd.read_csv(events_file_path_base, low_memory=False, chunksize=chunksize)])
return diff_people_in(events, events_base)
def people_flow_in_cdb(df):
polygon = Polygon([
(-74.005088, 40.779100),
(-74.034957, 40.680314),
(-73.968867, 40.717604),
(-73.957924, 40.759091)
])
def inside(x, y):
point = Point(x, y)
return polygon.contains(point)
def num_people(row):
mode = row['mode']
if mode in ['walk', 'bike']:
return 1
elif mode == 'car':
return 1 + row['numPassengers']
else:
return row['numPassengers']
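    # Person-count convention implemented in num_people above: walking and biking
    # count the traveler (1), a car counts the driver plus its recorded
    # passengers, and every other mode counts numPassengers only.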
def benchmark():
data = """mode,Entering,Leaving
subway,2241712,2241712
car,877978,877978
bus,279735,279735
rail,338449,338449
ferry,66932,66932
bike,33634,33634
tram,3528,3528
"""
return pd.read_csv(StringIO(data)).set_index('mode')
f = df[(df['type'] == 'PathTraversal')][['mode', 'numPassengers', 'startX', 'startY', 'endX', 'endY']].copy(
deep=True)
f['numPeople'] = f.apply(lambda row: num_people(row), axis=1)
f = f[f['numPeople'] > 0]
f['startIn'] = f.apply(lambda row: inside(row['startX'], row['startY']), axis=1)
f['endIn'] = f.apply(lambda row: inside(row['endX'], row['endY']), axis=1)
f['numIn'] = f.apply(lambda row: row['numPeople'] if not row['startIn'] and row['endIn'] else 0, axis=1)
s = f.groupby('mode')[['numIn']].sum()
b = benchmark()
t = pd.concat([s, b], axis=1)
t.fillna(0, inplace=True)
t['percentIn'] = t['numIn'] * 100 / t['numIn'].sum()
t['percent_ref'] = t['Entering'] * 100 / t['Entering'].sum()
t = t[['numIn', 'Entering', 'percentIn', 'percent_ref']]
t['diff'] = t['percentIn'] - t['percent_ref']
t['diff'].plot(kind='bar', title="Diff: current - reference, %", figsize=(7, 5), legend=False, fontsize=12)
t.loc["Total"] = t.sum()
return t
def get_people_in(df):
polygon = Polygon([
(-74.005088, 40.779100),
(-74.034957, 40.680314),
(-73.968867, 40.717604),
(-73.957924, 40.759091)
])
def inside(x, y):
point = Point(x, y)
return polygon.contains(point)
def num_people(row):
mode = row['mode']
if mode in ['walk', 'bike']:
return 1
elif mode == 'car':
return 1 + row['numPassengers']
else:
return row['numPassengers']
f = df[(df['type'] == 'PathTraversal') & (df['mode'].isin(['car', 'bus', 'subway']))][
['mode', 'numPassengers', 'startX', 'startY', 'endX', 'endY']].copy(deep=True)
f['numPeople'] = f.apply(lambda row: num_people(row), axis=1)
f = f[f['numPeople'] > 0]
f['startIn'] = f.apply(lambda row: inside(row['startX'], row['startY']), axis=1)
f['endIn'] = f.apply(lambda row: inside(row['endX'], row['endY']), axis=1)
f['numIn'] = f.apply(lambda row: row['numPeople'] if not row['startIn'] and row['endIn'] else 0, axis=1)
s = f.groupby('mode')[['numIn']].sum()
s.fillna(0, inplace=True)
s['percentIn'] = s['numIn'] * 100 / s['numIn'].sum()
return s['percentIn']
def diff_people_in(current, base):
def reference():
data = """date,subway,bus,car
07/05/2020,-77.8,-35,-21.8
06/05/2020,-87.2,-64,-30.8
05/05/2020,-90.5,-73,-50.3
04/05/2020,-90.5,-71,-78.9
03/05/2020,0.0,4,-0.1
"""
ref = pd.read_csv(StringIO(data), parse_dates=['date'])
ref.sort_values('date', inplace=True)
ref['month'] = ref['date'].dt.month_name()
ref = ref.set_index('month').drop('date', 1)
return ref
b = get_people_in(base)
c = get_people_in(current)
b.name = 'base'
c.name = 'current'
t = pd.concat([b, c], axis=1)
t['increase'] = t['current'] - t['base']
pc = reference()
run = t['increase'].to_frame().T
run = run.reset_index().drop('index', 1)
run['month'] = 'Run'
run = run.set_index('month')
result = pd.concat([run, pc], axis=0)
# encoding: utf-8
# (c) 2017-2021 Open Risk (https://www.openriskmanagement.com)
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pprint as pp
import pandas as pd
from transitionMatrix.utils.converters import frame_to_array, datetime_to_float
from transitionMatrix.utils.preprocessing import transitions_summary, validate_absorbing_state
""" Examples of using transitionMatrix to prepare data sets (data cleansing). The functionality is primarily based on pandas, with transition data specific procedures supported by the utils sub-package. For some operations (and large datasets) it might be advisable to work with numpy arrays
"""
# Load the raw data into a pandas frame
raw_data = pd.read_csv('../../datasets/rating_data_raw.csv')
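# A minimal, schema-agnostic cleansing sketch (illustrative only; the dataset-
# specific steps use the utils helpers imported above). Dropping exact duplicate
# rows is usually a safe first pass before summarising transitions or validating
# absorbing states.
print('raw shape:', raw_data.shape)
deduplicated = raw_data.drop_duplicates().reset_index(drop=True)
print('after drop_duplicates:', deduplicated.shape)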
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
from math import sin, cos, sqrt, atan2, radians
from collections import defaultdict
import numpy as np
import pandas as pd
from pandas.io import sql
import pymysql
import pymysql.cursors
#from werkzeug.utils import secure_filename
from pulp import *
from sqlalchemy import create_engine
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from statsmodels.tsa.arima_model import ARIMA
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # np.histogram returns (counts, bin_edges); the edges play the role of the
        # `pint`/`dint` bin boundaries that plt.hist would have produced
        _, pint = np.histogram(x, bins=num_bins)
        _, dint = np.histogram(y, bins=num_bins)
        pint = list(pint)
        dint = list(dint)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
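        # arr[i][j] now holds the number of observations whose price falls in the
        # i-th price bin and whose quantity falls in the j-th quantity bin, i.e. a
        # 2-D histogram over the sorted (price, quantity) data in spq.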
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
        def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
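        # The linear demand model implemented above, with coefficients taken from
        # the OLS fit in elasticity():
        #   demand_t = a + b*(p_t - comp_t) + d*t + pr1*promo1 + pr2*promo2
        # e.g. with a=200, b=-5, d=1, pr1=pr2=0, p_t=12, comp_t=10 and t=4 this
        # gives 200 - 5*2 + 4 = 194 (numbers purely illustrative).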
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
        opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
        result1 = result1.drop('sub_grouping_rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
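    # dist() is the haversine great-circle distance in km scaled by the per-km
    # cost. Rough sanity check: dist(40.7128, -74.0060, 40.7306, -73.9352, 1)
    # is about 6.3, i.e. roughly 6.3 km between two points in New York City
    # (values approximate).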
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
    model += pulp.lpSum(
        [DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
        + [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
        + [5000000 * cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
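    # Formulation summary: minimise transport cost + fixed opening cost + a large
    # penalty (5,000,000 per unit) on unmet demand (cap_slack), subject to
    #   (1) each customer's demand being covered by shipments plus slack, and
    #   (2) shipments out of a factory not exceeding its capacity, allowed only
    #       when the factory is opened (factory_status is binary).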
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
        #if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
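        # Quick worked example: y_true=[100, 200], y_pred=[110, 190] gives
        # ME = mean([-10, 10]) = 0, MAE = 10 and MAPE ~ 7.2% (note that MAPE as
        # defined here divides by y_pred, not y_true).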
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                    ratio_inc.append(int(((bs-As)/As)*100))
                return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
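            # Example: series=[10, 20] with alpha=0.5 gives result=[10, 15]
            # (0.5*20 + 0.5*10); the first out-of-sample forecast then blends the
            # last actual with the last smoothed value: 0.5*20 + 0.5*15 = 17.5.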
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
                        meanerr=ME(data[data.columns.tolist()[i]],pred)
                        meanaverr=MAE(data[data.columns.tolist()[i]],pred)
                        mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the quarterly data and index it with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
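# A quick hand-worked sanity check of the three metrics above (illustrative only):
# for y_true=[100, 110] and y_pred=[90, 120], ME = mean([10, -10]) = 0,
# MAE = mean([10, 10]) = 10, and MAPE = mean([10/90, 10/120])*100 ~ 9.7%
# (note that this MAPE divides by y_pred rather than the textbook y_true).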
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
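# The regression block below fits each demand series against a plain integer
# time index rather than against dates. A minimal sketch of the same idea
# (variable names here are illustrative, not part of the app):
#   clf = linear_model.LinearRegression()
#   clf.fit(pd.DataFrame(range(len(series))), pd.DataFrame(series.values))
#   future = clf.predict(pd.DataFrame(range(len(series), len(series) + noofterms)))
# The hold-out rows in V are scored with ME/MAE/MAPE and the next `noofterms`
# index positions give the quarterly forecast.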
def regression(data,V,noofterms):
#Getting the number of columns in the Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes (column names) in the Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#DataFrame to collect the predictions
pred=pd.DataFrame()
#now run for each attribute column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
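# Simple exponential smoothing recursion used above: s_0 = x_0 and
# s_t = alpha*x_t + (1-alpha)*s_{t-1}. Hand-worked sketch with alpha=0.5 and
# series=[10, 20]: result=[10, 0.5*20+0.5*10=15]; the forecast loop then keeps
# rolling the last (actual, smoothed) pair forward for `predictonterm` steps.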
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the yearly data and index it with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting the number of columns in the Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes (column names) in the Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#DataFrame to collect the predictions
pred=pd.DataFrame()
#now run for each attribute column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
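# The "null" placeholders pad the front of the predicted series so it starts at
# the first forecast date and lines up with the actual-demand series on the
# shared x-axis (assuming the front-end chart treats "null" as a gap).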
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
# Copyright 2021 The Funnel Rocket Maintainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from contextlib import contextmanager
from dataclasses import dataclass
from enum import auto
from typing import List
import numpy as np
import pytest
from pandas import RangeIndex, Series, DataFrame
from frocket.common.dataset import DatasetPartsInfo, DatasetId, DatasetPartId, PartNamingMethod, DatasetInfo, \
DatasetColumnType, DatasetShortSchema
from frocket.common.serializable import AutoNamedEnum
from frocket.worker.runners.part_loader import shared_part_loader
from tests.utils.base_test_utils import temp_filename, TEMP_DIR, DisablePyTestCollectionMixin
from tests.utils.mock_s3_utils import SKIP_S3_TESTS, new_mock_s3_bucket
class TestColumn(DisablePyTestCollectionMixin, str, AutoNamedEnum):
int_64_userid = auto()
int_64_ts = auto()
int_u32 = auto()
float_64_ts = auto()
float_all_none = auto()
float_32 = auto()
float_category = auto()
str_userid = auto()
str_and_none = auto()
str_all_none = auto()
str_object_all_none = auto()
str_category_userid = auto()
str_category_few = auto()
str_category_many = auto()
bool = auto()
unsupported_datetimes = auto()
unsupported_lists = auto()
DEFAULT_GROUP_COUNT = 200
DEFAULT_ROW_COUNT = 1000
DEFAULT_GROUP_COLUMN = TestColumn.int_64_userid.value
DEFAULT_TIMESTAMP_COLUMN = TestColumn.int_64_ts.value
BASE_TIME = 1609459200000 # Start of 2021, UTC
BASE_USER_ID = 100000
TIME_SHIFT = 10000
UNSUPPORTED_COLUMN_DTYPES = {TestColumn.unsupported_datetimes: 'datetime64[ns]',
TestColumn.unsupported_lists: 'object'}
STR_AND_NONE_VALUES = ["1", "2", "3"]
STR_CAT_FEW_WEIGHTS = [0.9, 0.07, 0.02, 0.01]
STR_CAT_MANY_WEIGHTS = [0.5, 0.2] + [0.01] * 30
def test_colname_to_coltype(name: str) -> DatasetColumnType:
prefix_to_type = {
'int': DatasetColumnType.INT,
'float': DatasetColumnType.FLOAT,
'str': DatasetColumnType.STRING,
'bool': DatasetColumnType.BOOL,
'unsupported': None
}
coltype = prefix_to_type[name.split('_')[0]]
return coltype
def datafile_schema(part: int = 0) -> DatasetShortSchema:
# noinspection PyUnresolvedReferences
result = DatasetShortSchema(
min_timestamp=float(BASE_TIME),
max_timestamp=float(BASE_TIME + TIME_SHIFT),
source_categoricals=[TestColumn.str_category_userid, TestColumn.str_category_many],
potential_categoricals=[TestColumn.str_and_none, TestColumn.str_category_few],
columns={col.value: test_colname_to_coltype(col)
for col in TestColumn
if test_colname_to_coltype(col)})
# print(f"Test dataset short schema is:\n{result.to_json(indent=2)}")
return result
def weighted_list(size: int, weights: list) -> list:
res = []
for idx, w in enumerate(weights):
v = str(idx)
vlen = size * w
res += [v] * int(vlen)
assert len(res) == size
return res
def str_and_none_column_values(part: int = 0, with_none: bool = True) -> List[str]:
result = [*STR_AND_NONE_VALUES, f"part-{part}"]
if with_none:
result.append(None)
return result
def create_datafile(part: int = 0, size: int = DEFAULT_ROW_COUNT, filename: str = None) -> str:
# First, prepare data for columns
# Each part has a separate set of user (a.k.a. group) IDs
initial_user_id = BASE_USER_ID * part
min_user_id = initial_user_id
max_user_id = initial_user_id + DEFAULT_GROUP_COUNT - 1
# To ease testing, ensure that each user ID appears in the file at least once, by including the whole range,
# then add random IDs in the range
int64_user_ids = \
list(range(min_user_id, max_user_id + 1)) + \
random.choices(range(min_user_id, max_user_id + 1), k=size - DEFAULT_GROUP_COUNT)
# And also represent as strings in another column
str_user_ids = [str(uid) for uid in int64_user_ids]
# Timestamp: each part has a range of values of size TIME_SHIFT
min_ts = BASE_TIME + (TIME_SHIFT * part)
max_ts = BASE_TIME + (TIME_SHIFT * (part + 1))
# Ensure that min & max timestamps appear exactly once, and fill the rest randomly in the range
int_timestamps = \
[min_ts, max_ts] + \
random.choices(range(min_ts + 1, max_ts), k=size-2)
# Now as floats and as (incorrect!) datetimes (datetimes currently unsupported)
float_timestamps = [ts + random.random() for ts in int_timestamps]
# More test columns
int_u32_values = random.choices(range(100), k=size)
float_32_values = [np.nan, *[random.random() for _ in range(size - 2)], np.nan]
str_and_none_values = random.choices(str_and_none_column_values(part), k=size)
bool_values = random.choices([True, False], k=size)
# For yet-unsupported columns below
lists_values = [[1, 2, 3]] * size
datetimes = [ts * 1000000 for ts in float_timestamps]
# Now create all series
idx = RangeIndex(size)
columns = {
TestColumn.int_64_userid: Series(data=int64_user_ids),
TestColumn.int_64_ts: Series(data=int_timestamps),
TestColumn.int_u32: Series(data=int_u32_values, dtype='uint32'),
TestColumn.float_64_ts: Series(data=float_timestamps),
TestColumn.float_all_none: Series(data=None, index=idx, dtype='float64'),
TestColumn.float_32: Series(data=float_32_values, dtype='float32'),
TestColumn.float_category: Series(data=float_timestamps, index=idx, dtype='category'),
TestColumn.str_userid: Series(data=str_user_ids)
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import os
from PIL import Image
import numpy as np
# import torch
import json
import sys
from tqdm import tqdm, trange
from pycocotools.coco import COCO
import skimage.io as io
import pylab
from convert_fat_coco import *
from mpl_toolkits.axes_grid1 import ImageGrid
from lib.utils.mkdir_if_missing import mkdir_if_missing
from lib.render_glumpy.render_py import Render_Py
from lib.pair_matching import RT_transform
import pcl
from pprint import pprint
import calendar
import time
import yaml
import argparse
import scipy.io as scio
import shutil
ROS_PYTHON2_PKG_PATH = ['/opt/ros/kinetic/lib/python2.7/dist-packages',
'/usr/local/lib/python2.7/dist-packages/',
'/media/aditya/A69AFABA9AFA85D9/Cruzr/code/DOPE/catkin_ws/devel/lib/python2.7/dist-packages']
ROS_PYTHON3_PKG_PATH = '/media/aditya/A69AFABA9AFA85D9/Cruzr/code/ros_python3_ws/devel/lib/python3/dist-packages'
# ROS_PYTHON3_PKG_PATH = '/media/sbpl/Data/Aditya/code/ros_python3_ws/devel/lib/python3/dist-packages'
# ROS_PYTHON3_PKG_PATH = '/home/jessy/projects/ros_python3_ws/install/lib/python3/dist-packages'
class FATImage:
def __init__(self,
coco_annotation_file=None, coco_image_directory=None,
depth_factor=1000,
model_dir='/media/aditya/A69AFABA9AFA85D9/Datasets/YCB_Video_Dataset/models/',
model_mesh_in_mm=False,
model_mesh_scaling_factor=1,
models_flipped=False,
model_type="default",
env_config="pr2_env_config.yaml",
planner_config="pr2_planner_config.yaml",
img_width=960,
img_height=540,
distance_scale=100,
table_ransac_threshold=0.05,
perch_debug_dir=None,
python_debug_dir="./model_outputs",
dataset_type="ycb",
analysis_output_dir=None
):
'''
env_config : env_config.yaml in sbpl_perception/config to use with PERCH
planner_config : planner_config.yaml in sbpl_perception/config to use with PERCH
distance_scale : 100 if units in database are in cm
'''
self.width = img_width
self.height = img_height
self.distance_scale = distance_scale
self.dataset_type = dataset_type
self.perch_debug_dir = perch_debug_dir
self.python_debug_dir = python_debug_dir
self.table_ransac_threshold = table_ransac_threshold
mkdir_if_missing(self.python_debug_dir)
if analysis_output_dir is not None:
self.analysis_output_dir = analysis_output_dir
mkdir_if_missing(self.analysis_output_dir)
self.coco_image_directory = coco_image_directory
self.model_type = model_type
self.fixed_transforms_dict = None
self.scene_cloud_pub = None
self.camera_intrinsic_matrix = None
if coco_annotation_file is not None:
self.example_coco = COCO(coco_annotation_file)
example_coco = self.example_coco
self.category_id_to_names = example_coco.loadCats(example_coco.getCatIds())
self.category_names_to_id = {}
self.category_ids = example_coco.getCatIds(catNms=['square', 'shape'])
for category in self.category_id_to_names:
self.category_names_to_id[category['name']] = category['id']
self.category_names = list(self.category_names_to_id.keys())
print('Custom COCO categories: \n{}\n'.format(' '.join(self.category_names)))
# print(coco_predictions)
# print(all_predictions[:5])
# ## Load Image from COCO Dataset
self.image_ids = example_coco.getImgIds(catIds=self.category_ids)
if "viewpoints" in example_coco.dataset:
self.viewpoints_xyz = np.array(example_coco.dataset['viewpoints'])
self.inplane_rotations = np.array(example_coco.dataset['inplane_rotations'])
if "fixed_transforms" in example_coco.dataset:
self.fixed_transforms_dict = example_coco.dataset['fixed_transforms']
if "camera_intrinsic_settings" in example_coco.dataset:
self.camera_intrinsics = example_coco.dataset['camera_intrinsic_settings']
if self.camera_intrinsics is not None:
# Can be none in case of YCB
self.camera_intrinsic_matrix = \
np.array([[self.camera_intrinsics['fx'], 0, self.camera_intrinsics['cx']],
[0, self.camera_intrinsics['fy'], self.camera_intrinsics['cy']],
[0, 0, 1]])
if "camera_intrinsic_matrix" in example_coco.dataset:
self.camera_intrinsic_matrix = np.array(example_coco.dataset['camera_intrinsic_matrix'])
self.depth_factor = depth_factor
self.world_to_fat_world = {}
self.world_to_fat_world['location'] = [0,0,0]
# self.world_to_fat_world['quaternion_xyzw'] = [0.853, -0.147, -0.351, -0.357]
self.world_to_fat_world['quaternion_xyzw'] = [0,-math.sqrt(5),0,math.sqrt(5)]
self.model_dir = model_dir
self.model_params = {
'mesh_in_mm' : model_mesh_in_mm,
'mesh_scaling_factor' : model_mesh_scaling_factor,
'flipped' : models_flipped
}
# self.rendered_root_dir = os.path.join(self.model_dir, "rendered")
self.rendered_root_dir = os.path.join(os.path.abspath(os.path.join(self.model_dir, os.pardir)), "rendered")
print("Rendering or Poses output dir : {}".format(self.rendered_root_dir))
mkdir_if_missing(self.rendered_root_dir)
self.search_resolution_translation = 0.08
self.search_resolution_yaw = 0.3926991
# This matrix converts camera frame (X pointing out) to camera optical frame (Z pointing out)
# Multiply by this matrix to convert camera frame to camera optical frame
# Multiply by inverse of this matrix to convert camera optical frame to camera frame
self.cam_to_body = np.array([[0, 0, 1, 0],
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 0, 1]])
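# A minimal sketch of the intended use, following the comment above and
# assuming T is a 4x4 homogeneous pose expressed in the camera frame:
#   T_optical = np.matmul(self.cam_to_body, T)                        # camera frame -> camera optical frame
#   T_camera  = np.matmul(np.linalg.inv(self.cam_to_body), T_optical) # and back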
# self.world_to_fat_world['quaternion_xyzw'] = [0.7071, 0, 0, -0.7071]
self.symmetry_info = {
"025_mug" : 0,
"004_sugar_box" : 1,
"008_pudding_box" : 1,
"009_gelatin_box" : 1,
"010_potted_meat_can" : 1,
"024_bowl" : 2,
"003_cracker_box" : 1,
"002_master_chef_can" : 2,
"006_mustard_bottle" : 1,
"pepsi" : 2,
"coke" : 2,
"sprite" : 2,
"pepsi_can" : 2,
"coke_can" : 2,
"sprite_can" : 2,
"7up_can" : 2,
"coke_bottle" : 2,
"sprite_bottle" : 2,
"fanta_bottle" : 2,
"crate_test" : 0,
"035_power_drill" : 0,
"005_tomato_soup_can" : 2
}
self.env_config = env_config
self.planner_config = planner_config
def get_random_image(self, name=None, required_objects=None):
# image_data = self.example_coco.loadImgs(self.image_ids[np.random.randint(0, len(self.image_ids))])[0]
if name is not None:
found = False
print("Tying to get image from DB : {}".format(name))
for i in range(len(self.image_ids)):
image_data = self.example_coco.loadImgs(self.image_ids[i])[0]
if image_data['file_name'] == name:
found = True
break
if found == False:
return None, None
else:
image_data = self.example_coco.loadImgs(self.image_ids[7000])[0]
# image_data = self.example_coco.loadImgs(self.image_ids[0])[0]
print(image_data)
annotation_ids = self.example_coco.getAnnIds(imgIds=image_data['id'], catIds=self.category_ids, iscrowd=None)
annotations = self.example_coco.loadAnns(annotation_ids)
self.example_coco.showAnns(annotations)
# print(annotations)
if required_objects is not None:
filtered_annotations = []
for annotation in annotations:
class_name = self.category_id_to_names[annotation['category_id']]['name']
if class_name in required_objects:
filtered_annotations.append(annotation)
return image_data, filtered_annotations
return image_data, annotations
def get_database_stats(self):
image_ids = self.example_coco.getImgIds(catIds=self.category_ids)
print("Number of images : {}".format(len(image_ids)))
image_category_count = {}
for i in range(len(self.image_ids)):
image_data = self.example_coco.loadImgs(self.image_ids[i])[0]
annotation_ids = self.example_coco.getAnnIds(imgIds=image_data['id'], catIds=self.category_ids, iscrowd=None)
annotations = self.example_coco.loadAnns(annotation_ids)
for annotation in annotations:
class_name = self.category_id_to_names[annotation['category_id']]['name']
if class_name in image_category_count:
image_category_count[class_name] += 1
else:
image_category_count[class_name] = 0
print(image_category_count)
def copy_database(self, destination, required_object):
from shutil import copy
mkdir_if_missing(destination)
image_ids = self.example_coco.getImgIds(catIds=self.category_ids)
print("Number of images : {}".format(len(image_ids)))
copied_camera = False
non_obj_images = 0
for i in trange(len(self.image_ids)):
image_data = self.example_coco.loadImgs(self.image_ids[i])[0]
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
annotation_file_path = self.get_annotation_file_path(color_img_path)
annotation_ids = self.example_coco.getAnnIds(imgIds=image_data['id'], catIds=self.category_ids, iscrowd=None)
annotations = self.example_coco.loadAnns(annotation_ids)
has_object = False
for annotation in annotations:
class_name = self.category_id_to_names[annotation['category_id']]['name']
if required_object == class_name:
has_object = True
if has_object:
copy(color_img_path, os.path.join(destination, self.get_clean_name(image_data['file_name']) + ".jpg"))
copy(annotation_file_path, os.path.join(destination, self.get_clean_name(image_data['file_name']) + ".json"))
# copy(color_img_path, os.path.join(destination, str(i).zfill(6) + ".left.jpg"))
# copy(annotation_file_path, os.path.join(destination, str(i).zfill(6) + ".left.json"))
if not copied_camera:
camera_file_path = self.get_camera_settings_file_path(color_img_path)
object_file_path = self.get_object_settings_file_path(color_img_path)
copy(camera_file_path, destination)
copy(object_file_path, destination)
copied_camera = True
elif non_obj_images < 3000 and np.random.rand() < 0.2:
# Need non-object images for DOPE training
copy(color_img_path, os.path.join(destination, self.get_clean_name(image_data['file_name']) + ".jpg"))
copy(annotation_file_path, os.path.join(destination, self.get_clean_name(image_data['file_name']) + ".json"))
non_obj_images += 1
def save_yaw_only_dataset(self, scene='all'):
print("Processing {} images".format(len(self.image_ids)))
num_images = len(self.image_ids)
# num_images = 10
for i in range(num_images):
image_data = self.example_coco.loadImgs(self.image_ids[i])[0]
if scene != 'all' and image_data['file_name'].startswith(scene) == False:
continue
annotation_ids = self.example_coco.getAnnIds(imgIds=image_data['id'], catIds=self.category_ids, iscrowd=None)
annotations = self.example_coco.loadAnns(annotation_ids)
yaw_only_objects, _ = self.visualize_pose_ros(image_data, annotations, frame='table', camera_optical_frame=False)
def visualize_image_annotations(self, image_data, annotations):
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
image = io.imread(img_path)
count = 1
fig = plt.figure(2, (4., 4.), dpi=1000)
plt.axis("off")
grid = ImageGrid(fig, 111,
nrows_ncols=(1, len(annotations)+1),
axes_pad=0.1,
)
grid[0].imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
grid[0].axis("off")
for annotation in annotations:
print("Annotated viewpoint_id : {}".format(annotation['viewpoint_id']))
theta, phi = get_viewpoint_rotations_from_id(viewpoints_xyz, annotation['viewpoint_id'])
inplane_rotation_angle = get_inplane_rotation_from_id(
self.inplane_rotations, annotation['inplane_rotation_id']
)
xyz_rotation_angles = [phi, theta, inplane_rotation_angle]
class_name = self.category_id_to_names[annotation['category_id']]['name']
print("*****{}*****".format(class_name))
print("Recovered rotation : {}".format(xyz_rotation_angles))
quat = annotation['quaternion_xyzw']
print("Actual rotation : {}".format(RT_transform.quat2euler(get_wxyz_quaternion(quat))))
fixed_transform = self.fixed_transforms_dict[class_name]
rgb_gl, depth_gl = render_pose(
class_name, fixed_transform, self.camera_intrinsics, xyz_rotation_angles, annotation['location']
)
grid[count].imshow(cv2.cvtColor(rgb_gl, cv2.COLOR_BGR2RGB))
grid[count].axis("off")
count += 1
plt.savefig('image_annotations_output.png')
def get_ros_pose(self, location, quat, units='cm'):
from geometry_msgs.msg import Pose, PoseStamped, PoseArray, Quaternion
p = Pose()
if units == 'cm':
p.position.x, p.position.y, p.position.z = [i/self.distance_scale for i in location]
else:
p.position.x, p.position.y, p.position.z = [i for i in location]
p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w = quat[0], quat[1], quat[2], quat[3]
return p
def update_coordinate_max_min(self, max_min_dict, location):
location = [i/self.distance_scale for i in location]
if location[0] > max_min_dict['xmax']:
max_min_dict['xmax'] = location[0]
if location[1] > max_min_dict['ymax']:
max_min_dict['ymax'] = location[1]
if location[2] > max_min_dict['zmax']:
max_min_dict['zmax'] = location[2]
if location[0] < max_min_dict['xmin']:
max_min_dict['xmin'] = location[0]
if location[1] < max_min_dict['ymin']:
max_min_dict['ymin'] = location[1]
if location[2] < max_min_dict['zmin']:
max_min_dict['zmin'] = location[2]
return max_min_dict
def get_world_point(self, point) :
camera_fx_reciprocal_ = 1.0 / self.camera_intrinsic_matrix[0, 0]
camera_fy_reciprocal_ = 1.0 / self.camera_intrinsic_matrix[1, 1]
world_point = np.zeros(3)
world_point[2] = point[2]
world_point[0] = (point[0] - self.camera_intrinsic_matrix[0,2]) * point[2] * (camera_fx_reciprocal_)
world_point[1] = (point[1] - self.camera_intrinsic_matrix[1,2]) * point[2] * (camera_fy_reciprocal_)
return world_point
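# get_world_point() is standard pinhole back-projection: for a pixel (u, v)
# with depth z and intrinsics (fx, fy, cx, cy),
#   X = (u - cx) * z / fx,  Y = (v - cy) * z / fy,  Z = z
# so, for example, a pixel at the principal point always maps to (0, 0, z).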
def get_scene_cloud(self, image_data, downsampling_leaf_size):
'''
Get PCL point cloud in camera frame from depth image
'''
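# Outline: back-project every pixel with get_world_point(), drop the dominant
# RANSAC plane (assumed to be the table), then voxel-downsample the remainder
# with leaf size `downsampling_leaf_size` before publishing it as a ROS cloud.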
import rospy
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
from PIL import Image
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
depth_img_path = self.get_depth_img_path(color_img_path)
print("depth_img_path : {}".format(depth_img_path))
depth_image = cv2.imread(depth_img_path, cv2.IMREAD_ANYDEPTH)
K_inv = np.linalg.inv(self.camera_intrinsic_matrix)
points_3d = np.zeros((depth_image.shape[0]*depth_image.shape[1], 3), dtype=np.float32)
count = 0
cloud = pcl.PointCloud()
depth_image_pil = np.asarray(Image.open(depth_img_path), dtype=np.float16)
for x in range(depth_image.shape[1]):
for y in range(depth_image.shape[0]):
point = np.array([x,y,depth_image[y,x]/self.depth_factor])
w_point = self.get_world_point(point)
points_3d[count, :] = w_point.tolist()
count += 1
cloud.from_array(points_3d)
seg = cloud.make_segmenter()
seg.set_optimize_coefficients (True)
seg.set_model_type (pcl.SACMODEL_PLANE)
seg.set_method_type (pcl.SAC_RANSAC)
seg.set_distance_threshold (0.02)
inliers, model = seg.segment()
# print(inliers)
# points_3d_filtered = points_3d[..., [i for i in range(points_3d.shape[0]) if i not in inliers]]
# points_3d_filtered = points_3d[points_3d != points_3d[inliers]]
points_3d_filtered = np.delete(points_3d, inliers, axis = 0)
cloud_n = pcl.PointCloud()
cloud_n.from_array(points_3d_filtered)
sor = cloud_n.make_voxel_grid_filter()
sor.set_leaf_size(downsampling_leaf_size, downsampling_leaf_size, downsampling_leaf_size)
cloud_n = sor.filter()
cloud_n_array = np.asarray(cloud_n)
cloud_color = np.zeros(cloud_n_array.shape[0])
pose_cloud_msg = self.xyzrgb_array_to_pointcloud2(
cloud_n_array, cloud_color, rospy.Time.now(), "camera"
)
if self.scene_cloud_pub is not None:
self.scene_cloud_pub.publish(pose_cloud_msg)
return cloud_n
def get_table_pose(self, depth_img_path, frame):
'''
Creates a point cloud in camera frame and calculates table pose using RANSAC
'''
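# Sketch of the math used below: RANSAC returns plane coefficients
# (a, b, c, d) with a*x + b*y + c*z + d = 0; the code derives
# yaw = atan(b/a) and pitch = atan(c/b) + pi/2 from the normal, and takes the
# table location as the centroid of the inlier points (scaled by distance_scale).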
import rospy
# from tf.transformations import quaternion_from_euler
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
from PIL import Image
depth_image = cv2.imread(depth_img_path, cv2.IMREAD_ANYDEPTH)
K_inv = np.linalg.inv(self.camera_intrinsic_matrix)
# points_3d = np.zeros((depth_image.shape[0]*depth_image.shape[1], 4), dtype=np.float32)
points_3d = []
count = 0
cloud = pcl.PointCloud_PointXYZRGB()
depth_image_pil = np.asarray(Image.open(depth_img_path), dtype=np.float16)
# TODO : replace this with numpy based point cloud creation - check densefusion
for x in range(depth_image.shape[1]):
for y in range(depth_image.shape[0]):
# point = np.array([x,y,1])
# t_point = np.matmul(K_inv, point)
# print("point : {},{}".format(t_point, depth_image[y,x]))
# print("point : {},{}".format(t_point, depth_image_pil[y,x]/65536 * 10))
# points_3d[count, :] = t_point[:2].tolist() + \
# [(depth_image[y,x]/self.depth_factor)] +\
# [255 << 16 | 255 << 8 | 255]
point = np.array([x,y,depth_image[y,x]/self.depth_factor])
w_point = self.get_world_point(point)
# Table cant be above camera
if w_point[1] < 0.0:
continue
points_3d.append(w_point.tolist() + \
[255 << 16 | 255 << 8 | 255])
# points_3d[count, :] = w_point.tolist() + \
# [255 << 16 | 255 << 8 | 255]
count += 1
points_3d = np.array(points_3d).astype(np.float32)
cloud.from_array(points_3d)
seg = cloud.make_segmenter()
# Optional
seg.set_optimize_coefficients (True)
# Mandatory
seg.set_model_type (pcl.SACMODEL_PLANE)
seg.set_method_type (pcl.SAC_RANSAC)
seg.set_distance_threshold (self.table_ransac_threshold)
# ros_msg = self.xyzrgb_array_to_pointcloud2(
# points_3d[:,:3], points_3d[:,3], rospy.Time.now(), frame
# )
# print(ros_msg)
# pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients)
# pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
inliers, model = seg.segment()
# if inliers.size
# return
# end
# print (model)
angles = []
# projection on x,y axis to get yaw
yaw = np.arctan(model[1]/model[0])
# Add pi for jenga clutter cam 2 to rotate the table pose
# yaw = np.arctan(model[1]/model[0]) + np.pi
# pitch = np.arcsin(model[2]/np.linalg.norm(model[:3]))
# projection on z,y axis to get pitch
pitch = np.arctan(model[2]/model[1])+np.pi/2
# pitch = np.arctan(model[2]/model[1])
roll = 0
# The 'r' prefix is presumably for rotating-axis Tait-Bryan angles, meaning roll/pitch/yaw can be used
# Each axis in the sequence denotes the angle with that axis excluded (the vector projected onto the other two axes)
q = get_xyzw_quaternion(RT_transform.euler2quat(roll,pitch,yaw, 'ryxz').tolist())
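# model[:3] is the estimated plane normal: yaw comes from its projection onto the x-y plane and pitch from the
# z-y projection (offset by pi/2), which presumably orients the table pose with its z-axis along the normal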
# for i in range(3):
# angle = model[i]/np.linalg.norm(model[:3])
# angles.append(np.arccos(angle))
# print(inliers)
inlier_points = points_3d[inliers]
# ros_msg = self.xyzrgb_array_to_pointcloud2(
# inlier_points[:,:3], inlier_points[:,3], rospy.Time.now(), frame
# )
ros_msg = self.xyzrgb_array_to_pointcloud2(
points_3d[:,:3], points_3d[:,3], rospy.Time.now(), frame
)
location = np.mean(inlier_points[:,:3], axis=0) * self.distance_scale
# for i in inliers:
# inlier_points.append(points_3d[inliers,:])
# inlier_points = np.array(inlier_points)
# q_rot =
print("Table location : {}".format(location))
return ros_msg, location, q
def xyzrgb_array_to_pointcloud2(self, points, colors, stamp=None, frame_id=None, seq=None):
'''
Create a sensor_msgs.PointCloud2 from an array
of points.
'''
from sensor_msgs.msg import PointCloud2, PointField
msg = PointCloud2()
# assert(points.shape == colors.shape)
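# NOTE: the colors argument is currently ignored and replaced with zeros below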
colors = np.zeros(points.shape)
buf = []
if stamp:
msg.header.stamp = stamp
if frame_id:
msg.header.frame_id = frame_id
if seq:
msg.header.seq = seq
if len(points.shape) == 3:
msg.height = points.shape[1]
msg.width = points.shape[0]
N = points.shape[0] * points.shape[1]
xyzrgb = np.array(np.hstack([points.reshape(N, -1), colors.reshape(N, -1)]), dtype=np.float32)
else:
N = len(points)
xyzrgb = np.array(np.hstack([points, colors]), dtype=np.float32)
msg.height = 1
msg.width = N
msg.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('r', 12, PointField.FLOAT32, 1),
PointField('g', 16, PointField.FLOAT32, 1),
PointField('b', 20, PointField.FLOAT32, 1)
]
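# 6 FLOAT32 fields x 4 bytes each = 24-byte point_step used below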
msg.is_bigendian = False
msg.point_step = 24
msg.row_step = msg.point_step * N
msg.is_dense = True
msg.data = xyzrgb.tostring()
return msg
def get_camera_pose_relative_table(self, depth_img_path, type='quat', cam_to_body=None):
if '/opt/ros/kinetic/lib/python2.7/dist-packages' not in sys.path:
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
import rospy
# if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
# if '/media/aditya/A69AFABA9AFA85D9/Cruzr/code/ros_python3_ws/install/lib/python3/dist-packages' not in sys.path:
# sys.path.append('/media/aditya/A69AFABA9AFA85D9/Cruzr/code/ros_python3_ws/install/lib/python3/dist-packages')
# # These packages need to be python3 specific, tf is built using python3
# import tf2_ros
# from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Pose, PoseStamped, PoseArray, Quaternion
table_pose_msg = PoseStamped()
table_pose_msg.header.frame_id = 'camera'
table_pose_msg.header.stamp = rospy.Time.now()
scene_cloud, table_location, table_quat = self.get_table_pose(depth_img_path, 'camera')
table_pose_msg.pose = self.get_ros_pose(
table_location,
table_quat,
)
camera_pose_table = {
'location_worldframe' : table_location,
'quaternion_xyzw_worldframe': table_quat
}
print("Table pose wrt to camera : {}".format(camera_pose_table))
camera_pose_matrix = np.zeros((4,4))
camera_rotation = RT_transform.quat2mat(get_wxyz_quaternion(camera_pose_table['quaternion_xyzw_worldframe']))
camera_pose_matrix[:3, :3] = camera_rotation
camera_location = [i for i in camera_pose_table['location_worldframe']]
camera_pose_matrix[:, 3] = camera_location + [1]
print("table height : {}".format(table_location))
# Doing inverse gives us location of camera in table frame
camera_pose_matrix = np.linalg.inv(camera_pose_matrix)
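# i.e. if T_cam_table is the table pose in the camera frame, then inv(T_cam_table) = T_table_cam,
# the camera pose expressed in the table frame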
# Convert optical frame to body for PERCH
if cam_to_body is not None:
camera_pose_matrix = np.matmul(camera_pose_matrix, np.linalg.inv(cam_to_body))
if type == 'quat':
quat = RT_transform.mat2quat(camera_pose_matrix[:3, :3]).tolist()
camera_pose_table = {
'location_worldframe' : camera_pose_matrix[:3,3],
'quaternion_xyzw_worldframe':get_xyzw_quaternion(quat)
}
return table_pose_msg, scene_cloud, camera_pose_table
elif type == 'rot':
return table_pose_msg, scene_cloud, camera_pose_matrix
def visualize_pose_ros(
self, image_data, annotations, frame='camera', camera_optical_frame=True, num_publish=10,
write_poses=False, ros_publish=True, get_table_pose=False, input_camera_pose=None
):
'''
Visualize poses, get the table pose from RANSAC, convert poses to the table frame, and optionally write poses to file
'''
if ros_publish:
print("ROS visualizing")
if '/opt/ros/kinetic/lib/python2.7/dist-packages' not in sys.path:
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
import rospy
import rospkg
import rosparam
from geometry_msgs.msg import Pose, PoseStamped, PoseArray, Quaternion
from sensor_msgs.msg import Image, PointCloud2
for python2_path in ROS_PYTHON2_PKG_PATH:
if python2_path in sys.path:
sys.path.remove(python2_path)
if ROS_PYTHON3_PKG_PATH not in sys.path:
sys.path.append(ROS_PYTHON3_PKG_PATH)
# These packages need to be python3 specific, cv2 is imported from environment, cv_bridge is built using python3
import cv2
from cv_bridge import CvBridge, CvBridgeError
rospy.init_node('fat_pose')
self.ros_rate = rospy.Rate(5)
self.objects_pose_pub = rospy.Publisher('fat_image/objects_pose', PoseArray, queue_size=1, latch=True)
self.camera_pose_pub = rospy.Publisher('fat_image/camera_pose', PoseStamped, queue_size=1, latch=True)
self.scene_color_image_pub = rospy.Publisher("fat_image/scene_color_image", Image)
self.table_pose_pub = rospy.Publisher("fat_image/table_pose", PoseStamped, queue_size=1, latch=True)
self.scene_cloud_pub = rospy.Publisher("fat_image/scene_cloud", PointCloud2, queue_size=1, latch=True)
self.bridge = CvBridge()
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
cv_scene_color_image = cv2.imread(color_img_path)
# cv2.imshow("cv_scene_color_image", cv_scene_color_image)
# image = io.imread(img_path)
# plt.imshow(image); plt.axis('off')
# plt.show()
object_pose_msg = PoseArray()
object_pose_msg.header.frame_id = frame
object_pose_msg.header.stamp = rospy.Time.now()
camera_pose_msg = PoseStamped()
camera_pose_msg.header.frame_id = frame
camera_pose_msg.header.stamp = rospy.Time.now()
max_min_dict = {
'xmin' : np.inf,
'ymin' : np.inf,
'zmin' : np.inf,
'xmax' : -np.inf,
'ymax' : -np.inf,
'zmax' : -np.inf
}
cam_to_body = self.cam_to_body if camera_optical_frame == False else None
camera_pose_table = None
if input_camera_pose is None:
if (frame == 'camera' and get_table_pose) or frame =='table':
depth_img_path = self.get_depth_img_path(color_img_path)
print("depth_img_path : {}".format(depth_img_path))
table_pose_msg, scene_cloud, camera_pose_table = self.get_camera_pose_relative_table(depth_img_path)
print("camera_pose_table from depth image : {}".format(camera_pose_table))
else:
# Use the camera pose input in table frame to transform ground truth
camera_pose_table = input_camera_pose
# while not rospy.is_shutdown():
rendered_pose_list_out = {}
transformed_annotations = []
for i in range(num_publish):
yaw_only_objects = []
count = 0
units = 'cm'
for annotation in annotations:
class_name = self.category_id_to_names[annotation['category_id']]['name']
if frame == 'camera':
location, quat = annotation['location'], annotation['quaternion_xyzw']
if frame == 'table':
location, quat = get_object_pose_in_world(annotation, camera_pose_table)
# location, quat = get_object_pose_in_world(annotation, camera_pose_table, self.world_to_fat_world)
# Transform ground truth to original model frame from NDDS
location, quat = self.get_object_pose_with_fixed_transform(
class_name, location,
RT_transform.quat2euler(get_wxyz_quaternion(quat)),
'quat',
use_fixed_transform=True,
invert_fixed_transform=False
)
# units = 'm'
location = (np.array(location)*self.distance_scale).tolist()
if class_name == 'sprite_bottle' or class_name == 'coke_bottle':
location[2] = 0
camera_location, camera_quat = camera_pose_table['location_worldframe'], camera_pose_table['quaternion_xyzw_worldframe']
if frame == 'fat_world':
location, quat = get_object_pose_in_world(annotation, annotation['camera_pose'])
camera_location, camera_quat = get_camera_pose_in_world(annotation['camera_pose'], None, type='quat', cam_to_body=cam_to_body)
if frame == 'world':
location, quat = get_object_pose_in_world(annotation, annotation['camera_pose'], self.world_to_fat_world)
camera_location, camera_quat = get_camera_pose_in_world(
annotation['camera_pose'], self.world_to_fat_world, type='quat', cam_to_body=cam_to_body
)
if ros_publish:
object_pose_ros = self.get_ros_pose(location, quat, units)
object_pose_msg.poses.append(object_pose_ros)
max_min_dict = self.update_coordinate_max_min(max_min_dict, location)
if input_camera_pose is None:
if ((frame == 'camera' and get_table_pose) or frame == 'table') and ros_publish:
self.table_pose_pub.publish(table_pose_msg)
self.scene_cloud_pub.publish(scene_cloud)
if frame != 'camera' and ros_publish:
camera_pose_msg.pose = self.get_ros_pose(camera_location, camera_quat)
self.camera_pose_pub.publish(camera_pose_msg)
rotation_angles = RT_transform.quat2euler(get_wxyz_quaternion(quat), 'rxyz')
if ros_publish:
# print("Location for {} : {}".format(class_name, location))
# print("Rotation Eulers for {} : {}".format(class_name, rotation_angles))
# print("ROS Pose for {} : {}".format(class_name, object_pose_ros))
# print("Rotation Quaternion for {} : {}\n".format(class_name, quat))
try:
self.scene_color_image_pub.publish(self.bridge.cv2_to_imgmsg(cv_scene_color_image, "bgr8"))
except CvBridgeError as e:
print(e)
if np.all(np.isclose(np.array(rotation_angles[:2]), np.array([-np.pi/2, 0]), atol=0.1)):
yaw_only_objects.append({'annotation_id' : annotation['id'],'class_name' : class_name})
if i == 0:
transformed_annotations.append({
'location' : location,
'quaternion_xyzw' : quat,
'category_id' : self.category_names_to_id[class_name],
'id' : count
})
count += 1
if class_name not in rendered_pose_list_out:
rendered_pose_list_out[class_name] = []
# rendered_pose_list_out[class_name].append(location.tolist() + list(rotation_angles))
rendered_pose_list_out[class_name].append(location + quat)
if ros_publish:
self.objects_pose_pub.publish(object_pose_msg)
self.ros_rate.sleep()
# pprint(rendered_pose_list_out)
if write_poses:
for label, poses in rendered_pose_list_out.items():
rendered_dir = os.path.join(self.rendered_root_dir, label)
mkdir_if_missing(rendered_dir)
pose_rendered_file = os.path.join(
rendered_dir,
"poses.txt",
)
poses = np.array(poses)
# Convert to meters for PERCH
poses[:,:3] /= 100
np.savetxt(pose_rendered_file, np.around(poses, 4))
# max_min_dict['ymax'] = max_min_dict['ymin'] + 2 * self.search_resolution_translation
max_min_dict['ymax'] += 0.10
max_min_dict['ymin'] -= 0.10
max_min_dict['xmax'] += 0.10
max_min_dict['xmin'] -= 0.10
# max_min_dict['ymax'] = 2.00
# max_min_dict['ymin'] = -2.00
# max_min_dict['xmax'] = 2.00
# max_min_dict['xmin'] = -2.00
# max_min_dict['zmin'] = table_pose_msg.pose.position.z
# print("Yaw only objects in the image : {}".format(yaw_only_objects))
return yaw_only_objects, max_min_dict, transformed_annotations, camera_pose_table
def get_depth_img_path(self, color_img_path):
# For FAT/NDDS
if self.dataset_type == "ndds":
return color_img_path.replace(os.path.splitext(color_img_path)[1], '.depth.png')
# For YCB
elif self.dataset_type == "ycb":
return color_img_path.replace('color', 'depth')
elif self.dataset_type == "jenga":
return color_img_path.replace('color', 'depth').replace("jpg", "png")
else:
return color_img_path.replace('color', 'depth')
def get_mask_img_path(self, color_img_path):
# For YCB
if self.dataset_type == "ycb":
return color_img_path.replace('color', 'label')
else:
return color_img_path.replace('color', 'mask')
def get_annotation_file_path(self, color_img_path):
return color_img_path.replace(os.path.splitext(color_img_path)[1], '.json')
def get_camera_settings_file_path(self, color_img_path):
return color_img_path.replace(os.path.basename(color_img_path), '_camera_settings.json')
def get_object_settings_file_path(self, color_img_path):
return color_img_path.replace(os.path.basename(color_img_path), '_object_settings.json')
def get_renderer(self, class_name):
width = self.width
height = self.height
if self.camera_intrinsic_matrix is not None:
camera_intrinsic_matrix = self.camera_intrinsic_matrix
else:
# Used when rendering without a dataset, or when the dataset has multiple camera matrices
camera_intrinsic_matrix = np.array([[619.274, 0, 324.285],
[0, 619.361, 238.717],
[0, 0, 1]])
ZNEAR = 0.1
ZFAR = 20
# model_dir = os.path.join(self.model_dir, "models", class_name)
# Get Path to original YCB models for obj files for rendering
model_dir = os.path.join(os.path.abspath(os.path.join(self.model_dir, os.pardir)), "models")
model_dir = os.path.join(model_dir, class_name)
render_machine = Render_Py(model_dir, camera_intrinsic_matrix, width, height, ZNEAR, ZFAR)
return render_machine
def get_object_pose_with_fixed_transform(
self, class_name, location, rotation_angles, type, use_fixed_transform=True, invert_fixed_transform=False
):
# Location in cm
# Add fixed transform to given object transform so that it can be applied to a model
object_world_transform = np.zeros((4,4))
object_world_transform[:3,:3] = RT_transform.euler2mat(rotation_angles[0],rotation_angles[1],rotation_angles[2])
object_world_transform[:,3] = [i/self.distance_scale for i in location] + [1]
if use_fixed_transform and self.fixed_transforms_dict is not None:
fixed_transform = np.transpose(np.array(self.fixed_transforms_dict[class_name]))
fixed_transform[:3,3] = [i/self.distance_scale for i in fixed_transform[:3,3]]
if invert_fixed_transform:
total_transform = np.matmul(object_world_transform, np.linalg.inv(fixed_transform))
else:
total_transform = np.matmul(object_world_transform, fixed_transform)
else:
total_transform = object_world_transform
if type == 'quat':
quat = RT_transform.mat2quat(total_transform[:3, :3]).tolist()
return total_transform[:3,3].tolist(), get_xyzw_quaternion(quat)
elif type == 'rot':
return total_transform
elif type == 'euler':
return total_transform[:3,3], RT_transform.mat2euler(total_transform[:3,:3])
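# Illustrative usage (assumes distance_scale == 100, i.e. locations given in cm):
#   location, quat_xyzw = self.get_object_pose_with_fixed_transform('004_sugar_box', [0, 0, 100], [0, 0, 0], 'quat')
# which returns the pose 1 m in front of the camera with the class' fixed model transform applied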
def get_bbox(self, img):
mask_args = np.argwhere(img > 0)
rmin, rmax, cmin, cmax = \
np.min(mask_args[:,0]), np.max(mask_args[:,0]), np.min(mask_args[:,1]), np.max(mask_args[:,1])
return [cmin, rmin, cmax, rmax]
def render_pose(self, class_name, rotation_angles, location):
# Takes rotation and location in camera frame for object and renders and image for it
# Expects location in cm
# fixed_transform = np.transpose(np.array(self.fixed_transforms_dict[class_name]))
# fixed_transform[:3,3] = [i/100 for i in fixed_transform[:3,3]]
# object_world_transform = np.zeros((4,4))
# object_world_transform[:3,:3] = RT_transform.euler2mat(rotation_angles[0],rotation_angles[1],rotation_angles[2])
# object_world_transform[:,3] = location + [1]
# total_transform = np.matmul(object_world_transform, fixed_transform)
if not hasattr(self, "render_machines"):
self.render_machines = {}
if class_name not in self.render_machines:
self.render_machines[class_name] = self.get_renderer(class_name)
render_machine = self.render_machines[class_name]
total_transform = self.get_object_pose_with_fixed_transform(class_name, location, rotation_angles, 'rot')
pose_rendered_q = RT_transform.mat2quat(total_transform[:3,:3]).tolist() + total_transform[:3,3].flatten().tolist()
rgb_gl, depth_gl = render_machine.render(
pose_rendered_q[:4], np.array(pose_rendered_q[4:])
)
rgb_gl = rgb_gl.astype("uint8")
depth_gl = (depth_gl * 1000).astype(np.uint16)
return rgb_gl, depth_gl
def render_perch_poses(self, max_min_dict, required_object, camera_pose, render_dir=None):
# Renders equidistant poses in 3D discretized space with both color and depth images
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
if render_dir is None:
render_dir = self.rendered_root_dir
render_machine = self.get_renderer(required_object)
idx = 0
rendered_dir = os.path.join(render_dir, required_object)
mkdir_if_missing(rendered_dir)
rendered_pose_list_out = []
for x in np.arange(max_min_dict['xmin'], max_min_dict['xmax'], self.search_resolution_translation):
# for y in np.arange(max_min_dict['ymin'], max_min_dict['ymax'], self.search_resolution_translation):
y = (max_min_dict['ymin'] + max_min_dict['ymax'])/2
for theta in np.arange(0, 2 * np.pi, self.search_resolution_yaw):
# original_point = np.array([x, y, (max_min_dict['zmin']+max_min_dict['zmin'])/2-0.1913/2, 1])
# original_point = np.array([x, y, (max_min_dict['zmin']+max_min_dict['zmin'])/2, 1])
original_point = [x, y, max_min_dict['zmin']]
# subtract half height of object so that base is on the table
# TODO take from database, right now this is for mustard bottle
# new_point = np.copy(original_point)
# new_point[2] += 0.1913
object_world_transform = np.zeros((4,4))
object_world_transform[:3,:3] = RT_transform.euler2mat(theta, 0, 0)
object_world_transform[:,3] = [i*100 for i in original_point] + [1]
if camera_pose is not None:
# Doing in a frame where z is up
total_transform = np.matmul(np.linalg.inv(camera_pose), object_world_transform)
else:
# Doing in camera frame
total_transform = object_world_transform
# Move it away from the camera so that everything is visible
total_transform[2, 3] = max_min_dict['zmax']*100
rgb_gl, depth_gl = self.render_pose(
required_object,
RT_transform.mat2euler(total_transform[:3,:3]),
total_transform[:3,3].flatten().tolist()
)
image_file = os.path.join(
rendered_dir,
"{}-color.png".format(idx),
)
depth_file = os.path.join(
rendered_dir,
"{}-depth.png".format(idx),
)
cv2.imwrite(image_file, rgb_gl)
cv2.imwrite(depth_file, depth_gl)
rendered_pose_list_out.append(object_world_transform[:,3].tolist() + [0,0,theta])
idx += 1
pose_rendered_file = os.path.join(
rendered_dir,
"poses.txt",
)
np.savetxt(pose_rendered_file, np.around(rendered_pose_list_out, 4))
def read_perch_output(self, output_dir_name):
from perch import FATPerch
fat_perch = FATPerch(
object_names_to_id=self.category_names_to_id,
output_dir_name=output_dir_name,
models_root=self.model_dir,
model_params=self.model_params,
model_type=self.model_type,
symmetry_info=self.symmetry_info,
read_results_only=True,
perch_debug_dir=self.perch_debug_dir,
distance_scale=self.distance_scale
)
perch_annotations = fat_perch.read_pose_results()
return perch_annotations
def visualize_perch_output(self, image_data, annotations, max_min_dict, frame='fat_world',
use_external_render=0, required_object='004_sugar_box', camera_optical_frame=True,
use_external_pose_list=0, model_poses_file=None, use_centroid_shifting=0, predicted_mask_path=None,
gt_annotations=None, input_camera_pose=None, num_cores=6, table_height=0.004,
compute_type=1
):
'''
@compute_type : specified in perch_fat.cpp, 0 - greedyicp, 1 - greedy perch 2.0, 2 - perch cpu
'''
from perch import FATPerch
print("camera instrinsics : {}".format(self.camera_intrinsic_matrix))
print("max_min_ranges : {}".format(max_min_dict))
cam_to_body = self.cam_to_body if camera_optical_frame == False else None
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
depth_img_path = self.get_depth_img_path(color_img_path)
print("depth_img_path : {}".format(depth_img_path))
# Get camera pose for PERCH and rendering objects if needed
if input_camera_pose is None:
if frame == 'fat_world':
camera_pose = get_camera_pose_in_world(annotations[0]['camera_pose'], None, 'rot', cam_to_body=cam_to_body)
camera_pose[:3, 3] /= self.distance_scale
if frame == 'world':
camera_pose = get_camera_pose_in_world(annotations[0]['camera_pose'], self.world_to_fat_world, 'rot', cam_to_body=cam_to_body)
camera_pose[:3, 3] /= self.distance_scale
if frame == 'table':
_, _, camera_pose = self.get_camera_pose_relative_table(depth_img_path, type='rot', cam_to_body=cam_to_body)
camera_pose[:3, 3] /= self.distance_scale
if frame == 'camera':
# For 6D version we run in camera frame
camera_pose = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
if cam_to_body is not None:
camera_pose = np.matmul(camera_pose, np.linalg.inv(cam_to_body))
else:
# Using hardcoded input camera pose from somewhere
camera_pose = get_camera_pose_in_world(input_camera_pose, type='rot', cam_to_body=cam_to_body)
camera_pose[:3, 3] /= self.distance_scale
print("camera_pose : {}".format(camera_pose))
# Prepare data to send to PERCH
input_image_files = {
'input_color_image' : color_img_path,
'input_depth_image' : depth_img_path,
}
if predicted_mask_path is not None:
input_image_files['predicted_mask_image'] = predicted_mask_path
# Render poses if necessary
if use_external_render == 1:
self.render_perch_poses(max_min_dict, required_object, camera_pose)
# if use_external_pose_list == 1:
# models_root = os.path.join(self.model_dir, 'aligned_cm')
# else:
# models_root = os.path.join(self.model_dir, 'models')
camera_pose = camera_pose.flatten().tolist()
if self.perch_debug_dir is None:
self.perch_debug_dir = os.path.join(rospack.get_path('sbpl_perception'), "visualization")
params = {
'x_min' : max_min_dict['xmin'],
'x_max' : max_min_dict['xmax'],
'y_min' : max_min_dict['ymin'],
'y_max' : max_min_dict['ymax'],
# 'x_min' : max_min_dict['xmin'],
# 'x_max' : max_min_dict['xmax'] + self.search_resolution_translation,
# 'y_min' : max_min_dict['ymin'],
# 'y_max' : max_min_dict['ymin'] + 2 * self.search_resolution_translation,
'required_object' : required_object,
# 'table_height' : max_min_dict['zmin'],
'table_height' : table_height,
'use_external_render' : use_external_render,
'camera_pose': camera_pose,
'reference_frame_': frame,
'search_resolution_translation': self.search_resolution_translation,
'search_resolution_yaw': self.search_resolution_yaw,
'image_debug' : 0,
'use_external_pose_list': use_external_pose_list,
'depth_factor': self.depth_factor,
'shift_pose_centroid': use_centroid_shifting,
'use_icp': 1,
'rendered_root_dir' : self.rendered_root_dir,
'perch_debug_dir' : self.perch_debug_dir,
'compute_type' : compute_type
}
camera_params = {
'camera_width' : self.width,
'camera_height' : self.height,
'camera_fx' : self.camera_intrinsic_matrix[0, 0],
'camera_fy' : self.camera_intrinsic_matrix[1, 1],
'camera_cx' : self.camera_intrinsic_matrix[0, 2],
'camera_cy' : self.camera_intrinsic_matrix[1, 2],
'camera_znear' : 0.1,
'camera_zfar' : 20,
}
fat_perch = FATPerch(
params=params,
input_image_files=input_image_files,
camera_params=camera_params,
object_names_to_id=self.category_names_to_id,
output_dir_name=self.get_clean_name(image_data['file_name']),
models_root=self.model_dir,
model_params=self.model_params,
model_type=self.model_type,
symmetry_info=self.symmetry_info,
env_config=self.env_config,
planner_config=self.planner_config,
perch_debug_dir=self.perch_debug_dir,
distance_scale=self.distance_scale
)
perch_annotations = fat_perch.run_perch_node(model_poses_file, num_cores)
return perch_annotations
def get_clean_name(self, name):
return name.replace('.jpg', '').replace('.png', '').replace('/', '_').replace('.', '_')
def reject_outliers(self, data, m = 2.):
d = np.abs(data - np.mean(data))
mdev = np.std(d)
s = d/mdev if mdev else 0.
return data[s<m]
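# Illustrative example: reject_outliers(np.array([1., 2., 3., 100.])) keeps [1, 2, 3],
# since only 100 lies more than m=2 deviations from the mean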
def init_model(self,
cfg_file='/media/aditya/A69AFABA9AFA85D9/Cruzr/code/fb_mask_rcnn/maskrcnn-benchmark/configs/fat_pose/e2e_mask_rcnn_R_50_FPN_1x_test_cocostyle.yaml',
model_weights=None,
print_poses=False,
required_objects=None,
min_image_size=750):
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
args = {
'config_file' : cfg_file,
'confidence_threshold' : 0.70,
'min_image_size' : min_image_size,
'masks_per_dim' : 10,
'show_mask_heatmaps' : False
}
cfg.merge_from_file(args['config_file'])
if model_weights is not None:
cfg.MODEL.WEIGHT = model_weights
cfg.freeze()
if print_poses:
self.render_machines = {}
for name in required_objects:
self.render_machines[name] = self.get_renderer(name)
self.coco_demo = COCODemo(
cfg,
confidence_threshold=args['confidence_threshold'],
show_mask_heatmaps=args['show_mask_heatmaps'],
masks_per_dim=args['masks_per_dim'],
min_image_size=args['min_image_size'],
categories = self.category_names,
# topk_rotations=9
topk_viewpoints=4,
topk_inplane_rotations=4
)
def get_2d_iou(self, boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
if interArea == 0:
return 0
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
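# Illustrative example with boxes given as [xmin, ymin, xmax, ymax] in pixels:
#   self.get_2d_iou([100, 50, 200, 150], [120, 60, 220, 160])
#   -> intersection 80*90 = 7200, union 10000 + 10000 - 7200 = 12800, IOU = 0.5625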
def get_rotation_samples(self, label, num_samples):
from sphere_fibonacci_grid_points import sphere_fibonacci_grid_points_with_sym_metric
all_rots = []
name_sym_dict = {
# Describes the symmetry of each object:
# the first item encodes the viewpoint-sphere symmetry, the second the in-plane (yaw) sampling case handled below
# The render or transformation matrix may need to be rewritten here!
# First (half: 0, whole: 1), Second (0: 0, 1: 0-pi, 2: 0-2pi, ...)
"002_master_chef_can": [0,0], #half_0
"003_cracker_box": [0,0], #half_0-pi #0,0 is fine with gicp
# "003_cracker_box": [0,6], #half_0-pi #0,0 is fine with gicp
"004_sugar_box": [0,3], #half_0-pi
"005_tomato_soup_can": [0,0], #half_0
"006_mustard_bottle": [0,0], #whole_0-pi
"007_tuna_fish_can": [0,0], #half_0
"008_pudding_box": [0,1], #half_0-pi
"009_gelatin_box": [0,0], #half_0-pi
"010_potted_meat_can": [0,0], #half_0-pi
"011_banana": [1,0], #whole_0-2pi #from psc
"019_pitcher_base": [0,0], #whole_0-2pi
"021_bleach_cleanser": [0,0], #whole_0-2pi, 55 and
# "021_bleach_cleanser": [0,2], #whole_0-2pi, 55 and
"024_bowl": [1,0], #whole_0
"025_mug": [0,1], #whole_0-2pi
"035_power_drill" : [0,7], #whole_0-2pi
"036_wood_block": [0,0], #half_0-pi
"037_scissors": [0,2], #whole_0-2pi
"040_large_marker" : [1,0], #whole_0
# "051_large_clamp": [1,1], #whole_0-pi
"052_extra_large_clamp": [0, 7], #whole_0-pi
"051_large_clamp": [0,7],
"061_foam_brick": [0,0], #half_0-pi
"color_block_0": [0,8], #half_0-pi
"color_block_1": [0,8], #half_0-pi
"color_block_2": [0,8], #half_0-pi
"color_block_3": [0,8], #half_0-pi
"color_block_4": [0,8], #half_0-pi
"color_block_5": [0,8], #half_0-pi
"color_block_6": [0,8], #half_0-pi
"color_block_7": [0,8], #half_0-pi
"color_block_8": [0,8], #half_0-pi
"color_block_9": [0,8], #half_0-pi
"color_block_10": [0,8], #half_0-pi
"color_block_11": [0,8], #half_0-pi
"color_block_12": [0,8] #half_0-pi
}
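# e.g. "004_sugar_box": [0, 3] -> sample viewpoints on the half sphere and, per case 3 below,
# add two in-plane hypotheses (yaw 0 and 2*pi/3) for every viewpoint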
viewpoints_xyz = sphere_fibonacci_grid_points_with_sym_metric(num_samples,name_sym_dict[label][0])
# if name_sym_dict[label][1] == 0:
for viewpoint in viewpoints_xyz:
r, theta, phi = cart2sphere(viewpoint[0], viewpoint[1], viewpoint[2])
theta, phi = sphere2euler(theta, phi)
if name_sym_dict[label][1] == 0:
xyz_rotation_angles = [-phi, theta, 0]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 1:
step_size = math.pi/2
for yaw_temp in np.arange(0,math.pi, step_size):
xyz_rotation_angles = [-phi, yaw_temp, theta]
# xyz_rotation_angles = [yaw_temp, -phi, theta]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 2:
step_size = math.pi/4
for yaw_temp in np.arange(0,math.pi, step_size):
xyz_rotation_angles = [-phi, yaw_temp, theta]
# xyz_rotation_angles = [yaw_temp, -phi, theta]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 3:
xyz_rotation_angles = [-phi, 0, theta]
all_rots.append(xyz_rotation_angles)
# xyz_rotation_angles = [-phi, math.pi/2, theta]
# all_rots.append(xyz_rotation_angles)
xyz_rotation_angles = [-phi, 2*math.pi/3, theta]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 4:
# For upright sugar box
xyz_rotation_angles = [-phi, math.pi+theta, 0]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 5:
xyz_rotation_angles = [phi, theta, math.pi]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 6:
# This causes sampling of inplane along z
xyz_rotation_angles = [-phi, 0, theta]
all_rots.append(xyz_rotation_angles)
xyz_rotation_angles = [-phi, math.pi/3, theta]
all_rots.append(xyz_rotation_angles)
xyz_rotation_angles = [-phi, 2*math.pi/3, theta]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 7:
# This causes sampling of inplane along z
# xyz_rotation_angles = [-phi, 0, theta]
# all_rots.append(xyz_rotation_angles)
# xyz_rotation_angles = [-phi, math.pi/3, theta]
# all_rots.append(xyz_rotation_angles)
# xyz_rotation_angles = [-phi, 2*math.pi/3, theta]
# all_rots.append(xyz_rotation_angles)
# xyz_rotation_angles = [-phi, math.pi, theta]
# all_rots.append(xyz_rotation_angles)
step_size = math.pi/2
for yaw_temp in np.arange(0, 2*math.pi, step_size):
xyz_rotation_angles = [-phi, yaw_temp, theta]
# xyz_rotation_angles = [yaw_temp, -phi, theta]
all_rots.append(xyz_rotation_angles)
elif name_sym_dict[label][1] == 8:
step_size = math.pi/3
for yaw_temp in np.arange(0, math.pi, step_size):
xyz_rotation_angles = [yaw_temp, -phi, theta]
all_rots.append(xyz_rotation_angles)
return all_rots
def get_posecnn_bbox(self, idx, posecnn_rois):
border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
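# The bbox height/width are snapped up to the next size in border_list (multiples of 40 px),
# re-centered, and clamped to the image bounds below (similar to the DenseFusion bbox preprocessing)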
rmin = int(posecnn_rois[idx][3]) + 1
rmax = int(posecnn_rois[idx][5]) - 1
cmin = int(posecnn_rois[idx][2]) + 1
cmax = int(posecnn_rois[idx][4]) - 1
r_b = rmax - rmin
for tt in range(len(border_list)):
if r_b > border_list[tt] and r_b < border_list[tt + 1]:
r_b = border_list[tt + 1]
break
c_b = cmax - cmin
for tt in range(len(border_list)):
if c_b > border_list[tt] and c_b < border_list[tt + 1]:
c_b = border_list[tt + 1]
break
center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
rmin = center[0] - int(r_b / 2)
rmax = center[0] + int(r_b / 2)
cmin = center[1] - int(c_b / 2)
cmax = center[1] + int(c_b / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > self.height:
delt = rmax - self.height
rmax = self.height
rmin -= delt
if cmax > self.width:
delt = cmax - self.width
cmax = self.width
cmin -= delt
return rmin, rmax, cmin, cmax
def get_posecnn_mask(self, mask_image_id=None, centroid_type="roi", annotations=None):
'''
Uses the PoseCNN mask and computes the 2D centroid using different methods for the rendering shift
'''
posecnn_meta = scio.loadmat('{0}/results_PoseCNN_RSS2018/{1}.mat'.format(self.coco_image_directory, str(mask_image_id).zfill(6)))
overall_mask = np.array(posecnn_meta['labels'])
posecnn_rois = np.array(posecnn_meta['rois'])
lst = posecnn_rois[:, 1:2].flatten()
labels = []
centroids_2d = []
masks = []
boxes = []
for idx in range(len(lst)):
itemid = int(lst[idx])
# print(itemid)
label = self.category_id_to_names[itemid-1]['name']
labels.append(label)
mask = np.copy(overall_mask)
mask[mask != itemid] = 0
masks.append(mask)
# rmin/rmax are the row bounds and cmin/cmax the column bounds in the 2D numpy array
rmin, rmax, cmin, cmax = None, None, None, None
if centroid_type == "roi":
rmin, rmax, cmin, cmax = self.get_posecnn_bbox(idx, posecnn_rois)
elif centroid_type == "mask":
mask_args = np.argwhere(mask > 0)
rmin, rmax, cmin, cmax = np.min(mask_args[:,0]), np.max(mask_args[:,0]), np.min(mask_args[:,1]), np.max(mask_args[:,1])
elif centroid_type == "box_gt":
for ann in annotations:
# box contains X(along width), Y, top left and bottom right
if itemid-1 == ann['category_id']:
box = ann['bbox']
# boxes_all.append([int(x) for x in box])
# X (along width), Y
# centroids_2d_all.append(np.array([(box[0]+box[2])/2, (box[1]+box[3])/2]))
rmin, rmax, cmin, cmax = box[1], box[1] + box[3], box[0], box[0] + box[2]
# rmin, rmax, cmin, cmax = box[1], box[3], box[0], box[2]
break
if rmin is None:
# bleach 1340: when using the GT bbox it's not found in the annotations, so fall back to the mask bounds
mask_args = np.argwhere(mask > 0)
rmin, rmax, cmin, cmax = np.min(mask_args[:,0]), np.max(mask_args[:,0]), np.min(mask_args[:,1]), np.max(mask_args[:,1])
boxes.append([cmin, rmin, cmax, rmax])
centroids_2d.append(np.array([(cmin+cmax)/2, (rmin+rmax)/2]))
# print(boxes)
return labels, masks, boxes, centroids_2d
def get_gt_mask(self, image_data, annotations, mask_image_id=None, centroid_type="mask"):
'''
Uses the ground truth mask and computes the centroid using different methods for the rendering shift
'''
labels = []
centroids_2d = []
masks = []
boxes = []
# for class_id in class_ids:
for ann in annotations:
# mask_path = os.path.join(self.coco_image_directory, "clutter/1",
# "Masks", "{}_color_class_crop{}.png".format(str(0).zfill(4), ann['category_id']))
# overall_mask = np.array(Image.open(mask_path))
overall_mask = self.example_coco.annToMask(ann)
label = self.category_id_to_names[ann['category_id']]['name']
# print(label)
# print(np.count_nonzero(overall_mask))
# if np.count_nonzero(overall_mask) == 0:
# continue
# print((overall_mask[overall_mask != 0]))
labels.append(label)
mask = np.copy(overall_mask)
masks.append(mask)
# rmin/rmax are the row bounds and cmin/cmax the column bounds in the 2D numpy array
if centroid_type == "mask":
rmin, rmax, cmin, cmax = None, None, None, None
mask_args = np.argwhere(mask > 0)
rmin, rmax, cmin, cmax = np.min(mask_args[:,0]), np.max(mask_args[:,0]), np.min(mask_args[:,1]), np.max(mask_args[:,1])
elif centroid_type == "box_gt":
box = ann['bbox']
# X min (along width), Y min, width, height
rmin, rmax, cmin, cmax = box[1], box[1] + box[3], box[0], box[0] + box[2]
boxes.append([cmin, rmin, cmax, rmax])
centroids_2d.append(np.array([(cmin+cmax)/2, (rmin+rmax)/2]))
# print(boxes)
return labels, masks, boxes, centroids_2d
def overlay_masks(self, cv_image, bboxes, masks, labels, centroids):
import cv2
color = (0, 255, 0)
for box_id in range(len(labels)):
label = labels[box_id]
if box_id >= len(bboxes) or box_id >= len(masks):
continue
box = [int(x) for x in bboxes[box_id]]
center = [int(x) for x in centroids[box_id]]
mask = masks[box_id].astype(np.uint8)
# print(mask)
top_left, bottom_right = box[:2], box[2:]
cv_image = cv2.rectangle(
cv_image, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)
cv_image = cv2.drawContours(cv_image, contours, -1, color, 3)
x, y = box[:2]
# # x, y = centroids[box_id]
# x, y = np.mean(np.argwhere(mask > 0), axis=0)
# x = int(y)
# y = int(x)
s = "{}".format(label)
cv2.putText(
cv_image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 0, 0), 1
)
cv_image = cv2.circle(cv_image, tuple(center), 8, (0, 0, 0), -1)
return cv_image
def visualize_sphere_sampling(
self, image_data, annotations=None, print_poses=True, required_objects=None, num_samples=80, mask_type='mask_rcnn', mask_image_id=None):
from maskrcnn_benchmark.config import cfg
from dipy.core.geometry import cart2sphere, sphere2cart
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
print("Mask type : {}".format(mask_type))
# Load GT mask
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
color_img = cv2.imread(color_img_path)
# depth_img_path = color_img_path.replace('.jpg', '.depth.png')
depth_img_path = self.get_depth_img_path(color_img_path)
depth_image = cv2.imread(depth_img_path, cv2.IMREAD_ANYDEPTH)
rotation_output_dir = os.path.join(self.python_debug_dir, self.get_clean_name(image_data['file_name']))
# if print_poses:
shutil.rmtree(rotation_output_dir, ignore_errors=True)
mkdir_if_missing(rotation_output_dir)
if mask_type == "mask_rcnn":
predicted_mask_path = os.path.join(os.path.dirname(depth_img_path), os.path.splitext(os.path.basename(color_img_path))[0] + '.predicted_mask.png')
composite, mask_list_all, labels_all, centroids_2d_all, boxes_all, overall_binary_mask \
= self.coco_demo.run_on_opencv_image(color_img, use_thresh=True)
# if print_poses:
# composite_image_path = '{}/mask_mask_rcnn.png'.format(rotation_output_dir)
composite_image_path = '{}/mask_mask_rcnn_{}.png'.format(self.python_debug_dir, self.get_clean_name(image_data['file_name']))
# cv2.imwrite(composite_image_path, composite)
# print(rotation_list['top_viewpoint_ids'])
# labels_all = rotation_list['labels']
elif mask_type == "posecnn":
predicted_mask_path = os.path.join(os.path.dirname(depth_img_path), os.path.splitext(os.path.basename(color_img_path))[0] + '.predicted_mask_posecnn.png')
labels_all, mask_list_all, boxes_all, centroids_2d_all = self.get_posecnn_mask(mask_image_id, centroid_type="mask")
composite = self.overlay_masks(color_img, boxes_all, mask_list_all, labels_all, centroids_2d_all)
composite_image_path = '{}/mask_posecnn.png'.format(rotation_output_dir)
# composite_image_path = '{}/mask.png'.format(rotation_output_dir)
# cv2.imwrite(composite_image_path, composite)
# print(labels_all)
elif mask_type == "posecnn_gt_bbox":
predicted_mask_path = os.path.join(os.path.dirname(depth_img_path), os.path.splitext(os.path.basename(color_img_path))[0] + '.predicted_mask_posecnn.png')
labels_all, mask_list_all, boxes_all, centroids_2d_all = self.get_posecnn_mask(mask_image_id, centroid_type="box_gt", annotations=annotations)
# boxes_all = []
# centroids_2d_all = []
# for label in labels_all:
# # print(label)
# for ann in annotations:
# # print(self.category_id_to_names[ann['category_id']])
# if label == self.category_id_to_names[ann['category_id']]['name']:
# box = ann['bbox']
# boxes_all.append([int(x) for x in box])
# # X (along width), Y
# centroids_2d_all.append(np.array([(box[0]+box[2])/2, (box[1]+box[3])/2]))
# import pdb
# pdb.set_trace()
composite = self.overlay_masks(color_img, boxes_all, mask_list_all, labels_all, centroids_2d_all)
composite_image_path = '{}/mask_posecnn_gt_bbox.png'.format(rotation_output_dir)
elif mask_type == "jenga":
predicted_mask_path = os.path.join(os.path.dirname(depth_img_path), os.path.splitext(os.path.basename(color_img_path))[0] + '.jenga_mask.png')
labels_all, mask_list_all, boxes_all, centroids_2d_all = self.get_gt_mask(image_data, annotations, mask_image_id=mask_image_id, centroid_type="mask")
composite = self.overlay_masks(color_img, boxes_all, mask_list_all, labels_all, centroids_2d_all)
composite_image_path = '{}/mask_posecnn_jenga_bbox.png'.format(rotation_output_dir)
elif mask_type == "gt":
predicted_mask_path = os.path.join(os.path.dirname(depth_img_path), os.path.splitext(os.path.basename(color_img_path))[0] + '.gt_mask.png')
labels_all, mask_list_all, boxes_all, centroids_2d_all = self.get_gt_mask(image_data, annotations, mask_image_id=mask_image_id, centroid_type="box_gt")
composite = self.overlay_masks(color_img, boxes_all, mask_list_all, labels_all, centroids_2d_all)
composite_image_path = '{}/mask_bbox_gt.png'.format(rotation_output_dir)
cv2.imwrite(composite_image_path, composite)
# return None, None, None, None
labels = labels_all
mask_list = mask_list_all
boxes = boxes_all
centroids_2d = centroids_2d_all
# Select only those labels from network output that are required objects
if required_objects is not None:
labels = []
boxes = []
mask_list = []
centroids_2d = []
overall_binary_mask = np.zeros((self.height, self.width))
mask_label_i = 1
for label in required_objects:
if label in labels_all:
mask_i = labels_all.index(label)
# print(str(mask_i) + " found")
filter_mask = mask_list_all[mask_i]
object_depth_mask = np.copy(depth_image)
object_depth_mask[filter_mask == 0] = 0
if np.count_nonzero(object_depth_mask) == 0:
continue
# Use binary mask to assign label in overall mask
overall_binary_mask[filter_mask > 0] = mask_label_i
labels.append(label)
boxes.append(boxes_all[mask_i])
mask_list.append(filter_mask)
centroids_2d.append(centroids_2d_all[mask_i])
mask_label_i += 1
cv2.imwrite(predicted_mask_path, overall_binary_mask)
# Sample rotations
viewpoints_xyz = sphere_fibonacci_grid_points(num_samples)
annotations = []
grid_i = 0
for box_id in range(len(labels)):
label = labels[box_id]
bbox = boxes[box_id]
centroid = centroids_2d[box_id]
object_depth_mask = np.copy(depth_image)
object_depth_mask[mask_list[box_id] == 0] = 0
object_depth = np.mean(object_depth_mask)
min_depth = np.min(object_depth_mask[object_depth_mask > 0])/self.depth_factor
max_depth = np.max(object_depth_mask[object_depth_mask > 0])/self.depth_factor
print("Min depth :{} , Max depth : {} from mask".format(min_depth, max_depth))
# if print_poses:
# render_machine = self.render_machines[label]
cnt = 0
object_rotation_list = []
rotation_samples = self.get_rotation_samples(label, num_samples)
# Sample sphere and collect rotations
# for viewpoint in viewpoints_xyz:
# r, theta, phi = cart2sphere(viewpoint[0], viewpoint[1], viewpoint[2])
# theta, phi = sphere2euler(theta, phi)
# xyz_rotation_angles = [phi, theta, 0]
# print("Recovered rotation : {}".format(xyz_rotation_angles))
# quaternion = get_xyzw_quaternion(RT_transform.euler2quat(phi, theta, 0).tolist())
# # object_rotation_list.append(quaternion)
for xyz_rotation_angles in rotation_samples:
print("Recovered rotation : {}".format(xyz_rotation_angles))
quaternion = get_xyzw_quaternion(RT_transform.euler2quat(xyz_rotation_angles[0], xyz_rotation_angles[1], xyz_rotation_angles[2]).tolist())
object_rotation_list.append(quaternion)
if print_poses:
rgb_gl, depth_gl = self.render_pose(
label, xyz_rotation_angles, [0, 0, 1*self.distance_scale]
)
render_bbox = self.get_bbox(rgb_gl)
render_centroid = [(render_bbox[0] + render_bbox[2])/2, (render_bbox[1] + render_bbox[3])/2]
translation = centroid - render_centroid
render_bbox[0] += translation[0]
render_bbox[2] += translation[0]
render_bbox[1] += translation[1]
render_bbox[3] += translation[1]
print("Render BBOX : {}".format(render_bbox))
print("Observed BBOX : {}".format(bbox))
iou_2d = self.get_2d_iou(bbox, render_bbox)
print("2D IOU : {}".format(iou_2d))
top_left, bottom_right = bbox[:2], bbox[2:]
color = (0, 255, 0)
rgb_gl = cv2.rectangle(
rgb_gl, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
render_bbox = [int(x) for x in render_bbox]
top_left, bottom_right = render_bbox[:2], render_bbox[2:]
rgb_gl = cv2.rectangle(
rgb_gl, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
cv2.imwrite("{}/label_{}_{}.png".format(rotation_output_dir, label, cnt), rgb_gl)
cnt += 1
if label == "037_scissors":
resolution = 0.01
else:
resolution = 0.02
# mask = np.argwhere(mask_list[box_id] > 0)
# centroid = [(np.max(mask[:,0])+np.min(mask[:,0]))/2, (np.max(mask[:,1])+np.min(mask[:,1]))/2]
# centroid = np.flip(centroid)
centroid = centroids_2d[box_id]
print("Centroid from min/max mask (X (along width), Y) : {}".format(centroid))
# print("Centroid from GT (X (along width), Y): {}".format(centroids_2d[box_id]))
# if label == "021_bleach_cleanser" or label == "037_scissors":
# centroid_max = np.flip(np.max(np.argwhere(mask_list[box_id] > 0), axis=0))
# centroid_min = np.flip(np.min(np.argwhere(mask_list[box_id] > 0), axis=0))
# centroid = np.array([centroid_max[0]*0.7+centroid_min[0]*0.5, centroid_max[1]*0.6+centroid_min[1]* 0.4])
for _, depth in enumerate(np.arange(min_depth, max_depth + resolution, resolution)):
## Vary depth only
centre_world_point = self.get_world_point(centroid.tolist() + [depth])
for quaternion in object_rotation_list:
annotations.append({
'location' : (centre_world_point*100).tolist(),
'quaternion_xyzw' : quaternion,
'category_id' : self.category_names_to_id[label],
'id' : grid_i
})
grid_i += 1
return labels, annotations, predicted_mask_path, composite_image_path
def visualize_model_output(self, image_data, use_thresh=False, use_centroid=True, print_poses=True, required_objects=None):
from maskrcnn_benchmark.config import cfg
from dipy.core.geometry import cart2sphere, sphere2cart
# plt.figure()
# img_path = os.path.join(image_directory, image_data['file_name'])
# image = io.imread(img_path)
# plt.imshow(image); plt.axis('off')
# # Running model on image
# from predictor import COCODemo
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
color_img = cv2.imread(color_img_path)
composite, mask_list_all, rotation_list, centroids_2d_all, boxes_all, overall_binary_mask \
= self.coco_demo.run_on_opencv_image(color_img, use_thresh=use_thresh)
model_output_dir = os.path.join(self.python_debug_dir, self.get_clean_name(image_data['file_name']))
composite_image_path = '{}/mask_{}.png'.format(model_output_dir, self.get_clean_name(image_data['file_name']))
cv2.imwrite(composite_image_path, composite)
# depth_img_path = color_img_path.replace('.jpg', '.depth.png')
depth_img_path = self.get_depth_img_path(color_img_path)
depth_image = cv2.imread(depth_img_path, cv2.IMREAD_ANYDEPTH)
predicted_mask_path = os.path.join(os.path.dirname(depth_img_path), os.path.splitext(os.path.basename(color_img_path))[0] + '.predicted_mask.png')
top_viewpoint_ids = rotation_list['top_viewpoint_ids']
top_inplane_rotation_ids = rotation_list['top_inplane_rotation_ids']
labels = rotation_list['labels']
mask_list = mask_list_all
boxes = boxes_all
centroids_2d = centroids_2d_all
print(rotation_list['top_viewpoint_ids'])
# Select only those labels from network output that are required objects
if required_objects is not None:
top_viewpoint_ids = []
top_inplane_rotation_ids = []
labels = []
boxes = []
mask_list = []
centroids_2d = []
overall_binary_mask = np.zeros((self.height, self.width))
mask_label_i = 1
for label in required_objects:
if label in rotation_list['labels']:
mask_i = rotation_list['labels'].index(label)
# print(str(mask_i) + " found")
filter_mask = mask_list_all[mask_i]
# if np.count_nonzero(filter_mask) < 4000:
# continue
# print(filter_mask > 0)
# Use binary mask to assign label in overall mask
overall_binary_mask[filter_mask > 0] = mask_label_i
labels.append(label)
top_viewpoint_ids.append(rotation_list['top_viewpoint_ids'][mask_i].tolist())
top_inplane_rotation_ids.append(rotation_list['top_inplane_rotation_ids'][mask_i].tolist())
boxes.append(boxes_all[mask_i])
mask_list.append(filter_mask)
centroids_2d.append(centroids_2d_all[mask_i])
mask_label_i += 1
top_viewpoint_ids = np.array(top_viewpoint_ids)
top_inplane_rotation_ids = np.array(top_inplane_rotation_ids)
# if print_poses:
cv2.imwrite(predicted_mask_path, overall_binary_mask)
if print_poses:
fig = plt.figure(None, (5., 5.), dpi=3000)
plt.axis("off")
plt.tight_layout()
if use_thresh == False:
grid_size = len(top_viewpoint_ids)+1
grid = ImageGrid(fig, 111,
nrows_ncols=(1, grid_size),
axes_pad=0.1,
)
grid[0].imshow(cv2.cvtColor(composite, cv2.COLOR_BGR2RGB))
grid[0].axis("off")
else:
grid_size = top_viewpoint_ids[0,:].shape[0]*top_inplane_rotation_ids[0,:].shape[0]+1
# grid_size = top_inplane_rotation_ids[0,:].shape[0]+1
grid = ImageGrid(fig, 111,
nrows_ncols=(top_viewpoint_ids.shape[0], grid_size),
axes_pad=0.1,
)
# print("Camera matrix : {}".format(self.camera_intrinsic_matrix))
# K_inv = np.linalg.inv(self.camera_intrinsic_matrix)
print("Predicted top_viewpoint_ids : {}".format(top_viewpoint_ids))
print("Predicted top_inplane_rotation_ids : {}".format(top_inplane_rotation_ids))
print("Predicted boxes : {}".format(boxes))
print("Predicted labels : {}".format(labels))
print("Predicted mask path : {}".format(predicted_mask_path))
img_list = []
annotations = []
top_model_annotations = []
depth_range = []
if use_thresh == False:
for i in range(len(top_viewpoint_ids)):
viewpoint_id = top_viewpoint_ids[i]
inplane_rotation_id = top_inplane_rotation_ids[i]
label = labels[i]
fixed_transform = self.fixed_transforms_dict[label]
theta, phi = get_viewpoint_rotations_from_id(self.viewpoints_xyz, viewpoint_id)
inplane_rotation_angle = get_inplane_rotation_from_id(self.inplane_rotations, inplane_rotation_id)
xyz_rotation_angles = [phi, theta, inplane_rotation_angle]
print("Recovered rotation : {}".format(xyz_rotation_angles))
rgb_gl, depth_gl = render_pose(
label, fixed_transform, self.camera_intrinsics, xyz_rotation_angles, [0,0,100]
)
grid[i+1].imshow(cv2.cvtColor(rgb_gl, cv2.COLOR_BGR2RGB))
grid[i+1].axis("off")
else:
grid_i = 0
for box_id in range(top_viewpoint_ids.shape[0]):
# plt.figure()
# print(mask_list[box_id])
object_depth_mask = np.copy(depth_image)
object_depth_mask[mask_list[box_id] == 0] = 0
# plt.imshow(object_depth_mask)
# plt.show()
# object_depth_mask /= self.depth_factor
# object_depth_mask = object_depth_mask.flatten()
# object_depth_mask = self.reject_outliers(object_depth_mask)
object_depth = np.mean(object_depth_mask)
min_depth = np.min(object_depth_mask[object_depth_mask > 0])/self.depth_factor
max_depth = np.max(object_depth_mask[object_depth_mask > 0])/self.depth_factor
print("Min depth :{} , Max depth : {} from mask".format(min_depth, max_depth))
object_rotation_list = []
label = labels[box_id]
if print_poses:
# plt.show()
grid[grid_i].imshow(cv2.cvtColor(composite, cv2.COLOR_BGR2RGB))
# grid[grid_i].scatter(centroids_2d[box_id][0], centroids_2d[box_id][1], s=1)
grid[grid_i].axis("off")
grid_i += 1
render_machine = self.render_machines[label]
# rendered_dir = os.path.join(self.rendered_root_dir, label)
# mkdir_if_missing(rendered_dir)
# rendered_pose_list_out = []
top_prediction_recorded = False
## For topk combine viewpoint and inplane rotations
for viewpoint_id in top_viewpoint_ids[box_id, :]:
for inplane_rotation_id in top_inplane_rotation_ids[box_id, :]:
# for viewpoint_id, inplane_rotation_id in zip(top_viewpoint_ids[box_id, :],top_inplane_rotation_ids[box_id, :]):
# fixed_transform = self.fixed_transforms_dict[label]
theta, phi = get_viewpoint_rotations_from_id(self.viewpoints_xyz, viewpoint_id)
inplane_rotation_angle = get_inplane_rotation_from_id(self.inplane_rotations, inplane_rotation_id)
xyz_rotation_angles = [phi, theta, inplane_rotation_angle]
# centroid = np.matmul(K_inv, np.array(centroids_2d[box_id].tolist() + [1]))
centroid_world_point = self.get_world_point(np.array(centroids_2d[box_id].tolist() + [object_depth]))
print("{}. Recovered rotation, centroid : {}, {}".format(grid_i, xyz_rotation_angles, centroid_world_point))
if print_poses:
rgb_gl, depth_gl = self.render_pose(
label, xyz_rotation_angles, (centroid_world_point*self.distance_scale).tolist()
)
# rotated_centeroid_2d = np.flip(np.mean(np.argwhere(rgb_gl[:,:,0] > 0), axis=0))
# shifted_centeroid_2d = centroids_2d[box_id] - (rotated_centeroid_2d - centroids_2d[box_id])
# shifted_centroid = np.matmul(K_inv, np.array(rotated_centeroid_2d.tolist() + [1]))
# print("{}. Recovered rotation, shifted centroid : {}, {}".format(grid_i, xyz_rotation_angles, rotated_centeroid_2d))
# centroid = shifted_centroid
# print("Center after rotation : {}".format())
grid[grid_i].imshow(cv2.cvtColor(rgb_gl, cv2.COLOR_BGR2RGB))
# grid[grid_i].scatter(centroids_2d[box_id][0], centroids_2d[box_id][1], s=1)
# grid[grid_i].scatter(shifted_centeroid_2d[0], shifted_centeroid_2d[1], s=1)
grid[grid_i].axis("off")
grid_i += 1
quaternion = get_xyzw_quaternion(RT_transform.euler2quat(phi, theta, inplane_rotation_angle).tolist())
if not top_prediction_recorded:
top_model_annotations.append({
'location' : (centroid_world_point*100).tolist(),
'quaternion_xyzw' : quaternion,
'category_id' : self.category_names_to_id[label],
'id' : grid_i
})
top_prediction_recorded = True
if use_centroid:
## Collect final annotations with centroid
annotations.append({
'location' : (centroid_world_point*100).tolist(),
'quaternion_xyzw' : quaternion,
'category_id' : self.category_names_to_id[label],
'id' : grid_i
})
else :
## Collect rotations only for this object
object_rotation_list.append(quaternion)
use_xy = False
if label == "010_potted_meat_can" or label == "025_mug":
resolution = 0.02
print("Using lower z resolution for smaller objects : {}".format(resolution))
else:
resolution = 0.02
print("Using higher z resolution for larger objects : {}".format(resolution))
## Create translation hypothesis for every rotation, if not using centroid
# resolution = 0.05
if use_centroid == False:
# Add predicted rotations in depth range
for _, depth in enumerate(np.arange(min_depth, max_depth, resolution)):
# for depth in (np.linspace(min_depth, max_depth+0.05, 5)):
if use_xy:
## Vary x and y in addition to depth also
x_y_min_point = self.get_world_point(np.array(boxes[box_id][:2] + [depth]))
x_y_max_point = self.get_world_point(np.array(boxes[box_id][2:] + [depth]))
# print(x_y_min_point)
# print(x_y_max_point)
for x in np.arange(x_y_min_point[0], x_y_max_point[0], resolution):
for y in np.arange(x_y_min_point[1], x_y_max_point[1], resolution):
for quaternion in object_rotation_list:
annotations.append({
'location' : [x*100, y*100, depth*100],
'quaternion_xyzw' : quaternion,
'category_id' : self.category_names_to_id[label],
'id' : grid_i
})
else:
## Vary depth only
centre_world_point = self.get_world_point(np.array(centroids_2d[box_id].tolist() + [depth]))
for quaternion in object_rotation_list:
annotations.append({
'location' : (centre_world_point*100).tolist(),
'quaternion_xyzw' : quaternion,
'category_id' : self.category_names_to_id[label],
'id' : grid_i
})
model_poses_file = None
if print_poses:
model_poses_file = '{}/model_output_{}.png'.format(model_output_dir, self.get_clean_name(image_data['file_name']))
plt.savefig(
model_poses_file,
dpi=1000, bbox_inches = 'tight', pad_inches = 0
)
plt.close(fig)
# plt.show()
return labels, annotations, model_poses_file, predicted_mask_path, top_model_annotations
def init_dope_node(self):
# if '/media/aditya/A69AFABA9AFA85D9/Cruzr/code/DOPE/catkin_ws/devel/lib/python2.7/dist-packages' not in sys.path:
# sys.path.append('/media/aditya/A69AFABA9AFA85D9/Cruzr/code/DOPE/catkin_ws/devel/lib/python2.7/dist-packages')
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
if ROS_PYTHON3_PKG_PATH not in sys.path:
sys.path.append(ROS_PYTHON3_PKG_PATH)
# These packages need to be python3 specific, cv2 is imported from environment, cv_bridge is built using python3
from dope_image import DopeNode
if '/opt/ros/kinetic/lib/python2.7/dist-packages' not in sys.path:
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
import rospy
import rospkg
import subprocess
def load_ros_param_from_file(param_file_path):
command = "rosparam load {}".format(param_file_path)
print(command)
subprocess.call(command, shell=True)
rospack = rospkg.RosPack()
dope_path = rospack.get_path('dope')
# print(dope_path)
config_1 = "{}/config/config_pose.yaml".format(dope_path)
config_2 = "{}/config/camera_info.yaml".format(dope_path)
load_ros_param_from_file(config_1)
# load_ros_param_from_file(config_2)
rospy.set_param('camera_info_url', 'package://dope/config/camera_info.yaml')
mkdir_if_missing("dope_outputs")
self.dopenode = DopeNode(fixed_transforms_dict = self.fixed_transforms_dict)
def visualize_dope_output(self, image_data):
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
output_image_filepath = os.path.join("dope_outputs", (self.get_clean_name(image_data['file_name']) + ".png"))
# annotations = self.dopenode.run_on_image(color_img_path, self.category_names_to_id, output_image_filepath)
cloud_scene = self.get_scene_cloud(image_data, 0.015)
annotations, runtime = self.dopenode.run_on_image_icp(
color_img_path, self.category_names_to_id, cloud_scene, output_image_filepath
)
return annotations, runtime
def visualize_densefusion_output(self, image_data, object_name):
sys.path.append("/media/aditya/A69AFABA9AFA85D9/Cruzr/code/DenseFusion")
from densefusion import run_densefusion_image
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
depth_img_path = self.get_depth_img_path(color_img_path)
mask_img_path = self.get_mask_img_path(color_img_path)
mask_img = np.array(Image.open(mask_img_path))
cam_int_matrix = self.camera_intrinsic_matrix
bbox = self.get_bbox(mask_img)
my_pred_wo_refine, my_pred = run_densefusion_image(color_img_path,
depth_img_path,
mask_img_path,
cam_int_matrix,
self.depth_factor,
bbox)
quat = my_pred[:4].tolist()
loc = my_pred[4:].tolist()
# xyz_rotation_angles = RT_transform.quat2euler(get_wxyz_quaternion(quat))
xyz_rotation_angles = RT_transform.quat2euler(quat)
rgb_gl, depth_gl = self.render_pose(
object_name, xyz_rotation_angles, loc
)
cv2.imwrite("test.png", rgb_gl)
return None, None
# output_image_filepath = os.path.join("dope_outputs", (self.get_clean_name(image_data['file_name']) + ".png"))
# # annotations = self.dopenode.run_on_image(color_img_path, self.category_names_to_id, output_image_filepath)
# cloud_scene = self.get_scene_cloud(image_data, 0.015)
# annotations, runtime = self.dopenode.run_on_image_icp(
# color_img_path, self.category_names_to_id, cloud_scene, output_image_filepath
# )
# return annotations, runtime
def get_model_path(self, object_name):
if self.model_type == "default":
model_path = os.path.join(self.model_dir, object_name, 'textured.ply')
elif self.model_type == "upright":
# For things like drill which need to be made upright
temp_path = os.path.join(self.model_dir, object_name, 'textured_upright.ply')
if os.path.exists(temp_path):
model_path = temp_path
else:
model_path = os.path.join(self.model_dir, object_name, 'textured.ply')
return model_path
def compare_clouds(self, annotations_1, annotations_2, downsample=False, use_add_s=True, convert_annotation_2=False, use_points_file=False):
from plyfile import PlyData, PlyElement
import scipy
from sklearn.metrics import pairwise_distances_chunked, pairwise_distances_argmin_min
result_add_dict = {}
result_add_s_dict = {}
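        # Pose-error metrics computed in this loop (both in metres):
        #   ADD   : mean L2 distance between corresponding model points under the GT and predicted transforms.
        #   ADD-S : mean distance from each GT-transformed point to its nearest predicted-transformed point
        #           (the symmetric-object variant of ADD, computed below when use_add_s is True).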
for i in range(len(annotations_2)):
annotation_2 = annotations_2[i]
# Find matching annotation from ground truth
annotation_1 = [annotations_1[j] for j in range(len(annotations_1)) if annotation_2['category_id'] == annotations_1[j]['category_id']]
# annotation_1 = annotation_1[0]
# print(annotation_1)
if len(annotation_1) == 0:
# Possible in DOPE/Mask-RCNN where wrong object may be detected
print("False positive detection, no matching object to ground truth found in scene")
# print(annotation_2)
continue
            # There might be two occurrences of the same category; take the ground truth closest to the prediction - for the 6D version
# TODO think of better ways
min_ann_dist = 10000
for ann in annotation_1:
dist = np.linalg.norm(np.array(ann['location'])-np.array(annotation_2['location']))
if dist < min_ann_dist:
min_ann_dist = dist
min_ann = ann
annotation_1 = min_ann
object_name = self.category_id_to_names[annotation_1['category_id']]['name']
# model_file_path = os.path.join(self.model_dir, object_name, "textured.ply")
model_file_path = self.get_model_path(object_name)
if self.model_type == "upright":
downsampled_cloud_path = object_name + "_upright.npy"
else:
downsampled_cloud_path = object_name + ".npy"
if not downsample or (downsample and not os.path.isfile(downsampled_cloud_path)):
                # Either downsampling is disabled, or it is enabled but no cached downsampled file exists yet
if not use_points_file:
cloud = PlyData.read(model_file_path).elements[0].data
cloud = np.transpose(np.vstack((cloud['x'], cloud['y'], cloud['z'])))
else:
model_points_file_path = os.path.join(self.model_dir, object_name, "points.xyz")
cloud = np.loadtxt(model_points_file_path)
if downsample:
# Do downsample and save file
print("Before downsammpling : {}".format(cloud.shape))
cloud = cloud.astype('float32')
pcl_cloud = pcl.PointCloud()
pcl_cloud.from_array(cloud)
sor = pcl_cloud.make_voxel_grid_filter()
sor.set_leaf_size(0.01, 0.01, 0.01)
cloud_filtered = sor.filter()
cloud = cloud_filtered.to_array()
np.save(downsampled_cloud_path, cloud)
print("After downsampling: {}".format(cloud.shape))
elif downsample and os.path.isfile(downsampled_cloud_path):
# Load downsampled directly from path to save time
cloud = np.load(downsampled_cloud_path)
cloud = np.hstack((cloud, np.ones((cloud.shape[0], 1))))
# print(cloud)
annotation_1_cat = self.category_id_to_names[annotation_1['category_id']]['name']
annotation_2_cat = self.category_id_to_names[annotation_2['category_id']]['name']
print("Locations {} {}: {}, {}".format(
annotation_1_cat, annotation_2_cat, annotation_1['location'], annotation_2['location'])
)
print("Quaternions {} {}: {}, {}".format(
annotation_1_cat, annotation_2_cat, annotation_1['quaternion_xyzw'], annotation_2['quaternion_xyzw'])
)
# Get GT transform matrix
total_transform_1 = self.get_object_pose_with_fixed_transform(
object_name, annotation_1['location'], RT_transform.quat2euler(get_wxyz_quaternion(annotation_1['quaternion_xyzw'])), 'rot',
use_fixed_transform=False
)
transformed_cloud_1 = np.matmul(total_transform_1, np.transpose(cloud))
# Get predicted transform matrix
if convert_annotation_2 == False:
# Coming directly from perch output file
total_transform_2 = annotation_2['transform_matrix']
else:
# Convert quaternion to matrix
total_transform_2 = self.get_object_pose_with_fixed_transform(
object_name, annotation_2['location'],
RT_transform.quat2euler(get_wxyz_quaternion(annotation_2['quaternion_xyzw'])), 'rot',
use_fixed_transform=False
)
transformed_cloud_2 = np.matmul(total_transform_2, np.transpose(cloud))
# Mean of corresponding points
mean_dist = np.linalg.norm(transformed_cloud_1-transformed_cloud_2, axis=0)
# print(mean_dist.shape)
mean_dist_add = np.sum(mean_dist)/cloud.shape[0]
print("Average pose distance - ADD (in m) : {}".format(mean_dist_add))
result_add_dict[object_name] = mean_dist_add
# if self.symmetry_info[annotation_1_cat] == 2 or use_add_s:
if use_add_s:
# Do ADD-S for symmetric objects or every object if true
transformed_cloud_1 = np.transpose(transformed_cloud_1)
transformed_cloud_2 = np.transpose(transformed_cloud_2)
                # For the function below, the matrix must be samples x features
pairwise_distances = pairwise_distances_argmin_min(
transformed_cloud_1, transformed_cloud_2, metric='euclidean', metric_kwargs={'n_jobs':6}
)
# Mean of nearest points
mean_dist_add_s = np.mean(pairwise_distances[1])
print("Average pose distance - ADD-S (in m) : {}".format(mean_dist_add_s))
result_add_s_dict[object_name] = mean_dist_add_s
return result_add_dict, result_add_s_dict
# scaling_transform = np.zeros((4,4))
# scaling_transform[3,3] = 1
# scaling_transform[0,0] = 0.0275
# scaling_transform[1,1] = 0.0275
# scaling_transform[2,2] = 0.0275
# scaling_transform_flip = np.copy(scaling_transform)
# scaling_transform_flip[2,2] = -0.0275
# total_transform_1 = np.matmul(total_transform_1, scaling_transform_flip)
# scaling_transform = annotation_2['preprocessing_transform_matrix']
# scaling_transform[2,3] = 0
# total_transform_1 = np.matmul(total_transform_1, scaling_transform)
# print(total_transform_1)
# transformed_cloud_1 = np.matmul(cloud, total_transform_1)
# print(transformed_cloud_1)
# transformed_cloud_1 = np.divide(transformed_cloud_1[:,:3], transformed_cloud_1[:,3])
# transformed_cloud_1 = transformed_cloud_1[:,:3]/l[:, np.newaxis]
# print(cloud)
# print(transformed_cloud_1)
# total_transform_2 = self.get_object_pose_with_fixed_transform(
# object_name, annotation_2['location'], RT_transform.quat2euler(get_wxyz_quaternion(annotation_2['quaternion_xyzw'])), 'rot',
# use_fixed_transform=False
# )
# total_transform_2 = np.matmul(total_transform_2, scaling_transform)
# transformed_cloud_2 = np.matmul(cloud, total_transform_2)
# transformed_cloud_2 = transformed_cloud_2[:,:3]/l[:, np.newaxis]
# print(transformed_cloud_2)
# import torch
# from torch.autograd import Variable
# transformed_cloud_1 = torch.tensor(np.transpose(transformed_cloud_1)[:,:3]).cuda()
# transformed_cloud_2 = torch.tensor(np.transpose(transformed_cloud_2)[:,:3]).cuda()
# print(transformed_cloud_1)
# print(transformed_cloud_2)
# def similarity_matrix(x, y):
# # get the product x * y
# # here, y = x.t()
# r = torch.mm(x, y.t())
# # get the diagonal elements
# diag = r.diag().unsqueeze(0)
# diag = diag.expand_as(r)
# # compute the distance matrix
# D = diag + diag.t() - 2*r
# return D.sqrt()
# def row_pairwise_distances(x, y=None, dist_mat=None):
# if y is None:
# y = x
# if dist_mat is None:
# dtype = x.data.type()
# dist_mat = Variable(torch.Tensor(x.size()[0]).type(dtype))
# for i, row in enumerate(x.split(1)):
# r_v = row.expand_as(y)
# sq_dist = torch.sum((r_v - y) ** 2, 1)
# dist_mat[i] = torch.min(sq_dist.view(1, -1))
# return dist_mat
# print(similarity_matrix(transformed_cloud_1, transformed_cloud_2))
# torch.cdist(transformed_cloud_1, transformed_cloud_2)
# pdist = torch.nn.PairwiseDistance(p=2)
# pair_dist = pdist(transformed_cloud_1, transformed_cloud_2)
# print(pair_dist.shape)
# set_config(working_memory=4000)
# pairwise_distances = scipy.spatial.distance.cdist(transformed_cloud_1, transformed_cloud_2, metric='euclidean')
# pairwise_distances = pairwise_distances_chunked(transformed_cloud_1, transformed_cloud_2, metric='euclidean')
# min_point_indexes = []
# temp = [0]
# while len(temp) > 0:
# min_point_indexes = next(pairwise_distances)
# min_point_indexes.append(temp)
# print(min_point_indexes)
# print(transformed_cloud_2.shape)
# print(len(min_point_indexes))
# while len(next(pairwise_distances)) > 0:
# continue
# print(mean_dist)
#/cloud.shape[0]
def analyze_maskrcnn_results(self, coco_results_file):
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import torch
coco_predictions = torch.load(coco_results_file).results
print(coco_predictions['bbox'])
for key, value in coco_predictions['bbox'].items():
try:
print("{},{},{},{},{},{},{}".format(
self.category_id_to_names[int(key)]['name'],
value['AP'],
value['AP50'],
value['AP75'],
value['APs'],
value['APm'],
value['APl'],
))
except ValueError:
print(key, value)
continue
for key, value in coco_predictions['segm'].items():
try:
print("{},{},{},{},{},{},{}".format(
self.category_id_to_names[int(key)]['name'],
value['AP'],
value['AP50'],
value['AP75'],
value['APs'],
value['APm'],
value['APl'],
))
except ValueError:
print(key, value)
continue
def reduce_func(D_chunk, start):
neigh = [np.argmin(d).flatten() for i, d in enumerate(D_chunk, start)]
return neigh
def run_6d():
image_directory = '/media/aditya/A69AFABA9AFA85D9/Datasets/fat/mixed/extra'
# annotation_file = '/media/aditya/A69AFABA9AFA85D9/Datasets/fat/mixed/extra/instances_fat_train_pose_2018.json'
# annotation_file = '/media/aditya/A69AFABA9AFA85D9/Datasets/fat/mixed/extra/instances_fat_val_pose_2018.json'
# annotation_file = '/media/aditya/A69AFABA9AFA85D9/Datasets/fat/mixed/extra/instances_fat_train_pose_6_obj_2018.json'
annotation_file = '/media/aditya/A69AFABA9AFA85D9/Datasets/fat/mixed/extra/instances_fat_val_pose_6_obj_2018.json'
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=10000,
model_dir='/media/aditya/A69AFABA9AFA85D9/Datasets/YCB_Video_Dataset/aligned_cm',
model_mesh_in_mm=False,
model_mesh_scaling_factor=1,
models_flipped=False
)
# Running on model and PERCH
mkdir_if_missing('model_outputs')
fat_image.init_model()
ts = calendar.timegm(time.gmtime())
f_accuracy = open('model_outputs/accuracy_6d_{}.txt'.format(ts), "w")
f_runtime = open('model_outputs/runtime_6d_{}.txt'.format(ts), "w")
f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
#filter_objects = ['010_potted_meat_can']
# filter_objects = ['003_cracker_box']
filter_objects = None
required_objects = fat_image.category_names
f_accuracy.write("name ")
for object_name in required_objects:
f_accuracy.write("{}-add {}-adds ".format(object_name, object_name))
f_accuracy.write("\n")
    # couldn't find a solution - 14 - occlusion is not possible to solve I think, 152 has high occlusion and 4 objects
skip_list = ['kitchen_4/000006.left.jpg', 'kitchen_4/000014.left.jpg', 'kitchen_4/000169.left.jpg', 'kitchen_4/000177.left.jpg']
# 120 has some bug
# for img_i in range(0,100):
# for img_i in range(100,150):
# for img_i in range(155,177):
#for img_i in list(range(0,100)) + list(range(100,120)) + list(range(155,177)):
# for img_i in [138,142,153,163, 166, 349]:
# for img_i in [0]:
for img_i in range(0,1):
# Get Image
image_name = 'kitchen_4/00{}.left.jpg'.format(str(img_i).zfill(4))
if image_name in skip_list:
continue
# image_data, annotations = fat_image.get_random_image(name='{}_16k/kitchen_4/000005.left.jpg'.format(category_name))
image_data, annotations = fat_image.get_random_image(
name=image_name, required_objects=required_objects
)
# Skip if required image or image name is not in dataset
if image_data is None or annotations is None:
continue
        # Only process an image if it contains a filter object, but still do all objects in the scene
if filter_objects is not None:
found_filter_object = False
for anno in annotations:
if fat_image.category_id_to_names[anno['category_id']]['name'] in filter_objects:
found_filter_object = True
if found_filter_object == False:
continue
# print(found_filter_object)
# continue
# TODO
# restrict segmentation - done
# move in x,y in hypothesis - this will help in cases where pose needs to be moved up and down in camera
# try all possible combinations of rotation and viewpoint, increase topk number
# reduce viewpoint number in training
# icp only on pixels of that object
# ratios in losses
# reduce confidence
        # try with discretization in ssd paper - done
# in 11,12,21 pose is right but not at right distance from camera - source cost was not getting included
# lower epsilon - done
# use in plane from perch
# train for more iterations
# try without normalize - done - not good
# try with lazy - done
# why is centroid in 2d - calculate centroid of mask. check if centroid of generated poses is actually at object center, 11, 12 - done
# use more rotations for non symmetric objects + 3cm for big and 2 cm for small
        # try without depth translation because the means of the rendered and observed clouds should match
        # the centroid alignment doesn't work if the object is not fully inside the camera - kitchen 150s
# issue when objects are too close together
# Visualize ground truth in ros
# yaw_only_objects, max_min_dict_gt, transformed_annotations = fat_image.visualize_pose_ros(
# image_data, annotations, frame='camera', camera_optical_frame=False, num_publish=1, write_poses=False, ros_publish=True
# )
# Run model to get multiple poses for each object
labels, model_annotations, model_poses_file, predicted_mask_path, top_model_annotations = \
fat_image.visualize_model_output(image_data, use_thresh=True, use_centroid=False, print_poses=True)
if True:
# Convert model output poses to table frame and save them to file so that they can be read by perch
_, max_min_dict, _ = fat_image.visualize_pose_ros(
# image_data, model_annotations, frame='table', camera_optical_frame=False, num_publish=1, write_poses=True, ros_publish=False
image_data, model_annotations, frame='camera', camera_optical_frame=False, num_publish=1, write_poses=True, ros_publish=False,
)
# Run perch/ICP on written poses
run_perch = True
if run_perch:
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, model_annotations, max_min_dict, frame='camera',
# use_external_render=0, required_object=[labels[1]],
use_external_render=0, required_object=labels,
camera_optical_frame=False, use_external_pose_list=1,
# model_poses_file=model_poses_file, use_centroid_shifting=0,
model_poses_file=model_poses_file, use_centroid_shifting=1,
predicted_mask_path=predicted_mask_path
)
else:
perch_annotations = top_model_annotations
stats = None
f_accuracy.write("{},".format(image_data['file_name']))
if perch_annotations is not None:
# # # Compare Poses by applying to model and computing distance
add_dict, add_s_dict = fat_image.compare_clouds(annotations, perch_annotations, use_add_s=True, convert_annotation_2=not run_perch)
if add_dict is not None and add_s_dict is not None:
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
if stats is not None:
f_runtime.write("{} {} {}".format(image_data['file_name'], stats['expands'], stats['runtime']))
f_accuracy.write("\n")
f_runtime.write("\n")
f_runtime.close()
f_accuracy.close()
def run_roman_crate(dataset_cfg=None):
image_directory = dataset_cfg['image_dir']
annotation_file = dataset_cfg['image_dir'] + '/instances_newmap1_roman_2018.json'
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=dataset_cfg['model_dir'],
model_mesh_in_mm=True,
# model_mesh_scaling_factor=0.005,
model_mesh_scaling_factor=1,
models_flipped=False,
img_width=960,
img_height=540,
distance_scale=100,
env_config="roman_env_config.yaml",
planner_config="roman_planner_config.yaml",
perch_debug_dir=dataset_cfg["perch_debug_dir"],
python_debug_dir=dataset_cfg["python_debug_dir"],
dataset_type=dataset_cfg["type"]
)
f_runtime = open('runtime.txt', "w", 1)
f_accuracy = open('accuracy.txt', "w", 1)
f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
required_objects = ['crate_test']
f_accuracy.write("name,")
for object_name in required_objects:
f_accuracy.write("{},".format(object_name))
f_accuracy.write("\n")
for img_i in range(0,16):
# for img_i in [16, 17, 19, 22]:
# required_objects = ['coke']
image_name = 'NewMap1_roman/0000{}.left.png'.format(str(img_i).zfill(2))
image_data, annotations = fat_image.get_random_image(name=image_name, required_objects=required_objects)
        # In case of the crate it's hard to get the camera pose sometimes as the ground is not visible (RANSAC plane estimation will fail)
# So get camera pose from an image where ground is visible and use that
# camera_pose_m = np.array([[0.757996, -0.00567911, 0.652234, -0.779052],
# [0.00430481, 0.999984, 0.00370417, -0.115213],
# [-0.652245, 1.32609e-16, 0.758009, 0.66139],
# [0, 0, 0, 1]])
camera_pose = {
'location_worldframe': np.array([-77.90518933, -11.52125029, 66.13899833]),
'quaternion_xyzw_worldframe': [-0.6445207366760153, 0.6408707673682607, -0.29401548348464, 0.2956899981377745]
}
# Camera pose goes here to get GT in world frame for accuracy computation
yaw_only_objects, max_min_dict, transformed_annotations, _ = \
fat_image.visualize_pose_ros(
image_data, annotations, frame='table', camera_optical_frame=False,
input_camera_pose=camera_pose
)
# max_min_dict['ymax'] = 1
# max_min_dict['ymin'] = -1
# max_min_dict['xmax'] = 0.5
# max_min_dict['xmin'] = -1
max_min_dict['ymax'] = 0.85
max_min_dict['ymin'] = -0.85
max_min_dict['xmax'] = 0.5
max_min_dict['xmin'] = -0.5
fat_image.search_resolution_translation = 0.08
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, annotations, max_min_dict, frame='table',
use_external_render=0, required_object=required_objects,
camera_optical_frame=False, use_external_pose_list=0, gt_annotations=transformed_annotations,
input_camera_pose=camera_pose, table_height=0.006, num_cores=8,
compute_type=2
)
# print(perch_annotations)
# print(transformed_annotations)
f_accuracy.write("{},".format(image_data['file_name']))
add_dict, add_s_dict = fat_image.compare_clouds(transformed_annotations, perch_annotations, downsample=True, use_add_s=True)
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
f_accuracy.write("\n")
f_runtime.write("{} {} {}\n".format(image_name, stats['rendered'], stats['runtime']))
f_runtime.close()
def analyze_roman_results(config=None):
import pandas as pd
dataset_cfg = config['dataset']
for device, analysis_cfg in config['analysis']['device'].items():
overall_stats_dict = {}
# Object wise metrics
print("\n### Object Wise AUC ###")
li = []
for accuracy_file in analysis_cfg['result_files']['accuracy']:
# Read file for every object
print("Accuracy file : {}".format(accuracy_file))
df = pd.read_csv(accuracy_file,
header=None,
index_col=None,
names=["filename", "add", "add-s", "blank"],
skiprows=1,
sep=",")
df = df.drop(columns=["add", "blank"])
df = df.set_index('filename')
add_s = np.copy(df['add-s'].to_numpy())
stats = compute_pose_metrics(add_s)
print("AUC : {}, Pose Percentage : {}, Mean ADD-S : {}".format(
stats['auc'], stats['pose_error_less_perc'], stats['mean_pose_error']))
li.append(df)
overall_stats_dict[get_filename_from_path(accuracy_file)] = stats
# Overall Metrics
# print("Dataframe with add-s")
df_acc = | pd.concat(li, axis=0, ignore_index=False) | pandas.concat |
import numpy as np
import pandas as pd
import os.path
from glob import glob
import scipy.stats as ss
from sklearn.metrics import r2_score, roc_auc_score, average_precision_score
COLORS = {
'orange': '#f0593e',
'dark_red': '#7c2712',
'red': '#ed1d25',
'yellow': '#ed9f22',
'light_green': '#67bec5',
'dark_green': '#018a84',
'light_blue': '#00abe5',
'dark_blue': '#01526e',
'grey': '#a8a8a8'}
DINUCLEOTIDES = {
'AA': 'AA/TT', 'AC': 'AC/GT', 'AG': 'AG/CT',
'CA': 'CA/TG', 'CC': 'CC/GG', 'GA': 'GA/TC'
}
def parse_classifier_stats(dirpath, algorithm, feat_types, sys2com_dict=None):
out_df = pd.DataFrame(
columns=['tf', 'chance', 'feat_type', 'cv', 'auroc', 'auprc'])
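    # Assumed directory layout, inferred from the globs below:
    #   {dirpath}/{feat_type}/{algorithm}/{tf}/stats.csv*  and  .../preds.csv*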
for feat_type in feat_types:
print('... working on', feat_type)
subdirs = glob('{}/{}/{}/*'.format(dirpath, feat_type, algorithm))
for subdir in subdirs:
tf = os.path.basename(subdir)
filename = glob('{}/stats.csv*'.format(subdir))[0]
stats_df = pd.read_csv(filename)
filename = glob('{}/preds.csv*'.format(subdir))[0]
preds_df = pd.read_csv(filename)
stats_df['feat_type'] = feat_type
if sys2com_dict is not None:
tf_com = sys2com_dict[tf] if tf in sys2com_dict else tf
stats_df['tf'] = '{} ({})'.format(tf, tf_com)
stats_df['tf_com'] = tf_com
else:
stats_df['tf'] = tf
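            # 'chance' = fraction of positive labels, i.e. the prevalence baseline for AUPRC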
stats_df['chance'] = np.sum(preds_df['label'] == 1) / preds_df.shape[0]
out_df = out_df.append(stats_df, ignore_index=True)
return out_df
def compare_model_stats(df, metric, comp_groups):
stats_df = | pd.DataFrame(columns=['tf', 'comp_group', 'p_score']) | pandas.DataFrame |
import pandas as pd
from sklearn.base import BaseEstimator
import numpy as np
import warnings
class TopCause(BaseEstimator):
'''TopCause finds the single largest action to improve a performance metric.
Parameters
----------
max_p : float
maximum allowed probability of error (default: 0.05)
percentile : float
ignore high-performing outliers beyond this percentile (default: 0.95)
min_weight : int
minimum samples in a group. Drop groups with fewer (default: 3)
Returns
-------
result_ : DataFrame
rows = features evaluated
columns:
value: best value for this feature,
gain: improvement in y if feature = value
p: probability that this feature does not impact y
type: how this feature's impact was calculated (e.g. `num` or `cat`)
'''
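    # Minimal usage sketch (hypothetical column names and values, for illustration only):
    #   X = pd.DataFrame({'channel': ['web', 'store', 'web', 'store'], 'discount': [0.1, 0.0, 0.2, 0.0]})
    #   y = pd.Series([12.0, 4.0, 15.0, 5.0])
    #   model = TopCause().fit(X, y)
    #   model.result_   # per-feature best value, expected gain, p-value and type (see docstring above)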
def __init__(
self,
max_p: float = 0.05,
percentile: float = 0.95,
min_weight: float = None,
):
self.min_weight = min_weight
self.max_p = max_p
self.percentile = percentile
def fit(self, X, y, sample_weight=None): # noqa - capital X is a sklearn convention
'''Returns the top causes of y from among X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
n_samples = rows = number of observations.
n_features = columns = number of drivers/causes.
        y : array-like of shape (n_samples)
Returns
-------
self : object
Returns the instance itself.
'''
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X) # noqa: N806 X can be in uppercase
if not isinstance(y, pd.Series):
y = pd.Series(y)
if X.shape[0] != y.shape[0]:
raise ValueError(f'X has {X.shape[0]} rows, but y has {y.shape[0]} rows')
        # If values contain ±Inf, treat them as NaN
with pd.option_context('mode.use_inf_as_na', True):
            # If sample weights are not given, treat each row's weight as 1.
# If sample weights are NaN, treat it as 0.
if sample_weight is None:
sample_weight = y.notnull().astype(int)
# If no weights are specified, each category must have at least 3 rows
min_weight = 3 if self.min_weight is None else self.min_weight
elif not isinstance(sample_weight, pd.Series):
sample_weight = pd.Series(sample_weight)
            sample_weight = sample_weight.fillna(0)
# Calculate summary stats
n = sample_weight.sum()
weighted_y = y * sample_weight
mean = weighted_y.sum() / n
var = ((y - mean)**2 * sample_weight).sum() / n
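            # Weighted mean and (biased, population) weighted variance of y; var/n is reused in Welch's t-test below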
# Calculate impact for every column consistently
results = {}
for column, series in X.items():
# Ignore columns identical to y
if (series == y).all():
                    warnings.warn(f'column {column}: skipped. Identical to y')
                    continue
# Process column as NUMERIC, ORDERED CATEGORICAL or CATEGORICAL based on dtype
# https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html
kind = series.dtype.kind
# By default, assume that this column can't impact y
result = results[column] = {
'value': np.nan,
'gain': np.nan,
'p': 1.0,
'type': kind
}
# ORDERED CATEGORICAL if kind is signed or unsigned int
                # TODO: Currently, it's treated as numeric. Fix this based on the # of distinct ints.
if kind in 'iu':
series = series.astype(float)
kind = 'f'
# NUMERIC if kind is float
if kind in 'f':
# Drop missing values, pairwise
pair = pd.DataFrame({'values': series, 'weight': sample_weight, 'y': y})
pair.dropna(inplace=True)
# Run linear regression to see if y increases/decreases with column
# TODO: use weighted regression
from scipy.stats import linregress
reg = linregress(pair['values'], pair['y'])
# If slope is +ve, pick value at the 95th percentile
# If slope is -ve, pick value at the 5th percentile
pair = pair.sort_values('values', ascending=True)
top = np.interp(
self.percentile if reg.slope >= 0 else 1 - self.percentile,
pair['weight'].cumsum() / pair['weight'].sum(),
pair['values'])
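                    # np.interp over the cumulative-weight fraction gives a weighted percentile of 'values'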
# Predict the gain based on linear regression
gain = reg.slope * top + reg.intercept - mean
if gain > 0:
result.update(value=top, gain=gain, p=reg.pvalue, type='num')
# CATEGORICAL if kind is boolean, object, str or unicode
elif kind in 'bOSU':
# Group into a DataFrame with 3 columns {value, weight, mean}
# value: Each row has every unique value in the column
# weight: Sum of sample_weights in each group
# mean: mean(y) in each group, weighted by sample_weights
group = pd.DataFrame({
'values': series,
'weight': sample_weight,
'weighted_y': weighted_y
}).dropna().groupby('values', sort=False).sum()
group['mean'] = group['weighted_y'] / group['weight']
# Pick the groups with highest mean(y), at >=95th percentile (or whatever).
# Ensure each group has at least min_weight samples.
group.sort_values('mean', inplace=True, ascending=True)
best_values = group.dropna(subset=['mean'])[
(group['weight'].cumsum() / group['weight'].sum() >= self.percentile) &
(group['weight'] >= min_weight)]
# If there's at least 1 group over 95th percentile with enough weights...
if len(best_values):
# X[series == top] is the largest group (by weights) above the 95th pc
top = best_values.sort_values('weight').iloc[-1]
gain = top['mean'] - mean
# Only consider positive gains
if gain > 0:
# Calculate p value using Welch test: scipy.stats.mstats.ttest_ind()
# https://en.wikipedia.org/wiki/Welch%27s_t-test
# github.com/scipy/scipy/blob/v1.5.4/scipy/stats/mstats_basic.py
subset = series == top.name
subseries = y[subset]
submean, subn = subseries.mean(), sample_weight[subset].sum()
with np.errstate(divide='ignore', invalid='ignore'):
diff = subseries - submean
vn1 = (diff**2 * sample_weight[subset]).sum() / subn
vn2 = var / n
df = (vn1 + vn2)**2 / (vn1**2 / (subn - 1) + vn2**2 / (n - 1))
df = 1 if np.isnan(df) else df
with np.errstate(divide='ignore', invalid='ignore'):
t = gain / (vn1 + vn2)**0.5
import scipy.special as special
p = special.betainc(0.5 * df, 0.5, df / (df + t * t))
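                            # Two-sided p-value of the Welch t statistic via the regularized incomplete beta function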
# Update the result
result.update(value=top.name, gain=gain, p=p, type='cat')
# WARN if kind is complex, timestamp, datetime, etc
else:
warnings.warn(f'column {column}: unknown type {kind}')
results = | pd.DataFrame(results) | pandas.DataFrame |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
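    # Custom name_func below maps a file name like 'CME-FVZ2014.csv' to '2014FVZ'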
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([( | TS('2015-01-03') | pandas.Timestamp |
from __future__ import absolute_import, division, print_function
import datetime
import pandas as pd
from config import *
def _drop_in_time_slice(m2m, m2b, m5cb, time_slice, to_drop):
"""Drops certain members from data structures, only in a given time slice.
This can be useful for removing people who weren't there on a specific day, or non-participants.
"""
logger.debug("Removing data: {} {}".format(time_slice, to_drop))
m2m.drop(m2m.loc[(time_slice, slice(None), to_drop), :].index, inplace=True)
m2m.drop(m2m.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m2b.drop(m2b.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m5cb.drop(m5cb.loc[(time_slice, to_drop), :].index, inplace=True)
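# Example call (hypothetical member ID): drop member 'MEMBER_42' only on 2016-01-12:
#   _drop_in_time_slice(m2m, m2b, m5cb, '2016-01-12', ['MEMBER_42'])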
def _clean_m2m(where, participation_dates, battery_sundays):
logger.info('loading m2m')
m2m = pd.read_hdf(dirty_store_path, 'proximity/member_to_member', where=where)
logger.info("original m2m len: {}".format(len(m2m)))
if len(m2m) == 0:
return
logger.info('cleaning m2m')
m2m.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m2m['keep'] = False
# For m2m, we need to look on both sides. Therefore, for each participating member, we will
# turn on a "keep" flag if the member is valid on either sides of the connection. Then, we will only keep
# records in which both sides are valid
    logger.info('Keeping only relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m2m.member1 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side1_cond, 'keep_1'] = True
side2_cond = ((m2m.member2 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side2_cond, 'keep_2'] = True
m2m.loc[(m2m.keep_1 == True) & (m2m.keep_2 == True), 'keep'] = True
del m2m['keep_1']
del m2m['keep_2']
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m2m.datetime >= s.battery_period_start) & (m2m.datetime <= s.battery_period_end))
m2m.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
m2m = m2m[m2m.keep == True]
logger.info("after cleaning: {}".format(len(m2m)))
del m2m['keep']
m2m.set_index(['datetime','member1','member2'], inplace=True)
logger.info("appending cleaned m2m to {}".format(clean_store_path))
with pd.HDFStore(clean_store_path) as store:
store.append('proximity/member_to_member', m2m)
del m2m
def _clean_m2b(where, participation_dates, battery_sundays):
logger.info('loading m2b')
m2b = pd.read_hdf(dirty_store_path, 'proximity/member_to_beacon', where=where)
logger.info("original m2b len: {}".format(len(m2b)))
if len(m2b) == 0:
return
logger.info("cleaning m2b")
m2b.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m2b['keep'] = False
# Only keep data within participation dates
    logger.info('Keeping only relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m2b.member == p.member) & (m2b.datetime >= p.start_date_ts) & (m2b.datetime < p.end_date_ts))
m2b.loc[side1_cond, 'keep'] = True
logger.info('So far, keeping {} rows'.format(len(m2b[m2b['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m2b.datetime >= s.battery_period_start) & (m2b.datetime <= s.battery_period_end))
m2b.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m2b[m2b['keep'] == True])))
m2b = m2b[m2b.keep == True]
logger.info("after cleaning: {}".format(len(m2b)))
del m2b['keep']
m2b.set_index(['datetime','member','beacon'], inplace=True)
logger.info("appending cleaned m2b to {}".format(clean_store_path))
with pd.HDFStore(clean_store_path) as store:
store.append('proximity/member_to_beacon', m2b)
del m2b
def _clean_m5cb(where, participation_dates, battery_sundays):
    logger.info('loading m5cb')
m5cb = pd.read_hdf(dirty_store_path, 'proximity/member_5_closest_beacons', where=where)
logger.info("original m2b len: {}".format(len(m5cb)))
if len(m5cb) == 0:
return
logger.info("cleaning m2b")
m5cb.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m5cb['keep'] = False
# Only keep data within participation dates
    logger.info('Keeping only relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m5cb.member == p.member) & (m5cb.datetime >= p.start_date_ts) & (m5cb.datetime < p.end_date_ts))
m5cb.loc[side1_cond, 'keep'] = True
logger.info('So far, keeping {} rows'.format(len(m5cb[m5cb['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m5cb.datetime >= s.battery_period_start) & (m5cb.datetime <= s.battery_period_end))
m5cb.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m5cb[m5cb['keep'] == True])))
m5cb = m5cb[m5cb.keep == True]
logger.info("after cleaning: {}".format(len(m5cb)))
del m5cb['keep']
m5cb.set_index(['datetime', 'member'], inplace=True)
logger.info("appending cleaned m5cb to {}".format(clean_store_path))
with pd.HDFStore(clean_store_path) as store:
store.append('proximity/member_5_closest_beacons', m5cb)
del m5cb
def _clean_date_range(start_ts, end_ts, members_metadata):
"""
Clean a given date range for all relevant dataframes
"""
##################################################
# figure out what to drop and what to keep
##################################################
where = "datetime >= '" + str(start_ts) + "' & datetime < '" + str(end_ts) + "'"
# Convert text into timestamps with timezone
period1_start_ts = pd.Timestamp(period1_start, tz=time_zone)
period2_end_ts = pd.Timestamp(period2_end, tz=time_zone)
# Start and end dates for participants
participation_dates = members_metadata[members_metadata['participates'] == 1][['member', 'start_date', 'end_date']]
participation_dates['start_date_ts'] = pd.to_datetime(participation_dates['start_date']).dt.tz_localize(time_zone)
participation_dates['end_date_ts'] = pd.to_datetime(participation_dates['end_date']).dt.tz_localize(time_zone)
del participation_dates['start_date']
del participation_dates['end_date']
# Remove times of battery changes - Sundays between 7:30pm and 11:30pm
# create a list of dates, and choose only sundays
battery_sundays = pd.date_range(period1_start_ts, period2_end_ts, freq='1D', normalize=True). \
to_series(keep_tz=True).to_frame(name="su")
battery_sundays = battery_sundays[battery_sundays.su.dt.dayofweek == 6]
battery_sundays['battery_period_start'] = battery_sundays.su + pd.Timedelta(hours=19, minutes=30)
battery_sundays['battery_period_end'] = battery_sundays.su + pd.Timedelta(hours=23, minutes=30)
##################################################
# Clean
##################################################
logger.info('---------------------------------------')
_clean_m2m(where, participation_dates, battery_sundays)
logger.info('---------------------------------------')
_clean_m2b(where, participation_dates, battery_sundays)
logger.info('---------------------------------------')
_clean_m5cb(where, participation_dates, battery_sundays)
def clean_up_data():
# remove dirty data if already there
try:
os.remove(clean_store_path)
except OSError:
pass
logger.info("Cleaning up the data")
members_metadata = pd.read_csv(members_metadata_path)
members_metadata = members_metadata[members_metadata['member_id'].notnull()]
##################################################
# Create list of dates to process
##################################################
# Convert text into timestamps with timezone
period1_start_ts = pd.Timestamp(period1_start, tz=time_zone)
period1_end_ts = | pd.Timestamp(period1_end, tz=time_zone) | pandas.Timestamp |
"""
Info about all of noaa data can be found at:
http://www.ndbc.noaa.gov/docs/ndbc_web_data_guide.pdf
What all the values mean:
http://www.ndbc.noaa.gov/measdes.shtml
WDIR Wind direction (degrees clockwise from true N).
WSPD Wind speed (m/s) averaged over an eight-minute period.
GST Peak 5 or 8 second gust speed (m/s).
WVHT Significant wave height (meters) is calculated as
the average of the highest one-third of all of the
wave heights during the 20-minute sampling period.
DPD Dominant wave period (seconds) is the period with the maximum wave energy.
APD Average wave period (seconds) of all waves during the 20-minute period.
MWD The direction from which the waves at the dominant period (DPD) are coming
(degrees clockwise from true N).
PRES Sea level pressure (hPa).
ATMP Air temperature (Celsius).
WTMP Sea surface temperature (Celsius).
DEWP Dewpoint temperature.
VIS Station visibility (nautical miles).
PTDY Pressure Tendency.
TIDE The water level in feet above or below Mean Lower Low Water (MLLW).
"""
import pandas as pd
class NDBC():
def __init__(self, buoy):
"""
buoy : int
Buoy number from ndbc.
"""
self.buoy = buoy
def get_data(self, date):
"""
Parameters
----------
date : str
Form: "year-month-day/to/year-month-day"
Gives you data from this time period.
Returns
-------
df : pandas dataframe
Containing the data.
"""
year_start = int(date[0:4])
month_start = date[5:7]
day_start = date[8:10]
year_stop = int(date[14:18])
month_stop = date[19:21]
day_stop = date[22:24]
year_range = (year_start, year_stop)
df = self.get_year_range(year_range)
while str(df.index[0])[0:10] != str(year_start) + "-" + str(month_start) + "-" + str(day_start):
df = df.drop(df.index[0], axis=0)
while str(df.index[-1])[0:10] != str(year_stop) + "-" + str(month_stop) + "-" + str(day_stop):
df = df.drop(df.index[-1], axis=0)
return df
def get_year(self, year):
"""
Parameters
----------
year : int
Year from which the data comes from.
Returns
-------
df : pandas dataframe
Containing the data.
"""
link = 'http://www.ndbc.noaa.gov/view_text_file.php?filename='
link += '{}h{}.txt.gz&dir=data/historical/'.format(self.buoy, year)
link = link + 'stdmet/'
try:
df = pd.read_csv(link, header=0, delim_whitespace=True, dtype=object, na_values={99, 999, 9999, 99., 999.,
9999.})
except:
return Warning(print('You are trying to get data that does not exists or is not usable from buoy: '
+ str(self.buoy) + ' in year ' + str(year)
+ '. Please try a different year or year range without year: ' + str(year)))
# 2007 and on format.
if df.iloc[0, 0] == '#yr':
df = df.rename(columns={'#YY': 'YY'}) # Get rid of hash.
# Make the indices.
df.drop(0, inplace=True) # First row is units, so drop them.
d = df.YY + ' ' + df.MM + ' ' + df.DD + ' ' + df.hh + ' ' + df.mm
ind = pd.to_datetime(d, format="%Y %m %d %H %M")
df.index = ind
# Drop useless columns and rename the ones we want.
df.drop(['YY', 'MM', 'DD', 'hh', 'mm'], axis=1, inplace=True)
df.columns = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD', 'APD', 'MWD',
'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE']
        # Format used before 2007 (data from 2000 through 2006).
else:
date_str = df.YYYY + ' ' + df.MM + ' ' + df.DD + ' ' + df.hh
ind = pd.to_datetime(date_str, format="%Y %m %d %H")
df.index = ind
# Some data has a minute column. Some doesn't.
if 'mm' in df.columns:
df.drop(['YYYY', 'MM', 'DD', 'hh', 'mm'], axis=1, inplace=True)
else:
df.drop(['YYYY', 'MM', 'DD', 'hh'], axis=1, inplace=True)
df.columns = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD', 'APD',
'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE']
# All data should be floats.
df = df.astype('float')
return df
def get_year_range(self, year_range):
"""
Parameters
----------
year_range : tuple
From year to year.
Returns
-------
df : pandas dataframe
Contains all the data from all the years that were specified
in year_range.
"""
start, stop = year_range
df = | pd.DataFrame() | pandas.DataFrame |